hip_filename
stringlengths 5
84
| hip_content
stringlengths 79
9.69M
| cuda_filename
stringlengths 4
83
| cuda_content
stringlengths 19
9.69M
|
---|---|---|---|
00f76541a149c40b48201c6d4e62802f94867ac3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void relabel2Kernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
i = i * colsComponents + j;
i = i + (colsComponents * frameRows * idx);
if (components[i] == previousLabel) {
components[i] = newLabel;
}
} | 00f76541a149c40b48201c6d4e62802f94867ac3.cu | #include "includes.h"
__global__ void relabel2Kernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
i = i * colsComponents + j;
i = i + (colsComponents * frameRows * idx);
if (components[i] == previousLabel) {
components[i] = newLabel;
}
} |
37b43546092a9ef9e7f58816a0bda89b3353ffd1.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <hipsparse.h>
#include <hip/hip_runtime.h>
#include <mkl.h>
#include <mkl_spblas.h>
#include "loadMatrixMarket.h"
//#define FLOAT
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(hipError_t stat, const char *file, int line) {
if (stat != hipSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", hipGetErrorString(stat), file, line);
}
}
#define cusparseErrCheck(stat) { cusparseErrCheck_((stat), __FILE__, __LINE__); }
void cusparseErrCheck_(hipsparseStatus_t stat, const char *file, int line) {
if (stat != HIPSPARSE_STATUS_SUCCESS) {
fprintf(stderr, "CUSPARSE Error: %d %s %d\n", stat, file, line);
}
}
int main(int argc, char **argv)
{
if (argc < 2) {
fprintf(
stderr,
"-- Usage examples --\n"
" %s inline_1.mtx type: run with inline_1 matrix in matrix market format\n",
argv[0]);
return -1;
}
CSR matrixA;
int outputbase = 0;
loadMatrixMarket(argv[1], &matrixA, outputbase, 0 /*transpose =false*/);
const int A_num_rows = matrixA.n;
const int A_num_cols = matrixA.n;
const int A_num_nnz = matrixA.nnz;
int *hA_csrOffsets = matrixA.rowptr;
int *hA_columns = matrixA.colidx;
// index pointer on device
int *dA_csrOffsets, *dA_columns;
#ifdef FLOAT
float alpha = (float)1.0;
float beta = (float)0.0;
float *hA_values = (float*)malloc(A_num_nnz*sizeof(float));
#pragma omp parallel for
for(int i =0; i < A_num_nnz; i++){
hA_values[i] = (float) matrixA.values[i];
}
float *Y = (float*)malloc(A_num_rows*sizeof(float)); // result
float *X = (float*)malloc(A_num_cols*sizeof(float));
#pragma omp parallel for
for (int i = 0; i < A_num_cols; i++){ X[i] = (float) 1.0;}
// device
float *dA_values;
float *dX;
float *dY;
#else
double alpha = (double)1.0;
double beta = (double)0.0;
double *hA_values = matrixA.values;
double *Y = (double*)malloc(A_num_rows*sizeof(double));
double *X = (double*)malloc(A_num_cols*sizeof(double));
#pragma omp parallel for
for (int i = 0; i < A_num_cols; i++){ X[i] = (double)1.0;}
//device
double *dA_values;
double *dX;
double *dY;
#endif
// Allocate device memory to store the sparse CSR representation of A
cudaErrCheck(hipMalloc((void**) &dA_csrOffsets, (A_num_rows + 1) * sizeof(int)));
cudaErrCheck(hipMalloc((void **)&dA_columns, A_num_nnz*sizeof(int)));
#ifdef FLOAT
cudaErrCheck(hipMalloc((void **)&dA_values, A_num_nnz*sizeof(float)));
#else
cudaErrCheck(hipMalloc((void **)&dA_values, A_num_nnz*sizeof(double)));
#endif
// Allocate device memory to store the X and Y
#ifdef FLOAT
cudaErrCheck(hipMalloc((void **)&dX, A_num_cols*sizeof(float)));
cudaErrCheck(hipMalloc((void **)&dY, A_num_rows*sizeof(float)));
#else
cudaErrCheck(hipMalloc((void **)&dX, A_num_cols*sizeof(double)));
cudaErrCheck(hipMalloc((void **)&dY, A_num_rows*sizeof(double)));
#endif
// transfer data to device
// Transfer the input vectors and dense matrix A to the device
cudaErrCheck(hipMemcpy(dA_csrOffsets, hA_csrOffsets, (A_num_rows+1)*sizeof(int), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(dA_columns, hA_columns, A_num_nnz*sizeof(int), hipMemcpyHostToDevice));
#ifdef FLOAT
cudaErrCheck(hipMemcpy(dA_values, hA_values, A_num_nnz*sizeof(float), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(dX, X , A_num_cols*sizeof(float), hipMemcpyHostToDevice));
#else
cudaErrCheck(hipMemcpy(dA_values, hA_values, A_num_nnz*sizeof(double), hipMemcpyHostToDevice));
cudaErrCheck(hipMemcpy(dX, X, A_num_cols*sizeof(double), hipMemcpyHostToDevice));
#endif
// CUSPARSE APIs
hipsparseHandle_t handle = 0;
hipsparseSpMatDescr_t matA;
hipsparseDnVecDescr_t vecX, vecY;
void* dBuffer = NULL;
size_t bufferSize = 0;
cusparseErrCheck(hipsparseCreate(&handle));
// Create sparse matrix A in CSR format
#ifdef FLOAT
cusparseErrCheck( hipsparseCreateCsr(&matA, A_num_rows, A_num_cols, A_num_nnz,
dA_csrOffsets, dA_columns, dA_values,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_32F));
// Create dense vector X
cusparseErrCheck( hipsparseCreateDnVec(&vecX, A_num_cols, dX, HIP_R_32F) );
// Create dense vector y
cusparseErrCheck( hipsparseCreateDnVec(&vecY, A_num_rows, dY, HIP_R_32F) );
// allocate an external buffer if needed
cusparseErrCheck( hipsparseSpMV_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, HIP_R_32F,
HIPSPARSE_MV_ALG_DEFAULT, &bufferSize) );
#else
cusparseErrCheck( hipsparseCreateCsr(&matA, A_num_rows, A_num_cols, A_num_nnz,
dA_csrOffsets, dA_columns, dA_values,
HIPSPARSE_INDEX_32I, HIPSPARSE_INDEX_32I,
HIPSPARSE_INDEX_BASE_ZERO, HIP_R_64F));
// Create dense vector X
cusparseErrCheck( hipsparseCreateDnVec(&vecX, A_num_cols, dX, HIP_R_64F) );
// Create dense vector y
cusparseErrCheck( hipsparseCreateDnVec(&vecY, A_num_rows, dY, HIP_R_64F) );
// allocate an external buffer if needed
cusparseErrCheck( hipsparseSpMV_bufferSize(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, HIP_R_64F,
HIPSPARSE_MV_ALG_DEFAULT, &bufferSize) );
#endif
cudaErrCheck( hipMalloc(&dBuffer, bufferSize));
// execute SpMV
// Timing the solve
hipEvent_t start;
hipEvent_t stop;
cudaErrCheck(hipEventCreate(&start));
cudaErrCheck(hipEventCreate(&stop));
int REPEAT = 50;
float times [REPEAT];
for (int i = 0; i < REPEAT; i++) {
hipEventRecord(start);
#ifdef FLOAT
cusparseErrCheck( hipsparseSpMV(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, HIP_R_32F,
HIPSPARSE_MV_ALG_DEFAULT, dBuffer));
#else
cusparseErrCheck( hipsparseSpMV(handle, HIPSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, HIP_R_64F,
HIPSPARSE_MV_ALG_DEFAULT, dBuffer) );
#endif
hipEventRecord(stop);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
times[i] = milliseconds;
}
float sum_times = 0.0f;
for (int i = 0; i < REPEAT; i++) {
sum_times += times[i];
}
#ifdef FLOAT
printf ("SINGLE PRECISION SPMV ");
#else
printf ("DOUBLE PRECISION SPMV ");
#endif
printf ("solve time (microseconds) = %f\n", (sum_times/REPEAT)*1000);
// device result check
#ifdef FLOAT
cudaErrCheck( hipMemcpy(Y, dY, A_num_rows * sizeof(float),
hipMemcpyDeviceToHost));
#else
cudaErrCheck( hipMemcpy(Y, dY, A_num_rows * sizeof(double),
hipMemcpyDeviceToHost) )
#endif
// use mkl to check result
sparse_matrix_t mklA;
sparse_status_t stat;
#ifdef FLOAT
stat = mkl_sparse_s_create_csr(&mklA, SPARSE_INDEX_BASE_ZERO, A_num_rows, A_num_cols,
hA_csrOffsets, hA_csrOffsets + 1,
hA_columns, hA_values);
#else
stat = mkl_sparse_d_create_csr(&mklA, SPARSE_INDEX_BASE_ZERO, A_num_rows, A_num_cols,
hA_csrOffsets, hA_csrOffsets + 1,
hA_columns, hA_values);
#endif
if (SPARSE_STATUS_SUCCESS != stat) {
fprintf(stderr, "Failed to create mkl csr\n");
return -1;
}
#ifdef FLOAT
float *result = (float*)malloc(sizeof(float)*A_num_rows);
float error = 0;
#else
double *result = (double*)malloc(sizeof(double)*A_num_rows);
double error = 0;
#endif
matrix_descr descA;
descA.type = SPARSE_MATRIX_TYPE_GENERAL;
descA.diag = SPARSE_DIAG_NON_UNIT;
#ifdef FLOAT
mkl_sparse_s_mv(SPARSE_OPERATION_NON_TRANSPOSE, 1, mklA, descA, X, 0, result);
#else
mkl_sparse_d_mv(SPARSE_OPERATION_NON_TRANSPOSE, 1, mklA, descA, X, 0, result);
#endif
#pragma omp parallel for
for(int i = 0; i < A_num_rows; i++) {
error += abs(result[i] - Y[i]);
}
printf ("Error= %e\n", error/A_num_cols);
// destroy matrix/vector descriptors
cudaErrCheck(hipEventDestroy(start));
cudaErrCheck(hipEventDestroy(stop));
free(hA_values);
free(hA_csrOffsets);
free(hA_columns);
free(X);
free(Y);
free(result);
cudaErrCheck(hipFree(dY));
cudaErrCheck(hipFree(dX));
cudaErrCheck(hipFree(dA_values));
cudaErrCheck(hipFree(dA_csrOffsets));
cudaErrCheck(hipFree(dA_columns));
cudaErrCheck(hipFree(dBuffer));
cusparseErrCheck( hipsparseDestroySpMat(matA));
cusparseErrCheck( hipsparseDestroyDnVec(vecX));
cusparseErrCheck( hipsparseDestroyDnVec(vecY));
cusparseErrCheck( hipsparseDestroy(handle) );
return 0;
}
| 37b43546092a9ef9e7f58816a0bda89b3353ffd1.cu | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <cusparse.h>
#include <cuda.h>
#include <mkl.h>
#include <mkl_spblas.h>
#include "loadMatrixMarket.h"
//#define FLOAT
// Define some error checking macros.
#define cudaErrCheck(stat) { cudaErrCheck_((stat), __FILE__, __LINE__); }
void cudaErrCheck_(cudaError_t stat, const char *file, int line) {
if (stat != cudaSuccess) {
fprintf(stderr, "CUDA Error: %s %s %d\n", cudaGetErrorString(stat), file, line);
}
}
#define cusparseErrCheck(stat) { cusparseErrCheck_((stat), __FILE__, __LINE__); }
void cusparseErrCheck_(cusparseStatus_t stat, const char *file, int line) {
if (stat != CUSPARSE_STATUS_SUCCESS) {
fprintf(stderr, "CUSPARSE Error: %d %s %d\n", stat, file, line);
}
}
int main(int argc, char **argv)
{
if (argc < 2) {
fprintf(
stderr,
"-- Usage examples --\n"
" %s inline_1.mtx type: run with inline_1 matrix in matrix market format\n",
argv[0]);
return -1;
}
CSR matrixA;
int outputbase = 0;
loadMatrixMarket(argv[1], &matrixA, outputbase, 0 /*transpose =false*/);
const int A_num_rows = matrixA.n;
const int A_num_cols = matrixA.n;
const int A_num_nnz = matrixA.nnz;
int *hA_csrOffsets = matrixA.rowptr;
int *hA_columns = matrixA.colidx;
// index pointer on device
int *dA_csrOffsets, *dA_columns;
#ifdef FLOAT
float alpha = (float)1.0;
float beta = (float)0.0;
float *hA_values = (float*)malloc(A_num_nnz*sizeof(float));
#pragma omp parallel for
for(int i =0; i < A_num_nnz; i++){
hA_values[i] = (float) matrixA.values[i];
}
float *Y = (float*)malloc(A_num_rows*sizeof(float)); // result
float *X = (float*)malloc(A_num_cols*sizeof(float));
#pragma omp parallel for
for (int i = 0; i < A_num_cols; i++){ X[i] = (float) 1.0;}
// device
float *dA_values;
float *dX;
float *dY;
#else
double alpha = (double)1.0;
double beta = (double)0.0;
double *hA_values = matrixA.values;
double *Y = (double*)malloc(A_num_rows*sizeof(double));
double *X = (double*)malloc(A_num_cols*sizeof(double));
#pragma omp parallel for
for (int i = 0; i < A_num_cols; i++){ X[i] = (double)1.0;}
//device
double *dA_values;
double *dX;
double *dY;
#endif
// Allocate device memory to store the sparse CSR representation of A
cudaErrCheck(cudaMalloc((void**) &dA_csrOffsets, (A_num_rows + 1) * sizeof(int)));
cudaErrCheck(cudaMalloc((void **)&dA_columns, A_num_nnz*sizeof(int)));
#ifdef FLOAT
cudaErrCheck(cudaMalloc((void **)&dA_values, A_num_nnz*sizeof(float)));
#else
cudaErrCheck(cudaMalloc((void **)&dA_values, A_num_nnz*sizeof(double)));
#endif
// Allocate device memory to store the X and Y
#ifdef FLOAT
cudaErrCheck(cudaMalloc((void **)&dX, A_num_cols*sizeof(float)));
cudaErrCheck(cudaMalloc((void **)&dY, A_num_rows*sizeof(float)));
#else
cudaErrCheck(cudaMalloc((void **)&dX, A_num_cols*sizeof(double)));
cudaErrCheck(cudaMalloc((void **)&dY, A_num_rows*sizeof(double)));
#endif
// transfer data to device
// Transfer the input vectors and dense matrix A to the device
cudaErrCheck(cudaMemcpy(dA_csrOffsets, hA_csrOffsets, (A_num_rows+1)*sizeof(int), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(dA_columns, hA_columns, A_num_nnz*sizeof(int), cudaMemcpyHostToDevice));
#ifdef FLOAT
cudaErrCheck(cudaMemcpy(dA_values, hA_values, A_num_nnz*sizeof(float), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(dX, X , A_num_cols*sizeof(float), cudaMemcpyHostToDevice));
#else
cudaErrCheck(cudaMemcpy(dA_values, hA_values, A_num_nnz*sizeof(double), cudaMemcpyHostToDevice));
cudaErrCheck(cudaMemcpy(dX, X, A_num_cols*sizeof(double), cudaMemcpyHostToDevice));
#endif
// CUSPARSE APIs
cusparseHandle_t handle = 0;
cusparseSpMatDescr_t matA;
cusparseDnVecDescr_t vecX, vecY;
void* dBuffer = NULL;
size_t bufferSize = 0;
cusparseErrCheck(cusparseCreate(&handle));
// Create sparse matrix A in CSR format
#ifdef FLOAT
cusparseErrCheck( cusparseCreateCsr(&matA, A_num_rows, A_num_cols, A_num_nnz,
dA_csrOffsets, dA_columns, dA_values,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_32F));
// Create dense vector X
cusparseErrCheck( cusparseCreateDnVec(&vecX, A_num_cols, dX, CUDA_R_32F) );
// Create dense vector y
cusparseErrCheck( cusparseCreateDnVec(&vecY, A_num_rows, dY, CUDA_R_32F) );
// allocate an external buffer if needed
cusparseErrCheck( cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, CUDA_R_32F,
CUSPARSE_MV_ALG_DEFAULT, &bufferSize) );
#else
cusparseErrCheck( cusparseCreateCsr(&matA, A_num_rows, A_num_cols, A_num_nnz,
dA_csrOffsets, dA_columns, dA_values,
CUSPARSE_INDEX_32I, CUSPARSE_INDEX_32I,
CUSPARSE_INDEX_BASE_ZERO, CUDA_R_64F));
// Create dense vector X
cusparseErrCheck( cusparseCreateDnVec(&vecX, A_num_cols, dX, CUDA_R_64F) );
// Create dense vector y
cusparseErrCheck( cusparseCreateDnVec(&vecY, A_num_rows, dY, CUDA_R_64F) );
// allocate an external buffer if needed
cusparseErrCheck( cusparseSpMV_bufferSize(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, CUDA_R_64F,
CUSPARSE_MV_ALG_DEFAULT, &bufferSize) );
#endif
cudaErrCheck( cudaMalloc(&dBuffer, bufferSize));
// execute SpMV
// Timing the solve
cudaEvent_t start;
cudaEvent_t stop;
cudaErrCheck(cudaEventCreate(&start));
cudaErrCheck(cudaEventCreate(&stop));
int REPEAT = 50;
float times [REPEAT];
for (int i = 0; i < REPEAT; i++) {
cudaEventRecord(start);
#ifdef FLOAT
cusparseErrCheck( cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, CUDA_R_32F,
CUSPARSE_MV_ALG_DEFAULT, dBuffer));
#else
cusparseErrCheck( cusparseSpMV(handle, CUSPARSE_OPERATION_NON_TRANSPOSE,
&alpha, matA, vecX, &beta, vecY, CUDA_R_64F,
CUSPARSE_MV_ALG_DEFAULT, dBuffer) );
#endif
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
times[i] = milliseconds;
}
float sum_times = 0.0f;
for (int i = 0; i < REPEAT; i++) {
sum_times += times[i];
}
#ifdef FLOAT
printf ("SINGLE PRECISION SPMV ");
#else
printf ("DOUBLE PRECISION SPMV ");
#endif
printf ("solve time (microseconds) = %f\n", (sum_times/REPEAT)*1000);
// device result check
#ifdef FLOAT
cudaErrCheck( cudaMemcpy(Y, dY, A_num_rows * sizeof(float),
cudaMemcpyDeviceToHost));
#else
cudaErrCheck( cudaMemcpy(Y, dY, A_num_rows * sizeof(double),
cudaMemcpyDeviceToHost) )
#endif
// use mkl to check result
sparse_matrix_t mklA;
sparse_status_t stat;
#ifdef FLOAT
stat = mkl_sparse_s_create_csr(&mklA, SPARSE_INDEX_BASE_ZERO, A_num_rows, A_num_cols,
hA_csrOffsets, hA_csrOffsets + 1,
hA_columns, hA_values);
#else
stat = mkl_sparse_d_create_csr(&mklA, SPARSE_INDEX_BASE_ZERO, A_num_rows, A_num_cols,
hA_csrOffsets, hA_csrOffsets + 1,
hA_columns, hA_values);
#endif
if (SPARSE_STATUS_SUCCESS != stat) {
fprintf(stderr, "Failed to create mkl csr\n");
return -1;
}
#ifdef FLOAT
float *result = (float*)malloc(sizeof(float)*A_num_rows);
float error = 0;
#else
double *result = (double*)malloc(sizeof(double)*A_num_rows);
double error = 0;
#endif
matrix_descr descA;
descA.type = SPARSE_MATRIX_TYPE_GENERAL;
descA.diag = SPARSE_DIAG_NON_UNIT;
#ifdef FLOAT
mkl_sparse_s_mv(SPARSE_OPERATION_NON_TRANSPOSE, 1, mklA, descA, X, 0, result);
#else
mkl_sparse_d_mv(SPARSE_OPERATION_NON_TRANSPOSE, 1, mklA, descA, X, 0, result);
#endif
#pragma omp parallel for
for(int i = 0; i < A_num_rows; i++) {
error += abs(result[i] - Y[i]);
}
printf ("Error= %e\n", error/A_num_cols);
// destroy matrix/vector descriptors
cudaErrCheck(cudaEventDestroy(start));
cudaErrCheck(cudaEventDestroy(stop));
free(hA_values);
free(hA_csrOffsets);
free(hA_columns);
free(X);
free(Y);
free(result);
cudaErrCheck(cudaFree(dY));
cudaErrCheck(cudaFree(dX));
cudaErrCheck(cudaFree(dA_values));
cudaErrCheck(cudaFree(dA_csrOffsets));
cudaErrCheck(cudaFree(dA_columns));
cudaErrCheck(cudaFree(dBuffer));
cusparseErrCheck( cusparseDestroySpMat(matA));
cusparseErrCheck( cusparseDestroyDnVec(vecX));
cusparseErrCheck( cusparseDestroyDnVec(vecY));
cusparseErrCheck( cusparseDestroy(handle) );
return 0;
}
|
ba48029b5e7e5641e9cab18c7075b4f42ae3e9ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
using namespace std::chrono;
using namespace std;
#define n (1 << 2)
__global__ void matrix_multiplication_kernel(int *d_a, int *d_b, int *d_c){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n || j >= n) return;
d_c[i*n + j] = 0;
for(int k=0; k<n; k++){
d_c[i*n+j] += d_a[i*n+k] * d_b[k*n+j];
}
}
int main(){
size_t bytes = n*n*sizeof(int);
int *h_a, *h_b, *h_c;
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
for(int i = 0; i < n*n; i++){
h_a[i] = i;
h_b[i] = i;
}
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_a[i*n + j] << "\t";
// }
// cout << endl;
// }
// cout << "*" << endl;
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_b[i*n + j] << "\t";
// }
// cout << endl;
// }
cout << "cpu: " << endl;
auto start = high_resolution_clock::now();
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
h_c[i*n + j] = 0;
for(int k=0; k<n; k++){
h_c[i*n+j] += h_a[i*n+k] * h_b[k*n+j];
}
}
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
cout << "cpu time: " << duration.count() << endl;
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_c[i*n + j] << "\t";
// }
// cout << endl;
// }
cout << "gpu: " << endl;
start = high_resolution_clock::now();
int *h_d = (int*)malloc(bytes);
int *d_a, *d_b, *d_d;
hipMalloc((void**) &d_a, bytes);
hipMalloc((void**) &d_b, bytes);
hipMalloc((void**) &d_d, bytes);
hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);
int BLOCK_SIZE = 32;
int GRID_SIZE = (n/BLOCK_SIZE) + 1;
dim3 grid(GRID_SIZE, GRID_SIZE);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
hipLaunchKernelGGL(( matrix_multiplication_kernel), dim3(grid),dim3(block), 0, 0, d_a,d_b,d_d);
hipDeviceSynchronize();
hipMemcpy(h_d, d_d, bytes, hipMemcpyDeviceToHost);
for(int i = 0; i < n; i++){
for(int j=0; j<n; j++){
cout << h_d[i*n + j] << "\t";
}
cout << endl;
}
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
cout << "gpu time: " << duration.count() << endl;
bool error_occurred = false;
for(int i = 0; i < n; i++){
for(int j=0; j<n; j++){
if(h_d[i*n + j] - h_c[i*n + j] != 0){
cout << "Some error occurred" <<endl;
error_occurred = true;
}
}
}
if(error_occurred == false) cout << "No error" <<endl;
} | ba48029b5e7e5641e9cab18c7075b4f42ae3e9ac.cu | #include <iostream>
#include <chrono>
using namespace std::chrono;
using namespace std;
#define n (1 << 2)
__global__ void matrix_multiplication_kernel(int *d_a, int *d_b, int *d_c){
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n || j >= n) return;
d_c[i*n + j] = 0;
for(int k=0; k<n; k++){
d_c[i*n+j] += d_a[i*n+k] * d_b[k*n+j];
}
}
int main(){
size_t bytes = n*n*sizeof(int);
int *h_a, *h_b, *h_c;
h_a = (int*)malloc(bytes);
h_b = (int*)malloc(bytes);
h_c = (int*)malloc(bytes);
for(int i = 0; i < n*n; i++){
h_a[i] = i;
h_b[i] = i;
}
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_a[i*n + j] << "\t";
// }
// cout << endl;
// }
// cout << "*" << endl;
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_b[i*n + j] << "\t";
// }
// cout << endl;
// }
cout << "cpu: " << endl;
auto start = high_resolution_clock::now();
for(int i=0; i<n; i++){
for(int j=0; j<n; j++){
h_c[i*n + j] = 0;
for(int k=0; k<n; k++){
h_c[i*n+j] += h_a[i*n+k] * h_b[k*n+j];
}
}
}
auto stop = high_resolution_clock::now();
auto duration = duration_cast<microseconds>(stop - start);
cout << "cpu time: " << duration.count() << endl;
// for(int i = 0; i < n; i++){
// for(int j=0; j<n; j++){
// cout << h_c[i*n + j] << "\t";
// }
// cout << endl;
// }
cout << "gpu: " << endl;
start = high_resolution_clock::now();
int *h_d = (int*)malloc(bytes);
int *d_a, *d_b, *d_d;
cudaMalloc((void**) &d_a, bytes);
cudaMalloc((void**) &d_b, bytes);
cudaMalloc((void**) &d_d, bytes);
cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
int BLOCK_SIZE = 32;
int GRID_SIZE = (n/BLOCK_SIZE) + 1;
dim3 grid(GRID_SIZE, GRID_SIZE);
dim3 block(BLOCK_SIZE, BLOCK_SIZE);
matrix_multiplication_kernel<<<grid,block>>>(d_a,d_b,d_d);
cudaDeviceSynchronize();
cudaMemcpy(h_d, d_d, bytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < n; i++){
for(int j=0; j<n; j++){
cout << h_d[i*n + j] << "\t";
}
cout << endl;
}
stop = high_resolution_clock::now();
duration = duration_cast<microseconds>(stop - start);
cout << "gpu time: " << duration.count() << endl;
bool error_occurred = false;
for(int i = 0; i < n; i++){
for(int j=0; j<n; j++){
if(h_d[i*n + j] - h_c[i*n + j] != 0){
cout << "Some error occurred" <<endl;
error_occurred = true;
}
}
}
if(error_occurred == false) cout << "No error" <<endl;
} |
8491151c219ce82c1b08629cff853703fef84be3.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include "GlobalState.h"
#include "PatchMatch.h"
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/opencv.hpp>
#include <sys/stat.h>
#include <sys/types.h>
// Includes CUDA
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_texture_types.h>
#include <hip/hip_vector_types.h>
float Evaluate(cv::Mat standard, cv::Mat myMap)
{
double sum = standard.rows * standard.cols;
int count = 0;
int ans = 0;
for (int i = 0; i < standard.rows; i ++)
{
for (int j = 0; j < standard.cols; j ++)
{
ans = standard.ptr<uchar>(i)[j] - myMap.ptr<float>(i)[j];
//1bad pixelsdisparity maps3
if (ans > 3 || ans < -3) count ++;
}
}
double result = (count + 0.0)/sum;
std::cout << std::setiosflags(std::ios::fixed);
std::cout << std::setprecision(2) << result * 100 << "\\%" << std::endl;
return result * 100;
}
bool check_image(const cv::Mat &image, std::string name="Image")
{
if(!image.data)
{
std::cerr <<name <<" data not loaded.\n";
return false;
}
return true;
}
bool check_dimensions(const cv::Mat &img1, const cv::Mat &img2)
{
if(img1.cols != img2.cols or img1.rows != img2.rows)
{
std::cerr << "Images' dimensions do not corresponds.";
return false;
}
return true;
}
void addImageToTextureFloatColor(std::vector<cv::Mat>& imgs, hipTextureObject_t texs[], hipArray* cuArray[])
{
for(int i = 0; i < imgs.size(); ++i)
{
int rows = imgs[i].rows;
int cols = imgs[i].cols;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float4>();
hipMallocArray(&cuArray[i], &channelDesc, cols, rows);
hipMemcpy2DToArray(cuArray[i], 0, 0, imgs[i].ptr<float>(), imgs[i].step[0], cols * sizeof(float) * 4, rows, hipMemcpyHostToDevice);
struct hipResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = hipResourceTypeArray;
res_desc.res.array.array = cuArray[i];
struct hipTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = hipAddressModeWrap;
tex_desc.addressMode[1] = hipAddressModeWrap;
tex_desc.filterMode = hipFilterModeLinear;
tex_desc.readMode = hipReadModeElementType;
tex_desc.normalizedCoords = 0;
hipCreateTextureObject(&(texs[i]), &res_desc, &tex_desc, NULL);
}
}
void addImageToTextureFloatGray(std::vector<cv::Mat>& imgs, hipTextureObject_t texs[], hipArray* cuArray[])
{
for(int i = 0; i < imgs.size(); ++i)
{
int rows = imgs[i].rows;
int cols = imgs[i].cols;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipMallocArray(&cuArray[i], &channelDesc, cols, rows);
hipMemcpy2DToArray(cuArray[i], 0, 0, imgs[i].ptr<float>(), imgs[i].step[0], cols * sizeof(float), rows, hipMemcpyHostToDevice);
struct hipResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = hipResourceTypeArray;
res_desc.res.array.array = cuArray[i];
struct hipTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = hipAddressModeWrap;
tex_desc.addressMode[1] = hipAddressModeWrap;
tex_desc.filterMode = hipFilterModeLinear;
tex_desc.readMode = hipReadModeElementType;
tex_desc.normalizedCoords = 0;
hipCreateTextureObject(&(texs[i]), &res_desc, &tex_desc, NULL);
}
}
std::vector<std::string> name = {"Aloe", "Baby1", "Baby2", "Baby3", "Bowling1",
"Bowling2", "Cloth1", "Cloth2", "Cloth3", "Cloth4", "Flowerpots",
"Lampshade1", "Lampshade2", "Midd1", "Midd2", "Monopoly",
"Plastic", "Rocks1", "Rocks2", "Wood1", "Wood2"};
int main(int argc, char** argv)
{
//
const float alpha = 0.9f;
const float gamma = 10.0f;
const float tau_c = 10.0f;
const float tau_g = 2.0f;
mkdir("resultImages1", S_IRWXU);
const time_t start = time(NULL);
for (int i = 0; i < name.size(); i ++)
{
std::cout << name[i] << std::endl;
//
std::string str = "resultImages1/" + name[i];
const char * dir = str.c_str();
mkdir(dir, S_IRWXU);
//images
cv::Mat img1 = cv::imread("dataset/" + name[i] + "/view1.png" , cv::IMREAD_COLOR );
cv::Mat img2 = cv::imread("dataset/" + name[i] + "/view5.png" , cv::IMREAD_COLOR );
if ( (!img1.data) || (!img2.data))
{
printf("Please input right data~~\n");
return -1;
}
// Image loading check
if(!check_image(img1, "Image 1") or !check_image(img2, "Image 2"))
return 1;
// Image sizes check
if(!check_dimensions(img1, img2))
return 1;
AlgorithmParameters* params = new AlgorithmParameters();
params->min_disparity = 1.0f;
params->max_disparity = 80.f;
params->box_width = 11;
params->box_height = 11;
params->tau_color = tau_c;
params->tau_gradient = tau_g;
params->alpha = alpha;
params->gamma = gamma;
GlobalState* gs = new GlobalState();
gs->params = params;
int rows = img1.rows;
int cols = img1.cols;
std::vector<cv::Mat> imgs = {img1, img2};
if(params->color_processing)
{
std::vector<cv::Mat> img_color_float_alpha(2);
std::vector<cv::Mat> img_color_float(2);
for(int i = 0; i < imgs.size(); ++i)
{
img_color_float_alpha[i] = cv::Mat::zeros(rows, cols, CV_32FC4);
cv::Mat alpha(rows, cols, CV_32FC1);
std::vector<cv::Mat> channels(3);
imgs[i].convertTo(img_color_float[i], CV_32FC3);
cv::split(img_color_float[i], channels);
channels.push_back(alpha);
cv::merge(channels, img_color_float_alpha[i]);
}
addImageToTextureFloatColor(img_color_float_alpha, gs->imgs, gs->cuArray);
}
else
{
std::vector<cv::Mat> img_grayscale(2);
std::vector<cv::Mat> img_grayscale_float(2);
for(int i = 0; i < imgs.size(); ++i)
{
cv::cvtColor(imgs[i], img_grayscale[i], cv::COLOR_BGR2GRAY);
img_grayscale[i].convertTo(img_grayscale_float[i], CV_32F, 1.0 / 255);
}
addImageToTextureFloatGray(img_grayscale_float, gs->imgs, gs->cuArray);
}
gs->lines->resize(rows, cols);
runCuda(*gs);
hipDeviceSynchronize();
cv::Mat disp(rows, cols, CV_32FC1);
for(int i = 0; i < rows; ++i)
{
for(int j = 0; j < cols; ++j)
{
disp.at<float>(i, j) = gs->lines->norm4[cols * i + j].w;
}
}
for(int i = 0; i < 2; ++i)
{
hipFreeArray(gs->cuArray[i]);
hipDestroyTextureObject(gs->imgs[i]);
}
delete gs;
delete params;
hipDeviceSynchronize();
try
{
cv::medianBlur(disp, disp, 3);
disp = disp * 3;
cv::imwrite( "resultImages1/" + name[i] + "/" + name[i] + "_disp1.png", disp);
cv::Mat standardLeft = cv::imread("dataset/" + name[i] + "/disp1.png", -1);
float error_rate_left = Evaluate(standardLeft, disp);
disp.convertTo(disp, CV_8U);
cv::imshow("disp", disp);
cv::waitKey(1);
}
catch(std::exception &e)
{
std::cerr << "Disparity save error.\n" <<e.what();
return 1;
}
checkForLastCudaError();
}
return 0;
}
| 8491151c219ce82c1b08629cff853703fef84be3.cu | #include <iostream>
#include "GlobalState.h"
#include "PatchMatch.h"
#include <iostream>
#include <vector>
#include <string>
#include <opencv2/opencv.hpp>
#include <sys/stat.h>
#include <sys/types.h>
// Includes CUDA
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cuda_texture_types.h>
#include <vector_types.h>
float Evaluate(cv::Mat standard, cv::Mat myMap)
{
double sum = standard.rows * standard.cols;
int count = 0;
int ans = 0;
for (int i = 0; i < standard.rows; i ++)
{
for (int j = 0; j < standard.cols; j ++)
{
ans = standard.ptr<uchar>(i)[j] - myMap.ptr<float>(i)[j];
//与原图灰度相差大于1可认为是bad pixels,因为增强对比度,所以disparity maps都乘以3显示
if (ans > 3 || ans < -3) count ++;
}
}
double result = (count + 0.0)/sum;
std::cout << std::setiosflags(std::ios::fixed);
std::cout << std::setprecision(2) << result * 100 << "\\%" << std::endl;
return result * 100;
}
bool check_image(const cv::Mat &image, std::string name="Image")
{
if(!image.data)
{
std::cerr <<name <<" data not loaded.\n";
return false;
}
return true;
}
bool check_dimensions(const cv::Mat &img1, const cv::Mat &img2)
{
if(img1.cols != img2.cols or img1.rows != img2.rows)
{
std::cerr << "Images' dimensions do not corresponds.";
return false;
}
return true;
}
void addImageToTextureFloatColor(std::vector<cv::Mat>& imgs, cudaTextureObject_t texs[], cudaArray* cuArray[])
{
for(int i = 0; i < imgs.size(); ++i)
{
int rows = imgs[i].rows;
int cols = imgs[i].cols;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float4>();
cudaMallocArray(&cuArray[i], &channelDesc, cols, rows);
cudaMemcpy2DToArray(cuArray[i], 0, 0, imgs[i].ptr<float>(), imgs[i].step[0], cols * sizeof(float) * 4, rows, cudaMemcpyHostToDevice);
struct cudaResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = cudaResourceTypeArray;
res_desc.res.array.array = cuArray[i];
struct cudaTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = cudaAddressModeWrap;
tex_desc.addressMode[1] = cudaAddressModeWrap;
tex_desc.filterMode = cudaFilterModeLinear;
tex_desc.readMode = cudaReadModeElementType;
tex_desc.normalizedCoords = 0;
cudaCreateTextureObject(&(texs[i]), &res_desc, &tex_desc, NULL);
}
}
void addImageToTextureFloatGray(std::vector<cv::Mat>& imgs, cudaTextureObject_t texs[], cudaArray* cuArray[])
{
for(int i = 0; i < imgs.size(); ++i)
{
int rows = imgs[i].rows;
int cols = imgs[i].cols;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaMallocArray(&cuArray[i], &channelDesc, cols, rows);
cudaMemcpy2DToArray(cuArray[i], 0, 0, imgs[i].ptr<float>(), imgs[i].step[0], cols * sizeof(float), rows, cudaMemcpyHostToDevice);
struct cudaResourceDesc res_desc;
memset(&res_desc, 0, sizeof(res_desc));
res_desc.resType = cudaResourceTypeArray;
res_desc.res.array.array = cuArray[i];
struct cudaTextureDesc tex_desc;
memset(&tex_desc, 0, sizeof(tex_desc));
tex_desc.addressMode[0] = cudaAddressModeWrap;
tex_desc.addressMode[1] = cudaAddressModeWrap;
tex_desc.filterMode = cudaFilterModeLinear;
tex_desc.readMode = cudaReadModeElementType;
tex_desc.normalizedCoords = 0;
cudaCreateTextureObject(&(texs[i]), &res_desc, &tex_desc, NULL);
}
}
std::vector<std::string> name = {"Aloe", "Baby1", "Baby2", "Baby3", "Bowling1",
"Bowling2", "Cloth1", "Cloth2", "Cloth3", "Cloth4", "Flowerpots",
"Lampshade1", "Lampshade2", "Midd1", "Midd2", "Monopoly",
"Plastic", "Rocks1", "Rocks2", "Wood1", "Wood2"};
int main(int argc, char** argv)
{
// 参数
const float alpha = 0.9f;
const float gamma = 10.0f;
const float tau_c = 10.0f;
const float tau_g = 2.0f;
mkdir("resultImages1", S_IRWXU);
const time_t start = time(NULL);
for (int i = 0; i < name.size(); i ++)
{
std::cout << name[i] << std::endl;
//在目的文件夹中创建相应的文件夹,以便存入图片
std::string str = "resultImages1/" + name[i];
const char * dir = str.c_str();
mkdir(dir, S_IRWXU);
//读取images文件夹中的源图片
cv::Mat img1 = cv::imread("dataset/" + name[i] + "/view1.png" , cv::IMREAD_COLOR );
cv::Mat img2 = cv::imread("dataset/" + name[i] + "/view5.png" , cv::IMREAD_COLOR );
if ( (!img1.data) || (!img2.data))
{
printf("Please input right data~~\n");
return -1;
}
// Image loading check
if(!check_image(img1, "Image 1") or !check_image(img2, "Image 2"))
return 1;
// Image sizes check
if(!check_dimensions(img1, img2))
return 1;
AlgorithmParameters* params = new AlgorithmParameters();
params->min_disparity = 1.0f;
params->max_disparity = 80.f;
params->box_width = 11;
params->box_height = 11;
params->tau_color = tau_c;
params->tau_gradient = tau_g;
params->alpha = alpha;
params->gamma = gamma;
GlobalState* gs = new GlobalState();
gs->params = params;
int rows = img1.rows;
int cols = img1.cols;
std::vector<cv::Mat> imgs = {img1, img2};
if(params->color_processing)
{
std::vector<cv::Mat> img_color_float_alpha(2);
std::vector<cv::Mat> img_color_float(2);
for(int i = 0; i < imgs.size(); ++i)
{
img_color_float_alpha[i] = cv::Mat::zeros(rows, cols, CV_32FC4);
cv::Mat alpha(rows, cols, CV_32FC1);
std::vector<cv::Mat> channels(3);
imgs[i].convertTo(img_color_float[i], CV_32FC3);
cv::split(img_color_float[i], channels);
channels.push_back(alpha);
cv::merge(channels, img_color_float_alpha[i]);
}
addImageToTextureFloatColor(img_color_float_alpha, gs->imgs, gs->cuArray);
}
else
{
std::vector<cv::Mat> img_grayscale(2);
std::vector<cv::Mat> img_grayscale_float(2);
for(int i = 0; i < imgs.size(); ++i)
{
cv::cvtColor(imgs[i], img_grayscale[i], cv::COLOR_BGR2GRAY);
img_grayscale[i].convertTo(img_grayscale_float[i], CV_32F, 1.0 / 255);
}
addImageToTextureFloatGray(img_grayscale_float, gs->imgs, gs->cuArray);
}
gs->lines->resize(rows, cols);
runCuda(*gs);
cudaDeviceSynchronize();
cv::Mat disp(rows, cols, CV_32FC1);
for(int i = 0; i < rows; ++i)
{
for(int j = 0; j < cols; ++j)
{
disp.at<float>(i, j) = gs->lines->norm4[cols * i + j].w;
}
}
for(int i = 0; i < 2; ++i)
{
cudaFreeArray(gs->cuArray[i]);
cudaDestroyTextureObject(gs->imgs[i]);
}
delete gs;
delete params;
cudaDeviceSynchronize();
try
{
cv::medianBlur(disp, disp, 3);
disp = disp * 3;
cv::imwrite( "resultImages1/" + name[i] + "/" + name[i] + "_disp1.png", disp);
cv::Mat standardLeft = cv::imread("dataset/" + name[i] + "/disp1.png", -1);
float error_rate_left = Evaluate(standardLeft, disp);
disp.convertTo(disp, CV_8U);
cv::imshow("disp", disp);
cv::waitKey(1);
}
catch(std::exception &e)
{
std::cerr << "Disparity save error.\n" <<e.what();
return 1;
}
checkForLastCudaError();
}
return 0;
}
|
b6f72085bf1adcd52f4b20200b9376ad43eafd8f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
int main(int argc,char **argv)
{
std::ofstream myfile;
myfile.open ("seq_scan.csv");
const unsigned int times = 10;
for (unsigned int i = 0; i<30; i++)
{
const unsigned int IN_SIZE = 1<<i;
const unsigned int IN_BYTES = sizeof(unsigned int)*IN_SIZE;
const unsigned int OUT_SIZE = 1<<i;
const unsigned int OUT_BYTES = sizeof(unsigned int)*OUT_SIZE;
printf("\ni = %d\n", i);
printf("\n ARRAY_SIZE = %d\n", IN_SIZE);
printf("\n ARRAY_BYTES = %d\n", IN_BYTES);
unsigned int * h_in = (unsigned int*)malloc(IN_BYTES);
unsigned int * h_out = (unsigned int*)malloc(OUT_BYTES);
for (unsigned int j = 0; j<IN_SIZE; j++) {h_in[j] = 1;}
// setting up time
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// running the code on the CPU $times times
for (unsigned int k = 0; k<times; k++)
{
h_out[0] = h_in[0];
for (unsigned int l = 1; l < IN_SIZE; ++l)
{
h_out[l] = h_out[l-1] + h_in[l];
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// calculating time
float elapsedTime = .0f;
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime = elapsedTime / ((float) times);
printf(" time: %.5f\n", elapsedTime);
myfile << elapsedTime << ",";
}
myfile.close();
return 0;
}
| b6f72085bf1adcd52f4b20200b9376ad43eafd8f.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>
int main(int argc,char **argv)
{
std::ofstream myfile;
myfile.open ("seq_scan.csv");
const unsigned int times = 10;
for (unsigned int i = 0; i<30; i++)
{
const unsigned int IN_SIZE = 1<<i;
const unsigned int IN_BYTES = sizeof(unsigned int)*IN_SIZE;
const unsigned int OUT_SIZE = 1<<i;
const unsigned int OUT_BYTES = sizeof(unsigned int)*OUT_SIZE;
printf("\ni = %d\n", i);
printf("\n ARRAY_SIZE = %d\n", IN_SIZE);
printf("\n ARRAY_BYTES = %d\n", IN_BYTES);
unsigned int * h_in = (unsigned int*)malloc(IN_BYTES);
unsigned int * h_out = (unsigned int*)malloc(OUT_BYTES);
for (unsigned int j = 0; j<IN_SIZE; j++) {h_in[j] = 1;}
// setting up time
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// running the code on the CPU $times times
for (unsigned int k = 0; k<times; k++)
{
h_out[0] = h_in[0];
for (unsigned int l = 1; l < IN_SIZE; ++l)
{
h_out[l] = h_out[l-1] + h_in[l];
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// calculating time
float elapsedTime = .0f;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime = elapsedTime / ((float) times);
printf(" time: %.5f\n", elapsedTime);
myfile << elapsedTime << ",";
}
myfile.close();
return 0;
}
|
6d3eacc018aaadf3d55b11afc44c6761d143aa48.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void set_cnt(double* d_buf_y, double* d_buf_z, int dNple, int tbY, int tbZ, int bSizeY, int bSizeZ, int ichunk, int jchunk, int kchunk)
{
int tt = blockIdx.x;
int zC = blockIdx.z;
int yC = blockIdx.y;
int seq = threadIdx.x;
d_buf_y(seq,tt,yC,zC) = -1;
d_buf_z(seq,tt,yC,zC) = -1;
}
__global__ void set_d_buf_y(double* d_buf_y, int dNple, int tbY, int tbZ, int bSizeY, int ichunk, int jchunk, int kchunk, int func, int rank, int avgAngrp)
{
int tt = blockIdx.x;
int seq = blockIdx.y;
int zC = blockIdx.z;
int aa = threadIdx.x; //angle
//int ii = threadIdx.y;
for(int ii = threadIdx.y; ii<ichunk; ii+=32)
{
volatile double* d_buf_y_pull_base;
if (func==0) d_buf_y_pull_base = &d_buf_y(seq,tt,0,zC);
else d_buf_y_pull_base = &d_buf_y(seq,tt,tbY,zC);
#define d_buf_y_pull(a,b,c) d_buf_y_pull_base[c + WARP_SIZE * ( b + kchunk * a )]
if(aa==0 && ii==0) d_buf_y_pull(0,0,0)=dNple*(avgAngrp+1);
d_buf_y_pull_base += 1;
for(int kk=0;kk<kchunk;kk++)
{
double tmp=ii + ichunk*(kk + kchunk * aa) ;
//if( func==0) d_buf_y_pull(ii,kk,aa) = tmp;
if( func==0) d_buf_y_pull(ii,kk,aa) = 0.;
else
{
if (d_buf_y_pull(ii,kk,aa) != tmp)
printf("Y:%d:disagree at tt=%d,seq=%d,zC=%d,aa=%d,ii=%d,kk=%d got %f but %f\n",rank, tt,seq,zC,aa,ii,kk, d_buf_y_pull(ii,kk,aa), tmp);
}
}
}
#undef d_buf_y_pull
}
__global__ void set_d_buf_z(double* d_buf_z, int dNple, int tbY, int tbZ, int bSizeZ, int ichunk, int jchunk, int kchunk,int func, int rank, int avgAngrp)
{
int tt = blockIdx.x;
int seq = blockIdx.y;
int yC = blockIdx.z;
//int aa = threadIdx.x; //angle
int aa = threadIdx.x;
//int ii = threadIdx.y;
for(int ii = threadIdx.y; ii< ichunk;ii+=32)
{
volatile double* d_buf_z_pull_base;
if( func==0) d_buf_z_pull_base = &d_buf_z(seq,tt,yC,0);
else d_buf_z_pull_base = &d_buf_z(seq,tt,yC,tbZ);
#define d_buf_z_pull(a,b,c) d_buf_z_pull_base[c + WARP_SIZE * ( b + jchunk * a )]
if(aa==0 && ii==0) d_buf_z_pull(0,0,0)=dNple*(avgAngrp+1);
d_buf_z_pull_base += 1;
for(int jj=0;jj<jchunk;jj++)
{
double tmp=ii + ichunk*(jj + jchunk*aa);
//if (func==0) d_buf_z_pull(ii,jj,aa) = tmp;
if (func==0) d_buf_z_pull(ii,jj,aa) = 0.;
else
{
if (d_buf_z_pull(ii,jj,aa) != tmp) printf("Z:%d:disagree at tt=%d,seq=%d,yC=%d,aa=%d,ii=%d,jj=%d got %f but %f %p\n",rank, tt,seq,yC,aa,ii,jj,d_buf_z_pull(ii,jj,aa), tmp, &d_buf_z_pull(ii,jj,aa));
}
}
}
#undef d_duf_z_pull
}
__global__ void chk_d_buf_y(double* d_buf_y, int dNple, int tbY, int tbZ, int bSizeY, int jchunk, int kchunk)
{
int tt = blockIdx.x;
int seq = blockIdx.y;
int zC = blockIdx.z;
//int aa = threadIdx.x; //angle
int aa = threadIdx.y;
int ii = threadIdx.x;
volatile double* d_buf_y_pull_baseI = &d_buf_y(seq,tt,0,zC) + 1;
volatile double* d_buf_y_pull_baseO = &d_buf_y(seq,tt,tbY,zC) + 1;
#define d_buf_y_pullI(a,b,c) d_buf_y_pull_baseO[c + WARP_SIZE * ( b + jchunk * a )]
#define d_buf_y_pullO(a,b,c) d_buf_y_pull_baseI[c + WARP_SIZE * ( b + jchunk * a )]
for(int kk=0;kk<kchunk;kk++)
if(d_buf_y_pullI(ii,kk,aa) != d_buf_y_pullO(ii,kk,aa))
printf("disagree at tt=%d,seq=%d,zC=%d,ii=%d,kk=%d,aa=%d,\n",tt,seq,zC,ii,kk,aa);
#undef d_buf_y_pullI
#undef d_buf_y_pullO
}
| 6d3eacc018aaadf3d55b11afc44c6761d143aa48.cu | __global__ void set_cnt(double* d_buf_y, double* d_buf_z, int dNple, int tbY, int tbZ, int bSizeY, int bSizeZ, int ichunk, int jchunk, int kchunk)
{
int tt = blockIdx.x;
int zC = blockIdx.z;
int yC = blockIdx.y;
int seq = threadIdx.x;
d_buf_y(seq,tt,yC,zC) = -1;
d_buf_z(seq,tt,yC,zC) = -1;
}
__global__ void set_d_buf_y(double* d_buf_y, int dNple, int tbY, int tbZ, int bSizeY, int ichunk, int jchunk, int kchunk, int func, int rank, int avgAngrp)
{
int tt = blockIdx.x;
int seq = blockIdx.y;
int zC = blockIdx.z;
int aa = threadIdx.x; //angle
//int ii = threadIdx.y;
for(int ii = threadIdx.y; ii<ichunk; ii+=32)
{
volatile double* d_buf_y_pull_base;
if (func==0) d_buf_y_pull_base = &d_buf_y(seq,tt,0,zC);
else d_buf_y_pull_base = &d_buf_y(seq,tt,tbY,zC);
#define d_buf_y_pull(a,b,c) d_buf_y_pull_base[c + WARP_SIZE * ( b + kchunk * a )]
if(aa==0 && ii==0) d_buf_y_pull(0,0,0)=dNple*(avgAngrp+1);
d_buf_y_pull_base += 1;
for(int kk=0;kk<kchunk;kk++)
{
double tmp=ii + ichunk*(kk + kchunk * aa) ;
//if( func==0) d_buf_y_pull(ii,kk,aa) = tmp;
if( func==0) d_buf_y_pull(ii,kk,aa) = 0.;
else
{
if (d_buf_y_pull(ii,kk,aa) != tmp)
printf("Y:%d:disagree at tt=%d,seq=%d,zC=%d,aa=%d,ii=%d,kk=%d got %f but %f\n",rank, tt,seq,zC,aa,ii,kk, d_buf_y_pull(ii,kk,aa), tmp);
}
}
}
#undef d_buf_y_pull
}
__global__ void set_d_buf_z(double* d_buf_z, int dNple, int tbY, int tbZ, int bSizeZ, int ichunk, int jchunk, int kchunk,int func, int rank, int avgAngrp)
{
int tt = blockIdx.x;
int seq = blockIdx.y;
int yC = blockIdx.z;
//int aa = threadIdx.x; //angle
int aa = threadIdx.x;
//int ii = threadIdx.y;
for(int ii = threadIdx.y; ii< ichunk;ii+=32)
{
volatile double* d_buf_z_pull_base;
if( func==0) d_buf_z_pull_base = &d_buf_z(seq,tt,yC,0);
else d_buf_z_pull_base = &d_buf_z(seq,tt,yC,tbZ);
#define d_buf_z_pull(a,b,c) d_buf_z_pull_base[c + WARP_SIZE * ( b + jchunk * a )]
if(aa==0 && ii==0) d_buf_z_pull(0,0,0)=dNple*(avgAngrp+1);
d_buf_z_pull_base += 1;
for(int jj=0;jj<jchunk;jj++)
{
double tmp=ii + ichunk*(jj + jchunk*aa);
//if (func==0) d_buf_z_pull(ii,jj,aa) = tmp;
if (func==0) d_buf_z_pull(ii,jj,aa) = 0.;
else
{
if (d_buf_z_pull(ii,jj,aa) != tmp) printf("Z:%d:disagree at tt=%d,seq=%d,yC=%d,aa=%d,ii=%d,jj=%d got %f but %f %p\n",rank, tt,seq,yC,aa,ii,jj,d_buf_z_pull(ii,jj,aa), tmp, &d_buf_z_pull(ii,jj,aa));
}
}
}
#undef d_duf_z_pull
}
__global__ void chk_d_buf_y(double* d_buf_y, int dNple, int tbY, int tbZ, int bSizeY, int jchunk, int kchunk)
{
int tt = blockIdx.x;
int seq = blockIdx.y;
int zC = blockIdx.z;
//int aa = threadIdx.x; //angle
int aa = threadIdx.y;
int ii = threadIdx.x;
volatile double* d_buf_y_pull_baseI = &d_buf_y(seq,tt,0,zC) + 1;
volatile double* d_buf_y_pull_baseO = &d_buf_y(seq,tt,tbY,zC) + 1;
#define d_buf_y_pullI(a,b,c) d_buf_y_pull_baseO[c + WARP_SIZE * ( b + jchunk * a )]
#define d_buf_y_pullO(a,b,c) d_buf_y_pull_baseI[c + WARP_SIZE * ( b + jchunk * a )]
for(int kk=0;kk<kchunk;kk++)
if(d_buf_y_pullI(ii,kk,aa) != d_buf_y_pullO(ii,kk,aa))
printf("disagree at tt=%d,seq=%d,zC=%d,ii=%d,kk=%d,aa=%d,\n",tt,seq,zC,ii,kk,aa);
#undef d_buf_y_pullI
#undef d_buf_y_pullO
}
|
009ce099f4a37232f2a07d251da8a2c2a812b336.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Currently, `initializeElementsTo`, if executed in a thread whose
* `i` is calculated to be greater than `N`, will try to access a value
* outside the range of `a`.
*
* Refactor the kernel defintition to prevent our of range accesses.
*/
__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < N)
a[i] = initialValue;
}
int main()
{
/*
* Do not modify `N`.
*/
int N = 1000;
int *a;
size_t size = N * sizeof(int);
hipMallocManaged(&a, size);
/*
* Assume we have reason to want the number of threads
* fixed at `256`: do not modify `threads_per_block`.
*/
size_t threads_per_block = 256;
/*
* Assign a value to `number_of_blocks` that will
* allow for a working execution configuration given
* the fixed values for `N` and `threads_per_block`.
*/
size_t number_of_blocks = (N + threads_per_block - 1)/ threads_per_block;
int initialValue = 6;
hipLaunchKernelGGL(( initializeElementsTo), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, initialValue, a, N);
hipDeviceSynchronize();
/*
* Check to make sure all values in `a`, were initialized.
*/
for (int i = 0; i < N; ++i)
{
if(a[i] != initialValue)
{
printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
exit(1);
}
}
printf("SUCCESS!\n");
hipFree(a);
}
| 009ce099f4a37232f2a07d251da8a2c2a812b336.cu | #include <stdio.h>
/*
* Currently, `initializeElementsTo`, if executed in a thread whose
* `i` is calculated to be greater than `N`, will try to access a value
* outside the range of `a`.
*
* Refactor the kernel defintition to prevent our of range accesses.
*/
__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i < N)
a[i] = initialValue;
}
int main()
{
/*
* Do not modify `N`.
*/
int N = 1000;
int *a;
size_t size = N * sizeof(int);
cudaMallocManaged(&a, size);
/*
* Assume we have reason to want the number of threads
* fixed at `256`: do not modify `threads_per_block`.
*/
size_t threads_per_block = 256;
/*
* Assign a value to `number_of_blocks` that will
* allow for a working execution configuration given
* the fixed values for `N` and `threads_per_block`.
*/
size_t number_of_blocks = (N + threads_per_block - 1)/ threads_per_block;
int initialValue = 6;
initializeElementsTo<<<number_of_blocks, threads_per_block>>>(initialValue, a, N);
cudaDeviceSynchronize();
/*
* Check to make sure all values in `a`, were initialized.
*/
for (int i = 0; i < N; ++i)
{
if(a[i] != initialValue)
{
printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
exit(1);
}
}
printf("SUCCESS!\n");
cudaFree(a);
}
|
317f6d3ea25eeb505296364f6925fd68a0e2b52e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <stdio.h>
#include <stdlib.h>
#include <metrics/dispersion.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/mr/device/allocator.hpp>
#include <raft/random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
template <typename T>
struct DispersionInputs {
T tolerance;
int dim, clusters;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const DispersionInputs<T>& dims)
{
return os;
}
template <typename T>
class DispersionTest : public ::testing::TestWithParam<DispersionInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<DispersionInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.clusters * params.dim;
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
raft::allocate(data, len);
raft::allocate(counts, params.clusters);
raft::allocate(exp_mean, params.dim);
raft::allocate(act_mean, params.dim);
r.uniform(data, len, (T)-1.0, (T)1.0, stream);
r.uniformInt(counts, params.clusters, 1, 100, stream);
std::vector<int> h_counts(params.clusters, 0);
raft::update_host(&(h_counts[0]), counts, params.clusters, stream);
npoints = 0;
for (const auto& val : h_counts) {
npoints += val;
}
actualVal =
dispersion(data, counts, act_mean, params.clusters, npoints, params.dim, allocator, stream);
expectedVal = T(0);
std::vector<T> h_data(len, T(0));
raft::update_host(&(h_data[0]), data, len, stream);
std::vector<T> mean(params.dim, T(0));
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
mean[j] += h_data[i * params.dim + j] * T(h_counts[i]);
}
}
for (int i = 0; i < params.dim; ++i) {
mean[i] /= T(npoints);
}
raft::update_device(exp_mean, &(mean[0]), params.dim, stream);
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
auto diff = h_data[i * params.dim + j] - mean[j];
expectedVal += diff * diff * T(h_counts[i]);
}
}
expectedVal = sqrt(expectedVal);
CUDA_CHECK(hipStreamSynchronize(stream));
}
void TearDown() override
{
CUDA_CHECK(hipStreamDestroy(stream));
CUDA_CHECK(hipFree(data));
CUDA_CHECK(hipFree(counts));
CUDA_CHECK(hipFree(exp_mean));
CUDA_CHECK(hipFree(act_mean));
}
protected:
DispersionInputs<T> params;
T *data, *exp_mean, *act_mean;
int* counts;
hipStream_t stream;
int npoints;
std::shared_ptr<raft::mr::device::allocator> allocator;
T expectedVal, actualVal;
};
const std::vector<DispersionInputs<float>> inputsf = {
{0.001f, 10, 1000, 1234ULL}, {0.001f, 100, 100, 1234ULL}, {0.001f, 1000, 1000, 1234ULL}};
typedef DispersionTest<float> DispersionTestF;
TEST_P(DispersionTestF, Result)
{
auto eq = raft::CompareApprox<float>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestF, ::testing::ValuesIn(inputsf));
const std::vector<DispersionInputs<double>> inputsd = {
{0.001, 10, 1000, 1234ULL}, {0.001, 100, 100, 1234ULL}, {0.001, 1000, 1000, 1234ULL}};
typedef DispersionTest<double> DispersionTestD;
TEST_P(DispersionTestD, Result)
{
auto eq = raft::CompareApprox<double>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestD, ::testing::ValuesIn(inputsd));
} // end namespace Metrics
} // end namespace MLCommon
| 317f6d3ea25eeb505296364f6925fd68a0e2b52e.cu | /*
* Copyright (c) 2019-2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <raft/cudart_utils.h>
#include <stdio.h>
#include <stdlib.h>
#include <metrics/dispersion.cuh>
#include <raft/cuda_utils.cuh>
#include <raft/mr/device/allocator.hpp>
#include <raft/random/rng.cuh>
#include <vector>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
template <typename T>
struct DispersionInputs {
T tolerance;
int dim, clusters;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const DispersionInputs<T>& dims)
{
return os;
}
template <typename T>
class DispersionTest : public ::testing::TestWithParam<DispersionInputs<T>> {
protected:
void SetUp() override
{
params = ::testing::TestWithParam<DispersionInputs<T>>::GetParam();
raft::random::Rng r(params.seed);
int len = params.clusters * params.dim;
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new raft::mr::device::default_allocator);
raft::allocate(data, len);
raft::allocate(counts, params.clusters);
raft::allocate(exp_mean, params.dim);
raft::allocate(act_mean, params.dim);
r.uniform(data, len, (T)-1.0, (T)1.0, stream);
r.uniformInt(counts, params.clusters, 1, 100, stream);
std::vector<int> h_counts(params.clusters, 0);
raft::update_host(&(h_counts[0]), counts, params.clusters, stream);
npoints = 0;
for (const auto& val : h_counts) {
npoints += val;
}
actualVal =
dispersion(data, counts, act_mean, params.clusters, npoints, params.dim, allocator, stream);
expectedVal = T(0);
std::vector<T> h_data(len, T(0));
raft::update_host(&(h_data[0]), data, len, stream);
std::vector<T> mean(params.dim, T(0));
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
mean[j] += h_data[i * params.dim + j] * T(h_counts[i]);
}
}
for (int i = 0; i < params.dim; ++i) {
mean[i] /= T(npoints);
}
raft::update_device(exp_mean, &(mean[0]), params.dim, stream);
for (int i = 0; i < params.clusters; ++i) {
for (int j = 0; j < params.dim; ++j) {
auto diff = h_data[i * params.dim + j] - mean[j];
expectedVal += diff * diff * T(h_counts[i]);
}
}
expectedVal = sqrt(expectedVal);
CUDA_CHECK(cudaStreamSynchronize(stream));
}
void TearDown() override
{
CUDA_CHECK(cudaStreamDestroy(stream));
CUDA_CHECK(cudaFree(data));
CUDA_CHECK(cudaFree(counts));
CUDA_CHECK(cudaFree(exp_mean));
CUDA_CHECK(cudaFree(act_mean));
}
protected:
DispersionInputs<T> params;
T *data, *exp_mean, *act_mean;
int* counts;
cudaStream_t stream;
int npoints;
std::shared_ptr<raft::mr::device::allocator> allocator;
T expectedVal, actualVal;
};
const std::vector<DispersionInputs<float>> inputsf = {
{0.001f, 10, 1000, 1234ULL}, {0.001f, 100, 100, 1234ULL}, {0.001f, 1000, 1000, 1234ULL}};
typedef DispersionTest<float> DispersionTestF;
TEST_P(DispersionTestF, Result)
{
auto eq = raft::CompareApprox<float>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestF, ::testing::ValuesIn(inputsf));
const std::vector<DispersionInputs<double>> inputsd = {
{0.001, 10, 1000, 1234ULL}, {0.001, 100, 100, 1234ULL}, {0.001, 1000, 1000, 1234ULL}};
typedef DispersionTest<double> DispersionTestD;
TEST_P(DispersionTestD, Result)
{
auto eq = raft::CompareApprox<double>(params.tolerance);
ASSERT_TRUE(devArrMatch(exp_mean, act_mean, params.dim, eq));
ASSERT_TRUE(match(expectedVal, actualVal, eq));
}
INSTANTIATE_TEST_CASE_P(DispersionTests, DispersionTestD, ::testing::ValuesIn(inputsd));
} // end namespace Metrics
} // end namespace MLCommon
|
8f24ac3ba9899f70dfe89991d0fecb81ebea53c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// DeconvSingleInputExecution.cpp
// MNN
//
// Created by MNN on 2022/03/04.
// Copyright 2018, Alibaba Group Holding Limited
//
#include "DeconvSingleInputExecution.hpp"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
__global__ void DeconvKernelReorder(const float* B, half* BP, int kw, int kh, int ic, int oc, int icPack) {
int kernelCount = kw * kh;
int e = oc * kernelCount;
int l = ic;
int eDiv = UP_DIV(e, MATMULPACK);
int eAlign = eDiv * MATMULPACK;
int lDiv = UP_DIV(l, icPack);
int lAlign = lDiv * icPack;
int maxCount = eAlign * lAlign;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int lR = indexO % icPack;
int tmp = indexO / icPack;
int eR = tmp % MATMULPACK;
int tmp2 = tmp / MATMULPACK;
int lC = tmp2 % lDiv;
int eC = tmp2 / lDiv;
half* dst = BP + indexO;
int sL = lC * icPack + lR;//ic_idx
int sE = eC * MATMULPACK + eR;
if (sL >= ic) {
*dst = 0.0;
continue;
}
int oEC = sE / (kernelCount);//oc_idx
int oEk = sE % kernelCount;//khw_idx
if (sE >= e) {
*dst = 0.0;
continue;
}
const float* src = B + sL * kernelCount * oc + oEk + oEC * kernelCount;
*dst = *src;
}
}
template<typename T>
__global__ void DeconvInputRerange(const int count,
const InputReorderParameter* param,
const T* Inp,
__half* InpRe
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
int l = 16 * param->lpack_size;
int h = 16 * param->hpack_size;
int lIndex = i % l;
int hIndex = i / l;
int lU = lIndex / 16;
int lR = lIndex % 16;
int hU = hIndex / 16;
int hR = hIndex % 16;
__half* dst = InpRe + hU * param->lpack_size * 16 * 16 + lU * 16 * 16 + lR + hR * 16;
if(hIndex >= param->h_size || lIndex >= param->l_size) {
dst[0] = (__half)0.0;
break;
}
const int channel_pack = ((param->l_size + 7) / 8) * 8;
T value = Inp[hIndex * channel_pack + lIndex];
dst[0] = (half)value;
}
}
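// Col2Im is the adjoint of im2col: each output pixel (b, h, w, c) accumulates
// every column entry whose (kernel offset, column position) pair maps onto it,
// plus the bias. Note: the `break` taken on channel-padding lanes assumes the
// launch covers all n indices in a single pass; a smaller grid would end the
// grid-stride loop early.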
template <typename Dtype>
__global__ void Col2Im(const int n, const Dtype* data_col,
const int batch, const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
const float* bias, Dtype* data_im
) {
const int channel_pack = ((channels+7) / 8) * 8;
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
Dtype val = 0;
const int c_p = index % channel_pack;
const int idx_tmp = index / channel_pack;
const int b_im = idx_tmp / (width * height);
const int hw = idx_tmp % (width * height);
const int c_im = c_p;
const int w_im = hw % width + pad_w;
const int h_im = hw / width + pad_h;
if(c_im >= channels) {
data_im[index] = val;
break;
}
if(nullptr != bias) {
val += (Dtype)bias[c_im];
}
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int data_col_index = ((((c_im * kernel_h + h_k) * kernel_w + w_k) * batch + b_im) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
DeconvSingleInputExecution::Resource::Resource(Backend* bn, const MNN::Op* op) {
mBackend = bn;
auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
auto conv = op->main_as_Convolution2D();
auto common = conv->common();
mKernelInfo.kernelX = common->kernelX();
mKernelInfo.kernelY = common->kernelY();
mKernelInfo.groups = common->group();
mKernelInfo.strideX = common->strideX();
mKernelInfo.strideY = common->strideY();
mKernelInfo.dilateX = common->dilateX();
mKernelInfo.dilateY = common->dilateY();
mKernelInfo.activationType = common->relu() ? 1 : (common->relu6() ? 2 : 0);
//weight host->device
const float* filterDataPtr = nullptr;
int weightSize = 0;
std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
mKernelInfo.kernelN = common->outputCount();
mKernelInfo.kernelC = weightSize / mKernelInfo.kernelN / mKernelInfo.kernelX / mKernelInfo.kernelY;
MatMulParam param;
int e = mKernelInfo.kernelN * mKernelInfo.kernelX * mKernelInfo.kernelY;
int l = mKernelInfo.kernelC;
int h = 0;
param.elh[0] = e;
param.elh[1] = l;
param.elh[2] = h;
param.elhPack[0] = UP_DIV(e, 16);
param.elhPack[1] = UP_DIV(l, 16);
param.elhPack[2] = UP_DIV(h, 16);
param.aStride[0] = 1;
param.aStride[1] = e;
param.aStride[2] = 0;
param.bStride[0] = 0;
param.bStride[1] = h;
param.bStride[2] = 1;
auto gpuParam = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(MatMulParam));
auto tempCacheBuffer = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(weightSize * sizeof(float));
float* cacheWeight = (float*)((uint8_t*)tempCacheBuffer.first + tempCacheBuffer.second);
runtime->memcpy(cacheWeight, filterDataPtr, weightSize * sizeof(float), MNNMemcpyHostToDevice);
runtime->memcpy((uint8_t*)gpuParam.first + gpuParam.second, ¶m, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Reorder weight
weightTensor.reset(Tensor::createDevice<int16_t>({param.elhPack[0] * param.elhPack[1] * (MATMULPACK * MATMULPACK)}));
bn->onAcquireBuffer(weightTensor.get(), Backend::STATIC);
mFilter = (void *)weightTensor.get()->buffer().device;
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
hipLaunchKernelGGL(( DeconvKernelReorder), dim3(cores), dim3(threadNumbers), 0, 0, (float*)cacheWeight, (half*)mFilter,
mKernelInfo.kernelX, mKernelInfo.kernelY, mKernelInfo.kernelC, mKernelInfo.kernelN, MATMULPACK);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempCacheBuffer);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(gpuParam);
// Copy Bias
int biasSize = conv->bias()->size();
biasTensor.reset(Tensor::createDevice<float>({biasSize}));
bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
mBias = (void *)biasTensor.get()->buffer().device;
cuda_check(hipMemcpy(mBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), hipMemcpyHostToDevice));
}
DeconvSingleInputExecution::Resource::~Resource() {
// Do nothing
}
DeconvSingleInputExecution::DeconvSingleInputExecution(Backend* backend, const MNN::Op* op, std::shared_ptr<Resource> res) : Execution(backend), mOp(op) {
mResource = res;
auto runtime = static_cast<CUDABackend*>(backend)->getCUDARuntime();
auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
mGpuMatMulParam = staticPool->alloc(sizeof(MatMulParam));
mGpuCol2ImParam = staticPool->alloc(sizeof(Col2ImParameter));
mGpuInpReorderParam = staticPool->alloc(sizeof(InputReorderParameter));
}
DeconvSingleInputExecution::~DeconvSingleInputExecution() {
auto staticPool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
staticPool->free(mGpuMatMulParam);
staticPool->free(mGpuCol2ImParam);
staticPool->free(mGpuInpReorderParam);
}
bool DeconvSingleInputExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
if (!mValid) {
return false;
}
if (nullptr == dst) {
return true;
}
auto dstExe = new DeconvSingleInputExecution(bn, op, mResource);
*dst = dstExe;
return true;
}
ErrorCode DeconvSingleInputExecution::onResize(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto input = inputs[0], output = outputs[0];
const int UNIT = 1;
auto convCommon = mOp->main_as_Convolution2D()->common();
// Input Rerange Param
mInpReorderParameter.hw_size = input->height() * input->width();
mInpReorderParameter.ic_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.ib_stride = mInpReorderParameter.hw_size * input->channel();
mInpReorderParameter.oc_stride = mInpReorderParameter.ib_stride;
mInpReorderParameter.ob_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.l_size = input->channel();
mInpReorderParameter.h_size = input->batch() * mInpReorderParameter.hw_size;
mInpReorderParameter.lpack_size = UP_DIV(mInpReorderParameter.l_size, 16);
mInpReorderParameter.hpack_size = UP_DIV(mInpReorderParameter.h_size, 16);
runtime->memcpy((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second, &mInpReorderParameter, sizeof(InputReorderParameter), MNNMemcpyHostToDevice);
// Col2Im Param
auto pad = ConvolutionCommon::convolutionTransposePad(input, output, mOp->main_as_Convolution2D()->common());
mCol2ImParamter.dilateX = convCommon->dilateX();
mCol2ImParamter.dilateY = convCommon->dilateY();
mCol2ImParamter.strideX = convCommon->strideX();
mCol2ImParamter.strideY = convCommon->strideY();
mCol2ImParamter.ic = input->channel();
mCol2ImParamter.oc = output->channel();
mCol2ImParamter.kernelX = convCommon->kernelX();
mCol2ImParamter.kernelY = convCommon->kernelY();
mCol2ImParamter.padX = pad.first;
mCol2ImParamter.padY = pad.second;
mCol2ImParamter.ih = input->height();
mCol2ImParamter.iw = input->width();
mCol2ImParamter.oh = output->height();
mCol2ImParamter.ow = output->width();
mCol2ImParamter.ob = output->batch();
runtime->memcpy((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second, &mCol2ImParamter, sizeof(Col2ImParameter), MNNMemcpyHostToDevice);
// Matmul Param
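    // Packed GEMM view implied by the strides below: A = reordered weights
    // [e = oc*kh*kw, l = ic], B = reranged input [l, h = batch*ih*iw], and
    // C = A*B is the [e, h] column buffer that the Col2Im kernel scatters back.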
int e = output->channel() * mCol2ImParamter.kernelX * mCol2ImParamter.kernelY;
int l = input->channel();
int h = input->height() * input->width() * output->batch();
mMatMulParam.elh[0] = e;
mMatMulParam.elh[1] = l;
mMatMulParam.elh[2] = h;
mMatMulParam.elhPack[0] = UP_DIV(e, 16);
mMatMulParam.elhPack[1] = UP_DIV(l, 16);
mMatMulParam.elhPack[2] = UP_DIV(h, 16);
mMatMulParam.bStride[0] = 0;
mMatMulParam.bStride[1] = input->height() * input->width();
mMatMulParam.bStride[2] = 1;
mMatMulParam.cStride[0] = h;
mMatMulParam.cStride[1] = 0;
mMatMulParam.cStride[2] = 1;
mMatMulParam.aPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.aPStride[1] = 256;
mMatMulParam.aPStride[2] = 16;
mMatMulParam.bPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.bPStride[1] = 256;
mMatMulParam.bPStride[2] = 16;
if (convCommon->relu()) {
mMatMulParam.minValue = 0.0f;
}
if (convCommon->relu6()) {
mMatMulParam.minValue = 0.0f;
mMatMulParam.maxValue = 6.0f;
}
runtime->memcpy((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second, &mMatMulParam, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Alloc temp cuda memory
auto pool = static_cast<CUDABackend*>(backend())->getBufferPool();
auto buffer1 = pool->alloc(sizeof(float) * mMatMulParam.elhPack[0] * mMatMulParam.elhPack[2]* MATMULPACK * MATMULPACK);
auto buffer2 = pool->alloc(sizeof(__half) * mMatMulParam.elhPack[1] * mMatMulParam.elhPack[2] * MATMULPACK * MATMULPACK);
mIm2ColBuffer = (float*)((uint8_t*)buffer1.first + buffer1.second);
mInputBuffer = (__half*)((uint8_t*)buffer2.first + buffer2.second);
pool->free(buffer2);
pool->free(buffer1);
return NO_ERROR;
}
ErrorCode DeconvSingleInputExecution::onExecute(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
//MNN_PRINT("cuda convSingleInput onExecute in, inputsize:%d %d\n", (int)inputs.size(), workspace_size_);
MNN_ASSERT(inputs.size() == 1);
MNN_ASSERT(outputs.size() == 1);
auto bytes = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]);
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
const void *input_addr = (const void*)inputs[0]->deviceId();
const void *filter_addr = mResource->mFilter;
const void *bias_addr = mResource->mBias;
void *output_addr = (void*)outputs[0]->deviceId();
auto gpuInpReorder = (const InputReorderParameter*)((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second);
auto gpuCol2Im = (const Col2ImParameter*)((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second);
auto gpuMatMul = (const MatMulParam*)((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second);
const int rerangeCount = mInpReorderParameter.lpack_size * mInpReorderParameter.hpack_size * 16 * 16;
int inp_block_num = runtime->blocks_num(rerangeCount);
int inp_thread_num = runtime->threads_num();
// Do input Rerange
//runtime->memset(mInputBuffer, 0, mMatMulParam.elhPack[2] * mMatMulParam.elhPack[1] * MATMULPACK * MATMULPACK * sizeof(__half));
if(bytes == 4) {
hipLaunchKernelGGL(( DeconvInputRerange), dim3(inp_block_num), dim3(inp_thread_num), 0, 0, rerangeCount, gpuInpReorder, (const float*)input_addr, mInputBuffer);
// Do Gemm operation
GemmPackedMain(runtime, &mMatMulParam, gpuMatMul, (float*)mIm2ColBuffer, (const half*)filter_addr, (const half*)mInputBuffer, nullptr, bytes, false, false);
} else {
hipLaunchKernelGGL(( DeconvInputRerange), dim3(inp_block_num), dim3(inp_thread_num), 0, 0, rerangeCount, gpuInpReorder, (const half*)input_addr, mInputBuffer);
// Do Gemm operation
GemmPackedMain(runtime, &mMatMulParam, gpuMatMul, (half*)mIm2ColBuffer, (const half*)filter_addr, (const half*)mInputBuffer, nullptr, bytes, false, false);
}
// Do Col2Im trans
int height_col = mCol2ImParamter.ih;
int width_col = mCol2ImParamter.iw;
int num_kernels = mCol2ImParamter.ob * UP_DIV(mCol2ImParamter.oc, 8) * mCol2ImParamter.oh * mCol2ImParamter.ow * 8;
int col2im_block_num = runtime->blocks_num(num_kernels);
int col2im_thread_num = runtime->threads_num();
// printf("col2im:%d, %d-%d-%d-%d-%d-%d\n %d-%d-%d-%d-%d-%d\n %d-%d\n", mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc, \
// mCol2ImParamter.ih, mCol2ImParamter.iw, mCol2ImParamter.ic, \
// mCol2ImParamter.padX, mCol2ImParamter.padY, mCol2ImParamter.kernelX, mCol2ImParamter.kernelY, mCol2ImParamter.strideX, mCol2ImParamter.strideY, \
// col2im_block_num, col2im_thread_num);
if(bytes == 4) {
hipLaunchKernelGGL(( Col2Im<float>), dim3(col2im_block_num), dim3(col2im_thread_num), 0, 0,
num_kernels, (const float*)mIm2ColBuffer, mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc,
mCol2ImParamter.kernelY, mCol2ImParamter.kernelX, mCol2ImParamter.padY, mCol2ImParamter.padX,
mCol2ImParamter.strideY, mCol2ImParamter.strideX, mCol2ImParamter.dilateY, mCol2ImParamter.dilateX,
height_col, width_col, (const float*)bias_addr, (float *)output_addr);
} else {
hipLaunchKernelGGL(( Col2Im<half>), dim3(col2im_block_num), dim3(col2im_thread_num), 0, 0,
num_kernels, (const half*)mIm2ColBuffer, mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc,
mCol2ImParamter.kernelY, mCol2ImParamter.kernelX, mCol2ImParamter.padY, mCol2ImParamter.padX,
mCol2ImParamter.strideY, mCol2ImParamter.strideX, mCol2ImParamter.dilateY, mCol2ImParamter.dilateX,
height_col, width_col, (const float*)bias_addr, (half *)output_addr);
}
return NO_ERROR;
}
class CUDADeconvolutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (nullptr != op->main_as_Convolution2D()->quanParameter()) {
auto quan = op->main_as_Convolution2D()->quanParameter();
if (1 == quan->type() || 2 == quan->type()) {
MNN_PRINT("cuda Deconv quant type 1 or 2 not support\n");
return nullptr;
}
}
if(inputs.size() == 3) {
MNN_PRINT("Deconv inputs size:3 not support\n");
return nullptr;
} else if(inputs.size() == 1) {
std::shared_ptr<DeconvSingleInputExecution::Resource> resource(new DeconvSingleInputExecution::Resource(backend, op));
return new DeconvSingleInputExecution(backend, op, resource);
} else {
MNN_PRINT("Deconv inputs size:%d not support", (int)inputs.size());
return nullptr;
}
}
};
CUDACreatorRegister<CUDADeconvolutionCreator> __DeConvExecution(OpType_Deconvolution);
}// namespace CUDA
}// namespace MNN
| 8f24ac3ba9899f70dfe89991d0fecb81ebea53c4.cu | //
// DeconvSingleInputExecution.cpp
// MNN
//
// Created by MNN on 2022/03/04.
// Copyright © 2018, Alibaba Group Holding Limited
//
#include "DeconvSingleInputExecution.hpp"
#include "MNNCUDADefine.hpp"
#include "MNNCUDAFunction.cuh"
namespace MNN {
namespace CUDA {
__global__ void DeconvKernelReorder(const float* B, half* BP, int kw, int kh, int ic, int oc, int icPack) {
int kernelCount = kw * kh;
int e = oc * kernelCount;
int l = ic;
int eDiv = UP_DIV(e, MATMULPACK);
int eAlign = eDiv * MATMULPACK;
int lDiv = UP_DIV(l, icPack);
int lAlign = lDiv * icPack;
int maxCount = eAlign * lAlign;
for (size_t indexO = blockIdx.x * blockDim.x + threadIdx.x; indexO < maxCount; indexO += blockDim.x * gridDim.x) {
int lR = indexO % icPack;
int tmp = indexO / icPack;
int eR = tmp % MATMULPACK;
int tmp2 = tmp / MATMULPACK;
int lC = tmp2 % lDiv;
int eC = tmp2 / lDiv;
half* dst = BP + indexO;
int sL = lC * icPack + lR;//ic_idx
int sE = eC * MATMULPACK + eR;
if (sL >= ic) {
*dst = 0.0;
continue;
}
int oEC = sE / (kernelCount);//oc_idx
int oEk = sE % kernelCount;//khw_idx
if (sE >= e) {
*dst = 0.0;
continue;
}
const float* src = B + sL * kernelCount * oc + oEk + oEC * kernelCount;
*dst = *src;
}
}
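// Repacks the channel-padded input feature map into 16x16 half tiles in the
// (h, l) tile-major order expected by the packed GEMM; out-of-range lanes are
// zero-filled.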
template<typename T>
__global__ void DeconvInputRerange(const int count,
const InputReorderParameter* param,
const T* Inp,
__half* InpRe
) {
for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) {
int l = 16 * param->lpack_size;
int h = 16 * param->hpack_size;
int lIndex = i % l;
int hIndex = i / l;
int lU = lIndex / 16;
int lR = lIndex % 16;
int hU = hIndex / 16;
int hR = hIndex % 16;
__half* dst = InpRe + hU * param->lpack_size * 16 * 16 + lU * 16 * 16 + lR + hR * 16;
if(hIndex >= param->h_size || lIndex >= param->l_size) {
dst[0] = (__half)0.0;
break;
}
const int channel_pack = ((param->l_size + 7) / 8) * 8;
T value = Inp[hIndex * channel_pack + lIndex];
dst[0] = (half)value;
}
}
template <typename Dtype>
__global__ void Col2Im(const int n, const Dtype* data_col,
const int batch, const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int pad_h, const int pad_w,
const int stride_h, const int stride_w,
const int dilation_h, const int dilation_w,
const int height_col, const int width_col,
const float* bias, Dtype* data_im
) {
const int channel_pack = ((channels+7) / 8) * 8;
for (size_t index = blockIdx.x * blockDim.x + threadIdx.x; index < (n); index += blockDim.x * gridDim.x) {
Dtype val = 0;
const int c_p = index % channel_pack;
const int idx_tmp = index / channel_pack;
const int b_im = idx_tmp / (width * height);
const int hw = idx_tmp % (width * height);
const int c_im = c_p;
const int w_im = hw % width + pad_w;
const int h_im = hw / width + pad_h;
if(c_im >= channels) {
data_im[index] = val;
break;
}
if(nullptr != bias) {
val += (Dtype)bias[c_im];
}
int kernel_extent_w = (kernel_w - 1) * dilation_w + 1;
int kernel_extent_h = (kernel_h - 1) * dilation_h + 1;
// compute the start and end of the output
const int w_col_start =
(w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1;
const int w_col_end = min(w_im / stride_w + 1, width_col);
const int h_col_start =
(h_im < kernel_extent_h) ? 0 : (h_im - kernel_extent_h) / stride_h + 1;
const int h_col_end = min(h_im / stride_h + 1, height_col);
// TODO: use LCM of stride and dilation to avoid unnecessary loops
for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) {
for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) {
int h_k = (h_im - h_col * stride_h);
int w_k = (w_im - w_col * stride_w);
if (h_k % dilation_h == 0 && w_k % dilation_w == 0) {
h_k /= dilation_h;
w_k /= dilation_w;
const int data_col_index = ((((c_im * kernel_h + h_k) * kernel_w + w_k) * batch + b_im) *
height_col + h_col) * width_col + w_col;
val += data_col[data_col_index];
}
}
}
data_im[index] = val;
}
}
DeconvSingleInputExecution::Resource::Resource(Backend* bn, const MNN::Op* op) {
mBackend = bn;
auto runtime = static_cast<CUDABackend*>(bn)->getCUDARuntime();
auto conv = op->main_as_Convolution2D();
auto common = conv->common();
mKernelInfo.kernelX = common->kernelX();
mKernelInfo.kernelY = common->kernelY();
mKernelInfo.groups = common->group();
mKernelInfo.strideX = common->strideX();
mKernelInfo.strideY = common->strideY();
mKernelInfo.dilateX = common->dilateX();
mKernelInfo.dilateY = common->dilateY();
mKernelInfo.activationType = common->relu() ? 1 : (common->relu6() ? 2 : 0);
//weight host->device
const float* filterDataPtr = nullptr;
int weightSize = 0;
std::shared_ptr<ConvolutionCommon::Int8Common> quanCommon;
ConvolutionCommon::getConvParameters(&quanCommon, conv, &filterDataPtr, &weightSize);
mKernelInfo.kernelN = common->outputCount();
mKernelInfo.kernelC = weightSize / mKernelInfo.kernelN / mKernelInfo.kernelX / mKernelInfo.kernelY;
MatMulParam param;
int e = mKernelInfo.kernelN * mKernelInfo.kernelX * mKernelInfo.kernelY;
int l = mKernelInfo.kernelC;
int h = 0;
param.elh[0] = e;
param.elh[1] = l;
param.elh[2] = h;
param.elhPack[0] = UP_DIV(e, 16);
param.elhPack[1] = UP_DIV(l, 16);
param.elhPack[2] = UP_DIV(h, 16);
param.aStride[0] = 1;
param.aStride[1] = e;
param.aStride[2] = 0;
param.bStride[0] = 0;
param.bStride[1] = h;
param.bStride[2] = 1;
auto gpuParam = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(sizeof(MatMulParam));
auto tempCacheBuffer = static_cast<CUDABackend*>(bn)->getStaticBufferPool()->alloc(weightSize * sizeof(float));
float* cacheWeight = (float*)((uint8_t*)tempCacheBuffer.first + tempCacheBuffer.second);
runtime->memcpy(cacheWeight, filterDataPtr, weightSize * sizeof(float), MNNMemcpyHostToDevice);
runtime->memcpy((uint8_t*)gpuParam.first + gpuParam.second, ¶m, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Reorder weight
weightTensor.reset(Tensor::createDevice<int16_t>({param.elhPack[0] * param.elhPack[1] * (MATMULPACK * MATMULPACK)}));
bn->onAcquireBuffer(weightTensor.get(), Backend::STATIC);
mFilter = (void *)weightTensor.get()->buffer().device;
auto& prop = runtime->prop();
int cores = prop.multiProcessorCount;
int threadNumbers = prop.maxThreadsPerBlock;
DeconvKernelReorder<<<cores, threadNumbers>>>((float*)cacheWeight, (half*)mFilter,
mKernelInfo.kernelX, mKernelInfo.kernelY, mKernelInfo.kernelC, mKernelInfo.kernelN, MATMULPACK);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(tempCacheBuffer);
static_cast<CUDABackend*>(bn)->getStaticBufferPool()->free(gpuParam);
// Copy Bias
int biasSize = conv->bias()->size();
biasTensor.reset(Tensor::createDevice<float>({biasSize}));
bn->onAcquireBuffer(biasTensor.get(), Backend::STATIC);
mBias = (void *)biasTensor.get()->buffer().device;
cuda_check(cudaMemcpy(mBias, conv->bias()->data(), conv->bias()->size()*sizeof(float), cudaMemcpyHostToDevice));
}
DeconvSingleInputExecution::Resource::~Resource() {
// Do nothing
}
DeconvSingleInputExecution::DeconvSingleInputExecution(Backend* backend, const MNN::Op* op, std::shared_ptr<Resource> res) : Execution(backend), mOp(op) {
mResource = res;
auto runtime = static_cast<CUDABackend*>(backend)->getCUDARuntime();
auto staticPool = static_cast<CUDABackend*>(backend)->getStaticBufferPool();
mGpuMatMulParam = staticPool->alloc(sizeof(MatMulParam));
mGpuCol2ImParam = staticPool->alloc(sizeof(Col2ImParameter));
mGpuInpReorderParam = staticPool->alloc(sizeof(InputReorderParameter));
}
DeconvSingleInputExecution::~DeconvSingleInputExecution() {
auto staticPool = static_cast<CUDABackend*>(backend())->getStaticBufferPool();
staticPool->free(mGpuMatMulParam);
staticPool->free(mGpuCol2ImParam);
staticPool->free(mGpuInpReorderParam);
}
bool DeconvSingleInputExecution::onClone(Backend* bn, const Op* op, Execution** dst) {
if (!mValid) {
return false;
}
if (nullptr == dst) {
return true;
}
auto dstExe = new DeconvSingleInputExecution(bn, op, mResource);
*dst = dstExe;
return true;
}
ErrorCode DeconvSingleInputExecution::onResize(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
auto input = inputs[0], output = outputs[0];
const int UNIT = 1;
auto convCommon = mOp->main_as_Convolution2D()->common();
// Input Rerange Param
mInpReorderParameter.hw_size = input->height() * input->width();
mInpReorderParameter.ic_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.ib_stride = mInpReorderParameter.hw_size * input->channel();
mInpReorderParameter.oc_stride = mInpReorderParameter.ib_stride;
mInpReorderParameter.ob_stride = mInpReorderParameter.hw_size;
mInpReorderParameter.l_size = input->channel();
mInpReorderParameter.h_size = input->batch() * mInpReorderParameter.hw_size;
mInpReorderParameter.lpack_size = UP_DIV(mInpReorderParameter.l_size, 16);
mInpReorderParameter.hpack_size = UP_DIV(mInpReorderParameter.h_size, 16);
runtime->memcpy((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second, &mInpReorderParameter, sizeof(InputReorderParameter), MNNMemcpyHostToDevice);
// Col2Im Param
auto pad = ConvolutionCommon::convolutionTransposePad(input, output, mOp->main_as_Convolution2D()->common());
mCol2ImParamter.dilateX = convCommon->dilateX();
mCol2ImParamter.dilateY = convCommon->dilateY();
mCol2ImParamter.strideX = convCommon->strideX();
mCol2ImParamter.strideY = convCommon->strideY();
mCol2ImParamter.ic = input->channel();
mCol2ImParamter.oc = output->channel();
mCol2ImParamter.kernelX = convCommon->kernelX();
mCol2ImParamter.kernelY = convCommon->kernelY();
mCol2ImParamter.padX = pad.first;
mCol2ImParamter.padY = pad.second;
mCol2ImParamter.ih = input->height();
mCol2ImParamter.iw = input->width();
mCol2ImParamter.oh = output->height();
mCol2ImParamter.ow = output->width();
mCol2ImParamter.ob = output->batch();
runtime->memcpy((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second, &mCol2ImParamter, sizeof(Col2ImParameter), MNNMemcpyHostToDevice);
// Matmul Param
int e = output->channel() * mCol2ImParamter.kernelX * mCol2ImParamter.kernelY;
int l = input->channel();
int h = input->height() * input->width() * output->batch();
mMatMulParam.elh[0] = e;
mMatMulParam.elh[1] = l;
mMatMulParam.elh[2] = h;
mMatMulParam.elhPack[0] = UP_DIV(e, 16);
mMatMulParam.elhPack[1] = UP_DIV(l, 16);
mMatMulParam.elhPack[2] = UP_DIV(h, 16);
mMatMulParam.bStride[0] = 0;
mMatMulParam.bStride[1] = input->height() * input->width();
mMatMulParam.bStride[2] = 1;
mMatMulParam.cStride[0] = h;
mMatMulParam.cStride[1] = 0;
mMatMulParam.cStride[2] = 1;
mMatMulParam.aPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.aPStride[1] = 256;
mMatMulParam.aPStride[2] = 16;
mMatMulParam.bPStride[0] = 256 * mMatMulParam.elhPack[1];
mMatMulParam.bPStride[1] = 256;
mMatMulParam.bPStride[2] = 16;
if (convCommon->relu()) {
mMatMulParam.minValue = 0.0f;
}
if (convCommon->relu6()) {
mMatMulParam.minValue = 0.0f;
mMatMulParam.maxValue = 6.0f;
}
runtime->memcpy((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second, &mMatMulParam, sizeof(MatMulParam), MNNMemcpyHostToDevice);
// Alloc temp cuda memory
auto pool = static_cast<CUDABackend*>(backend())->getBufferPool();
auto buffer1 = pool->alloc(sizeof(float) * mMatMulParam.elhPack[0] * mMatMulParam.elhPack[2]* MATMULPACK * MATMULPACK);
auto buffer2 = pool->alloc(sizeof(__half) * mMatMulParam.elhPack[1] * mMatMulParam.elhPack[2] * MATMULPACK * MATMULPACK);
mIm2ColBuffer = (float*)((uint8_t*)buffer1.first + buffer1.second);
mInputBuffer = (__half*)((uint8_t*)buffer2.first + buffer2.second);
pool->free(buffer2);
pool->free(buffer1);
return NO_ERROR;
}
ErrorCode DeconvSingleInputExecution::onExecute(const std::vector<Tensor*> &inputs, const std::vector<Tensor*> &outputs) {
//MNN_PRINT("cuda convSingleInput onExecute in, inputsize:%d %d\n", (int)inputs.size(), workspace_size_);
MNN_ASSERT(inputs.size() == 1);
MNN_ASSERT(outputs.size() == 1);
auto bytes = static_cast<CUDABackend*>(backend())->getBytes(inputs[0]);
auto runtime = static_cast<CUDABackend*>(backend())->getCUDARuntime();
const void *input_addr = (const void*)inputs[0]->deviceId();
const void *filter_addr = mResource->mFilter;
const void *bias_addr = mResource->mBias;
void *output_addr = (void*)outputs[0]->deviceId();
auto gpuInpReorder = (const InputReorderParameter*)((uint8_t*)mGpuInpReorderParam.first + mGpuInpReorderParam.second);
auto gpuCol2Im = (const Col2ImParameter*)((uint8_t*)mGpuCol2ImParam.first + mGpuCol2ImParam.second);
auto gpuMatMul = (const MatMulParam*)((uint8_t*)mGpuMatMulParam.first + mGpuMatMulParam.second);
const int rerangeCount = mInpReorderParameter.lpack_size * mInpReorderParameter.hpack_size * 16 * 16;
int inp_block_num = runtime->blocks_num(rerangeCount);
int inp_thread_num = runtime->threads_num();
// Do input Rerange
//runtime->memset(mInputBuffer, 0, mMatMulParam.elhPack[2] * mMatMulParam.elhPack[1] * MATMULPACK * MATMULPACK * sizeof(__half));
if(bytes == 4) {
DeconvInputRerange<<<inp_block_num, inp_thread_num>>>(rerangeCount, gpuInpReorder, (const float*)input_addr, mInputBuffer);
// Do Gemm operation
GemmPackedMain(runtime, &mMatMulParam, gpuMatMul, (float*)mIm2ColBuffer, (const half*)filter_addr, (const half*)mInputBuffer, nullptr, bytes, false, false);
} else {
DeconvInputRerange<<<inp_block_num, inp_thread_num>>>(rerangeCount, gpuInpReorder, (const half*)input_addr, mInputBuffer);
// Do Gemm operation
GemmPackedMain(runtime, &mMatMulParam, gpuMatMul, (half*)mIm2ColBuffer, (const half*)filter_addr, (const half*)mInputBuffer, nullptr, bytes, false, false);
}
// Do Col2Im trans
int height_col = mCol2ImParamter.ih;
int width_col = mCol2ImParamter.iw;
int num_kernels = mCol2ImParamter.ob * UP_DIV(mCol2ImParamter.oc, 8) * mCol2ImParamter.oh * mCol2ImParamter.ow * 8;
int col2im_block_num = runtime->blocks_num(num_kernels);
int col2im_thread_num = runtime->threads_num();
// printf("col2im:%d, %d-%d-%d-%d-%d-%d\n %d-%d-%d-%d-%d-%d\n %d-%d\n", mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc, \
// mCol2ImParamter.ih, mCol2ImParamter.iw, mCol2ImParamter.ic, \
// mCol2ImParamter.padX, mCol2ImParamter.padY, mCol2ImParamter.kernelX, mCol2ImParamter.kernelY, mCol2ImParamter.strideX, mCol2ImParamter.strideY, \
// col2im_block_num, col2im_thread_num);
if(bytes == 4) {
Col2Im<float><<<col2im_block_num, col2im_thread_num>>>(
num_kernels, (const float*)mIm2ColBuffer, mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc,
mCol2ImParamter.kernelY, mCol2ImParamter.kernelX, mCol2ImParamter.padY, mCol2ImParamter.padX,
mCol2ImParamter.strideY, mCol2ImParamter.strideX, mCol2ImParamter.dilateY, mCol2ImParamter.dilateX,
height_col, width_col, (const float*)bias_addr, (float *)output_addr);
} else {
Col2Im<half><<<col2im_block_num, col2im_thread_num>>>(
num_kernels, (const half*)mIm2ColBuffer, mCol2ImParamter.ob, mCol2ImParamter.oh, mCol2ImParamter.ow, mCol2ImParamter.oc,
mCol2ImParamter.kernelY, mCol2ImParamter.kernelX, mCol2ImParamter.padY, mCol2ImParamter.padX,
mCol2ImParamter.strideY, mCol2ImParamter.strideX, mCol2ImParamter.dilateY, mCol2ImParamter.dilateX,
height_col, width_col, (const float*)bias_addr, (half *)output_addr);
}
return NO_ERROR;
}
class CUDADeconvolutionCreator : public CUDABackend::Creator {
public:
virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
const MNN::Op* op, Backend* backend) const override {
if (nullptr != op->main_as_Convolution2D()->quanParameter()) {
auto quan = op->main_as_Convolution2D()->quanParameter();
if (1 == quan->type() || 2 == quan->type()) {
MNN_PRINT("cuda Deconv quant type 1 or 2 not support\n");
return nullptr;
}
}
if(inputs.size() == 3) {
MNN_PRINT("Deconv inputs size:3 not support\n");
return nullptr;
} else if(inputs.size() == 1) {
std::shared_ptr<DeconvSingleInputExecution::Resource> resource(new DeconvSingleInputExecution::Resource(backend, op));
return new DeconvSingleInputExecution(backend, op, resource);
} else {
MNN_PRINT("Deconv inputs size:%d not support", (int)inputs.size());
return nullptr;
}
}
};
CUDACreatorRegister<CUDADeconvolutionCreator> __DeConvExecution(OpType_Deconvolution);
}// namespace CUDA
}// namespace MNN
|
3a8445d5b3a604dff144019b989b71c410e08375.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long long representing the current time in microseconds
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
    return (tv.tv_sec * 1000000) + tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(hipError_t e) {
if (e != hipSuccess) {
printf("\nCUDA error: %s\n", hipGetErrorString(e));
exit(1);
}
}
void cuda_print_float_array(float *array_GPU, size_t size) {
//allocate temporary array for printing
float* mem = (float*) malloc(sizeof (float) *size);
//transfer data from device
hipMemcpy(mem, array_GPU, sizeof (float) *size, hipMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
        printf("[%zu]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
 * DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2 ) / 50
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a float representing the sum
********************************/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
float likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((float) (I[ind[index * numOnes + x]] - 100), 2) - pow((float) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
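// Each pixel term rewards intensities near the foreground value (228) and
// penalizes those near the background (100), the two values painted by
// videoSequence(); dividing by 50 scales the sum before it is exponentiated
// in the weight update.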
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
 * returns a float representing a randomly generated number from a uniform distribution with range [0, 1)
******************************/
__device__ float d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((float) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
float randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((float) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a float representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
float randn(int * seed, int index) {
/*Box-Muller algorithm*/
float u = randu(seed, index);
float v = randu(seed, index);
float cosine = cos(2 * PI * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
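// Box-Muller, as used above: with u, v ~ U(0,1),
//   z = sqrt(-2 * ln(u)) * cos(2 * PI * v)
// is a standard normal sample; randn()/d_randn() supply the video noise and
// the particle motion-model noise.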
float test_randn(int * seed, int index) {
    //Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = randu(seed, index);
float v = randu(seed, index);
float cosine = cos(2 * pi * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ float d_randn(int * seed, int index) {
    //Box-Muller algorithm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2 * pi * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparticles
****************************/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles) {
int x;
float sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
                while (middleIndex >= 0 && CDF[middleIndex] == value) // guard the index before reading CDF
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original float version.
* Takes in a float and returns an integer that approximates to that float
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
__device__ float dev_round_float(float value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
        return newValue + 1; // post-increment discarded the +1; round up as documented
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((float) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
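// Note: each thread performs an O(Nparticles) linear scan of the CDF; the
// binary-search helper findIndexBin() above would do the same lookup in
// O(log Nparticles) but is not used here.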
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float* partial_sums, float * CDF, float * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ float u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((float) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((float) (Nparticles));
}
}
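// Systematic resampling: thread 0 draws a single u1 ~ U(0, 1/N) and every
// particle then samples the CDF at u[i] = u1 + i/N, i.e. at evenly spaced
// points with one shared random offset.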
__global__ void sum_kernel(float* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
float sum = 0.0;
int num_blocks = ceil((float) Nparticles / (float) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * xj, float * yj, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, float* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ float buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((float) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_float() to be consistent with roundfloat
indX = dev_round_float(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_float(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
    // tree reduction over this block's weights; buffer[] was zero-filled above,
    // so a trailing partial block contributes zeros (assumes blockDim.x is a power of two)
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a float and returns an integer that approximates to that float
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
float roundfloat(float value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
        return newValue + 1; // post-increment discarded the +1; round up as documented
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
float distance = sqrt(pow((float) (x - radius + 1), 2) + pow((float) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
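// Example: for the radius of 5 used below, the 9x9 mask keeps cells whose
// distance from the center is < 5, so the four corners (distance
// sqrt(32) ~= 5.66) are left 0 while all axis-aligned cells are 1.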
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
float distance = sqrt(pow((float) (x - posX), 2) + pow((float) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundfloat(IszY / 2.0);
int y0 = (int) roundfloat(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(float * CDF, int lengthCDF, float value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
float xe = roundfloat(IszY / 2.0);
float ye = roundfloat(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int *objxy;
hipHostMalloc((void **) &objxy, countOnes * 2 * sizeof (int), hipHostMallocWriteCombined);
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float *weights;
hipHostMalloc((void **) &weights, sizeof (float) *Nparticles, hipHostMallocWriteCombined);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((float) (Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *) malloc(sizeof (float) *Nparticles);
float * arrayX = (float *) malloc(sizeof (float) *Nparticles);
float * arrayY = (float *) malloc(sizeof (float) *Nparticles);
float * xj;
float * yj;
hipHostMalloc((void **) &xj, sizeof (float) *Nparticles, hipHostMallocWriteCombined);
hipHostMalloc((void **) &yj, sizeof (float) *Nparticles, hipHostMallocWriteCombined);
float * CDF = (float *) malloc(sizeof (float) *Nparticles);
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
float * u = (float *) malloc(sizeof (float) *Nparticles);
float * u_GPU;
int * seed_GPU;
float* partial_sums;
//CUDA memory allocation
check_error(hipMalloc((void **) &arrayX_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &arrayY_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &xj_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &yj_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &CDF_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &u_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &likelihood_GPU, sizeof (float) *Nparticles));
//set likelihood to zero
check_error(hipMemset((void *) likelihood_GPU, 0, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &weights_GPU, sizeof (float) *Nparticles));
check_error(hipMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(hipMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(hipMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(hipMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(hipMalloc((void **) &partial_sums, sizeof (float) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
hipStream_t stream1, stream2, stream3, stream4, stream5, stream6;
hipStreamCreate(&stream1);
hipStreamCreate(&stream2);
hipStreamCreate(&stream3);
hipStreamCreate(&stream4);
hipStreamCreate(&stream5);
hipStreamCreate(&stream6);
check_error(hipMemcpyAsync(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, hipMemcpyHostToDevice, stream1));
check_error(hipMemcpyAsync(objxy_GPU, objxy, sizeof (int) *2 * countOnes, hipMemcpyHostToDevice, stream2));
check_error(hipMemcpyAsync(weights_GPU, weights, sizeof (float) *Nparticles, hipMemcpyHostToDevice, stream3));
check_error(hipMemcpyAsync(xj_GPU, xj, sizeof (float) *Nparticles, hipMemcpyHostToDevice, stream4));
check_error(hipMemcpyAsync(yj_GPU, yj, sizeof (float) *Nparticles, hipMemcpyHostToDevice, stream5));
check_error(hipMemcpyAsync(seed_GPU, seed, sizeof (int) *Nparticles, hipMemcpyHostToDevice, stream6));
hipDeviceSynchronize();
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((float) Nparticles / (float) threads_per_block);
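    // Per-frame pipeline: (1) likelihood_kernel propagates the particles and
    // produces per-block partial weight sums, (2) sum_kernel folds the partials
    // into partial_sums[0], (3) normalize_weights_kernel normalizes the weights
    // and builds the CDF and stratified u values, (4) find_index_kernel
    // resamples the particles from the CDF.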
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
hipDeviceSynchronize();
long long back_time = get_time();
hipFree(xj_GPU);
hipFree(yj_GPU);
hipFree(CDF_GPU);
hipFree(u_GPU);
hipFree(likelihood_GPU);
hipFree(I_GPU);
hipFree(objxy_GPU);
hipFree(ind_GPU);
hipFree(seed_GPU);
hipFree(partial_sums);
long long free_time = get_time();
check_error(hipMemcpyAsync(arrayX, arrayX_GPU, sizeof (float) *Nparticles, hipMemcpyDeviceToHost, stream1));
long long arrayX_time = get_time();
check_error(hipMemcpyAsync(arrayY, arrayY_GPU, sizeof (float) *Nparticles, hipMemcpyDeviceToHost, stream2));
long long arrayY_time = get_time();
check_error(hipMemcpyAsync(weights, weights_GPU, sizeof (float) *Nparticles, hipMemcpyDeviceToHost, stream3));
hipDeviceSynchronize();
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
float distance = sqrt(pow((float) (xe - (int) roundfloat(IszY / 2.0)), 2) + pow((float) (ye - (int) roundfloat(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
hipFree(weights_GPU);
hipFree(arrayY_GPU);
hipFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
hipHostFree(xj);
hipHostFree(yj);
free(CDF);
free(ind);
free(u);
hipHostFree(objxy);
hipHostFree(weights);
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipStreamDestroy(stream3);
hipStreamDestroy(stream4);
hipStreamDestroy(stream5);
hipStreamDestroy(stream6);
}
int main(int argc, char * argv[]) {
char* usage = "float.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
    //check args delimiters
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
    //converting a string to an integer
    if (sscanf(argv[2], "%d", &IszX) != 1) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
    //converting a string to an integer
    if (sscanf(argv[4], "%d", &IszY) != 1) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
    //converting a string to an integer
    if (sscanf(argv[6], "%d", &Nfr) != 1) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
    //converting a string to an integer
    if (sscanf(argv[8], "%d", &Nparticles) != 1) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
| 3a8445d5b3a604dff144019b989b71c410e08375.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#define BLOCK_X 16
#define BLOCK_Y 16
#define PI 3.1415926535897932
const int threads_per_block = 512;
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
float elapsed_time(long long start_time, long long end_time) {
return (float) (end_time - start_time) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
void cuda_print_float_array(float *array_GPU, size_t size) {
//allocate temporary array for printing
float* mem = (float*) malloc(sizeof (float) *size);
//transfer data from device
cudaMemcpy(mem, array_GPU, sizeof (float) *size, cudaMemcpyDeviceToHost);
printf("PRINTING ARRAY VALUES\n");
//print values in memory
for (size_t i = 0; i < size; ++i) {
printf("[%d]:%0.6f\n", i, mem[i]);
}
printf("FINISHED PRINTING ARRAY VALUES\n");
//clean up memory
free(mem);
mem = NULL;
}
/********************************
* CALC LIKELIHOOD SUM
* DETERMINES THE LIKELIHOOD SUM BASED ON THE FORMULA: SUM( ((IK[IND] - 100)^2 - (IK[IND] - 228)^2) / 50 )
* param 1 I 3D matrix
* param 2 current ind array
* param 3 length of ind array
* returns a float representing the sum
********************************/
__device__ float calcLikelihoodSum(unsigned char * I, int * ind, int numOnes, int index) {
float likelihoodSum = 0.0;
int x;
for (x = 0; x < numOnes; x++)
likelihoodSum += (pow((float) (I[ind[index * numOnes + x]] - 100), 2) - pow((float) (I[ind[index * numOnes + x]] - 228), 2)) / 50.0;
return likelihoodSum;
}
/****************************
CDF CALCULATE
CALCULATES CDF
param1 CDF
param2 weights
param3 Nparticles
*****************************/
__device__ void cdfCalc(float * CDF, float * weights, int Nparticles) {
int x;
CDF[0] = weights[0];
for (x = 1; x < Nparticles; x++) {
CDF[x] = weights[x] + CDF[x - 1];
}
}
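/* e.g. weights = {0.1, 0.2, 0.3, 0.4} yields CDF = {0.1, 0.3, 0.6, 1.0} */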
/*****************************
* RANDU
* GENERATES A UNIFORM DISTRIBUTION
* returns a float representing a randomily generated number from a uniform distribution with range [0, 1)
******************************/
__device__ float d_randu(int * seed, int index) {
int M = INT_MAX;
int A = 1103515245;
int C = 12345;
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((float) M));
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
float randu(int * seed, int index) {
int num = A * seed[index] + C;
seed[index] = num % M;
return fabs(seed[index] / ((float) M));
}
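/* Worked example of one LCG step (illustrative): with seed[index] = 1,
   num = 1103515245 * 1 + 12345 = 1103527590 < INT_MAX, so the state stays
   1103527590 and the return value is 1103527590 / 2147483647 ~= 0.5139 */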
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a float representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
float randn(int * seed, int index) {
/*Box-Muller algorithm*/
float u = randu(seed, index);
float v = randu(seed, index);
float cosine = cos(2 * PI * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
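/* Worked example (illustrative): u = 0.5, v = 0.25 gives
   sqrt(-2 ln 0.5) * cos(2*pi*0.25) = 1.1774 * 0 = 0, the mean of N(0,1) */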
float test_randn(int * seed, int index) {
//Box-Muller algortihm
float pi = 3.14159265358979323846;
float u = randu(seed, index);
float v = randu(seed, index);
float cosine = cos(2 * pi * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
__device__ float d_randn(int * seed, int index) {
//Box-Muller algortihm
float pi = 3.14159265358979323846;
float u = d_randu(seed, index);
float v = d_randu(seed, index);
float cosine = cos(2 * pi * v);
float rt = -2 * log(u);
return sqrt(rt) * cosine;
}
/****************************
UPDATE WEIGHTS
UPDATES WEIGHTS
param1 weights
param2 likelihood
param3 Nparcitles
****************************/
__device__ float updateWeights(float * weights, float * likelihood, int Nparticles) {
int x;
float sum = 0;
for (x = 0; x < Nparticles; x++) {
weights[x] = weights[x] * exp(likelihood[x]);
sum += weights[x];
}
return sum;
}
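/* Binary search over [beginIndex, endIndex] for the first CDF entry >= value;
   assumes CDF is non-decreasing and returns -1 if the range collapses without
   locating such an entry */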
__device__ int findIndexBin(float * CDF, int beginIndex, int endIndex, float value) {
if (endIndex < beginIndex)
return -1;
int middleIndex;
while (endIndex > beginIndex) {
middleIndex = beginIndex + ((endIndex - beginIndex) / 2);
if (CDF[middleIndex] >= value) {
if (middleIndex == 0)
return middleIndex;
else if (CDF[middleIndex - 1] < value)
return middleIndex;
else if (CDF[middleIndex - 1] == value) {
while (middleIndex >= 0 && CDF[middleIndex] == value) //bounds check first so CDF[-1] is never read
middleIndex--;
middleIndex++;
return middleIndex;
}
}
if (CDF[middleIndex] > value)
endIndex = middleIndex - 1;
else
beginIndex = middleIndex + 1;
}
return -1;
}
/** added this function. was missing in original float version.
* Takes in a float and returns an integer that approximates to that float
* @return if the fractional part < .5 => return value < input value; else return value > input value
*/
__device__ float dev_round_float(float value) {
int newValue = (int) (value);
if (value - newValue < .5f)
return newValue;
else
return newValue + 1;
}
/*****************************
* CUDA Find Index Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: weights
* param8: Nparticles
*****************************/
__global__ void find_index_kernel(float * arrayX, float * arrayY, float * CDF, float * u, float * xj, float * yj, float * weights, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i < Nparticles) {
int index = -1;
int x;
for (x = 0; x < Nparticles; x++) {
if (CDF[x] >= u[i]) {
index = x;
break;
}
}
if (index == -1) {
index = Nparticles - 1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
//weights[i] = 1 / ((float) (Nparticles)); //moved this code to the beginning of likelihood kernel
}
__syncthreads();
}
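/* Normalizes every weight by the global sum left in partial_sums[0], then
   thread 0 serially rebuilds the CDF and draws the single random offset u1;
   each particle i then resamples at the systematic point u[i] = u1 + i/Nparticles */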
__global__ void normalize_weights_kernel(float * weights, int Nparticles, float* partial_sums, float * CDF, float * u, int * seed) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
__shared__ float u1, sumWeights;
if(0 == threadIdx.x)
sumWeights = partial_sums[0];
__syncthreads();
if (i < Nparticles) {
weights[i] = weights[i] / sumWeights;
}
__syncthreads();
if (i == 0) {
cdfCalc(CDF, weights, Nparticles);
u[0] = (1 / ((float) (Nparticles))) * d_randu(seed, i); // do this to allow all threads in all blocks to use the same u1
}
__syncthreads();
if(0 == threadIdx.x)
u1 = u[0];
__syncthreads();
if (i < Nparticles) {
u[i] = u1 + i / ((float) (Nparticles));
}
}
__global__ void sum_kernel(float* partial_sums, int Nparticles) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
if (i == 0) {
int x;
float sum = 0.0;
int num_blocks = ceil((float) Nparticles / (float) threads_per_block);
for (x = 0; x < num_blocks; x++) {
sum += partial_sums[x];
}
partial_sums[0] = sum;
}
}
/*****************************
* CUDA Likelihood Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param2.5: CDF
* param3: ind
* param4: objxy
* param5: likelihood
* param6: I
* param6.5: u
* param6.75: weights
* param7: Nparticles
* param8: countOnes
* param9: max_size
* param10: k
* param11: IszY
* param12: Nfr
*****************************/
__global__ void likelihood_kernel(float * arrayX, float * arrayY, float * xj, float * yj, float * CDF, int * ind, int * objxy, float * likelihood, unsigned char * I, float * u, float * weights, int Nparticles, int countOnes, int max_size, int k, int IszY, int Nfr, int *seed, float* partial_sums) {
int block_id = blockIdx.x;
int i = blockDim.x * block_id + threadIdx.x;
int y;
int indX, indY;
__shared__ float buffer[512];
if (i < Nparticles) {
arrayX[i] = xj[i];
arrayY[i] = yj[i];
weights[i] = 1 / ((float) (Nparticles)); //Donnie - moved this line from end of find_index_kernel to prevent all weights from being reset before calculating position on final iteration.
arrayX[i] = arrayX[i] + 1.0 + 5.0 * d_randn(seed, i);
arrayY[i] = arrayY[i] - 2.0 + 2.0 * d_randn(seed, i);
}
__syncthreads();
if (i < Nparticles) {
for (y = 0; y < countOnes; y++) {
//added dev_round_float() to be consistent with roundfloat
indX = dev_round_float(arrayX[i]) + objxy[y * 2 + 1];
indY = dev_round_float(arrayY[i]) + objxy[y * 2];
ind[i * countOnes + y] = abs(indX * IszY * Nfr + indY * Nfr + k);
if (ind[i * countOnes + y] >= max_size)
ind[i * countOnes + y] = 0;
}
likelihood[i] = calcLikelihoodSum(I, ind, countOnes, i);
likelihood[i] = likelihood[i] / countOnes;
weights[i] = weights[i] * exp(likelihood[i]); //Donnie Newell - added the missing exponential function call
}
buffer[threadIdx.x] = 0.0;
__syncthreads();
if (i < Nparticles) {
buffer[threadIdx.x] = weights[i];
}
__syncthreads();
//threads past Nparticles contributed 0 to buffer above, so a partially full last block still reduces correctly
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
buffer[threadIdx.x] += buffer[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) {
partial_sums[blockIdx.x] = buffer[0];
}
__syncthreads();
}
/**
* Takes in a float and returns an integer that approximates to that float
* @return if the fractional part < .5 => return value < input value; else return value > input value
*/
float roundfloat(float value) {
int newValue = (int) (value);
if (value - newValue < .5)
return newValue;
else
return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
void setIf(int testValue, int newValue, unsigned char * array3D, int * dimX, int * dimY, int * dimZ) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
if (array3D[x * *dimY * *dimZ + y * *dimZ + z] == testValue)
array3D[x * *dimY * *dimZ + y * *dimZ + z] = newValue;
}
}
}
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
void addNoise(unsigned char * array3D, int * dimX, int * dimY, int * dimZ, int * seed) {
int x, y, z;
for (x = 0; x < *dimX; x++) {
for (y = 0; y < *dimY; y++) {
for (z = 0; z < *dimZ; z++) {
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (unsigned char) (5 * randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
void strelDisk(int * disk, int radius) {
int diameter = radius * 2 - 1;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
float distance = sqrt(pow((float) (x - radius + 1), 2) + pow((float) (y - radius + 1), 2));
if (distance < radius)
disk[x * diameter + y] = 1;
}
}
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
void dilate_matrix(unsigned char * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error) {
int startX = posX - error;
while (startX < 0)
startX++;
int startY = posY - error;
while (startY < 0)
startY++;
int endX = posX + error;
while (endX > dimX)
endX--;
int endY = posY + error;
while (endY > dimY)
endY--;
int x, y;
for (x = startX; x < endX; x++) {
for (y = startY; y < endY; y++) {
float distance = sqrt(pow((float) (x - posX), 2) + pow((float) (y - posY), 2));
if (distance < error)
matrix[x * dimY * dimZ + y * dimZ + posZ] = 1;
}
}
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
void imdilate_disk(unsigned char * matrix, int dimX, int dimY, int dimZ, int error, unsigned char * newMatrix) {
int x, y, z;
for (z = 0; z < dimZ; z++) {
for (x = 0; x < dimX; x++) {
for (y = 0; y < dimY; y++) {
if (matrix[x * dimY * dimZ + y * dimZ + z] == 1) {
dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
}
}
}
}
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
void getneighbors(int * se, int numOnes, int * neighbors, int radius) {
int x, y;
int neighY = 0;
int center = radius - 1;
int diameter = radius * 2 - 1;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (se[x * diameter + y]) {
neighbors[neighY * 2] = (int) (y - center);
neighbors[neighY * 2 + 1] = (int) (x - center);
neighY++;
}
}
}
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the background intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
void videoSequence(unsigned char * I, int IszX, int IszY, int Nfr, int * seed) {
int k;
int max_size = IszX * IszY * Nfr;
/*get object centers*/
int x0 = (int) roundfloat(IszY / 2.0);
int y0 = (int) roundfloat(IszX / 2.0);
I[x0 * IszY * Nfr + y0 * Nfr + 0] = 1;
/*move point*/
int xk, yk, pos;
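/* linear trajectory: per frame the centre moves +1 along x and -2 along y */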
for (k = 1; k < Nfr; k++) {
xk = abs(x0 + (k-1));
yk = abs(y0 - 2 * (k-1));
pos = yk * IszY * Nfr + xk * Nfr + k;
if (pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix*/
unsigned char * newMatrix = (unsigned char *) malloc(sizeof (unsigned char) * IszX * IszY * Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for (x = 0; x < IszX; x++) {
for (y = 0; y < IszY; y++) {
for (k = 0; k < Nfr; k++) {
I[x * IszY * Nfr + y * Nfr + k] = newMatrix[x * IszY * Nfr + y * Nfr + k];
}
}
}
free(newMatrix);
/*define background, add noise*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
int findIndex(float * CDF, int lengthCDF, float value) {
int index = -1;
int x;
for (x = 0; x < lengthCDF; x++) {
if (CDF[x] >= value) {
index = x;
break;
}
}
if (index == -1) {
return lengthCDF - 1;
}
return index;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
void particleFilter(unsigned char * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles) {
int max_size = IszX * IszY*Nfr;
//original particle centroid
float xe = roundfloat(IszY / 2.0);
float ye = roundfloat(IszX / 2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius * 2 - 1;
int * disk = (int*) malloc(diameter * diameter * sizeof (int));
strelDisk(disk, radius);
int countOnes = 0;
int x, y;
for (x = 0; x < diameter; x++) {
for (y = 0; y < diameter; y++) {
if (disk[x * diameter + y] == 1)
countOnes++;
}
}
int *objxy;
cudaHostAlloc((void **) &objxy, countOnes * 2 * sizeof (int), cudaHostAllocWriteCombined);
getneighbors(disk, countOnes, objxy, radius);
//initial weights are all equal (1/Nparticles)
float *weights;
cudaHostAlloc((void **) &weights, sizeof (float) *Nparticles, cudaHostAllocWriteCombined);
for (x = 0; x < Nparticles; x++) {
weights[x] = 1 / ((float) (Nparticles));
}
//initial likelihood to 0.0
float * likelihood = (float *) malloc(sizeof (float) *Nparticles);
float * arrayX = (float *) malloc(sizeof (float) *Nparticles);
float * arrayY = (float *) malloc(sizeof (float) *Nparticles);
float * xj;
float * yj;
cudaHostAlloc((void **) &xj, sizeof (float) *Nparticles, cudaHostAllocWriteCombined);
cudaHostAlloc((void **) &yj, sizeof (float) *Nparticles, cudaHostAllocWriteCombined);
float * CDF = (float *) malloc(sizeof (float) *Nparticles);
//GPU copies of arrays
float * arrayX_GPU;
float * arrayY_GPU;
float * xj_GPU;
float * yj_GPU;
float * CDF_GPU;
float * likelihood_GPU;
unsigned char * I_GPU;
float * weights_GPU;
int * objxy_GPU;
int * ind = (int*) malloc(sizeof (int) *countOnes * Nparticles);
int * ind_GPU;
float * u = (float *) malloc(sizeof (float) *Nparticles);
float * u_GPU;
int * seed_GPU;
float* partial_sums;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &likelihood_GPU, sizeof (float) *Nparticles));
//set likelihood to zero
check_error(cudaMemset((void *) likelihood_GPU, 0, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &weights_GPU, sizeof (float) *Nparticles));
check_error(cudaMalloc((void **) &I_GPU, sizeof (unsigned char) *IszX * IszY * Nfr));
check_error(cudaMalloc((void **) &objxy_GPU, sizeof (int) *2 * countOnes));
check_error(cudaMalloc((void **) &ind_GPU, sizeof (int) *countOnes * Nparticles));
check_error(cudaMalloc((void **) &seed_GPU, sizeof (int) *Nparticles));
check_error(cudaMalloc((void **) &partial_sums, sizeof (float) *Nparticles));
//Donnie - this loop is different because in this kernel, arrayX and arrayY
// are set equal to xj before every iteration, so effectively, arrayX and
// arrayY will be set to xe and ye before the first iteration.
for (x = 0; x < Nparticles; x++) {
xj[x] = xe;
yj[x] = ye;
}
int k;
int indX, indY;
//start send
long long send_start = get_time();
cudaStream_t stream1, stream2, stream3, stream4, stream5, stream6;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
cudaStreamCreate(&stream4);
cudaStreamCreate(&stream5);
cudaStreamCreate(&stream6);
check_error(cudaMemcpyAsync(I_GPU, I, sizeof (unsigned char) *IszX * IszY*Nfr, cudaMemcpyHostToDevice, stream1));
check_error(cudaMemcpyAsync(objxy_GPU, objxy, sizeof (int) *2 * countOnes, cudaMemcpyHostToDevice, stream2));
check_error(cudaMemcpyAsync(weights_GPU, weights, sizeof (float) *Nparticles, cudaMemcpyHostToDevice, stream3));
check_error(cudaMemcpyAsync(xj_GPU, xj, sizeof (float) *Nparticles, cudaMemcpyHostToDevice, stream4));
check_error(cudaMemcpyAsync(yj_GPU, yj, sizeof (float) *Nparticles, cudaMemcpyHostToDevice, stream5));
check_error(cudaMemcpyAsync(seed_GPU, seed, sizeof (int) *Nparticles, cudaMemcpyHostToDevice, stream6));
cudaDeviceSynchronize();
long long send_end = get_time();
printf("TIME TO SEND TO GPU: %f\n", elapsed_time(send_start, send_end));
int num_blocks = ceil((float) Nparticles / (float) threads_per_block);
for (k = 1; k < Nfr; k++) {
likelihood_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, xj_GPU, yj_GPU, CDF_GPU, ind_GPU, objxy_GPU, likelihood_GPU, I_GPU, u_GPU, weights_GPU, Nparticles, countOnes, max_size, k, IszY, Nfr, seed_GPU, partial_sums);
sum_kernel << < num_blocks, threads_per_block >> > (partial_sums, Nparticles);
normalize_weights_kernel << < num_blocks, threads_per_block >> > (weights_GPU, Nparticles, partial_sums, CDF_GPU, u_GPU, seed_GPU);
find_index_kernel << < num_blocks, threads_per_block >> > (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, weights_GPU, Nparticles);
}//end loop
//block till kernels are finished
cudaDeviceSynchronize();
long long back_time = get_time();
cudaFree(xj_GPU);
cudaFree(yj_GPU);
cudaFree(CDF_GPU);
cudaFree(u_GPU);
cudaFree(likelihood_GPU);
cudaFree(I_GPU);
cudaFree(objxy_GPU);
cudaFree(ind_GPU);
cudaFree(seed_GPU);
cudaFree(partial_sums);
long long free_time = get_time();
check_error(cudaMemcpyAsync(arrayX, arrayX_GPU, sizeof (float) *Nparticles, cudaMemcpyDeviceToHost, stream1));
long long arrayX_time = get_time();
check_error(cudaMemcpyAsync(arrayY, arrayY_GPU, sizeof (float) *Nparticles, cudaMemcpyDeviceToHost, stream2));
long long arrayY_time = get_time();
check_error(cudaMemcpyAsync(weights, weights_GPU, sizeof (float) *Nparticles, cudaMemcpyDeviceToHost, stream3));
cudaDeviceSynchronize();
long long back_end_time = get_time();
printf("GPU Execution: %lf\n", elapsed_time(send_end, back_time));
printf("FREE TIME: %lf\n", elapsed_time(back_time, free_time));
printf("TIME TO SEND BACK: %lf\n", elapsed_time(back_time, back_end_time));
printf("SEND ARRAY X BACK: %lf\n", elapsed_time(free_time, arrayX_time));
printf("SEND ARRAY Y BACK: %lf\n", elapsed_time(arrayX_time, arrayY_time));
printf("SEND WEIGHTS BACK: %lf\n", elapsed_time(arrayY_time, back_end_time));
xe = 0;
ye = 0;
// estimate the object location by expected values
for (x = 0; x < Nparticles; x++) {
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
float distance = sqrt(pow((float) (xe - (int) roundfloat(IszY / 2.0)), 2) + pow((float) (ye - (int) roundfloat(IszX / 2.0)), 2));
printf("%lf\n", distance);
//CUDA freeing of memory
cudaFree(weights_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free regular memory
free(likelihood);
free(arrayX);
free(arrayY);
cudaFreeHost(xj);
cudaFreeHost(yj);
free(CDF);
free(ind);
free(u);
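free(disk); //structuring-element buffer allocated during setup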
cudaFreeHost(objxy);
cudaFreeHost(weights);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaStreamDestroy(stream4);
cudaStreamDestroy(stream5);
cudaStreamDestroy(stream6);
}
int main(int argc, char * argv[]) {
char* usage = "float.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
//check number of arguments
if (argc != 9) {
printf("%s\n", usage);
return 0;
}
//check args delimiters
if (strcmp(argv[1], "-x") || strcmp(argv[3], "-y") || strcmp(argv[5], "-z") || strcmp(argv[7], "-np")) {
printf("%s\n", usage);
return 0;
}
int IszX, IszY, Nfr, Nparticles;
//converting a string to an integer
if (sscanf(argv[2], "%d", &IszX) != 1) {
printf("ERROR: dimX input is incorrect");
return 0;
}
if (IszX <= 0) {
printf("dimX must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[4], "%d", &IszY) != 1) {
printf("ERROR: dimY input is incorrect");
return 0;
}
if (IszY <= 0) {
printf("dimY must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[6], "%d", &Nfr) != 1) {
printf("ERROR: Number of frames input is incorrect");
return 0;
}
if (Nfr <= 0) {
printf("number of frames must be > 0\n");
return 0;
}
//converting a string to an integer
if (sscanf(argv[8], "%d", &Nparticles) != 1) {
printf("ERROR: Number of particles input is incorrect");
return 0;
}
if (Nparticles <= 0) {
printf("Number of particles must be > 0\n");
return 0;
}
//establish seed
int * seed = (int *) malloc(sizeof (int) *Nparticles);
int i;
for (i = 0; i < Nparticles; i++)
seed[i] = time(0) * i;
//malloc matrix
unsigned char * I = (unsigned char *) malloc(sizeof (unsigned char) *IszX * IszY * Nfr);
long long start = get_time();
//call video sequence
videoSequence(I, IszX, IszY, Nfr, seed);
long long endVideoSequence = get_time();
printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
//call particle filter
particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
long long endParticleFilter = get_time();
printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
free(seed);
free(I);
return 0;
}
|
6efaffd0830c80013802050d329c93825b039d59.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void test(const int *in, int *answer) {
const int tid = threadIdx.x;
if (tid == 0) printf("hello!\n");
int sum = in[tid];
if (tid == 0) printf("sum[0] is %d\n", sum);
*answer = sum;
}
int main() {
int *h_data, *d_data;
int N = 64;
int *h_answer, *d_answer;
h_data = (int *)malloc(N*sizeof(int));
h_answer = (int *)malloc(sizeof(int));
hipMalloc(&d_data, N*sizeof(int));
hipMalloc(&d_answer, sizeof(int));
for (int i = 0; i < N; i += 1) {
h_data[i] = N - i;
}
hipMemcpy(d_data, h_data, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1), dim3(N), 0, 0, d_data, d_answer); // launch on the device copy, not the host pointer
hipMemcpy(h_answer, d_answer, sizeof(int), hipMemcpyDeviceToHost);
printf("The answer is %d\n", *h_answer);
} | 6efaffd0830c80013802050d329c93825b039d59.cu | #include <stdio.h>
#include <cuda_runtime.h>
__global__ void test(const int *in, int *answer) {
const int tid = threadIdx.x;
if (tid == 0) printf("hello!\n");
int sum = in[tid];
if (tid == 0) printf("sum[0] is %d\n", sum);
*answer = sum;
}
int main() {
int *h_data, *d_data;
int N = 64;
int *h_answer, *d_answer;
h_data = (int *)malloc(N*sizeof(int));
h_answer = (int *)malloc(sizeof(int));
cudaMalloc(&d_data, N*sizeof(int));
cudaMalloc(&d_answer, sizeof(int));
for (int i = 0; i < N; i += 1) {
h_data[i] = N - i;
}
cudaMemcpy(d_data, h_data, N*sizeof(int), cudaMemcpyHostToDevice);
test<<<1, N>>>(h_data, d_answer);
cudaMemcpy(h_answer, d_answer, sizeof(int), cudaMemcpyDeviceToHost);
printf("The answer is %d\n", *h_answer);
} |
8931e32bad7516f68f453f6ec18e0ba02d3e500a.hip | // !!! This is a file automatically generated by hipify!!!
/*
float matrix implementation
author: Yu Xiang
Date: 04/10/2011
*/
extern "C"
{
#include "matrix.h"
}
#include <hip/hip_runtime.h>
#if TORCH_HIP_VERSION>=5000
#include <helper_cuda.h>
#define cutilSafeCall checkCudaErrors
#else
#include "cutil_inline.h"
#endif
CUMATRIX read_cumatrix(FILE *fp)
{
int i;
CUMATRIX matrix;
/* initialization */
matrix.dims_num = 0;
matrix.dims = NULL;
matrix.length = 0;
matrix.data = NULL;
/* read dimension */
if(fscanf(fp, "%d", &matrix.dims_num) != 1)
{
printf("fscanf failed\n");
return matrix;
}
/* allocate dims */
matrix.dims = (int*)malloc(sizeof(int)*matrix.dims_num);
matrix.length = 1;
for(i = 0; i < matrix.dims_num; i++)
{
if(fscanf(fp, "%d", matrix.dims+i) != 1)
{
printf("fscanf failed\n");
return matrix;
}
matrix.length *= matrix.dims[i];
}
/* allocate data */
matrix.data = (float*)malloc(sizeof(float)*matrix.length);
for(i = 0; i < matrix.length; i++)
{
if(fscanf(fp, "%f", matrix.data+i) != 1)
{
printf("fscanf failed\n");
return matrix;
}
}
return matrix;
}
void write_cumatrix(CUMATRIX *pmat, FILE *fp)
{
int i;
fprintf(fp, "%d\n", pmat->dims_num);
for(i = 0; i < pmat->dims_num; i++)
fprintf(fp, "%d ", pmat->dims[i]);
fprintf(fp, "\n");
for(i = 0; i < pmat->length; i++)
fprintf(fp, "%.12f ", pmat->data[i]);
}
void print_cumatrix(CUMATRIX *pmat)
{
int i;
printf("dims_num = %d\n", pmat->dims_num);
for(i = 0; i < pmat->dims_num; i++)
printf("%d ", pmat->dims[i]);
printf("\n");
for(i = 0; i < pmat->length; i++)
printf("%f ", pmat->data[i]);
printf("\n");
}
void free_cumatrix(CUMATRIX *pmat)
{
if(pmat->dims != NULL)
free(pmat->dims);
if(pmat->data != NULL)
free(pmat->data);
}
CUMATRIX alloc_device_cumatrix(CUMATRIX mat_host)
{
CUMATRIX mat_device;
int dims_num = mat_host.dims_num;
int length = mat_host.length;
/* allocate device memory */
mat_device.dims_num = dims_num;
cutilSafeCall(hipMalloc((void**)&(mat_device.dims), sizeof(int)*dims_num));
mat_device.length = length;
cutilSafeCall(hipMalloc((void**)&(mat_device.data), sizeof(float)*length));
/* copy host memory to device */
cutilSafeCall(hipMemcpy(mat_device.dims, mat_host.dims, sizeof(int)*dims_num, hipMemcpyHostToDevice) );
cutilSafeCall(hipMemcpy(mat_device.data, mat_host.data, sizeof(float)*length, hipMemcpyHostToDevice) );
return mat_device;
}
void free_device_cumatrix(CUMATRIX *pmat)
{
cutilSafeCall(hipFree(pmat->dims));
cutilSafeCall(hipFree(pmat->data));
}
/* pad 3d matrix */
CUMATRIX pad_3d_maxtrix(CUMATRIX A, int padx, int pady)
{
int x, y, z, nx, ny, nz;
CUMATRIX B;
nx = A.dims[1];
ny = A.dims[0];
nz = A.dims[2];
B.dims_num = 3;
B.dims = (int*)malloc(sizeof(int)*3);
B.dims[0] = ny+2*pady;
B.dims[1] = nx+2*padx;
B.dims[2] = nz;
B.length = B.dims[0] * B.dims[1] * B.dims[2];
B.data = (float*)malloc(sizeof(float)*B.length);
memset(B.data, 0, sizeof(float)*B.length);
for(z = 0; z < B.dims[2]; z++)
{
for(x = padx; x < B.dims[1]-padx; x++)
{
for(y = pady; y < B.dims[0]-pady; y++)
B.data[z*B.dims[0]*B.dims[1] + x*B.dims[0] + y] = A.data[z*nx*ny + (x-padx)*ny + y-pady];
}
}
return B;
}
/* copy matrix */
CUMATRIX copy_cumatrix(CUMATRIX A)
{
CUMATRIX B;
B.dims_num = A.dims_num;
B.dims = (int*)malloc(sizeof(int)*B.dims_num);
memcpy(B.dims, A.dims, sizeof(int)*B.dims_num);
B.length = A.length;
B.data = (float*)malloc(sizeof(float)*B.length);
memcpy(B.data, A.data, sizeof(float)*B.length);
return B;
}
| 8931e32bad7516f68f453f6ec18e0ba02d3e500a.cu | /*
float matrix implementation
author: Yu Xiang
Date: 04/10/2011
*/
extern "C"
{
#include "matrix.h"
}
#include <cuda.h>
#if CUDA_VERSION>=5000
#include <helper_cuda.h>
#define cutilSafeCall checkCudaErrors
#else
#include "cutil_inline.h"
#endif
CUMATRIX read_cumatrix(FILE *fp)
{
int i;
CUMATRIX matrix;
/* initialization */
matrix.dims_num = 0;
matrix.dims = NULL;
matrix.length = 0;
matrix.data = NULL;
/* read dimension */
if(fscanf(fp, "%d", &matrix.dims_num) != 1)
{
printf("fscanf failed\n");
return matrix;
}
/* allocate dims */
matrix.dims = (int*)malloc(sizeof(int)*matrix.dims_num);
matrix.length = 1;
for(i = 0; i < matrix.dims_num; i++)
{
if(fscanf(fp, "%d", matrix.dims+i) != 1)
{
printf("fscanf failed\n");
return matrix;
}
matrix.length *= matrix.dims[i];
}
/* allocate data */
matrix.data = (float*)malloc(sizeof(float)*matrix.length);
for(i = 0; i < matrix.length; i++)
{
if(fscanf(fp, "%f", matrix.data+i) != 1)
{
printf("fscanf failed\n");
return matrix;
}
}
return matrix;
}
void write_cumatrix(CUMATRIX *pmat, FILE *fp)
{
int i;
fprintf(fp, "%d\n", pmat->dims_num);
for(i = 0; i < pmat->dims_num; i++)
fprintf(fp, "%d ", pmat->dims[i]);
fprintf(fp, "\n");
for(i = 0; i < pmat->length; i++)
fprintf(fp, "%.12f ", pmat->data[i]);
}
void print_cumatrix(CUMATRIX *pmat)
{
int i;
printf("dims_num = %d\n", pmat->dims_num);
for(i = 0; i < pmat->dims_num; i++)
printf("%d ", pmat->dims[i]);
printf("\n");
for(i = 0; i < pmat->length; i++)
printf("%f ", pmat->data[i]);
printf("\n");
}
void free_cumatrix(CUMATRIX *pmat)
{
if(pmat->dims != NULL)
free(pmat->dims);
if(pmat->data != NULL)
free(pmat->data);
}
CUMATRIX alloc_device_cumatrix(CUMATRIX mat_host)
{
CUMATRIX mat_device;
int dims_num = mat_host.dims_num;
int length = mat_host.length;
/* allocate device memory */
mat_device.dims_num = dims_num;
cutilSafeCall(cudaMalloc((void**)&(mat_device.dims), sizeof(int)*dims_num));
mat_device.length = length;
cutilSafeCall(cudaMalloc((void**)&(mat_device.data), sizeof(float)*length));
/* copy host memory to device */
cutilSafeCall(cudaMemcpy(mat_device.dims, mat_host.dims, sizeof(int)*dims_num, cudaMemcpyHostToDevice) );
cutilSafeCall(cudaMemcpy(mat_device.data, mat_host.data, sizeof(float)*length, cudaMemcpyHostToDevice) );
return mat_device;
}
void free_device_cumatrix(CUMATRIX *pmat)
{
cutilSafeCall(cudaFree(pmat->dims));
cutilSafeCall(cudaFree(pmat->data));
}
/* pad 3d matrix */
CUMATRIX pad_3d_maxtrix(CUMATRIX A, int padx, int pady)
{
int x, y, z, nx, ny, nz;
CUMATRIX B;
nx = A.dims[1];
ny = A.dims[0];
nz = A.dims[2];
B.dims_num = 3;
B.dims = (int*)malloc(sizeof(int)*3);
B.dims[0] = ny+2*pady;
B.dims[1] = nx+2*padx;
B.dims[2] = nz;
B.length = B.dims[0] * B.dims[1] * B.dims[2];
B.data = (float*)malloc(sizeof(float)*B.length);
memset(B.data, 0, sizeof(float)*B.length);
for(z = 0; z < B.dims[2]; z++)
{
for(x = padx; x < B.dims[1]-padx; x++)
{
for(y = pady; y < B.dims[0]-pady; y++)
B.data[z*B.dims[0]*B.dims[1] + x*B.dims[0] + y] = A.data[z*nx*ny + (x-padx)*ny + y-pady];
}
}
return B;
}
/* copy matrix */
CUMATRIX copy_cumatrix(CUMATRIX A)
{
CUMATRIX B;
B.dims_num = A.dims_num;
B.dims = (int*)malloc(sizeof(int)*B.dims_num);
memcpy(B.dims, A.dims, sizeof(int)*B.dims_num);
B.length = A.length;
B.data = (float*)malloc(sizeof(float)*B.length);
memcpy(B.data, A.data, sizeof(float)*B.length);
return B;
}
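/* Hypothetical usage sketch (names as defined above, error handling omitted):
   CUMATRIX m = read_cumatrix(fp);        // parse host matrix from a text stream
   CUMATRIX d = alloc_device_cumatrix(m); // mirror dims and data on the GPU
   // ... launch kernels that read/write d.data ...
   free_device_cumatrix(&d);
   free_cumatrix(&m);                                                          */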
|
e3815bb34bade78726ea6b9c57b7abdb12a8a875.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__device__ static void TRN(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number)
{
//set the neuron model parameters
float C=40;
float k=0.25;
float vr=-65;
float vt=-45;
float G_up=5;
float G_down=5;
float a=0.015;
float b=10;
float c=-55;
float d=50;
float v_peak=0;
float I;
float I_distal;
float I_proximal;
float v=neuro[number].v;
float u=neuro[number].u;
I=Ix[number].I;
//Izhikevich model
if(v>-65){b=2;}else{b=10;}
v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C;
u=u+tau*a*(b*(v-vr)-u);
spike[number]=0;
if(v>v_peak)
{
v=c;
u=u+d;
spike[number]=1;
}
neuro[number].v=v;
neuro[number].u=u;
Ix[number].I=0;
}
__global__ static void TRN_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM)
{
const int TRNd = threadIdx.x;
const int bid = blockIdx.x;
int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1]+THREAD_NUM[2]*BLOCK_NUM[2]+THREAD_NUM[3]*BLOCK_NUM[3])*10+(bid * THREAD_NUM[4] + TRNd)*10;
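/* each thread services a strided batch of 10 neurons; the boxnum[2] guard
   keeps tail threads from stepping past the population */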
/**** virtual compute kernel for neuron 1 ****/
if((number+0)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+0);}
/**** virtual compute kernel for neuron 2 ****/
if((number+1)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+1);}
/**** virtual compute kernel for neuron 3 ****/
if((number+2)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+2);}
/**** virtual compute kernel for neuron 4 ****/
if((number+3)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+3);}
/**** virtual compute kernel for neuron 5 ****/
if((number+4)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+4);}
/**** virtual compute kernel for neuron 6 ****/
if((number+5)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+5);}
/**** virtual compute kernel for neuron 7 ****/
if((number+6)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+6);}
/**** virtual compute kernel for neuron 8 ****/
if((number+7)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+7);}
/**** virtual compute kernel for neuron 9 ****/
if((number+8)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+8);}
/**** virtual compute kernel for neuron 10 ****/
if((number+9)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+9);}
}
| e3815bb34bade78726ea6b9c57b7abdb12a8a875.cu | #include "cuda_runtime.h"
#include <stdio.h>
__device__ static void TRN(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int number)
{
//set the neuron model parameters
float C=40;
float k=0.25;
float vr=-65;
float vt=-45;
float G_up=5;
float G_down=5;
float a=0.015;
float b=10;
float c=-55;
float d=50;
float v_peak=0;
float I;
float I_distal;
float I_proximal;
float v=neuro[number].v;
float u=neuro[number].u;
I=Ix[number].I;
//Izhikevich model
if(v>-65){b=2;}else{b=10;}
v=v+tau*(k*(v-vr)*(v-vt)-u+I)/C;
u=u+tau*a*(b*(v-vr)-u);
spike[number]=0;
if(v>v_peak)
{
v=c;
u=u+d;
spike[number]=1;
}
neuro[number].v=v;
neuro[number].u=u;
Ix[number].I=0;
}
__global__ static void TRN_neuron(int *input,struct axon *neuro, unsigned char *spike,struct neuron_I *Ix, int *boxnum, int *THREAD_NUM, int *BLOCK_NUM)
{
const int TRNd = threadIdx.x;
const int bid = blockIdx.x;
int number=(THREAD_NUM[0]*BLOCK_NUM[0]+THREAD_NUM[1]*BLOCK_NUM[1]+THREAD_NUM[2]*BLOCK_NUM[2]+THREAD_NUM[3]*BLOCK_NUM[3])*10+(bid * THREAD_NUM[4] + TRNd)*10;
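/* each thread services a strided batch of 10 neurons; the boxnum[2] guard
   keeps tail threads from stepping past the population */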
/******** virtual compute kernel for neuron 1 ********/
if((number+0)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+0);}
/******** virtual compute kernel for neuron 2 ********/
if((number+1)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+1);}
/******** virtual compute kernel for neuron 3 ********/
if((number+2)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+2);}
/******** virtual compute kernel for neuron 4 ********/
if((number+3)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+3);}
/******** virtual compute kernel for neuron 5 ********/
if((number+4)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+4);}
/******** virtual compute kernel for neuron 6 ********/
if((number+5)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+5);}
/******** virtual compute kernel for neuron 7 ********/
if((number+6)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+6);}
/******** virtual compute kernel for neuron 8 ********/
if((number+7)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+7);}
/******** virtual compute kernel for neuron 9 ********/
if((number+8)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+8);}
/******** virtual compute kernel for neuron 10 ********/
if((number+9)<=boxnum[2])
{TRN(input,neuro,spike,Ix,number+9);}
}
|
474141752fb98890bbc7d04b7fef4d5cc1a77043.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @brief
* test_utils
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <cstdlib>
#include <vector>
#include "k2/csrc/fsa.h"
#include "k2/csrc/host/fsa_util.h"
#include "k2/csrc/host_shim.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/nvtx.h"
#include "k2/csrc/test_utils.h"
namespace k2 {
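// Randomly permutes the interior state numbering of a CPU FSA (start and
// final states stay fixed) so the result is no longer guaranteed top-sorted.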
void ToNotTopSorted(Fsa *fsa) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsa->Context()->GetDeviceType(), kCpu);
int32_t num_states = fsa->TotSize(0);
std::vector<int32_t> order(num_states);
std::iota(order.begin(), order.end(), 0);
std::random_shuffle(order.begin() + 1, order.end() - 1);
Array1<Arc> &arcs = fsa->values;
Arc *arcs_data = arcs.Data();
int32_t num_arcs = arcs.Dim();
for (int32_t i = 0; i != num_arcs; ++i) {
int32_t src_state = arcs_data[i].src_state;
int32_t dest_state = arcs_data[i].dest_state;
arcs_data[i].src_state = order[src_state];
arcs_data[i].dest_state = order[dest_state];
}
auto lambda_comp = [](const Arc &a, const Arc &b) -> bool {
return a.src_state < b.src_state;
};
std::sort(arcs_data, arcs_data + num_arcs, lambda_comp);
for (int32_t i = 0; i != num_arcs; ++i) {
arcs_data[i].score = i;
}
bool error = true;
*fsa = FsaFromArray1(arcs, &error);
K2_CHECK(!error);
}
Fsa GetRandFsa() {
NVTX_RANGE(K2_FUNC);
k2host::RandFsaOptions opts;
opts.num_syms = 5 + RandInt(0, 100);
opts.num_states = 10 + RandInt(0, 2000);
opts.num_arcs = opts.num_states * 4 + RandInt(0, 100);
opts.allow_empty = false;
opts.acyclic = true;
k2host::RandFsaGenerator generator(opts);
k2host::Array2Size<int32_t> fsa_size;
generator.GetSizes(&fsa_size);
FsaCreator creator(fsa_size);
k2host::Fsa host_fsa = creator.GetHostFsa();
generator.GetOutput(&host_fsa);
Fsa ans = creator.GetFsa();
ToNotTopSorted(&ans);
return ans;
}
Array1<int32_t> GenerateRandomIndexes(ContextPtr context, bool allow_minus_one,
int32_t dim, int32_t max_value) {
std::vector<int32_t> indexes(dim);
int32_t start = allow_minus_one ? -1 : 0;
for (int32_t &i : indexes) {
int32_t tmp = RandInt(-max_value, max_value);
i = ::max(tmp, start);
}
return Array1<int32_t>(context, indexes);
}
} // namespace k2
| 474141752fb98890bbc7d04b7fef4d5cc1a77043.cu | /**
* @brief
* test_utils
*
* @copyright
* Copyright (c) 2020 Mobvoi Inc. (authors: Fangjun Kuang)
*
* @copyright
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <cstdlib>
#include <vector>
#include "k2/csrc/fsa.h"
#include "k2/csrc/host/fsa_util.h"
#include "k2/csrc/host_shim.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/math.h"
#include "k2/csrc/nvtx.h"
#include "k2/csrc/test_utils.h"
namespace k2 {
void ToNotTopSorted(Fsa *fsa) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsa->Context()->GetDeviceType(), kCpu);
int32_t num_states = fsa->TotSize(0);
std::vector<int32_t> order(num_states);
std::iota(order.begin(), order.end(), 0);
std::random_shuffle(order.begin() + 1, order.end() - 1);
Array1<Arc> &arcs = fsa->values;
Arc *arcs_data = arcs.Data();
int32_t num_arcs = arcs.Dim();
for (int32_t i = 0; i != num_arcs; ++i) {
int32_t src_state = arcs_data[i].src_state;
int32_t dest_state = arcs_data[i].dest_state;
arcs_data[i].src_state = order[src_state];
arcs_data[i].dest_state = order[dest_state];
}
auto lambda_comp = [](const Arc &a, const Arc &b) -> bool {
return a.src_state < b.src_state;
};
std::sort(arcs_data, arcs_data + num_arcs, lambda_comp);
for (int32_t i = 0; i != num_arcs; ++i) {
arcs_data[i].score = i;
}
bool error = true;
*fsa = FsaFromArray1(arcs, &error);
K2_CHECK(!error);
}
Fsa GetRandFsa() {
NVTX_RANGE(K2_FUNC);
k2host::RandFsaOptions opts;
opts.num_syms = 5 + RandInt(0, 100);
opts.num_states = 10 + RandInt(0, 2000);
opts.num_arcs = opts.num_states * 4 + RandInt(0, 100);
opts.allow_empty = false;
opts.acyclic = true;
k2host::RandFsaGenerator generator(opts);
k2host::Array2Size<int32_t> fsa_size;
generator.GetSizes(&fsa_size);
FsaCreator creator(fsa_size);
k2host::Fsa host_fsa = creator.GetHostFsa();
generator.GetOutput(&host_fsa);
Fsa ans = creator.GetFsa();
ToNotTopSorted(&ans);
return ans;
}
Array1<int32_t> GenerateRandomIndexes(ContextPtr context, bool allow_minus_one,
int32_t dim, int32_t max_value) {
std::vector<int32_t> indexes(dim);
int32_t start = allow_minus_one ? -1 : 0;
for (int32_t &i : indexes) {
int32_t tmp = RandInt(-max_value, max_value);
i = std::max(tmp, start);
}
return Array1<int32_t>(context, indexes);
}
} // namespace k2
|
pool_switches.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// NOTE: the meanings of x/y here are switched.
// Code assumes dimensions are x, y, channels, samples.
__global__ void pool_switches
(unsigned int* idx,
const float* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ; // offset by channel/sample
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
unsigned int bestIdx = y1 * width + x1 ;
float value, bestValue = data[bestIdx] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
value = data[y * width + x] ;
if (value > bestValue) {
bestValue = value ;
bestIdx = y * width + x ;
}
}
}
// return best index. must add the channel/sample offset, plus 1 for one-based indexes
idx[pooledIndex] = bestIdx + pz * (width*height) + 1 ;
}
}
| pool_switches.cu |
// NOTE: the meanings of x/y here are switched.
// Code assumes dimensions are x, y, channels, samples.
__global__ void pool_switches
(unsigned int* idx,
const float* data,
const int pooledWidth,
const int pooledHeight,
const int pooledVolume,
const int width,
const int height,
const int poolWidth,
const int poolHeight,
const int strideX,
const int strideY,
const int padLeft,
const int padTop)
{
int pooledIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (pooledIndex < pooledVolume) {
int px = pooledIndex ;
int py = px / pooledWidth ;
int pz = py / pooledHeight ;
px %= pooledWidth ;
py %= pooledHeight ;
data += pz * (width*height) ; // offset by channel/sample
int x1 = px * strideX - padLeft ;
int y1 = py * strideY - padTop ;
int x2 = min(x1 + poolWidth, width) ;
int y2 = min(y1 + poolHeight, height) ;
x1 = max(x1, 0) ;
y1 = max(y1, 0) ;
unsigned int bestIdx = y1 * width + x1 ;
float value, bestValue = data[bestIdx] ;
for (int y = y1 ; y < y2 ; ++y) {
for (int x = x1 ; x < x2 ; ++x) {
value = data[y * width + x] ;
if (value > bestValue) {
bestValue = value ;
bestIdx = y * width + x ;
}
}
}
// return best index. must add the channel/sample offset, plus 1 for one-based indexes
idx[pooledIndex] = bestIdx + pz * (width*height) + 1 ;
}
}
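/* Hypothetical launch: one thread per pooled output element, e.g.
   int t = 256, b = (pooledVolume + t - 1) / t;
   pool_switches<<<b, t>>>(idx, data, pooledWidth, pooledHeight, pooledVolume,
                           width, height, poolWidth, poolHeight,
                           strideX, strideY, padLeft, padTop); */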
|
a69b2a7128c8ca9428a7da512b34690542392f19.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Lars Janssen 12882712, Aron de Ruijter
Implementation of a wave equation simulation, parallelized on the GPU using
CUDA.
*/
#include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "simulate.hh"
using namespace std;
/* Utility function, use to do error checking for CUDA calls
Use this function like this:
checkCudaCall(<cuda_call>);
For example:
checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
Special case to check the result of the last kernel invocation:
kernel<<<...>>>(...);
checkCudaCall(hipGetLastError());
*/
static void checkCudaCall(hipError_t result)
{
if (result != hipSuccess)
{
cerr << "cuda error: " << hipGetErrorString(result) << endl;
exit(EXIT_FAILURE);
}
}
/*
This is the kernel, which runs on the GPU. It checks if the point we want
to change is not the first or last one, as they stay 0. Then we update it
and change the old and current array accordingly.
*/
__global__ void WaveKernel(double* old_GPU, double* current_GPU,
double* next_GPU, int i_max)
{
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
if(index > 0 && index < i_max - 1)
{
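/* discrete wave update: u_new[i] = 2u[i] - u_old[i] + c*(u[i-1] - 2u[i] + u[i+1]) with coupling c = 0.15 */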
next_GPU[index] = 2 * current_GPU[index] - old_GPU[index] + 0.15 *
(current_GPU[index-1] - (2 * current_GPU[index] - current_GPU[index+1]));
old_GPU[index] = current_GPU[index];
current_GPU[index] = next_GPU[index];
}
}
/*
This function prepares and copies data to the GPU, runs the kernel, and then
copies the result back.
*/
void WaveCuda(double* old_array, double* current_array, double* next_array,
int block_size, const long t_max, int i_max)
{
/*
This allocates the vectors on the GPU, each time checking if we were
successful.
*/
double* old_GPU = NULL;
checkCudaCall(hipMalloc((void **) &old_GPU, i_max * sizeof(double)));
if (old_GPU == NULL)
{
cerr << "Could not allocate the old array on GPU." << endl;
return;
}
double* current_GPU = NULL;
checkCudaCall(hipMalloc((void **) ¤t_GPU, i_max * sizeof(double)));
if (current_GPU == NULL)
{
checkCudaCall(hipFree(old_GPU));
cerr << "Could not allocate the current array on GPU." << endl;
return;
}
double* next_GPU = NULL;
checkCudaCall(hipMalloc((void **) &next_GPU, i_max * sizeof(double)));
if (next_GPU == NULL)
{
checkCudaCall(hipFree(old_GPU));
checkCudaCall(hipFree(current_GPU));
cerr << "Could not allocate the next array on GPU." << endl;
return;
}
/*
This copies the old array and the current array to the GPU.
*/
checkCudaCall(hipMemcpy(old_GPU, old_array, i_max * sizeof(double),
hipMemcpyHostToDevice));
checkCudaCall(hipMemcpy(current_GPU, current_array, i_max * sizeof(double),
hipMemcpyHostToDevice));
/*
This executes the wave kernel for every timestep.
*/
for (int t = 0; t < t_max; t++)
{
hipLaunchKernelGGL(( WaveKernel), dim3(i_max/block_size + 1), dim3(block_size), 0, 0, old_GPU, current_GPU,
next_GPU, i_max);
/*
This checks whether the kernel invocation was succesful.
*/
checkCudaCall(hipGetLastError());
}
/*
This copies the result back to the host
*/
checkCudaCall(hipMemcpy(current_array, current_GPU, i_max * sizeof(double),
hipMemcpyDeviceToHost));
/*
This cleans up the data on the GPU.
*/
checkCudaCall(hipFree(old_GPU));
checkCudaCall(hipFree(current_GPU));
checkCudaCall(hipFree(next_GPU));
}
/*
Function that will simulate the wave equation, parallelized using CUDA.
i_max: how many data points are on a single wave
t_max: how many iterations the simulation should run
block_size: the number of threads per GPU thread block
old_array: array of size i_max filled with data for t-1
current_array: array of size i_max filled with data for t
next_array: array of size i_max. You should fill this with t+1
*/
double *simulate(const long i_max, const long t_max, const long block_size,
double *old_array, double *current_array, double *next_array)
{
WaveCuda(old_array, current_array, next_array, block_size, t_max, i_max);
return current_array;
}
| a69b2a7128c8ca9428a7da512b34690542392f19.cu | /*
Lars Janssen 12882712, Aron de Ruijter
Implementation of a wave equation simulation, parallelized on the GPU using
CUDA.
*/
#include <cstdlib>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "simulate.hh"
using namespace std;
/* Utility function, use to do error checking for CUDA calls
Use this function like this:
checkCudaCall(<cuda_call>);
For example:
checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t)));
Special case to check the result of the last kernel invocation:
kernel<<<...>>>(...);
checkCudaCall(cudaGetLastError());
*/
static void checkCudaCall(cudaError_t result)
{
if (result != cudaSuccess)
{
cerr << "cuda error: " << cudaGetErrorString(result) << endl;
exit(EXIT_FAILURE);
}
}
/*
This is the kernel, which runs on the GPU. It checks if the point we want
to change is not the first or last one, as they stay 0. Then we update it
and change the old and current array accordingly.
*/
__global__ void WaveKernel(double* old_GPU, double* current_GPU,
double* next_GPU, int i_max)
{
unsigned index = blockIdx.x * blockDim.x + threadIdx.x;
if(index > 0 && index < i_max - 1)
{
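/* discrete wave update: u_new[i] = 2u[i] - u_old[i] + c*(u[i-1] - 2u[i] + u[i+1]) with coupling c = 0.15 */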
next_GPU[index] = 2 * current_GPU[index] - old_GPU[index] + 0.15 *
(current_GPU[index-1] - (2 * current_GPU[index] - current_GPU[index+1]));
old_GPU[index] = current_GPU[index];
current_GPU[index] = next_GPU[index];
}
}
/*
This function prepares and copies data to the GPU, runs the kernel, and then
copies the result back.
*/
void WaveCuda(double* old_array, double* current_array, double* next_array,
int block_size, const long t_max, int i_max)
{
/*
This allocates the vectors on the GPU, each time checking if we were
successful.
*/
double* old_GPU = NULL;
checkCudaCall(cudaMalloc((void **) &old_GPU, i_max * sizeof(double)));
if (old_GPU == NULL)
{
cerr << "Could not allocate the old array on GPU." << endl;
return;
}
double* current_GPU = NULL;
checkCudaCall(cudaMalloc((void **) ¤t_GPU, i_max * sizeof(double)));
if (current_GPU == NULL)
{
checkCudaCall(cudaFree(old_GPU));
cerr << "Could not allocate the current array on GPU." << endl;
return;
}
double* next_GPU = NULL;
checkCudaCall(cudaMalloc((void **) &next_GPU, i_max * sizeof(double)));
if (next_GPU == NULL)
{
checkCudaCall(cudaFree(old_GPU));
checkCudaCall(cudaFree(current_GPU));
cerr << "Could not allocate the next array on GPU." << endl;
return;
}
/*
This copies the old array and the current array to the GPU.
*/
checkCudaCall(cudaMemcpy(old_GPU, old_array, i_max * sizeof(double),
cudaMemcpyHostToDevice));
checkCudaCall(cudaMemcpy(current_GPU, current_array, i_max * sizeof(double),
cudaMemcpyHostToDevice));
/*
This executes the wave kernel for every timestep.
*/
for (int t = 0; t < t_max; t++)
{
WaveKernel<<<i_max/block_size + 1, block_size>>>(old_GPU, current_GPU,
next_GPU, i_max);
/*
This checks whether the kernel invocation was succesful.
*/
checkCudaCall(cudaGetLastError());
}
/*
This copies the result back to the host
*/
checkCudaCall(cudaMemcpy(current_array, current_GPU, i_max * sizeof(double),
cudaMemcpyDeviceToHost));
/*
This cleans up the data on the GPU.
*/
checkCudaCall(cudaFree(old_GPU));
checkCudaCall(cudaFree(current_GPU));
checkCudaCall(cudaFree(next_GPU));
}
/*
Function that will simulate the wave equation, parallelized using CUDA.
i_max: how many data points are on a single wave
t_max: how many iterations the simulation should run
block_size: the number of threads per CUDA thread block
old_array: array of size i_max filled with data for t-1
current_array: array of size i_max filled with data for t
next_array: array of size i_max. You should fill this with t+1
*/
double *simulate(const long i_max, const long t_max, const long block_size,
double *old_array, double *current_array, double *next_array)
{
WaveCuda(old_array, current_array, next_array, block_size, t_max, i_max);
return current_array;
}
|
37562c952bb44fb397455b1fb249d6761f7d0da4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gpu_img_transform.cuh"
__global__ void transform_img(const unsigned char *input, unsigned char* output,
std::size_t nb_cols, std::size_t nb_rows,
const char *conv_mat, ConvolutionMatrixProperties *conv_mat_properties)
{
long ith_col = blockIdx.x * blockDim.x + threadIdx.x;
long jth_row = blockIdx.y * blockDim.y + threadIdx.y;
if (ith_col + conv_mat_properties->start_index >= 0 && ith_col < nb_cols + conv_mat_properties->start_index &&
jth_row + conv_mat_properties->start_index >= 0 && jth_row < nb_rows + conv_mat_properties->start_index)
{
long j_local = jth_row + conv_mat_properties->start_index;
long i_local;
long i_max = ith_col + conv_mat_properties->start_index + conv_mat_properties->size;
long j_max = j_local + conv_mat_properties->size;
long rgb[3] = {0, 0, 0};
for( long j_inc = 0; j_local < j_max; j_local++){
i_local = i_max - conv_mat_properties->size;
long index = 3 * (j_local * (long)nb_cols + i_local);
for( ; i_local < i_max; i_local++, j_inc++ ){
rgb[0] += conv_mat[j_inc] * input[ index ];
rgb[1] += conv_mat[j_inc] * input[ index + 1 ];
rgb[2] += conv_mat[j_inc] * input[ index + 2 ];
index += 3;
}
}
for( long i = 0, j = 3 * (jth_row * (long)nb_cols + ith_col); i < 3; i++, j++)
output[j] = rgb[i] / conv_mat_properties->divisor;
}
}
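/* Shared-memory variant: each block stages its tile of the input into shared
   memory, synchronizes, then convolves only the interior threads whose whole
   neighbourhood lies inside the staged tile */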
__global__ void transform_img_shared(const unsigned char *input, unsigned char* output,
std::size_t nb_cols_global, std::size_t nb_rows_global,
const char *conv_mat, ConvolutionMatrixProperties *conv_prop)
{
extern __shared__ unsigned char sh[];
long ith_col_global = blockIdx.x * (blockDim.x + conv_prop->start_index - 1) + threadIdx.x;
long jth_row_global = blockIdx.y * (blockDim.y + conv_prop->start_index - 1) + threadIdx.y;
long ith_col = threadIdx.x;
long jth_row = threadIdx.y;
long nb_rows = blockDim.y;
long nb_cols = blockDim.x;
if (ith_col_global < nb_cols_global && jth_row_global < nb_rows_global)
{
long index = 3 * (jth_row * nb_cols + ith_col);
long index_global = 3 * (jth_row_global * (long)nb_cols_global + (long)ith_col_global);
sh[index] = input[index_global ];
sh[index + 1] = input[index_global + 1];
sh[index + 2] = input[index_global + 2];
}
__syncthreads();
if ( ( ith_col_global > 0 && ith_col_global < nb_cols_global ) &&
( jth_row_global > 0 && jth_row_global < nb_rows_global) &&
( ith_col + conv_prop->start_index >= 0 && ith_col < nb_cols + conv_prop->start_index ) &&
( jth_row + conv_prop->start_index >= 0 && jth_row < nb_rows + conv_prop->start_index) )
{
long j_local = jth_row + conv_prop->start_index;
long i_local;
long i_max = ith_col + conv_prop->start_index + conv_prop->size;
long j_max = j_local + conv_prop->size;
long rgb[3] = {0, 0, 0};
for( long j_inc = 0; j_local < j_max; j_local++){
i_local = i_max - conv_prop->size;
long index = 3 * (j_local * nb_cols + i_local);
for( ; i_local < i_max; i_local++, j_inc++ ){
rgb[0] += conv_mat[j_inc] * sh[ index ];
rgb[1] += conv_mat[j_inc] * sh[ index + 1 ];
rgb[2] += conv_mat[j_inc] * sh[ index + 2 ];
index += 3;
}
}
for( long i = 0, j = 3 * (jth_row_global * (long)nb_cols_global + ith_col_global); i < 3; i++, j++)
output[j] = rgb[i] / conv_prop->divisor;
}
}
void GpuImgTransform::initMemory(cv::Mat &m_in, Pointers &dev, Pointers &host, long size, int conv_mat_length){
hipHostMalloc(&host.rgb.in, size);
std::memcpy(host.rgb.in, m_in.data, size);
hipMalloc(&dev.rgb.in, size);
hipMalloc(&dev.rgb.out, size);
hipMalloc(&dev.convolution.matrix, conv_mat_length * sizeof(char));
hipMalloc(&dev.convolution.prop, sizeof(ConvolutionMatrixProperties));
hipMemcpy(dev.rgb.in, host.rgb.in, size, hipMemcpyHostToDevice);
hipMemcpy(dev.convolution.matrix, host.convolution.matrix, conv_mat_length, hipMemcpyHostToDevice);
hipMemcpy(dev.convolution.prop , host.convolution.prop, sizeof(ConvolutionMatrixProperties), hipMemcpyHostToDevice);
}
void GpuImgTransform::freeMemory(Pointers &dev, Pointers &host){
hipFree(dev.rgb.in);
hipFree(dev.rgb.out);
hipFree(dev.convolution.matrix);
hipFree(dev.convolution.prop);
hipHostFree(host.rgb.in);
}
int GpuImgTransform::execute(cv::Mat &m_in, cv::Mat &m_out, GpuUtilExecutionInfo &info)
{
auto rows = m_in.rows;
auto cols = m_in.cols;
Pointers dev;
Pointers host;
host.convolution.prop = &info.conv_properties;
host.convolution.matrix = info.conv_matrix;
int size = 3 * rows * cols;
int conv_mat_length = info.conv_properties.size * info.conv_properties.size;
initMemory(m_in, dev, host, size, conv_mat_length);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Measure only the kernel computation time.
hipEventRecord(start);
dim3 grid0((cols - 1) / info.block.x + 1, (rows - 1) / info.block.y + 1);
hipLaunchKernelGGL(( transform_img), dim3(grid0), dim3(info.block) , 0, 0, dev.rgb.in, dev.rgb.out, cols, rows, dev.convolution.matrix,
dev.convolution.prop);
for( int kth_pass = 1; kth_pass < info.nb_pass; kth_pass++){
swapPointers(&dev.rgb.in, &dev.rgb.out);
hipLaunchKernelGGL(( transform_img), dim3(grid0), dim3(info.block) , 0, 0, dev.rgb.in, dev.rgb.out, cols, rows, dev.convolution.matrix,
dev.convolution.prop);
}
hipDeviceSynchronize();
hipEventRecord(stop);
hipMemcpy(m_out.data, dev.rgb.out, size, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float duration;
hipEventElapsedTime(&duration, start, stop);
std::cout << "time=" << duration << " ms" << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
freeMemory(dev, host);
return 0;
}
int GpuImgTransform::executeSharedMemMode(cv::Mat &m_in, cv::Mat &m_out, GpuUtilExecutionInfo &info){
auto rows = m_in.rows;
auto cols = m_in.cols;
Pointers dev;
Pointers host;
host.convolution.prop = &info.conv_properties;
host.convolution.matrix = info.conv_matrix;
int size = 3 * rows * cols;
int conv_mat_length = info.conv_properties.size * info.conv_properties.size;
initMemory(m_in, dev, host, size, conv_mat_length);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Measure only the kernel computation time.
hipEventRecord(start);
dim3 grid0((cols - 1) / (info.block.x - 1 + info.conv_properties.start_index) + 1,
(rows - 1) / (info.block.y - 1 + info.conv_properties.start_index) + 1);
hipLaunchKernelGGL(( transform_img_shared), dim3(grid0), dim3(info.block), 3 * info.block.x * info.block.y, 0, dev.rgb.in, dev.rgb.out, cols, rows,
dev.convolution.matrix, dev.convolution.prop);
for( int kth_pass = 1; kth_pass < info.nb_pass; kth_pass++){
swapPointers(&dev.rgb.in, &dev.rgb.out);
hipLaunchKernelGGL(( transform_img_shared), dim3(grid0), dim3(info.block), 3 * info.block.x * info.block.y, 0, dev.rgb.in, dev.rgb.out, cols, rows,
dev.convolution.matrix, dev.convolution.prop);
}
hipDeviceSynchronize();
hipEventRecord(stop);
hipMemcpy(m_out.data, dev.rgb.out, size, hipMemcpyDeviceToHost);
hipEventSynchronize(stop);
float duration;
hipEventElapsedTime(&duration, start, stop);
std::cout << "time=" << duration << " ms" << std::endl;
hipEventDestroy(start);
hipEventDestroy(stop);
freeMemory(dev, host);
return 0;
}
| 37562c952bb44fb397455b1fb249d6761f7d0da4.cu | #include "gpu_img_transform.cuh"
__global__ void transform_img(const unsigned char *input, unsigned char* output,
std::size_t nb_cols, std::size_t nb_rows,
const char *conv_mat, ConvolutionMatrixProperties *conv_mat_properties)
{
long ith_col = blockIdx.x * blockDim.x + threadIdx.x;
long jth_row = blockIdx.y * blockDim.y + threadIdx.y;
if (ith_col + conv_mat_properties->start_index >= 0 && ith_col < nb_cols + conv_mat_properties->start_index &&
jth_row + conv_mat_properties->start_index >= 0 && jth_row < nb_rows + conv_mat_properties->start_index)
{
long j_local = jth_row + conv_mat_properties->start_index;
long i_local;
long i_max = ith_col + conv_mat_properties->start_index + conv_mat_properties->size;
long j_max = j_local + conv_mat_properties->size;
long rgb[3] = {0, 0, 0};
for( long j_inc = 0; j_local < j_max; j_local++){
i_local = i_max - conv_mat_properties->size;
long index = 3 * (j_local * (long)nb_cols + i_local);
for( ; i_local < i_max; i_local++, j_inc++ ){
rgb[0] += conv_mat[j_inc] * input[ index ];
rgb[1] += conv_mat[j_inc] * input[ index + 1 ];
rgb[2] += conv_mat[j_inc] * input[ index + 2 ];
index += 3;
}
}
for( long i = 0, j = 3 * (jth_row * (long)nb_cols + ith_col); i < 3; i++, j++)
output[j] = rgb[i] / conv_mat_properties->divisor;
}
}
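/*
  Shared-memory variant of transform_img: each block first stages its RGB
  tile into dynamic shared memory (3 bytes per thread, matching the
  3*blockDim.x*blockDim.y launch size), synchronizes, then convolves entirely
  from the tile. Block origins overlap by (1 - start_index) pixels so that
  interior threads see their full neighborhood; threads whose window would
  leave the tile write nothing.
*/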
__global__ void transform_img_shared(const unsigned char *input, unsigned char* output,
std::size_t nb_cols_global, std::size_t nb_rows_global,
const char *conv_mat, ConvolutionMatrixProperties *conv_prop)
{
extern __shared__ unsigned char sh[];
long ith_col_global = blockIdx.x * (blockDim.x + conv_prop->start_index - 1) + threadIdx.x;
long jth_row_global = blockIdx.y * (blockDim.y + conv_prop->start_index - 1) + threadIdx.y;
long ith_col = threadIdx.x;
long jth_row = threadIdx.y;
long nb_rows = blockDim.y;
long nb_cols = blockDim.x;
if (ith_col_global < nb_cols_global && jth_row_global < nb_rows_global)
{
long index = 3 * (jth_row * nb_cols + ith_col);
long index_global = 3 * (jth_row_global * (long)nb_cols_global + (long)ith_col_global);
sh[index] = input[index_global ];
sh[index + 1] = input[index_global + 1];
sh[index + 2] = input[index_global + 2];
}
__syncthreads();
if ( ( ith_col_global > 0 && ith_col_global < nb_cols_global ) &&
( jth_row_global > 0 && jth_row_global < nb_rows_global) &&
( ith_col + conv_prop->start_index >= 0 && ith_col < nb_cols + conv_prop->start_index ) &&
( jth_row + conv_prop->start_index >= 0 && jth_row < nb_rows + conv_prop->start_index) )
{
long j_local = jth_row + conv_prop->start_index;
long i_local;
long i_max = ith_col + conv_prop->start_index + conv_prop->size;
long j_max = j_local + conv_prop->size;
long rgb[3] = {0, 0, 0};
for( long j_inc = 0; j_local < j_max; j_local++){
i_local = i_max - conv_prop->size;
long index = 3 * (j_local * nb_cols + i_local);
for( ; i_local < i_max; i_local++, j_inc++ ){
rgb[0] += conv_mat[j_inc] * sh[ index ];
rgb[1] += conv_mat[j_inc] * sh[ index + 1 ];
rgb[2] += conv_mat[j_inc] * sh[ index + 2 ];
index += 3;
}
}
for( long i = 0, j = 3 * (jth_row_global * (long)nb_cols_global + ith_col_global); i < 3; i++, j++)
output[j] = rgb[i] / conv_prop->divisor;
}
}
void GpuImgTransform::initMemory(cv::Mat &m_in, Pointers &dev, Pointers &host, long size, int conv_mat_length){
cudaMallocHost(&host.rgb.in, size);
std::memcpy(host.rgb.in, m_in.data, size);
cudaMalloc(&dev.rgb.in, size);
cudaMalloc(&dev.rgb.out, size);
cudaMalloc(&dev.convolution.matrix, conv_mat_length * sizeof(char));
cudaMalloc(&dev.convolution.prop, sizeof(ConvolutionMatrixProperties));
cudaMemcpy(dev.rgb.in, host.rgb.in, size, cudaMemcpyHostToDevice);
cudaMemcpy(dev.convolution.matrix, host.convolution.matrix, conv_mat_length, cudaMemcpyHostToDevice);
cudaMemcpy(dev.convolution.prop , host.convolution.prop, sizeof(ConvolutionMatrixProperties), cudaMemcpyHostToDevice);
}
void GpuImgTransform::freeMemory(Pointers &dev, Pointers &host){
cudaFree(dev.rgb.in);
cudaFree(dev.rgb.out);
cudaFree(dev.convolution.matrix);
cudaFree(dev.convolution.prop);
cudaFreeHost(host.rgb.in);
}
int GpuImgTransform::execute(cv::Mat &m_in, cv::Mat &m_out, GpuUtilExecutionInfo &info)
{
auto rows = m_in.rows;
auto cols = m_in.cols;
Pointers dev;
Pointers host;
host.convolution.prop = &info.conv_properties;
host.convolution.matrix = info.conv_matrix;
int size = 3 * rows * cols;
int conv_mat_length = info.conv_properties.size * info.conv_properties.size;
initMemory(m_in, dev, host, size, conv_mat_length);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Measure only the kernel computation time.
cudaEventRecord(start);
dim3 grid0((cols - 1) / info.block.x + 1, (rows - 1) / info.block.y + 1);
transform_img<<< grid0, info.block >>>(dev.rgb.in, dev.rgb.out, cols, rows, dev.convolution.matrix,
dev.convolution.prop);
for( int kth_pass = 1; kth_pass < info.nb_pass; kth_pass++){
swapPointers(&dev.rgb.in, &dev.rgb.out);
transform_img<<< grid0, info.block >>>(dev.rgb.in, dev.rgb.out, cols, rows, dev.convolution.matrix,
dev.convolution.prop);
}
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaMemcpy(m_out.data, dev.rgb.out, size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float duration;
cudaEventElapsedTime(&duration, start, stop);
std::cout << "time=" << duration << " ms" << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
freeMemory(dev, host);
return 0;
}
int GpuImgTransform::executeSharedMemMode(cv::Mat &m_in, cv::Mat &m_out, GpuUtilExecutionInfo &info){
auto rows = m_in.rows;
auto cols = m_in.cols;
Pointers dev;
Pointers host;
host.convolution.prop = &info.conv_properties;
host.convolution.matrix = info.conv_matrix;
int size = 3 * rows * cols;
int conv_mat_length = info.conv_properties.size * info.conv_properties.size;
initMemory(m_in, dev, host, size, conv_mat_length);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Mesure du temps de calcul du kernel uniquement.
cudaEventRecord(start);
dim3 grid0((cols - 1) / (info.block.x - 1 + info.conv_properties.start_index) + 1,
(rows - 1) / (info.block.y - 1 + info.conv_properties.start_index) + 1);
transform_img_shared<<<grid0, info.block, 3 * info.block.x * info.block.y>>>(dev.rgb.in, dev.rgb.out, cols, rows,
dev.convolution.matrix, dev.convolution.prop);
for( int kth_pass = 1; kth_pass < info.nb_pass; kth_pass++){
swapPointers(&dev.rgb.in, &dev.rgb.out);
transform_img_shared<<<grid0, info.block, 3 * info.block.x * info.block.y>>>(dev.rgb.in, dev.rgb.out, cols, rows,
dev.convolution.matrix, dev.convolution.prop);
}
cudaDeviceSynchronize();
cudaEventRecord(stop);
cudaMemcpy(m_out.data, dev.rgb.out, size, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
float duration;
cudaEventElapsedTime(&duration, start, stop);
std::cout << "time=" << duration << " ms" << std::endl;
cudaEventDestroy(start);
cudaEventDestroy(stop);
freeMemory(dev, host);
return 0;
}
|
09ad49b6341e919d1d29d82e176928ac5a6015e2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime_api.h>
#include <math.h>
#include "clusteringSub.h"
using namespace std;
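/*
  Merges overlapping clusters inside each thread's row range [m_start, limit):
  rows of the nfib x nfib membership matrix with only one member are first
  deactivated; then any two active rows that share a fiber are OR-combined
  into the lower-indexed row, the higher row is deactivated, and the inner
  scan restarts from m_start.
*/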
__global__ void clusteringSub(float **var, int **intVar){
int nfib = *intVar[6];
int *cluster = intVar[35];
int *clusterAccess = intVar[36];
int m, n, ind, N;
int m_start, limit, dum;
bool combine;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
dum = nfib / (blockDim.x*gridDim.x);
m_start = tid*dum;
limit = (tid + 1)*dum;
combine = false;
for (m = m_start; m < limit; m++){
if(clusterAccess[m] == 1){
N = 0;
for (ind = 0; ind < nfib; ind++){
N += cluster[m*nfib + ind];
}
if (N == 1){
clusterAccess[m] = 0;
}
}
}
for (m = m_start; m < limit; m++){
if (clusterAccess[m] == 1){
for (n = m_start; n < limit; n++){
if ((clusterAccess[n] == 1) && (m != n)){
for (ind = 0; ind < nfib; ind++){
if ((cluster[n*nfib + ind] == 1) && (cluster[m*nfib + ind] == cluster[n*nfib + ind])){
combine = true;
clusterAccess[n] = 0;
break;
}
}
if (combine){
cluster[m*nfib + n] = 1;
for (ind = m_start; ind < nfib; ind++){
if (cluster[n*nfib + ind] == 1){
cluster[m*nfib + ind] = 1;
}
}
n = m_start;
combine = false;
}
}
}
}
}
}
| 09ad49b6341e919d1d29d82e176928ac5a6015e2.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include "clusteringSub.h"
using namespace std;
__global__ void clusteringSub(float **var, int **intVar){
int nfib = *intVar[6];
int *cluster = intVar[35];
int *clusterAccess = intVar[36];
int m, n, ind, N;
int m_start, limit, dum;
bool combine;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
dum = nfib / (blockDim.x*gridDim.x);
m_start = tid*dum;
limit = (tid + 1)*dum;
combine = false;
for (m = m_start; m < limit; m++){
if(clusterAccess[m] == 1){
N = 0;
for (ind = 0; ind < nfib; ind++){
N += cluster[m*nfib + ind];
}
if (N == 1){
clusterAccess[m] = 0;
}
}
}
for (m = m_start; m < limit; m++){
if (clusterAccess[m] == 1){
for (n = m_start; n < limit; n++){
if ((clusterAccess[n] == 1) && (m != n)){
for (ind = 0; ind < nfib; ind++){
if ((cluster[n*nfib + ind] == 1) && (cluster[m*nfib + ind] == cluster[n*nfib + ind])){
combine = true;
clusterAccess[n] = 0;
break;
}
}
if (combine){
cluster[m*nfib + n] = 1;
for (ind = m_start; ind < nfib; ind++){
if (cluster[n*nfib + ind] == 1){
cluster[m*nfib + ind] = 1;
}
}
n = m_start;
combine = false;
}
}
}
}
}
}
|
4193547d1fcafb8db1757fb25e3c88d58b49379b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void updZ(float *z, float *f, float tz, float beta, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float a, b, t;
if (px<nx && py<ny)
{
// compute the gradient
a = 0;
b = 0;
if (px<(nx - 1)) a = f[idx + 1] - f[idx];
if (py<(ny - 1)) b = f[idx + nx] - f[idx];
// update z
a = z[2 * idx + 0] + tz*a;
b = z[2 * idx + 1] + tz*b;
t = sqrtf(beta + a*a + b*b);
t = t<1. ? 1. : 1. / t;
z[2 * idx + 0] = a*t;
z[2 * idx + 1] = b*t;
}
} | 4193547d1fcafb8db1757fb25e3c88d58b49379b.cu | #include "includes.h"
__global__ void updZ(float *z, float *f, float tz, float beta, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float a, b, t;
if (px<nx && py<ny)
{
// compute the gradient
a = 0;
b = 0;
if (px<(nx - 1)) a = f[idx + 1] - f[idx];
if (py<(ny - 1)) b = f[idx + nx] - f[idx];
// update z
a = z[2 * idx + 0] + tz*a;
b = z[2 * idx + 1] + tz*b;
t = sqrtf(beta + a*a + b*b);
t = t<1. ? 1. : 1. / t;
z[2 * idx + 0] = a*t;
z[2 * idx + 1] = b*t;
}
} |
e0633b6551948689e492a1424d1e8f89a9bb7ac8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <rocblas.h>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/util_img.hpp"
namespace caffe {
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
ResizeBlob_gpu(bottom[0], (*top)[0], this->locs_[0],this->locs_[1],this->locs_[2],this->locs_[3]);
ResizeParameter resize_param = this->layer_param_.resize_param();
Dtype theMultiple = resize_param.multiple_scale();
caffe_gpu_scal((*top)[0]->count(), theMultiple, (*top)[0]->mutable_gpu_data());
}
template <typename Dtype>
__global__ void kernel_ResizeBackward(const int nthreads, const Dtype* top_diff, const int top_step,
Dtype* bottom_diff, const int bottom_step,
const Dtype* loc1,const Dtype* weight1, const Dtype* loc2, const Dtype* weight2,
const Dtype* loc3,const Dtype* weight3, const Dtype* loc4, const Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_diff_offset = bottom_step*index;
int top_diff_offset = top_step*index;
for (int idx = 0; idx < top_step; ++idx)
{
bottom_diff[bottom_diff_offset + int(loc1[idx])] += top_diff[top_diff_offset+idx]*weight1[idx];
bottom_diff[bottom_diff_offset + int(loc2[idx])] += top_diff[top_diff_offset+idx]*weight2[idx];
bottom_diff[bottom_diff_offset + int(loc3[idx])] += top_diff[top_diff_offset+idx]*weight3[idx];
bottom_diff[bottom_diff_offset + int(loc4[idx])] += top_diff[top_diff_offset+idx]*weight4[idx];
}
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
Dtype* top_diff = top[0]->mutable_gpu_diff();
const Dtype* loc1 = this->locs_[0]->gpu_data();
const Dtype* weight1 = this->locs_[0]->gpu_diff();
const Dtype* loc2 = this->locs_[1]->gpu_data();
const Dtype* weight2 = this->locs_[1]->gpu_diff();
const Dtype* loc3 = this->locs_[2]->gpu_data();
const Dtype* weight3 = this->locs_[2]->gpu_diff();
const Dtype* loc4 = this->locs_[3]->gpu_data();
const Dtype* weight4 = this->locs_[3]->gpu_diff();
caffe::caffe_gpu_set((*bottom)[0]->count(),Dtype(0),bottom_diff);
const int top_step = top[0]->offset(0,1);
const int bottom_step = (*bottom)[0]->offset(0,1);
int loop_n = this->out_num_ * this->out_channels_;
hipLaunchKernelGGL(( kernel_ResizeBackward<Dtype>) , dim3(CAFFE_GET_BLOCKS(loop_n)), dim3(CAFFE_CUDA_NUM_THREADS) , 0, 0,
loop_n, top_diff, top_step,
bottom_diff, bottom_step,
loc1,weight1, loc2, weight2,
loc3,weight3,loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(ResizeLayer);
} // namespace caffe
| e0633b6551948689e492a1424d1e8f89a9bb7ac8.cu | // Copyright 2014 BVLC and contributors.
#include <cublas_v2.h>
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/vision_layers.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/util/util_img.hpp"
namespace caffe {
template <typename Dtype>
void ResizeLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
ResizeBlob_gpu(bottom[0], (*top)[0], this->locs_[0],this->locs_[1],this->locs_[2],this->locs_[3]);
ResizeParameter resize_param = this->layer_param_.resize_param();
Dtype theMultiple = resize_param.multiple_scale();
caffe_gpu_scal((*top)[0]->count(), theMultiple, (*top)[0]->mutable_gpu_data());
}
template <typename Dtype>
__global__ void kernel_ResizeBackward(const int nthreads, const Dtype* top_diff, const int top_step,
Dtype* bottom_diff, const int bottom_step,
const Dtype* loc1,const Dtype* weight1, const Dtype* loc2, const Dtype* weight2,
const Dtype* loc3,const Dtype* weight3, const Dtype* loc4, const Dtype* weight4)
{
CUDA_KERNEL_LOOP(index, nthreads) {
int bottom_diff_offset = bottom_step*index;
int top_diff_offset = top_step*index;
for (int idx = 0; idx < top_step; ++idx)
{
bottom_diff[bottom_diff_offset + int(loc1[idx])] += top_diff[top_diff_offset+idx]*weight1[idx];
bottom_diff[bottom_diff_offset + int(loc2[idx])] += top_diff[top_diff_offset+idx]*weight2[idx];
bottom_diff[bottom_diff_offset + int(loc3[idx])] += top_diff[top_diff_offset+idx]*weight3[idx];
bottom_diff[bottom_diff_offset + int(loc4[idx])] += top_diff[top_diff_offset+idx]*weight4[idx];
}
}
}
template <typename Dtype>
void ResizeLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
Dtype* top_diff = top[0]->mutable_gpu_diff();
const Dtype* loc1 = this->locs_[0]->gpu_data();
const Dtype* weight1 = this->locs_[0]->gpu_diff();
const Dtype* loc2 = this->locs_[1]->gpu_data();
const Dtype* weight2 = this->locs_[1]->gpu_diff();
const Dtype* loc3 = this->locs_[2]->gpu_data();
const Dtype* weight3 = this->locs_[2]->gpu_diff();
const Dtype* loc4 = this->locs_[3]->gpu_data();
const Dtype* weight4 = this->locs_[3]->gpu_diff();
caffe::caffe_gpu_set((*bottom)[0]->count(),Dtype(0),bottom_diff);
const int top_step = top[0]->offset(0,1);
const int bottom_step = (*bottom)[0]->offset(0,1);
int loop_n = this->out_num_ * this->out_channels_;
kernel_ResizeBackward<Dtype> <<<CAFFE_GET_BLOCKS(loop_n), CAFFE_CUDA_NUM_THREADS >>>(
loop_n, top_diff, top_step,
bottom_diff, bottom_step,
loc1,weight1, loc2, weight2,
loc3,weight3,loc4, weight4);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_CLASS(ResizeLayer);
} // namespace caffe
|
e8b1402eb3c2b816746440b52cef68f1f1b9d4db.hip | // !!! This is a file automatically generated by hipify!!!
// /home/ubuntu/Desktop/GPU/main.c
// nvcc main.cu -o test -lstdc++ -lpthread -lcufft -lpcap -std=c++11
#include <pcap.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/ip_icmp.h>
#include <net/ethernet.h>
#include <netinet/if_ether.h>
#include <netinet/ether.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <memory.h>
#include <malloc.h>
#include <iostream>
//--------------CUDA----------------
#include <hip/hip_runtime.h>
//#include <device_launch_parameters.h>
//#include <hip/device_functions.h>
#include <hipfft.h>
//#include <hipfftXt.h>
//-------------------------------------
// ----------------------------------------
#define NFFT 16384 // FFT length
#define PI 3.1415926f
#define UWC 1500.0f // underwater sound speed (m/s)
#define FS 100000 // sample rate (Hz)
#define threadsPerBlock 512
#define d 0.07f
#define FL 100.0f
#define FH 4000.0f
#define TL 17
#define CHANNUM 16
#define FRAMELEN 65536
#define DOWNSAMPLE 4
#define FIRORDER 2048
#define FILTER_FRAME (2*FRAMELEN)
#define BEAMNUM 91
#define THREADNUMPERBLK 256
#define ARRAYNUM 15
#define STARTBEAM 15
#define ENDBEAM 75
#define MAXTRACETARNUM 3
#define M 3
#define ONLINEMODE 0
#define FILEMODE 1
#define DEST_PORT 0
#define PSD_LEN 20
#define PSD_AVG_NUM 8
#define EPS 1e-8
#define SMOOTH_N 100
#define LINE_NUM 16
#define DEM_RST_LEN 1024
#define VECTOR_P_IDX 22
#define VECTOR_X_IDX 16
#define VECTOR_Y_IDX 18
// -----------------------------------------------------
void *ReadBoard1Data(void *lParam);
void *ReadBoard2Data(void *lParam);
void *DataFormatting(void *lParam);
void *ReceiveNetwork(void *lParam);
void *ArraySignalProcessing(void *lParam);
//-----------------------------------------------------
pthread_mutex_t count_lock_BoardDataReady;
pthread_mutex_t count_lock_Board1DataReady;
pthread_mutex_t count_lock_Board2DataReady;
pthread_mutex_t count_lock_FrameDataReady;
pthread_cond_t cond_BoardDataReady;
pthread_cond_t cond_Board1DataReady;
pthread_cond_t cond_Board2DataReady;
pthread_cond_t cond_FrameDataReady;
unsigned int count_BoardDataReady;
unsigned int count_Board1DataReady;
unsigned int count_Board2DataReady;
unsigned int count_FrameDataReady;
//-----------------------------------------------------
int *DataBufA_B1 = NULL;
int *DataBufB_B1 = NULL;
int *DataBufA_B2 = NULL;
int *DataBufB_B2 = NULL;
float *ChannDataBufA = NULL;
float *ChannDataBufB = NULL;
float *DownSamplingDataBufA = NULL;
float *DownSamplingDataBufB = NULL;
//---------------------------------------------------
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h);
float window(int type,int n,int i,float beta);
float kaiser(int i,int n,float beta);
float bessel0(float x);
void findpeak(float *data, int *p,int dn);
void findvalley(float *data, int *p,int dn);
bool peakdetection(int beamidx,float *be,int *valley,float threshold);
void rbub(float *p,int *idx,int n);
void MySmooth(float *datain,int nDataLen,float *paraA,int nParaLen,int nOrder,int nWindow,int nStep,float *dataout);
void CalSmoothPara(float *para);
//-----------------------------------------------------
// smoothing and line-tracking state
float fSmoothA[4][SMOOTH_N]={0.0}; // orthonormal smoothing basis, filled by CalSmoothPara
float fPlineInfo[MAXTRACETARNUM][LINE_NUM][4]={0};// line-spectrum info per traced target
float fDlineInfo[MAXTRACETARNUM][LINE_NUM][2]={0};// DEMON line info per traced target
int nPlineNum = 0;
int nDlineNum = 0;
int nVectorPlineNum = 0;
float fVectorPlineInfo[LINE_NUM][4]={0}; // line-spectrum info from the vector channel
// DEMON analysis bands
int DemFreqBandNum=0; // number of bands in use (up to 10)
float DemStartFreq[10]={0.0}; // band start frequencies (Hz)
float DemEndFreq[10]={0.0}; // band end frequencies (Hz)
// -----------------------------------------------------------
int main()
{
pthread_t t_ReceiveNetworkData;
pthread_t t_DataFormatting;
pthread_t t_ArraySignalProcessing;
pthread_t t_ReadBoard1Data;
pthread_t t_ReadBoard2Data;
cond_BoardDataReady = PTHREAD_COND_INITIALIZER;
cond_Board1DataReady = PTHREAD_COND_INITIALIZER;
cond_Board2DataReady = PTHREAD_COND_INITIALIZER;
cond_FrameDataReady = PTHREAD_COND_INITIALIZER;
count_lock_BoardDataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_Board1DataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_Board2DataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_FrameDataReady = PTHREAD_MUTEX_INITIALIZER;
pthread_create(&t_ArraySignalProcessing,NULL,ArraySignalProcessing,(void *)NULL);
pthread_create(&t_DataFormatting,NULL,DataFormatting,(void *)NULL);
#if ONLINEMODE
pthread_create(&t_ReceiveNetworkData,NULL,ReceiveNetwork,(void *)NULL);
#endif
#if FILEMODE
pthread_create(&t_ReadBoard1Data,NULL,ReadBoard1Data,(void *)NULL);
pthread_create(&t_ReadBoard2Data,NULL,ReadBoard2Data,(void *)NULL);
#endif
pthread_join(t_ArraySignalProcessing, NULL);
return 0;
}
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h)
{
int i,n2,mid;
float sum = 0;
float s,wc1,wc2,beta = 0,delay;
float fln = fl / fs;
float fhn = fh / fs;
beta = 6;
if((n%2)==0)
{
n2=n/2-1;
mid=1;
}
else
{
n2=n/2;
mid=0;
}
delay=n/2.0;
wc1=2.0*PI*fln;
if(band>=3) wc2=2.0*PI*fhn;
switch(band)
{
case 1:// lowpass
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)/(PI*s))*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=wc1/PI;
for(i=0;i<=n;i++)
{
sum=sum+*(h+i);
}
for(i=0;i<=n;i++)
{
*(h+i)=*(h+i)/fabs(sum);
}
break;
}
case 2: // highpass
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(PI*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=1.0-wc1/PI;
break;
}
case 3: // bandpass
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc2*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc2-wc1)/PI;
break;
}
case 4: // bandstop
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)+sin(PI*s)-sin(wc2*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc1+PI-wc2)/PI;
break;
}
}
return 0;
}
float window(int type,int n,int i,float beta)
{
int k;
float w=1.0;
switch(type)
{
case 1: // rectangular
{
w=1.0;
break;
}
case 2: // tapered cosine (raised-cosine ramps over ~10% of each edge)
{
k=(n-2)/10;
if(i<=k) w=0.5*(1.0-cos(i*PI/(k+1)));
if(i>n-k-2) w=0.5*(1.0-cos((n-i-1)*PI/(k+1)));
break;
}
case 3: // triangular (Bartlett)
{
w=1.0-fabs(1.0-2*i/(n-1.0));
break;
}
case 4: // Hanning
{
w=0.5*(1.0-cos(2*i*PI/(n-1.0)));
break;
}
case 5: // Hamming
{
w=0.54-0.46*cos(2*i*PI/(n-1.0));
break;
}
case 6: // Blackman
{
w=0.42-0.5*cos(2*i*PI/(n-1.0))+0.08*cos(4*i*PI/(n-1.0));
break;
}
case 7: // Kaiser
{
w=kaiser(i,n,beta);
break;
}
}
return(w);
}
float kaiser(int i,int n,float beta) // Kaiser window coefficient
{
float a,w,a2,b1,b2,beta1;
b1=bessel0(beta);
a=2.0*i/(float)(n-1)-1.0;
a2=a*a;
beta1=beta*sqrt(1.0-a2);
b2=bessel0(beta1);
w=b2/b1;
return(w);
}
float bessel0(float x) // zeroth-order modified Bessel function I0(x), series expansion
{
int i;
float dd,y,d2,sum = 0;
y=x/2.0;
dd=1.0;
for(i=1;i<=25;i++)
{
dd=dd*y/i;
d2=dd*dd;
sum=sum+d2;
if(d2<sum*(1.0e-8)) break;
}
return(sum);
}
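/*
  Precomputes steering phase factors for the frequency-domain beamformer:
  blockIdx.x is the FFT bin k, threadIdx.x the beam. For beam angle
  theta = tid*PI/(BEAMNUM-1), element i is delayed by det = i*d*cos(theta)/UWC
  seconds, i.e. m = det*(FS/DOWNSAMPLE) samples, and bin k gets the rotation
  exp(+j*2*PI*k*m/NFFT) stored as (cos, sin).
*/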
__global__ void PhiShiftFactorGen(hipfftComplex *XNSS)
{
int bid = 0,tid = 0;
float tt = 0.0f;
float angle=0.0f;
float det[ARRAYNUM];
float MovePoints[ARRAYNUM];
bid = blockIdx.x;
tid = threadIdx.x;
angle=float(tid*PI/(BEAMNUM-1));
for(int i=0;i<ARRAYNUM;i++)
{
det[i]=i*d*cos(angle)/UWC;
MovePoints[i]=det[i]*FS/DOWNSAMPLE;
tt=MovePoints[i]*2*PI*bid/NFFT;
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].x = cos(tt);
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].y = sin(tt);
}
}
void findpeak(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
// compare with the following 10 samples (wrap-around)
for(j=1;j<11;j++)
{
if ((i+j)>=dn)
{
a1=*(data+i+j-dn);
}
else
{
a1=*(data+i+j);
}
if (a0>a1)
{
acc=acc+1;
}
}
a0=*(data+i);
// compare with the preceding 10 samples (wrap-around)
for(j=1;j<11;j++)
{
if ((i-j)<0)
{
a1=*(data+i-j+dn);
}
else
{
a1=*(data+i-j);
}
if (a0>a1)
{
acc1=acc1+1;
}
}
if ((acc==10) && (acc1==10))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
void findvalley(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
// compare with the following 5 samples
for(j=1;j<6;j++)
{
if ((i+j)>=dn)
{
break;
}
else
{
a1=*(data+i+j);
}
if (a0<a1)
{
acc=acc+1;
}
}
if(j<5) // loop hit the array boundary: count this side as satisfied
{
acc = 5;
}
a0=*(data+i);
// compare with the preceding 5 samples
for(j=1;j<6;j++)
{
if ((i-j)<0)
{
break;
}
else
{
a1=*(data+i-j);
}
if (a0<a1)
{
acc1=acc1+1;
}
}
if(j<5) // loop hit the array boundary: count this side as satisfied
{
acc1 = 5;
}
if ((acc==5) && (acc1==5))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
bool peakdetection(int beamidx,float *be,int *valley,float threshold)
{
int index = 0,ll=0;
float pvr1 = 1.0,pvr2 = 1.0;
if(beamidx >= STARTBEAM && beamidx <= ENDBEAM)
{
for(ll=beamidx+1;ll<BEAMNUM;ll++)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll<=BEAMNUM-1)
{
pvr1 = be[beamidx] / be[index];
}
for(ll=beamidx-1;ll>=0;ll--)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll>=0)
{
pvr2 = be[beamidx] / be[index];
}
if(pvr1 >= threshold && pvr2 >= threshold)
{
return true;
}
else
{
return false;
}
}
else
{
return false;
}
}
void rbub(float *p,int *idx,int n)
{
int m,k,j,i,xx;
float dd;
k=0;
m=n-1;
while (k<m)
{
j=m-1; m=0;
for(i=k; i<=j; i++)
{
if(p[i]<p[i+1])
{
dd=p[i];
p[i]=p[i+1];
p[i+1]=dd;
xx = idx[i];
idx[i] = idx[i+1];
idx[i+1] = xx;
m=i;
}
}
j=k+1;
k=0;
for (i=m; i>=j; i--)
{
if(p[i-1]<p[i])
{
dd=p[i];
p[i]=p[i-1];
p[i-1]=dd;
xx = idx[i];
idx[i] = idx[i-1];
idx[i-1] = xx;
k=i;
}
}
}
return;
}
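/*
  Sliding-window smoother: each nWindow-long frame is projected onto the
  nOrder orthonormal basis rows in fSmoothA (rr = A*x) and reconstructed as
  A^T*rr, i.e. a least-squares polynomial fit of the frame; frames advance by
  nStep samples, and the two branches after the main loop patch up the tail
  that does not fill a whole step.
*/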
void MySmooth(float *datain,int nDataLen,float *paraA,int nParaLen,int nOrder,int nWindow,int nStep,float *dataout)
{
int nFrameNum,ii,jj,nFrameCnt,idx;
float rr[4]={0};
float fsmooth_tmp[SMOOTH_N]={0};
float fsmooth_tmp2[SMOOTH_N]={0};
nFrameNum=(nDataLen-nWindow)/nStep+1;
for (nFrameCnt=0;nFrameCnt<nFrameNum;nFrameCnt++)
{
if(nFrameCnt==0)
{
memcpy(fsmooth_tmp,datain,nWindow*sizeof(float));
}
else
{
memcpy(&fsmooth_tmp[nWindow-nStep],&datain[nWindow+(nFrameCnt-1)*nStep],nStep*sizeof(float));
}
for (ii=0;ii<nOrder;ii++)
{
rr[ii]=0.0;
for (jj=0;jj<nWindow;jj++)
{
rr[ii]+=fsmooth_tmp[jj]*fSmoothA[ii][jj];
}
}
memset(fsmooth_tmp2,0,SMOOTH_N*sizeof(float));
for (ii=0;ii<nWindow;ii++)
{
for (jj=0;jj<nOrder;jj++)
{
fsmooth_tmp2[ii]+=rr[jj]*fSmoothA[jj][ii];
}
}
memcpy(&dataout[nFrameCnt*nStep],fsmooth_tmp2,nStep*sizeof(float));
memcpy(fsmooth_tmp,&fsmooth_tmp2[nStep],(nWindow-nStep)*sizeof(float));
}//for (nFrameCnt=0;nFrameCnt<nFrameNum-1;nFrameCnt++)
if ((nFrameNum*nStep+nWindow)-nDataLen<nStep)
{
idx=(nFrameNum*nStep+nWindow)-nDataLen;
memcpy(fsmooth_tmp,&fsmooth_tmp2[nStep-idx],(nWindow-nStep+idx)*sizeof(float));
memcpy(&fsmooth_tmp[nWindow-nStep+idx],&datain[nWindow+(nFrameNum-1)*nStep],(nStep-idx)*sizeof(float));
for (ii=0;ii<nOrder;ii++)
{
rr[ii]=0.0;
for (jj=0;jj<nWindow;jj++)
{
rr[ii]+=fsmooth_tmp[jj]*fSmoothA[ii][jj];
}
}
memset(fsmooth_tmp2,0,SMOOTH_N*sizeof(float));
for (ii=0;ii<nWindow;ii++)
{
for (jj=0;jj<nOrder;jj++)
{
fsmooth_tmp2[ii]+=rr[jj]*fSmoothA[jj][ii];
}
}
memcpy(&dataout[nFrameNum*nStep],&fsmooth_tmp2[idx],(nWindow-idx)*sizeof(float));
}
else//if ((nFrameNum*nStep+nWindow)-nDataLen<nStep)
{
memcpy(&dataout[nFrameNum*nStep],&fsmooth_tmp2[nStep],(nWindow-nStep)*sizeof(float));
}//if ((nFrameNum*nStep+nWindow)-nDataLen<nStep)
}
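/*
  Builds the basis used by MySmooth: a Gram-Schmidt orthonormalization of
  {1, n, n^2, n^3} (n centered on the window) over SMOOTH_N points, so the
  projection in MySmooth is effectively a cubic least-squares (Savitzky-Golay
  style) smoother.
*/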
void CalSmoothPara(float *para)
{
float fpara[4][SMOOTH_N];
float ftmp,ftmp2,ftmp3;
int ii,jj;
ftmp=sqrtf((float)(SMOOTH_N));
ftmp=1.0/ftmp;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[0][ii]=ftmp;
}
ftmp2=0;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[1][ii]=(float)(ii-(SMOOTH_N-1)/2);
fpara[2][ii]=fpara[1][ii]*fpara[1][ii];
ftmp2+=fpara[2][ii];
fpara[3][ii]=fpara[2][ii]*fpara[1][ii];
}
ftmp=1.0/sqrtf(ftmp2);
ftmp3=0;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[1][ii]=fpara[1][ii]*ftmp;
ftmp3+=fpara[1][ii]*fpara[3][ii];
}
ftmp=0;
ftmp2=ftmp2/(float)(SMOOTH_N);
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[2][ii]=fpara[2][ii]-ftmp2;
ftmp+=fpara[2][ii]*fpara[2][ii];
}
ftmp=1.0/sqrtf(ftmp);
ftmp2=0;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[2][ii]=fpara[2][ii]*ftmp;
fpara[3][ii]=fpara[3][ii]-ftmp3*fpara[1][ii];
ftmp2+=fpara[3][ii]*fpara[3][ii];
}
ftmp=1.0/sqrtf(ftmp2);
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[3][ii]=fpara[3][ii]*ftmp;
}
memcpy(para,&fpara[0][0],sizeof(float)*4*SMOOTH_N);
}
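/*
  Frequency-domain delay-and-sum beamforming: each block handles one
  (beam, frequency-chunk) pair, with beamidx = blockIdx.x % BEAMNUM and one
  FFT bin per thread. Each thread rotates the ARRAYNUM channel spectra by the
  precomputed steering factors, sums them, and writes |sum|^2 into shared
  memory; thread 0 then reduces the chunk into dev_energy[blockIdx.x], and
  MatrixSumRow later sums the per-chunk energies of each beam.
*/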
__global__ void FD_Beamform(hipfftComplex *dev_fft,hipfftReal *dev_energy,hipfftComplex *PhiArray,int nfl,int nfh)
{
__shared__ float Mabs[THREADNUMPERBLK];
float tempX=0.0f;
float tempY=0.0f;
hipComplex XNSS;
hipComplex XFFTafterPinYi;
float ax = 0.0f,ay=0.0f,bx=0.0f,by=0.0f;
float energyEachBoShu = 0.0f;
int bid = 0,tid = 0;
int beamidx = 0, freqidx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
beamidx = bid % BEAMNUM;
freqidx = bid / BEAMNUM*THREADNUMPERBLK+tid;
if(tid==0)
{
memset(Mabs,0,sizeof(float)*THREADNUMPERBLK);
}
__syncthreads();
// steer each element and coherently sum across the array
tempX=0.0;
tempY=0.0;
for(int i=0;i<ARRAYNUM;i++)
{
XNSS.x=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].x;
XNSS.y=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].y;
ax=dev_fft[i*(NFFT/2+1)+freqidx].x;
ay=dev_fft[i*(NFFT/2+1)+freqidx].y;
bx=XNSS.x;
by=XNSS.y;
if (freqidx>= nfl && freqidx<=nfh)
{
XFFTafterPinYi.x=ax*bx-ay*by;
XFFTafterPinYi.y=ax*by+bx*ay;
}
else
{
XFFTafterPinYi.x=0;
XFFTafterPinYi.y=0;
}
tempX=tempX+ XFFTafterPinYi.x;
tempY=tempY+ XFFTafterPinYi.y;
}
Mabs[tid]=tempX*tempX+tempY*tempY;
// wait for all bins in this chunk
__syncthreads();
// thread 0 reduces the chunk energy
if(tid==0)
{
energyEachBoShu=0.0f;
for(int k=0;k<THREADNUMPERBLK;k++)
{
energyEachBoShu=energyEachBoShu+Mabs[k];
}
dev_energy[bid]= energyEachBoShu;
}
}
__global__ void MatrixSumRow(hipfftReal *dev_energy,hipfftReal *sum_energy,int nrow,int ncol)
{
int bid = 0;
int row = 0,col = 0;
float sum = 0.0;
bid = blockIdx.x;
row = nrow;
col = ncol;
for(int ii = 0;ii<row;ii++)
{
sum = sum+dev_energy[ii*col+bid];
}
sum_energy[bid] = sum;
}
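/*
  Fast-convolution FIR filtering for the decimation stage: every (channel,
  frequency-chunk) block multiplies the channel spectrum by the filter
  spectrum bin-by-bin. The trailing branch patches in the Nyquist bin
  (FFTN/2), which the chunked grid would otherwise miss. Together with
  IFFTNormalize, which keeps only the middle of the two-frame IFFT before
  decimating by DOWNSAMPLE, this amounts to an overlap-save arrangement.
*/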
__global__ void DownSamplingFilter(hipfftComplex *dev_fft_sig,hipfftComplex *dev_fft_filter,hipfftComplex *dev_fft_yk,int FFTN)
{
int bid = 0,tid = 0;
hipComplex Sigk;
hipComplex Hk;
int chanIdx = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (CHANNUM*2);
freqIdx = bid / (CHANNUM*2)*THREADNUMPERBLK+tid;
Sigk.x = dev_fft_sig[chanIdx*FFTN+freqIdx].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+freqIdx].y;
Hk.x = dev_fft_filter[freqIdx].x;
Hk.y = dev_fft_filter[freqIdx].y;
dev_fft_yk[chanIdx*FFTN+freqIdx].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+freqIdx].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
if( bid/(CHANNUM*2)>= 255 && tid == THREADNUMPERBLK-1)
{
Sigk.x = dev_fft_sig[chanIdx*FFTN+FFTN/2].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+FFTN/2].y;
Hk.x = dev_fft_filter[FFTN/2].x;
Hk.y = dev_fft_filter[FFTN/2].y;
dev_fft_yk[chanIdx*FFTN+FFTN/2].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+FFTN/2].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
}
}
__global__ void IFFTNormalize(hipfftReal *dev_fft_yout,hipfftReal *dev_databuff,int FFTN)
{
int bid = 0,tid = 0;
int chanIdx = 0;
int timeIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (CHANNUM*2);
timeIdx = bid / (CHANNUM*2)*THREADNUMPERBLK+tid+FFTN/4;
//if(bid < CHANNUM*2 && tid == 0)
//{
// memcpy(dev_databuff+chanIdx*FFTN/DOWNSAMPLE,dev_databuff+chanIdx*FFTN/DOWNSAMPLE+FFTN/DOWNSAMPLE/2,FFTN/DOWNSAMPLE/2*sizeof(float));
//}
if(timeIdx % DOWNSAMPLE == 0)
{
dev_databuff[chanIdx*FFTN/DOWNSAMPLE + FFTN/DOWNSAMPLE/2 + (timeIdx-FFTN/4)/DOWNSAMPLE] = dev_fft_yout[chanIdx*FFTN+timeIdx] / FFTN;
}
}
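/*
  Fractional-delay steering filters: for element blockIdx.x the wanted delay
  in samples is split into an integer part dI and a fractional residue tau,
  and the 2*m+1 taps are the shifted sinc sin(pi*(k - tau))/(pi*(k - tau))
  (the 1e-6 offset avoids 0/0 at k == tau), finally normalized to unit DC
  gain.
*/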
__global__ void DelayFilterGen(float *h,int m,float theta,float *tau,int *dI)
{
int bid = 0,tid = 0;
int k=0;
float dfs = 0.0;
int DI = 0;
__shared__ float sum;
bid = blockIdx.x;
tid = threadIdx.x;
if(tid == 0)
{
sum = 0.0;
dfs = bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE);
DI = int(bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE)+0.5);
tau[bid] =dfs-DI;
dI[bid] = DI;
//printf("bid=%d,m=%d,theta = %.3f,dfs = %.3f,DI = %d\n",bid,m,theta,dfs,DI);
}
// wait until tau and dI are written
__syncthreads();
k = tid-m;
h[bid*(2*m+1)+tid] = sin(k*1.0*PI-tau[bid]*PI+0.000001)/(k*1.0*PI-tau[bid]*PI+0.000001);
// all taps written; sum them for normalization
__syncthreads();
if(tid == 0)
{
for(int k=0;k<2*m+1;k++)
{
sum = sum + h[bid*(2*m+1)+k];
}
}
__syncthreads();
h[bid*(2*m+1)+tid] = h[bid*(2*m+1)+tid]/sum;
}
__global__ void FineDelayFilter(hipfftReal *dev_xin,hipfftReal *dev_yout,hipfftReal *delayfilter,int m)
{
int bid,tid;
float x=0.0,h=0.0;
float sum = 0.0;
bid = blockIdx.x;
tid = threadIdx.x;
__shared__ float y[2*M+1];
if(tid == 0)
{
for(int ii=0;ii<2*m;ii++)
{
y[ii] = 0.0;
}
}
if(bid-2*m+tid >= 0 && bid-2*m+tid < (FILTER_FRAME/DOWNSAMPLE))
{
x = dev_xin[bid-2*m+tid];
}
if(2*m-tid >=0)
{
h = delayfilter[2*m-tid];
}
y[tid] = x*h;
//if(bid == 24855)
//{
// printf("bid = %d,x=%.8f,h=%.8f,y=%.8f\n",bid,x,h,y);
//}
// wait for all partial products
__syncthreads();
if(tid == 0)
{
sum = 0.0;
for(int jj=0;jj<2*m+1;jj++)
{
sum = sum + y[jj];
}
dev_yout[bid] = sum;
//if(bid == 24855)
//{
// printf("bid = %d,dev_yout=%.8f\n",bid,dev_yout[bid]);
//}
}
}
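/*
  Periodogram: one FFT bin per thread, Xabs[k] = |X[k]|^2 / N. PSD_AVG_NUM of
  these are later averaged in PsdAverage and converted to dB relative to the
  1e-12 reference.
*/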
__global__ void Psd(hipfftComplex *Xk,hipfftReal *Xabs, int N)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
Xabs[freqIdx] = (Xk[freqIdx].x*Xk[freqIdx].x+Xk[freqIdx].y*Xk[freqIdx].y) / N;
}
__global__ void PsdAverage(hipfftReal *Xabs,hipfftReal *Xk_avg)
{
int bid = 0,tid = 0;
int freqIdx = 0;
float sum = 0.0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
for(int ii = 0;ii<PSD_AVG_NUM;ii++)
{
sum += Xabs[ii*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)+freqIdx] / PSD_AVG_NUM;
}
Xk_avg[freqIdx] = 10*log10((sum+EPS)/1e-12);
}
__global__ void PsdSub(hipfftReal *Xk_avg,hipfftReal *Xk_smooth,hipfftReal *Xk_diff,int idx1,int idx2)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
if(freqIdx >= idx1 && freqIdx <= idx2)
{
Xk_diff[freqIdx] = Xk_avg[freqIdx] - Xk_smooth[freqIdx];
}
else
{
Xk_diff[freqIdx] = 0;
}
}
__global__ void FrequencyDomainFilter(hipfftComplex *Xk,float deltaf,float StartFreq,float EndFreq)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
if(freqIdx * deltaf < StartFreq || freqIdx * deltaf > EndFreq)
{
Xk[freqIdx].x = 0;
Xk[freqIdx].y = 0;
}
}
__global__ void SignalSqr(hipfftReal *X)
{
int bid = 0,tid = 0;
int sigIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
sigIdx = bid*THREADNUMPERBLK+tid;
X[sigIdx] = X[sigIdx]*X[sigIdx];
}
__global__ void DemonAdd(hipfftComplex *Xk,hipfftReal *Xabs, int N)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
Xabs[freqIdx] += (Xk[freqIdx].x*Xk[freqIdx].x+Xk[freqIdx].y*Xk[freqIdx].y) / N;
}
__global__ void DemonSub(hipfftReal *Xk_avg,hipfftReal *Xk_smooth,hipfftReal *Xk_diff)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid;
Xk_diff[freqIdx] = Xk_avg[freqIdx] - Xk_smooth[freqIdx];
if(Xk_diff[freqIdx] < 0)
{
Xk_diff[freqIdx] = 0;
}
}
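/*
  Single-bin bearing estimate from the acoustic vector sensor: sina and cosa
  are Im{conj(P)*Vy} and Im{conj(P)*Vx} (the y and x active-intensity
  components at this frequency), so the returned value is their atan2 in
  degrees.
*/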
float VectorThetSPF(hipfftComplex P_f, hipfftComplex Vx_f, hipfftComplex Vy_f)
{
float fTheta=0.0;
float sina=-P_f.y*Vy_f.x+P_f.x*Vy_f.y;
float cosa=-P_f.y*Vx_f.x+P_f.x*Vx_f.y;
fTheta=atan2(sina, cosa)*180/PI;
return fTheta;
}
void *ArraySignalProcessing(void *lParam)
{
//int retval = -1;
int BUF_FLAG = 0;
int FrameNum = 0;
//-----------------Downsampling filter-------------------------------
float h[FIRORDER+1] = {0.0};
float fl = 100.0f,fh = 10e3f;
hipError_t cudaStatus;
hipfftReal *dev_x=NULL;
hipfftReal *dev_h=NULL;
hipfftComplex *dev_fft_x=NULL;
hipfftComplex *dev_fft_h=NULL;
hipfftComplex *dev_fft_y=NULL;
hipfftReal *dev_y=NULL;
hipfftReal *dev_chanbuff=NULL;
float *FilteredDataout = NULL;
float *DownSamplingData = NULL;
hipfftHandle Hplan;
hipfftHandle Xplan;
hipfftHandle Yplan;
//----------------------------------------------------------------
//--------------------------Process Time Test---------------------
hipEvent_t start1;
hipEvent_t stop1;
float msecTotal = 0.0f;
//----------------------------------------------------------------
//--------------------------Beamforming and Tracing---------------
int nfl = (int)((2000.0/(FS/DOWNSAMPLE)*NFFT)+0.5);
int nfh = (int)((4000.0/(FS/DOWNSAMPLE)*NFFT)+0.5);
// int FreqbinPerThread = (int)((nfh-nfl+1)/(THREADNUMPERBLK*1.0) + 0.5);
int BlockRowNum = 0;
hipfftComplex *dev_fft=NULL;
hipfftReal *dev_energy=NULL;
hipfftReal *sum_energy=NULL;
hipfftComplex *PhiArray = NULL;
hipfftHandle Beamplan;
float c[BEAMNUM]={0.0};
hipfftComplex *sk=NULL;
float *debugvar = NULL;
int peak[BEAMNUM]={0};
int valley[BEAMNUM]={0};
// bool traced[BEAMNUM] = {false};
// int tracedbeamIdx = -1;
float pretracedtarget[BEAMNUM] = {0.0};
int pretracedtargetIdx[BEAMNUM] = {-1};
int pretracedtargetNum = 0;
int tracedtargetbeam[MAXTRACETARNUM][2];
// float *tracebeam = NULL;
// int beammatrix[5][BEAMNUM] = {0};
int i0,i1,i2;
float r0,r1,r2;
float delta_index = 0;
float tracedtargetangle[3] = {0.0};
hipfftReal *dev_delayFilter = NULL;
hipfftReal *dev_tau = NULL;
float delayfiltercoff[ARRAYNUM*(2*M+1)] = {0.0};
float delaytau[ARRAYNUM] = {0.0};
hipfftReal *dev_delayfilterout = NULL;
hipfftReal *dev_delayfilterbuf = NULL;
int *dev_dI = NULL;
int delaydI[ARRAYNUM] = {0};
float *sourcedata = NULL;
float *shiftdata = NULL;
float *delayfilteroutdata = NULL;
hipfftReal *dev_delaychandata = NULL;
hipfftReal *dev_beamdata = NULL;
float *beamdata = NULL;
//----------------------------------------------------------------
//----------------------------Psd and DEMON-----------------------
hipfftReal *dev_tracebeam=NULL;
hipfftComplex *dev_tracebeam_spec=NULL;
hipfftReal *dev_tracebeam_psd=NULL;
hipfftReal *dev_tracebeam_psd_avg = NULL;
hipfftComplex *dev_tracebeam_demonspec=NULL;
hipfftComplex *dev_tracebeam_demonspec_band=NULL;
hipfftReal *dev_tracebeam_demon=NULL;
hipfftReal *dev_tracebeam_demon_band_data=NULL;
hipfftHandle PSDplan;
hipfftHandle DEMONplan;
hipfftHandle DEMONBandplan;
float *trace_beam_psd = NULL;
float fDf;
int idx1;
int idx2;
int idxLen;
float *trace_beam_psd_smooth = NULL;
hipfftReal *dev_tracebeam_psd_S = NULL;
hipfftReal *dev_tracebeam_psd_E = NULL;
float fPsdEVar=0.0;
float *trace_beam_demon = NULL;
float *trace_beam_demon_smooth = NULL;
hipfftReal *dev_trace_beam_demon_cut = NULL;
hipfftReal *dev_tracebeam_demon_S = NULL;
hipfftReal *dev_tracebeam_demon_E = NULL;
float fDemonEVar=0.0;
//----------------------------------------------------
hipfftReal *dev_vector_p_buf=NULL;
hipfftReal *dev_vector_x_buf=NULL;
hipfftReal *dev_vector_y_buf=NULL;
hipfftComplex *dev_vector_p_spec=NULL;
hipfftComplex *dev_vector_x_spec=NULL;
hipfftComplex *dev_vector_y_spec=NULL;
hipfftReal *dev_vector_p_psd =NULL;
hipfftReal *dev_vector_psd_avg=NULL;
float *vector_p_psd = NULL;
float *vector_p_psd_smooth = NULL;
hipfftReal *dev_vector_p_psd_S = NULL;
hipfftReal *dev_vector_p_psd_E = NULL;
float fVectorPsdEVar=0.0;
//----------------------------------------------------------------
if(DownSamplingDataBufA != NULL)
{
free(DownSamplingDataBufA);
DownSamplingDataBufA = NULL;
}
DownSamplingDataBufA = (float *)malloc(FILTER_FRAME*CHANNUM*2*sizeof(float));
memset(DownSamplingDataBufA,0,FILTER_FRAME*CHANNUM*2*sizeof(float));
if(DownSamplingDataBufB != NULL)
{
free(DownSamplingDataBufB);
DownSamplingDataBufB = NULL;
}
DownSamplingDataBufB = (float *)malloc(FILTER_FRAME*CHANNUM*2*sizeof(float));
memset(DownSamplingDataBufB,0,FILTER_FRAME*CHANNUM*2*sizeof(float));
//-------------------------------------------------------------
FilteredDataout = (float *)malloc(FILTER_FRAME/DOWNSAMPLE*sizeof(float));
memset(FilteredDataout,0,FILTER_FRAME/DOWNSAMPLE*sizeof(float));
DownSamplingData = (float *)malloc(FRAMELEN*sizeof(float));
memset(DownSamplingData,0,FRAMELEN*sizeof(float));
hipfftComplex *Xk_real = NULL;
Xk_real = (hipfftComplex *)malloc(FILTER_FRAME*sizeof(hipfftComplex));
memset(Xk_real,0,FILTER_FRAME*sizeof(hipfftComplex));
FILE *fp = NULL;
fp = fopen("BeamEng.bin","wb");
FILE *fplog = NULL;
fplog = fopen("ProcessLog.txt","w");
FILE *fpbeam = NULL;
fpbeam = fopen("Beam.bin","wb");
// int retvalprint = 0;
//------------------------------------------------------------
hipfftPlan1d(&Hplan, FILTER_FRAME, HIPFFT_R2C, 1);
hipfftPlan1d(&Xplan, FILTER_FRAME, HIPFFT_R2C, 1);
hipfftPlan1d(&Yplan, FILTER_FRAME, HIPFFT_C2R, 1);
hipfftPlan1d(&PSDplan, PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), HIPFFT_R2C, 1);
hipfftPlan1d(&DEMONplan, PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), HIPFFT_R2C, 1);
hipfftPlan1d(&DEMONBandplan, PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), HIPFFT_C2R, 1);
cudaStatus = hipMalloc((void **)&dev_x, sizeof(hipfftReal)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != hipSuccess)
{
printf (" dev_x hipMalloc Error! \n ");
}
hipMemset((void **)&dev_x,0,sizeof(hipfftReal)*FILTER_FRAME*CHANNUM*2);
cudaStatus = hipMalloc((void **)&dev_h, sizeof(hipfftReal)*FILTER_FRAME);
if (cudaStatus != hipSuccess)
{
printf ("dev_h hipMalloc Error! \n ");
}
hipMemset((void **)&dev_h,0,sizeof(hipfftReal)*FILTER_FRAME);
cudaStatus = hipMalloc((void **)&dev_y, sizeof(hipfftReal)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != hipSuccess)
{
printf ("dev_y hipMalloc Error! \n ");
}
hipMemset((void **)&dev_y,0,sizeof(hipfftReal)*FILTER_FRAME*CHANNUM*2);
cudaStatus = hipMalloc((void **)&dev_fft_x,sizeof(hipfftComplex)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != hipSuccess)
{
printf ("dev_fft_x hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_x,0,sizeof(hipfftComplex)*FILTER_FRAME*CHANNUM*2);
cudaStatus = hipMalloc((void **)&dev_fft_h,sizeof(hipfftComplex)*FILTER_FRAME);
if (cudaStatus != hipSuccess)
{
printf ("dev_fft_h hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_h,0,sizeof(hipfftComplex)*FILTER_FRAME);
cudaStatus = hipMalloc((void **)&dev_fft_y,sizeof(hipfftComplex)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != hipSuccess)
{
printf ("dev_fft_y hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft_y,0,sizeof(hipfftComplex)*FILTER_FRAME*CHANNUM*2);
cudaStatus = hipMalloc((void **)&dev_chanbuff,sizeof(hipfftReal)*FILTER_FRAME/DOWNSAMPLE*CHANNUM*2);
if (cudaStatus != hipSuccess)
{
printf ("dev_chanbuff hipMalloc Error! \n ");
}
hipMemset((void **)&dev_chanbuff,0,sizeof(hipfftReal)*FILTER_FRAME/DOWNSAMPLE*CHANNUM*2);
fir1(FIRORDER,3,fl,fh,FS,5,h);
hipMemcpy(dev_h,h,sizeof(hipfftReal)*(FIRORDER+1),hipMemcpyHostToDevice);
hipfftExecR2C(Hplan,(hipfftReal *)&dev_h[0],(hipfftComplex *)&dev_fft_h[0]);
BlockRowNum = NFFT/2/THREADNUMPERBLK;
cudaStatus = hipMalloc((void**)&dev_energy,BEAMNUM*BlockRowNum*sizeof(hipfftReal));
if (cudaStatus != hipSuccess)
{
printf ("dev_energy hipMalloc Error! \n ");
}
hipMemset((void **)&dev_energy,0,BEAMNUM*BlockRowNum*sizeof(hipfftReal));
cudaStatus = hipMalloc((void**)&sum_energy,BEAMNUM*sizeof(hipfftReal));
if (cudaStatus != hipSuccess)
{
printf ("sum_energy hipMalloc Error! \n ");
}
hipMemset((void **)&sum_energy,0,BEAMNUM*sizeof(hipfftReal));
cudaStatus = hipMalloc((void**)&PhiArray,ARRAYNUM*BEAMNUM*(NFFT/2)*sizeof(hipfftComplex));
if (cudaStatus != hipSuccess)
{
printf ("PhiArray hipMalloc Error! \n ");
}
hipMemset((void **)&PhiArray,0,ARRAYNUM*BEAMNUM*(NFFT/2)*sizeof(hipfftComplex));
cudaStatus = hipMalloc((void **)&dev_fft,sizeof(hipfftComplex)*(NFFT/2+1)*ARRAYNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_fft hipMalloc Error! \n ");
}
hipMemset((void **)&dev_fft,0,sizeof(hipfftComplex)*(NFFT/2+1)*ARRAYNUM);
hipfftPlan1d(&Beamplan,NFFT,HIPFFT_R2C, 1);
hipLaunchKernelGGL(( PhiShiftFactorGen), dim3(NFFT/2),dim3(BEAMNUM), 0, 0, PhiArray);
sk = (hipfftComplex *)malloc(sizeof(hipfftComplex)*(NFFT/2+1)*ARRAYNUM);
memset(sk,0,sizeof(hipfftComplex)*(NFFT/2+1)*ARRAYNUM);
debugvar = (float *)malloc(sizeof(float)*BEAMNUM*BlockRowNum);
memset(debugvar,0, sizeof(float)*BEAMNUM*BlockRowNum);
for(int ii = 0;ii<MAXTRACETARNUM;ii++)
{
tracedtargetbeam[ii][0] = -1;
tracedtargetbeam[ii][1] = -1;
tracedtargetangle[ii] = -1.0f;
}
cudaStatus = hipMalloc((void **)&dev_delayFilter,sizeof(hipfftReal)*(2*M+1)*ARRAYNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_delayFilter hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delayFilter,0,sizeof(hipfftReal)*(2*M+1)*ARRAYNUM);
cudaStatus = hipMalloc((void **)&dev_tau,sizeof(hipfftReal)*ARRAYNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_tau hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tau,0,sizeof(hipfftReal)*ARRAYNUM);
cudaStatus = hipMalloc((void **)&dev_delayfilterout,sizeof(hipfftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE+2*M));
if (cudaStatus != hipSuccess)
{
printf ("dev_delayfilterout hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delayfilterout,0,sizeof(hipfftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE+2*M));
cudaStatus = hipMalloc((void **)&dev_delayfilterbuf,sizeof(hipfftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE));
if (cudaStatus != hipSuccess)
{
printf ("dev_delayfilterbuf hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delayfilterbuf,0,sizeof(hipfftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE));
cudaStatus = hipMalloc((void **)&dev_dI,sizeof(int)*ARRAYNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_dI hipMalloc Error! \n ");
}
hipMemset((void **)&dev_dI,0,sizeof(int)*ARRAYNUM);
cudaStatus = hipMalloc((void **)&dev_delaychandata,sizeof(int)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_delaychandata hipMalloc Error! \n ");
}
hipMemset((void **)&dev_delaychandata,0,sizeof(int)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_beamdata,sizeof(int)*MAXTRACETARNUM*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_beamdata hipMalloc Error! \n ");
}
hipMemset((void **)&dev_beamdata,0,sizeof(int)*MAXTRACETARNUM*(FILTER_FRAME/DOWNSAMPLE/2));
sourcedata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
memset(sourcedata,0,(FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
shiftdata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
memset(shiftdata,0,(FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
delayfilteroutdata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE+2*M)*sizeof(float));
memset(delayfilteroutdata,0,(FILTER_FRAME/DOWNSAMPLE+2*M)*sizeof(float));
beamdata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
memset(beamdata,0,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
cudaStatus = hipMalloc((void **)&dev_tracebeam,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = hipMalloc((void **)&dev_tracebeam_spec,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_spec hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_spec,0,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = hipMalloc((void **)&dev_tracebeam_psd,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM*PSD_AVG_NUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_psd hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_psd,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM*PSD_AVG_NUM);
cudaStatus = hipMalloc((void **)&dev_tracebeam_psd_avg,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_psd_avg hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_psd_avg,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = hipMalloc((void **)&dev_tracebeam_demonspec,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_demonspec hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_demonspec,0,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = hipMalloc((void **)&dev_tracebeam_demonspec_band,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_demonspec_band hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_demonspec_band,0,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_tracebeam_demon,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_demon hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_demon,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = hipMalloc((void **)&dev_tracebeam_demon_band_data,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_demon_band_data hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_demon_band_data,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
trace_beam_psd = (float *)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(trace_beam_psd,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
trace_beam_psd_smooth = (float *)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(trace_beam_psd_smooth,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
cudaStatus = hipMalloc((void **)&dev_tracebeam_psd_S,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_psd_S hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_psd_S,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
cudaStatus = hipMalloc((void **)&dev_tracebeam_psd_E,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_psd_E hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_psd_E,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
trace_beam_demon = (float *)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
memset(trace_beam_demon,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
trace_beam_demon_smooth = (float *)malloc(DEM_RST_LEN*sizeof(float));
memset(trace_beam_demon_smooth,0,DEM_RST_LEN*sizeof(float));
cudaStatus = hipMalloc((void **)&dev_tracebeam_demon_S,sizeof(hipfftReal)*DEM_RST_LEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_demon_S hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_demon_S,0,sizeof(hipfftReal)*DEM_RST_LEN);
cudaStatus = hipMalloc((void **)&dev_tracebeam_demon_E,sizeof(hipfftReal)*DEM_RST_LEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_tracebeam_demon_E hipMalloc Error! \n ");
}
hipMemset((void **)&dev_tracebeam_demon_E,0,sizeof(hipfftReal)*DEM_RST_LEN);
cudaStatus = hipMalloc((void **)&dev_trace_beam_demon_cut,sizeof(hipfftReal)*DEM_RST_LEN);
if (cudaStatus != hipSuccess)
{
printf ("dev_trace_beam_demon_cut hipMalloc Error! \n ");
}
hipMemset((void **)&dev_trace_beam_demon_cut,0,sizeof(hipfftReal)*DEM_RST_LEN);
//------------------------------------------------------------------------------
cudaStatus = hipMalloc((void **)&dev_vector_p_buf,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_p_buf hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_p_buf,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_vector_x_buf,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_x_buf hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_x_buf,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_vector_y_buf,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_y_buf hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_y_buf,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_vector_p_psd,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM);
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_p_psd hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_p_psd,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM);
cudaStatus = hipMalloc((void **)&dev_vector_p_spec,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_p_spec hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_p_spec,0,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_vector_x_spec,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_x_spec hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_x_spec,0,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_vector_y_spec,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_y_spec hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_y_spec,0,sizeof(hipfftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = hipMalloc((void **)&dev_vector_psd_avg,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_psd_avg hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_psd_avg,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
vector_p_psd = (float*)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(vector_p_psd,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
vector_p_psd_smooth = (float*)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(vector_p_psd_smooth,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
cudaStatus = hipMalloc((void **)&dev_vector_p_psd_S,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_p_psd_S hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_p_psd_S,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
cudaStatus = hipMalloc((void **)&dev_vector_p_psd_E,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != hipSuccess)
{
printf ("dev_vector_p_psd_E hipMalloc Error! \n ");
}
hipMemset((void **)&dev_vector_p_psd_E,0,sizeof(hipfftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
//--------------------------------------------------------------------------------------------
fDf=FS/DOWNSAMPLE*1.0/(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
idx1=(int)(10/fDf);
idx2=(int)(5000/fDf);
idxLen=idx2-idx1+1;
DemFreqBandNum = 4;
DemStartFreq[0] = 2000.0;
DemEndFreq[0] = 4000.0;
DemStartFreq[1] = 4000.0;
DemEndFreq[1] = 6000.0;
DemStartFreq[2] = 6000.0;
DemEndFreq[2] = 8000.0;
DemStartFreq[3] = 8000.0;
DemEndFreq[3] = 10000.0;
//
CalSmoothPara(&fSmoothA[0][0]);
//-----------------------------------------------------------------------------------------
hipEventCreate(&start1);
hipEventCreate(&stop1);
while (1)
{
pthread_mutex_lock(&count_lock_FrameDataReady);
while (count_FrameDataReady == 0)
{
pthread_cond_wait(&cond_FrameDataReady,&count_lock_FrameDataReady);
}
count_FrameDataReady = count_FrameDataReady -1;
pthread_mutex_unlock(&count_lock_FrameDataReady);
FrameNum++;
if(BUF_FLAG == 0)
{
for(int ii=0;ii<CHANNUM*2;ii++)
{
memmove(DownSamplingDataBufA+ii*FILTER_FRAME,DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,FRAMELEN*sizeof(float));
memcpy(DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,ChannDataBufA+ii*FRAMELEN,FRAMELEN*sizeof(float));
}
hipMemcpy(dev_x,DownSamplingDataBufA,sizeof(hipfftReal)*FILTER_FRAME*CHANNUM*2,hipMemcpyHostToDevice);
BUF_FLAG = 1;
}
else
{
for(int ii=0;ii<CHANNUM*2;ii++)
{
memmove(DownSamplingDataBufA+ii*FILTER_FRAME,DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,FRAMELEN*sizeof(float));
memcpy(DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,ChannDataBufB+ii*FRAMELEN,FRAMELEN*sizeof(float));
}
hipMemcpy(dev_x,DownSamplingDataBufA,sizeof(hipfftReal)*FILTER_FRAME*CHANNUM*2,hipMemcpyHostToDevice);
BUF_FLAG = 0;
}
hipEventRecord(start1,NULL);
//-----------------------------------------(1)---------------------------------------------------
for(int jj=0;jj<CHANNUM*2;jj++)
{
hipfftExecR2C(Xplan,(hipfftReal *)&dev_x[jj*FILTER_FRAME],(hipfftComplex *)&dev_fft_x[jj*FILTER_FRAME]);
}
hipLaunchKernelGGL(( DownSamplingFilter), dim3(CHANNUM*2*(FILTER_FRAME/2/THREADNUMPERBLK)),dim3(THREADNUMPERBLK), 0, 0, dev_fft_x,dev_fft_h,dev_fft_y,FILTER_FRAME);
for(int jj=0;jj<CHANNUM*2;jj++)
{
hipfftExecC2R(Yplan,(hipfftComplex *)&dev_fft_y[jj*FILTER_FRAME],(hipfftReal *)&dev_y[jj*FILTER_FRAME]);
hipMemcpy(dev_chanbuff+jj*FILTER_FRAME/DOWNSAMPLE,dev_chanbuff+jj*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,FILTER_FRAME/DOWNSAMPLE/2*sizeof(float),hipMemcpyDeviceToDevice);
}
hipLaunchKernelGGL(( IFFTNormalize), dim3(CHANNUM*2*(FILTER_FRAME/2/THREADNUMPERBLK)),dim3(THREADNUMPERBLK), 0, 0, dev_y,dev_chanbuff,FILTER_FRAME);
//-----------------------------------------(1)---------------------------------------------------
//-----------------------------------------(2)---------------------------------------------------
for (int ii=0;ii<ARRAYNUM;ii++)
{
hipfftExecR2C(Beamplan,(hipfftReal *)&dev_chanbuff[ii*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2],(hipfftComplex *)&dev_fft[ii*(NFFT/2+1)]);
}
hipLaunchKernelGGL(( FD_Beamform), dim3(BlockRowNum*BEAMNUM),dim3(THREADNUMPERBLK), 0, 0, dev_fft,dev_energy,PhiArray,nfl,nfh);
hipLaunchKernelGGL(( MatrixSumRow), dim3(BEAMNUM),dim3(1), 0, 0, dev_energy,sum_energy,BlockRowNum,BEAMNUM);
printf("success 0!\n");
hipMemcpy(c,sum_energy,BEAMNUM*sizeof(float),hipMemcpyDeviceToHost);
// fwrite(c,sizeof(float),BEAMNUM,fp);
//-----------------------------------------(2)-----------------------------------------------
//-----------------------------------------(3)------------------------------------------
//
memset(peak,0,BEAMNUM*sizeof(int));
memset(valley,0,BEAMNUM*sizeof(int));
findpeak(c,peak,BEAMNUM);
findvalley(c,valley,BEAMNUM);
bool targetexist = false;
memset(pretracedtarget,0,sizeof(float)*BEAMNUM);
memset(pretracedtargetIdx,0,sizeof(int)*BEAMNUM);
pretracedtargetNum = 0;
for(int kk=0;kk<BEAMNUM;kk++)
{
if(peak[kk] == 1)
{
//
int jj=0;
for(jj=0;jj<MAXTRACETARNUM;jj++)
{
//
if(abs(tracedtargetbeam[jj][0]-kk)<6 && tracedtargetbeam[jj][0]>0)
{
break;
}
}
if(jj==MAXTRACETARNUM) //
{
targetexist = peakdetection(kk,c,valley,2.0);
}
else //
{
targetexist = peakdetection(kk,c,valley,1.2);
}
if(targetexist)
{
pretracedtarget[pretracedtargetNum] = c[kk];
pretracedtargetIdx[pretracedtargetNum] = kk;
pretracedtargetNum++;
}
}
}
rbub(pretracedtarget,pretracedtargetIdx,BEAMNUM);
if(FrameNum == 115)
{
FrameNum = FrameNum;
}
for(int kk=0;kk<pretracedtargetNum;kk++)
{
int jj=0;
for(jj=0;jj<MAXTRACETARNUM;jj++)
{
//
if(abs(tracedtargetbeam[jj][0]-pretracedtargetIdx[kk])<6 && tracedtargetbeam[jj][0]>0)
{
tracedtargetbeam[jj][0] = pretracedtargetIdx[kk];
tracedtargetbeam[jj][1] = FrameNum;
break;
}
}
if(jj==MAXTRACETARNUM) //
{
int ii = 0;
for(ii=0;ii<MAXTRACETARNUM;ii++)
{
//
if(tracedtargetbeam[ii][0] < 0)
{
break;
}
}
if(ii < MAXTRACETARNUM) //
{
tracedtargetbeam[ii][0] = pretracedtargetIdx[kk];
tracedtargetbeam[ii][1] = FrameNum;
}
}
}
//
for(int jj=0;jj<MAXTRACETARNUM;jj++)
{
if(tracedtargetbeam[jj][0] >0 && FrameNum - tracedtargetbeam[jj][1] >= 5)
{
tracedtargetbeam[jj][0] = -1;
tracedtargetbeam[jj][1] = -1;
tracedtargetangle[jj] = -1.0f;
}
}
//-----------------------------------------(3)-------------------------------------
//-----------------------------------------(4) ------------------------------
for(int jj = 0;jj<MAXTRACETARNUM;jj++)
{
if(tracedtargetbeam[jj][0] >0) //
{
//
i0 = tracedtargetbeam[jj][0]-1;
i1 = tracedtargetbeam[jj][0];
i2 = tracedtargetbeam[jj][0]+1;
r0 = c[i0];
r1 = c[i1];
r2 = c[i2];
delta_index = (r2-r0)/(4*r1-2*r0-2*r2);
tracedtargetangle[jj] = (i1+delta_index)*180.0/BEAMNUM;
hipLaunchKernelGGL(( DelayFilterGen), dim3(ARRAYNUM),dim3(2*M+1), 0, 0, dev_delayFilter,M,tracedtargetangle[jj],dev_tau,dev_dI);
//DelayFilterGen<<<ARRAYNUM,2*M+1>>>(dev_delayFilter,M,60.292690,dev_tau,dev_dI);
hipMemcpy(delayfiltercoff,dev_delayFilter,sizeof(hipfftReal)*ARRAYNUM*(2*M+1),hipMemcpyDeviceToHost);
hipMemcpy(delaytau,dev_tau,sizeof(hipfftReal)*ARRAYNUM,hipMemcpyDeviceToHost);
hipMemcpy(delaydI,dev_dI,sizeof(int)*ARRAYNUM,hipMemcpyDeviceToHost);
for(int kk = 0;kk<ARRAYNUM;kk++)
{
if(delaydI[kk] >= 0)
{
hipMemcpy(dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE)+delaydI[kk],dev_chanbuff+kk*(FILTER_FRAME/DOWNSAMPLE),sizeof(hipfftReal)*((FILTER_FRAME/DOWNSAMPLE)-delaydI[kk]),hipMemcpyDeviceToDevice);
}
else
{
hipMemcpy(dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE),dev_chanbuff+kk*(FILTER_FRAME/DOWNSAMPLE)-delaydI[kk],sizeof(hipfftReal)*((FILTER_FRAME/DOWNSAMPLE)+delaydI[kk]),hipMemcpyDeviceToDevice);
}
//hipMemcpy(sourcedata,dev_chanbuff+kk*(FILTER_FRAME/DOWNSAMPLE),(FILTER_FRAME/DOWNSAMPLE)*sizeof(float),hipMemcpyDeviceToHost);
//hipMemcpy(shiftdata,dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE),(FILTER_FRAME/DOWNSAMPLE)*sizeof(float),hipMemcpyDeviceToHost);
if(fabs(delaytau[kk]) > 0.0001)
{
hipLaunchKernelGGL(( FineDelayFilter), dim3((FILTER_FRAME/DOWNSAMPLE+2*M)),dim3(2*M+1), 0, 0, (hipfftReal *)&dev_delayfilterbuf[kk*FILTER_FRAME/DOWNSAMPLE],(hipfftReal *)&dev_delayfilterout[kk*(FILTER_FRAME/DOWNSAMPLE+2*M)],(hipfftReal *)&dev_delayFilter[kk*(2*M+1)],M);
}
else
{
hipMemcpy(dev_delayfilterout+kk*(FILTER_FRAME/DOWNSAMPLE+2*M)+M,dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE),sizeof(hipfftReal)*(FILTER_FRAME/DOWNSAMPLE),hipMemcpyDeviceToDevice);
}
hipMemcpy(dev_delaychandata+kk*(FILTER_FRAME/DOWNSAMPLE/2),dev_delayfilterout+kk*(FILTER_FRAME/DOWNSAMPLE+2*M)+M+FILTER_FRAME/DOWNSAMPLE/4,sizeof(hipfftReal)*FILTER_FRAME/DOWNSAMPLE/2,hipMemcpyDeviceToDevice);
}
hipLaunchKernelGGL(( MatrixSumRow), dim3(FILTER_FRAME/DOWNSAMPLE/2),dim3(1), 0, 0, dev_delaychandata,dev_beamdata+jj*FILTER_FRAME/DOWNSAMPLE/2,ARRAYNUM,FILTER_FRAME/DOWNSAMPLE/2);
hipMemcpy(beamdata,dev_beamdata+jj*FILTER_FRAME/DOWNSAMPLE/2,FILTER_FRAME/DOWNSAMPLE/2*sizeof(float),hipMemcpyDeviceToHost);
//fwrite(beamdata,sizeof(float),FILTER_FRAME/DOWNSAMPLE/2,fpbeam);
printf("success 1!\n");
//
hipMemcpy(dev_tracebeam+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)+(FILTER_FRAME/DOWNSAMPLE/2),(FILTER_FRAME/DOWNSAMPLE/2)*(PSD_LEN-1)*sizeof(hipfftReal),hipMemcpyDeviceToDevice);
hipMemcpy(dev_tracebeam+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_beamdata+jj*FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(hipfftReal),hipMemcpyDeviceToDevice);
//
hipMemcpy(dev_tracebeam_psd+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM,dev_tracebeam_psd+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM+PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(hipfftReal),hipMemcpyDeviceToDevice);
hipfftExecR2C(PSDplan,(hipfftReal *)&dev_tracebeam[jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)],(hipfftComplex *)&dev_tracebeam_spec[jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)]);
hipLaunchKernelGGL(( Psd), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_tracebeam_spec+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam_psd+jj*PSD_LEN*PSD_AVG_NUM*(FILTER_FRAME/DOWNSAMPLE/2)+(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
//
hipLaunchKernelGGL(( PsdAverage), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_tracebeam_psd+jj*PSD_LEN*PSD_AVG_NUM*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam_psd_avg+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
hipMemcpy(trace_beam_psd,dev_tracebeam_psd_avg+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),hipMemcpyDeviceToHost);
//fwrite(trace_beam_psd,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
//
MySmooth(trace_beam_psd+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 3, SMOOTH_N, 5, trace_beam_psd_smooth+idx1);
MySmooth(trace_beam_psd_smooth+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 2, SMOOTH_N, 5, trace_beam_psd_smooth+idx1);
hipMemcpy(dev_tracebeam_psd_S,trace_beam_psd_smooth,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),hipMemcpyHostToDevice);
//
hipLaunchKernelGGL(( PsdSub), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_tracebeam_psd_avg+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam_psd_S,dev_tracebeam_psd_E,idx1,idx2);
hipMemcpy(trace_beam_psd_smooth,dev_tracebeam_psd_E,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),hipMemcpyDeviceToHost);
//fwrite(trace_beam_psd_smooth,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
//
fPsdEVar=0.0;
for (int ii=idx1;ii<=idx2;ii++)
{
fPsdEVar+=trace_beam_psd_smooth[ii]*trace_beam_psd_smooth[ii];
}
fPsdEVar/=(float)(idx2-idx1+1);
fPsdEVar=sqrtf(fPsdEVar);
printf("success 2!\n");
//
for(int ii =0;ii<DemFreqBandNum;ii++)
{
hipMemcpy(dev_tracebeam_demonspec_band,dev_tracebeam_spec+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(hipfftComplex),hipMemcpyDeviceToDevice);
//
hipLaunchKernelGGL(( FrequencyDomainFilter), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_tracebeam_demonspec_band,fDf,DemStartFreq[ii],DemEndFreq[ii]);
hipfftExecC2R(DEMONBandplan,dev_tracebeam_demonspec_band,dev_tracebeam_demon_band_data);
hipLaunchKernelGGL(( SignalSqr), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_tracebeam_demon_band_data);
hipfftExecR2C(DEMONplan,dev_tracebeam_demon_band_data,dev_tracebeam_demonspec);
hipLaunchKernelGGL(( DemonAdd), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_tracebeam_demonspec,dev_tracebeam_demon+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
}
hipMemcpy(trace_beam_demon,dev_tracebeam_demon+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),DEM_RST_LEN*sizeof(float),hipMemcpyDeviceToHost);
//
for(int ii=0;ii<6;ii++)
{
trace_beam_demon[ii] = trace_beam_demon[6];
}
//fwrite(trace_beam_demon,sizeof(float),DEM_RST_LEN,fpbeam);
//
MySmooth(trace_beam_demon, DEM_RST_LEN, &fSmoothA[0][0], SMOOTH_N, 3, SMOOTH_N, 5, trace_beam_demon_smooth);
MySmooth(trace_beam_demon_smooth, DEM_RST_LEN, &fSmoothA[0][0], SMOOTH_N, 2, SMOOTH_N, 5, trace_beam_demon_smooth);
//fwrite(trace_beam_demon_smooth,sizeof(float),DEM_RST_LEN,fpbeam);
hipMemcpy(dev_trace_beam_demon_cut,trace_beam_demon,DEM_RST_LEN*sizeof(hipfftReal),hipMemcpyHostToDevice);
hipMemcpy(dev_tracebeam_demon_S,trace_beam_demon_smooth,DEM_RST_LEN*sizeof(hipfftReal),hipMemcpyHostToDevice);
//fwrite(trace_beam_demon_smooth,sizeof(float),DEM_RST_LEN,fpbeam)
hipLaunchKernelGGL(( DemonSub), dim3(DEM_RST_LEN),dim3(1), 0, 0, dev_trace_beam_demon_cut,dev_tracebeam_demon_S,dev_tracebeam_demon_E);
hipMemcpy(trace_beam_demon_smooth,dev_tracebeam_demon_E,DEM_RST_LEN*sizeof(hipfftReal),hipMemcpyDeviceToHost);
//fwrite(trace_beam_demon_smooth,sizeof(float),DEM_RST_LEN,fpbeam);
fDemonEVar=0.0;
for (int ii=0;ii<DEM_RST_LEN;ii++)
{
fDemonEVar+=trace_beam_demon_smooth[ii]*trace_beam_demon_smooth[ii];
}
fDemonEVar/=(float)(DEM_RST_LEN);
fDemonEVar=sqrtf(fDemonEVar);
printf("success 3!\n");
//
int ll = 0;
if(FrameNum >= 8)
{
nPlineNum = 0;
memset(fPlineInfo,0,MAXTRACETARNUM*LINE_NUM*4*sizeof(float));
for(int ii=idx1;ii<=idx2;ii++)
{
if(trace_beam_psd_smooth[ii]>4.0*fPsdEVar && trace_beam_psd_smooth[ii]>trace_beam_psd_smooth[ii-1] && trace_beam_psd_smooth[ii]>trace_beam_psd_smooth[ii+1] )
{
if(nPlineNum<LINE_NUM)
{
//
for(ll = 0;ll<nPlineNum;ll++)
{
if(fabs(fPlineInfo[jj][ll][1]-(float)ii*fDf)<1.0)
{
break;
}
}
if(ll == nPlineNum)
{
fPlineInfo[jj][nPlineNum][0] = trace_beam_psd_smooth[ii]; //
fPlineInfo[jj][nPlineNum][1] = (float)ii*fDf; //
fPlineInfo[jj][nPlineNum][2] = trace_beam_psd[ii];
fPlineInfo[jj][nPlineNum][3] = tracedtargetangle[jj];
if(fPlineInfo[jj][nPlineNum][3] > 180.0)
{
fPlineInfo[jj][nPlineNum][3] -= 360.0;
}
else if(fPlineInfo[jj][nPlineNum][3] < -180.0)
{
fPlineInfo[jj][nPlineNum][3] += 360.0;
}
nPlineNum++;
}
else if(trace_beam_psd_smooth[ii] > fPlineInfo[jj][ll][0])
{
fPlineInfo[jj][ll][0] = trace_beam_psd_smooth[ii];
fPlineInfo[jj][ll][1] = (float)ii*fDf;
fPlineInfo[jj][ll][2] = trace_beam_psd[ii];
fPlineInfo[jj][ll][3] = tracedtargetangle[jj];;
if(fPlineInfo[jj][ll][3] > 180.0)
{
fPlineInfo[jj][ll][3] -= 360.0;
}
else if(fPlineInfo[jj][ll][3] < -180.0)
{
fPlineInfo[jj][ll][3] += 360.0;
}
}
}
}
}
nDlineNum = 0;
memset(fDlineInfo,0,MAXTRACETARNUM*LINE_NUM*2*sizeof(float));
for(int ii = 4;ii<DEM_RST_LEN-1;ii++)
{
if(trace_beam_demon_smooth[ii]>6.0*fDemonEVar && trace_beam_demon_smooth[ii]>trace_beam_demon_smooth[ii-1] && trace_beam_demon_smooth[ii]>trace_beam_demon_smooth[ii+1])
{
if(nDlineNum<LINE_NUM)
{
fDlineInfo[jj][nDlineNum][0]=trace_beam_demon_smooth[jj];
fDlineInfo[jj][nDlineNum][1]=ii*fDf;
nDlineNum++;
}
}
}
//for(int ii = 0;ii<nDlineNum;ii++)
//{
// printf("%d:%.3f\n",ii+1,fDlineInfo[jj][ii][1]);
//}
}
}
}
printf("success 4!\n");
//-----------------------------------------(4) ------------------------------------------
//-----------------------------------------(5) ----------------------------------------------------
hipMemcpy(dev_vector_p_buf,dev_vector_p_buf+(FILTER_FRAME/DOWNSAMPLE/2),(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),hipMemcpyDeviceToDevice);
hipMemcpy(dev_vector_p_buf+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_chanbuff+VECTOR_P_IDX*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),hipMemcpyDeviceToDevice);
hipMemcpy(dev_vector_x_buf,dev_vector_x_buf+(FILTER_FRAME/DOWNSAMPLE/2),(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),hipMemcpyDeviceToDevice);
hipMemcpy(dev_vector_x_buf+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_chanbuff+VECTOR_X_IDX*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),hipMemcpyDeviceToDevice);
hipMemcpy(dev_vector_y_buf,dev_vector_y_buf+(FILTER_FRAME/DOWNSAMPLE/2),(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),hipMemcpyDeviceToDevice);
hipMemcpy(dev_vector_y_buf+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_chanbuff+VECTOR_Y_IDX*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),hipMemcpyDeviceToDevice);
hipfftExecR2C(PSDplan,(hipfftReal *)&dev_vector_p_buf[0],(hipfftComplex *)&dev_vector_p_spec[0]);
hipfftExecR2C(PSDplan,(hipfftReal *)&dev_vector_x_buf[0],(hipfftComplex *)&dev_vector_x_spec[0]);
hipfftExecR2C(PSDplan,(hipfftReal *)&dev_vector_y_buf[0],(hipfftComplex *)&dev_vector_y_spec[0]);
hipMemcpy(dev_vector_p_psd,dev_vector_p_psd+PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(hipfftReal),hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( Psd), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_vector_p_spec,dev_vector_p_psd+(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
hipLaunchKernelGGL(( PsdAverage), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_vector_p_psd,dev_vector_psd_avg);
hipMemcpy(vector_p_psd,dev_vector_psd_avg,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),hipMemcpyDeviceToHost);
fwrite(vector_p_psd,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
MySmooth(vector_p_psd+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 3, SMOOTH_N, 5, vector_p_psd_smooth+idx1);
MySmooth(vector_p_psd_smooth+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 2, SMOOTH_N, 5, vector_p_psd_smooth+idx1);
hipMemcpy(dev_vector_p_psd_S,vector_p_psd_smooth,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( PsdSub), dim3(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK),dim3(THREADNUMPERBLK), 0, 0, dev_vector_psd_avg,dev_vector_p_psd_S,dev_vector_p_psd_E,idx1,idx2);
hipMemcpy(vector_p_psd_smooth,dev_vector_p_psd_E,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),hipMemcpyDeviceToHost);
//fwrite(vector_p_psd_smooth,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
fVectorPsdEVar=0.0;
for (int ii=idx1;ii<=idx2;ii++)
{
fVectorPsdEVar+=vector_p_psd_smooth[ii]*vector_p_psd_smooth[ii];
}
fVectorPsdEVar/=(float)(idx2-idx1+1);
fVectorPsdEVar=sqrtf(fVectorPsdEVar);
printf("success 5!\n");
//
int ll = 0;
if(FrameNum >= 8)
{
nVectorPlineNum = 0;
memset(fVectorPlineInfo,0,LINE_NUM*4*sizeof(float));
for(int ii=idx1;ii<=idx2;ii++)
{
if(vector_p_psd_smooth[ii]>4.0*fVectorPsdEVar && vector_p_psd_smooth[ii]>vector_p_psd_smooth[ii-1] && vector_p_psd_smooth[ii]>vector_p_psd_smooth[ii+1] )
{
if(nVectorPlineNum<LINE_NUM)
{
//
for(ll = 0;ll<nVectorPlineNum;ll++)
{
if(fabs(fVectorPlineInfo[ll][1]-(float)ii*fDf)<1.0)
{
break;
}
}
if(ll == nVectorPlineNum)
{
fVectorPlineInfo[nVectorPlineNum][0] = vector_p_psd_smooth[ii]; //
fVectorPlineInfo[nVectorPlineNum][1] = (float)ii*fDf; //
fVectorPlineInfo[nVectorPlineNum][2] = vector_p_psd[ii];
//fVectorPlineInfo[nVectorPlineNum][3] = tracedtargetangle[jj];
hipfftComplex P_f,Vx_f,Vy_f;
hipMemcpy(&P_f,dev_vector_p_spec+ii,sizeof(hipfftComplex),hipMemcpyDeviceToHost);
hipMemcpy(&Vx_f,dev_vector_x_spec+ii,sizeof(hipfftComplex),hipMemcpyDeviceToHost);
hipMemcpy(&Vy_f,dev_vector_y_spec+ii,sizeof(hipfftComplex),hipMemcpyDeviceToHost);
if(FrameNum == 20)
{
FrameNum = FrameNum;
}
fVectorPlineInfo[nVectorPlineNum][3] = VectorThetSPF(P_f, Vx_f, Vy_f);
if(fVectorPlineInfo[nVectorPlineNum][3] > 180.0)
{
fVectorPlineInfo[nVectorPlineNum][3] -= 360.0;
}
else if(fVectorPlineInfo[nVectorPlineNum][3] < -180.0)
{
fVectorPlineInfo[nVectorPlineNum][3] += 360.0;
}
nVectorPlineNum++;
}
else if(vector_p_psd_smooth[ii] > fVectorPlineInfo[ll][0])
{
fVectorPlineInfo[ll][0] = vector_p_psd_smooth[ii];
fVectorPlineInfo[ll][1] = (float)ii*fDf;
fVectorPlineInfo[ll][2] = vector_p_psd[ii];
hipfftComplex P_f,Vx_f,Vy_f;
hipMemcpy(&P_f,dev_vector_p_spec+ii,sizeof(hipfftComplex),hipMemcpyDeviceToHost);
hipMemcpy(&Vx_f,dev_vector_x_spec+ii,sizeof(hipfftComplex),hipMemcpyDeviceToHost);
hipMemcpy(&Vy_f,dev_vector_y_spec+ii,sizeof(hipfftComplex),hipMemcpyDeviceToHost);
fVectorPlineInfo[nVectorPlineNum][3] = VectorThetSPF(P_f, Vx_f, Vy_f);
if(fVectorPlineInfo[ll][3] > 180.0)
{
fVectorPlineInfo[ll][3] -= 360.0;
}
else if(fVectorPlineInfo[ll][3] < -180.0)
{
fVectorPlineInfo[ll][3] += 360.0;
}
}
}
}
}
}
for(int ii = 0;ii<nVectorPlineNum;ii++)
{
printf("fVectorPlineInfo %d:%.3f\n",ii+1,fVectorPlineInfo[ii][3]);
}
//------------------------------------------------------------------------------------------------
hipEventRecord(stop1,NULL);
hipEventSynchronize(stop1);
hipEventElapsedTime(&msecTotal,start1,stop1);
printf("%d:%f;%d,%d;%d,%d;%d,%d\n",FrameNum,msecTotal,tracedtargetbeam[0][0],tracedtargetbeam[0][1],tracedtargetbeam[1][0],tracedtargetbeam[1][1],tracedtargetbeam[2][0],tracedtargetbeam[2][1]);
printf("\n");
fprintf(fplog,"%d:%f;%d,%d;%d,%d;%d,%d\n",FrameNum,msecTotal,tracedtargetbeam[0][0],tracedtargetbeam[0][1],tracedtargetbeam[1][0],tracedtargetbeam[1][1],tracedtargetbeam[2][0],tracedtargetbeam[2][1]);
fflush(fplog);
}
}
void *DataFormatting(void *lParam)
{
//int retval1 = -1;
//int retval2 = -1;
int BUF_FLAG = 0;
int temp = 0;
if(ChannDataBufA != NULL)
{
free(ChannDataBufA);
ChannDataBufA = NULL;
}
ChannDataBufA = (float *)malloc(FRAMELEN*CHANNUM*2*sizeof(float));
memset(ChannDataBufA,0,FRAMELEN*CHANNUM*2*sizeof(float));
if(ChannDataBufB != NULL)
{
free(ChannDataBufB);
ChannDataBufB = NULL;
}
ChannDataBufB = (float *)malloc(FRAMELEN*CHANNUM*2*sizeof(float));
memset(ChannDataBufB,0,FRAMELEN*CHANNUM*2*sizeof(float));
while (1)
{
//#if ONLINEMODE
// pthread_mutex_lock(&count_lock_BoardDataReady);
// while (count_BoardDataReady == 0)
// {
// pthread_cond_wait(&cond_BoardDataReady,&count_lock_BoardDataReady);
// }
// count_BoardDataReady = count_BoardDataReady -1;
// pthread_mutex_unlock(&count_lock_BoardDataReady);
//#endif
//#if FILEMODE
pthread_mutex_lock(&count_lock_Board1DataReady);
while (count_Board1DataReady == 0)
{
pthread_cond_wait(&cond_Board1DataReady,&count_lock_Board1DataReady);
}
count_Board1DataReady = count_Board1DataReady -1;
pthread_mutex_unlock(&count_lock_Board1DataReady);
pthread_mutex_lock(&count_lock_Board2DataReady);
while (count_Board2DataReady == 0)
{
pthread_cond_wait(&cond_Board2DataReady,&count_lock_Board2DataReady);
}
count_Board2DataReady = count_Board2DataReady -1;
pthread_mutex_unlock(&count_lock_Board2DataReady);
if(BUF_FLAG == 0)
{
for(int ii=0;ii<CHANNUM;ii++)
{
for(int jj=0;jj<FRAMELEN;jj++)
{
temp = DataBufA_B1[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
ChannDataBufA[ii*FRAMELEN+jj] = temp*1.0/pow(2.0,23) * 2.5;
temp = DataBufA_B2[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
ChannDataBufA[ii*FRAMELEN+jj+FRAMELEN*CHANNUM] = temp*1.0/pow(2.0,23) * 2.5;
}
}
BUF_FLAG = 1;
printf("DataFormatting Finished!\n");
pthread_mutex_lock(&count_lock_FrameDataReady);
pthread_cond_signal(&cond_FrameDataReady);
count_FrameDataReady = count_FrameDataReady+1;
pthread_mutex_unlock(&count_lock_FrameDataReady);
}
else
{
for(int ii=0;ii<CHANNUM;ii++)
{
for(int jj=0;jj<FRAMELEN;jj++)
{
temp = DataBufB_B1[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
ChannDataBufB[ii*FRAMELEN+jj] = temp*1.0/pow(2.0,23) * 2.5;
temp = DataBufB_B2[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
ChannDataBufB[ii*FRAMELEN+jj+FRAMELEN*CHANNUM] = temp*1.0/pow(2.0,23) * 2.5;
}
}
BUF_FLAG = 0;
printf("DataFormatting Finished!\n");
pthread_mutex_lock(&count_lock_FrameDataReady);
pthread_cond_signal(&cond_FrameDataReady);
count_FrameDataReady = count_FrameDataReady+1;
pthread_mutex_unlock(&count_lock_FrameDataReady);
}
//#endif
}
}
void *ReceiveNetwork(void *lParam)
{
char errBuf[PCAP_ERRBUF_SIZE], *device;
pcap_t *handle;
bpf_u_int32 mask;
bpf_u_int32 net;
struct bpf_program filter;
char filter_app[] = "udp dst port 0"; //setting the filter package
struct pcap_pkthdr packet;
const u_char *pktStr;
char packtype = 0;
short portnumber = 0;
char sourceid = 0;
char FramenumN1 = -1, FramenumN2 = -1;
char LastFramenumN1 = 0, LastFramenumN2 = 0;
int readbufb1[TL*CHANNUM+1],readbufb2[TL*CHANNUM+1];
int BUF_FLAG_B1=0,BUF_FLAG_B2;
int *pBuf_B1 = NULL,*pBuf_B2 = NULL;
int *pCounter_B1 = NULL,*pCounter_B2 = NULL;
int CounterA_B1 = FRAMELEN,CounterB_B1 = FRAMELEN;
int CounterA_B2 = FRAMELEN,CounterB_B2 = FRAMELEN;
int temp = 0;
int FrameNum1 = 0,FrameNum2 = 0, FrameNum = 0;
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B1 != NULL)
{
free(DataBufB_B1);
DataBufB_B1 = NULL;
}
DataBufB_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B1,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufA_B2 != NULL)
{
free(DataBufA_B2);
DataBufA_B2 = NULL;
}
DataBufA_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B2,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B2 != NULL)
{
free(DataBufB_B2);
DataBufB_B2 = NULL;
}
DataBufB_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B2,0,FRAMELEN*CHANNUM*sizeof(int));
//get the name of the first device suitable for capture
device = pcap_lookupdev(errBuf);
if ( device )
{
printf("success: device: %s\n",device);
}
else
{
printf("error: %s\n",errBuf);
return 0;
}
//open network device for packet capture
handle = pcap_open_live(device,BUFSIZ,1,0,errBuf);
//look up into from the capture device
pcap_lookupnet(device,&net,&mask,errBuf);
printf("net=%x mask=%x\n",net,mask);
//compiles the filter expression into a bpf filter rogram
printf("compiles the filter expression into a bpf filter program\r\n");
pcap_compile(handle,&filter,filter_app,0,net);
//load the filter program into the packet capture device
printf("load the filter program into the packet capture device\r\n");
pcap_setfilter(handle,&filter);
while (1)
{
//printf("before Received data!\n");
pktStr = pcap_next(handle,&packet);
//printf("Received data!\n");
if(pktStr != NULL)
{
//printf("Received data!\n");
//
memcpy((char *)&portnumber,pktStr+37,sizeof(char));
memcpy((char *)&portnumber+1,pktStr+36,sizeof(char));
if (portnumber == DEST_PORT)
{
//
memcpy(&packtype,pktStr+45,sizeof(char));
memcpy(&sourceid,pktStr+43,sizeof(char));
if (packtype == 0x10) // if packet is ADC packet
{
if(sourceid == 1)
{
FrameNum1++;
memcpy(readbufb1,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
FramenumN1 = *(pktStr+44);
FramenumN1 = FramenumN1 >> 2;
if (FrameNum1 == 1)
{
LastFramenumN1 = FramenumN1;
}
else
{
if (FramenumN1 != LastFramenumN1+1 && FramenumN1+63 != LastFramenumN1)
{
printf("Lost Board1 data package!\n");
}
LastFramenumN1 = FramenumN1;
}
}
if(sourceid == 2)
{
FrameNum2++;
memcpy(readbufb2,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
FramenumN2 = *(pktStr+44);
FramenumN2 = FramenumN2 >> 2;
if (FrameNum2 == 1)
{
LastFramenumN2 = FramenumN2;
}
else
{
if (FramenumN2 != LastFramenumN2+1 && FramenumN2+63 != LastFramenumN2)
{
printf("Lost Board2 data package!\n");
}
LastFramenumN2 = FramenumN2;
}
}
if (FramenumN1 == FramenumN2 && FramenumN2 >= 0) //receive both board data
{
//-----------------board1 data accumulate---------------------------
if(0 == BUF_FLAG_B1)
{
pBuf_B1 = DataBufA_B1;
pCounter_B1 = &CounterA_B1;
}
else
{
pBuf_B1 = DataBufB_B1;
pCounter_B1 = &CounterB_B1;
}
if(*(pCounter_B1)>=TL) //
{
memcpy(pBuf_B1+FRAMELEN*CHANNUM-(*(pCounter_B1))*CHANNUM,readbufb1+1,TL*CHANNUM*sizeof(int));
*(pCounter_B1) = *(pCounter_B1)-TL;
}
else
{
temp = TL - *(pCounter_B1);
memcpy(pBuf_B1+FRAMELEN*CHANNUM-(*(pCounter_B1))*CHANNUM,readbufb1+1,(*(pCounter_B1))*CHANNUM*sizeof(int));
*(pCounter_B1)= FRAMELEN;
if(0 == BUF_FLAG_B1)
{
memcpy(DataBufB_B1+FRAMELEN*CHANNUM-CounterB_B1*CHANNUM,readbufb1+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterB_B1 = CounterB_B1 - temp;
BUF_FLAG_B1 = 1;
}
else //
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA_B1*CHANNUM,readbufb1+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterA_B1 = CounterA_B1 - temp;
BUF_FLAG_B1 = 0;
}
pthread_mutex_lock(&count_lock_Board1DataReady);
pthread_cond_signal(&cond_Board1DataReady);
count_Board1DataReady = count_Board1DataReady+1;
pthread_mutex_unlock(&count_lock_Board1DataReady);
// printf("ReceiveNetworkData A Finished!\n");
}
//-----------------board2 data accumulate---------------------------
if(0 == BUF_FLAG_B2)
{
pBuf_B2 = DataBufA_B2;
pCounter_B2 = &CounterA_B2;
}
else
{
pBuf_B2 = DataBufB_B2;
pCounter_B2 = &CounterB_B2;
}
if(*(pCounter_B2)>=TL) //
{
memcpy(pBuf_B2+FRAMELEN*CHANNUM-(*(pCounter_B2))*CHANNUM,readbufb2+1,TL*CHANNUM*sizeof(int));
*(pCounter_B2) = *(pCounter_B2)-TL;
}
else
{
temp = TL - *(pCounter_B2);
memcpy(pBuf_B2+FRAMELEN*CHANNUM-(*(pCounter_B2))*CHANNUM,readbufb2+1,(*(pCounter_B2))*CHANNUM*sizeof(int));
*(pCounter_B2)= FRAMELEN;
if(0 == BUF_FLAG_B2)
{
memcpy(DataBufB_B2+FRAMELEN*CHANNUM-CounterB_B2*CHANNUM,readbufb2+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterB_B2 = CounterB_B2 - temp;
BUF_FLAG_B2 = 1;
}
else
{
memcpy(DataBufA_B2+FRAMELEN*CHANNUM-CounterA_B2*CHANNUM,readbufb2+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterA_B2 = CounterA_B2 - temp;
BUF_FLAG_B2 = 0;
}
pthread_mutex_lock(&count_lock_Board2DataReady);
pthread_cond_signal(&cond_Board2DataReady);
count_Board2DataReady = count_Board2DataReady+1;
pthread_mutex_unlock(&count_lock_Board2DataReady);
// printf("ReceiveNetworkData B Finished!\n");
}
}
}
}
}
//printf("ReceiveNetworkData Finished!\n");
//pthread_mutex_lock(&count_lock_BoardDataReady);
//pthread_cond_signal(&cond_BoardDataReady);
//count_BoardDataReady = count_BoardDataReady+1;
//pthread_mutex_unlock(&count_lock_BoardDataReady);
}
}
void *ReadBoard1Data(void *lParam)
{
int fileindex = 0;
std::string FilePath = "/home/ubuntu/Desktop/GPU/uwrn/";
std::string FileNamePre = "Board1_ADC_";
std::string FileIdx = std::to_string(fileindex);
std::string FileNameSur = ".bin";
std::string FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
int DataFileNum = 18;
FILE *fp = NULL;
//int readbytes = 0;
int readbuf[TL*CHANNUM+1];
int BUF_FLAG=0;
int *pBuf = NULL;
int *pCounter = NULL;
int CounterA = FRAMELEN,CounterB = FRAMELEN;
int temp = 0;
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B1 != NULL)
{
free(DataBufB_B1);
DataBufB_B1 = NULL;
}
DataBufB_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B1,0,FRAMELEN*CHANNUM*sizeof(int));
//
for(int ii=0;ii<DataFileNum;ii++)
{
fileindex = ii;
FileIdx = std::to_string(fileindex);
FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
if(fp != NULL)
{
fclose(fp);
fp = NULL;
}
fp = fopen(FileName.c_str(),"rb");
for(int jj=0;jj<8e4;jj++)
{
usleep(TL*1e6 / FS);
fread(readbuf,sizeof(int),TL*CHANNUM+1,fp);
if(0 == BUF_FLAG)
{
pBuf = DataBufA_B1;
pCounter = &CounterA;
}
else
{
pBuf = DataBufB_B1;
pCounter = &CounterB;
}
if(*(pCounter)>=TL) //
{
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,TL*CHANNUM*sizeof(int));
*(pCounter) = *(pCounter)-TL;
}
else
{
temp = TL - *(pCounter);
//
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,(*(pCounter))*CHANNUM*sizeof(int));
//
*(pCounter)= FRAMELEN;
//
if(0 == BUF_FLAG) //
{
memcpy(DataBufB_B1+FRAMELEN*CHANNUM-CounterB*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterB = CounterB - temp;
//
BUF_FLAG = 1;
}
else //
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterA = CounterA - temp;
//
BUF_FLAG = 0;
}
//
//SetEvent(g_hReadBoard1ThreadReadyEnvent);
pthread_mutex_lock(&count_lock_Board1DataReady);
pthread_cond_signal(&cond_Board1DataReady);
count_Board1DataReady = count_Board1DataReady+1;
pthread_mutex_unlock(&count_lock_Board1DataReady);
}
}
}
return NULL;
}
void *ReadBoard2Data(void *lParam)
{
int fileindex = 0;
std::string FilePath = "/home/ubuntu/Desktop/GPU/uwrn/";
std::string FileNamePre = "Board2_ADC_";
std::string FileIdx = std::to_string(fileindex);
std::string FileNameSur = ".bin";
std::string FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
int DataFileNum = 18;
FILE *fp = NULL;
//int readbytes = 0;
int readbuf[TL*CHANNUM+1];
int BUF_FLAG=0;
int *pBuf = NULL;
int *pCounter = NULL;
int CounterA = FRAMELEN,CounterB = FRAMELEN;
int temp = 0;
if(DataBufA_B2 != NULL)
{
free(DataBufA_B2);
DataBufA_B2 = NULL;
}
DataBufA_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B2,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B2 != NULL)
{
free(DataBufB_B2);
DataBufB_B2 = NULL;
}
DataBufB_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B2,0,FRAMELEN*CHANNUM*sizeof(int));
//
for(int ii=0;ii<DataFileNum;ii++)
{
fileindex = ii;
FileIdx = std::to_string(fileindex);
FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
if(fp != NULL)
{
fclose(fp);
fp = NULL;
}
fp = fopen(FileName.c_str(),"rb");
for(int jj=0;jj<8e4;jj++)
{
usleep(TL*1e6 / FS);
fread(readbuf,sizeof(int),TL*CHANNUM+1,fp);
if(0 == BUF_FLAG)
{
pBuf = DataBufA_B2;
pCounter = &CounterA;
}
else
{
pBuf = DataBufB_B2;
pCounter = &CounterB;
}
if(*(pCounter)>=TL) //
{
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,TL*CHANNUM*sizeof(int));
*(pCounter) = *(pCounter)-TL;
}
else
{
temp = TL - *(pCounter);
//
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,(*(pCounter))*CHANNUM*sizeof(int));
//
*(pCounter)= FRAMELEN;
//
if(0 == BUF_FLAG) //
{
memcpy(DataBufB_B2+FRAMELEN*CHANNUM-CounterB*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterB = CounterB - temp;
//
BUF_FLAG = 1;
}
else //
{
memcpy(DataBufA_B2+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterA = CounterA - temp;
//
BUF_FLAG = 0;
}
//
//SetEvent(g_hReadBoard2ThreadReadyEnvent);
pthread_mutex_lock(&count_lock_Board2DataReady);
pthread_cond_signal(&cond_Board2DataReady);
count_Board2DataReady = count_Board2DataReady+1;
pthread_mutex_unlock(&count_lock_Board2DataReady);
}
}
}
return NULL;
}
| e8b1402eb3c2b816746440b52cef68f1f1b9d4db.cu | // /home/ubuntu/Desktop/GPU/main.c
// nvcc main.cu -o test -lstdc++ -lpthread -lcufft -lpcap -std=c++11 -lpcap
#include <pcap.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <pthread.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/ip_icmp.h>
#include <net/ethernet.h>
#include <netinet/if_ether.h>
#include <netinet/ether.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <memory.h>
#include <malloc.h>
#include <iostream>
//--------------CUDA----------------
#include <cuda_runtime.h>
//#include <device_launch_parameters.h>
//#include <device_functions.h>
#include <cufft.h>
//#include <cufftXt.h>
//-------------------------------------
// ----------------------------------------
#define NFFT 16384 //
#define PI 3.1415926f
#define UWC 1500.0f //
#define FS 100000 //
#define threadsPerBlock 512
#define d 0.07f
#define FL 100.0f
#define FH 4000.0f
#define TL 17
#define CHANNUM 16
#define FRAMELEN 65536
#define DOWNSAMPLE 4
#define FIRORDER 2048
#define FILTER_FRAME (2*FRAMELEN)
#define BEAMNUM 91
#define THREADNUMPERBLK 256
#define ARRAYNUM 15
#define STARTBEAM 15
#define ENDBEAM 75
#define MAXTRACETARNUM 3
#define M 3
#define ONLINEMODE 0
#define FILEMODE 1
#define DEST_PORT 0
#define PSD_LEN 20
#define PSD_AVG_NUM 8
#define EPS 1e-8
#define SMOOTH_N 100
#define LINE_NUM 16
#define DEM_RST_LEN 1024
#define VECTOR_P_IDX 22
#define VECTOR_X_IDX 16
#define VECTOR_Y_IDX 18
// -----------------------------------------------------
void *ReadBoard1Data(void *lParam);
void *ReadBoard2Data(void *lParam);
void *DataFormatting(void *lParam);
void *ReceiveNetwork(void *lParam);
void *ArraySignalProcessing(void *lParam);
//-----------------------------------------------------
pthread_mutex_t count_lock_BoardDataReady;
pthread_mutex_t count_lock_Board1DataReady;
pthread_mutex_t count_lock_Board2DataReady;
pthread_mutex_t count_lock_FrameDataReady;
pthread_cond_t cond_BoardDataReady;
pthread_cond_t cond_Board1DataReady;
pthread_cond_t cond_Board2DataReady;
pthread_cond_t cond_FrameDataReady;
unsigned int count_BoardDataReady;
unsigned int count_Board1DataReady;
unsigned int count_Board2DataReady;
unsigned int count_FrameDataReady;
//-----------------------------------------------------
int *DataBufA_B1 = NULL;
int *DataBufB_B1 = NULL;
int *DataBufA_B2 = NULL;
int *DataBufB_B2 = NULL;
float *ChannDataBufA = NULL;
float *ChannDataBufB = NULL;
float *DownSamplingDataBufA = NULL;
float *DownSamplingDataBufB = NULL;
//---------------------------------------------------
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h);
float window(int type,int n,int i,float beta);
float kaiser(int i,int n,float beta);
float bessel0(float x);
void findpeak(float *data, int *p,int dn);
void findvalley(float *data, int *p,int dn);
bool peakdetection(int beamidx,float *be,int *valley,float threshold);
void rbub(float *p,int *idx,int n);
void MySmooth(float *datain,int nDataLen,float *paraA,int nParaLen,int nOrder,int nWindow,int nStep,float *dataout);
void CalSmoothPara(float *para);
//-----------------------------------------------------
//功率谱分析
float fSmoothA[4][SMOOTH_N]={0.0}; //滑动窗正交多项式拟合时所用规范正交向量
float fPlineInfo[MAXTRACETARNUM][LINE_NUM][4]={0};//功率谱信息
float fDlineInfo[MAXTRACETARNUM][LINE_NUM][2]={0};//解调谱信息
int nPlineNum = 0;
int nDlineNum = 0;
int nVectorPlineNum = 0;
float fVectorPlineInfo[LINE_NUM][4]={0}; //功率谱信息
//解调谱分析
int DemFreqBandNum=0; //解调谱分析分频带数,默认最多分10个频带
float DemStartFreq[10]={0.0}; //解调谱分析分频带起始频率
float DemEndFreq[10]={0.0}; //解调谱分析分频带结束频率
// -----------------------------------------------------------
int main()
{
pthread_t t_ReceiveNetworkData;
pthread_t t_DataFormatting;
pthread_t t_ArraySignalProcessing;
pthread_t t_ReadBoard1Data;
pthread_t t_ReadBoard2Data;
cond_BoardDataReady = PTHREAD_COND_INITIALIZER;
cond_Board1DataReady = PTHREAD_COND_INITIALIZER;
cond_Board2DataReady = PTHREAD_COND_INITIALIZER;
cond_FrameDataReady = PTHREAD_COND_INITIALIZER;
count_lock_BoardDataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_Board1DataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_Board2DataReady = PTHREAD_MUTEX_INITIALIZER;
count_lock_FrameDataReady = PTHREAD_MUTEX_INITIALIZER;
pthread_create(&t_ArraySignalProcessing,NULL,ArraySignalProcessing,(void *)NULL);
pthread_create(&t_DataFormatting,NULL,DataFormatting,(void *)NULL);
#if ONLINEMODE
pthread_create(&t_ReceiveNetworkData,NULL,ReceiveNetwork,(void *)NULL);
#endif
#if FILEMODE
pthread_create(&t_ReadBoard1Data,NULL,ReadBoard1Data,(void *)NULL);
pthread_create(&t_ReadBoard2Data,NULL,ReadBoard2Data,(void *)NULL);
#endif
pthread_join(t_ArraySignalProcessing, NULL);
return 0;
}
int fir1(int n,int band,float fl,float fh,float fs,int wn, float *h)
{
int i,n2,mid;
float sum = 0;
float s,wc1,wc2,beta = 0,delay;
float fln = fl / fs;
float fhn = fh / fs;
beta = 6;
if((n%2)==0)
{
n2=n/2-1;
mid=1;
}
else
{
n2=n/2;
mid=0;
}
delay=n/2.0;
wc1=2.0*PI*fln;
if(band>=3) wc2=2.0*PI*fhn;
switch(band)
{
case 1://
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)/(PI*s))*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=wc1/PI;
for(i=0;i<=n;i++)
{
sum=sum+*(h+i);
}
for(i=0;i<=n;i++)
{
*(h+i)=*(h+i)/fabs(sum);
}
break;
}
case 2: //
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(PI*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=1.0-wc1/PI;
break;
}
case 3: //
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc2*s)-sin(wc1*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc2-wc1)/PI;
break;
}
case 4: //
{
for (i=0;i<=n2;i++)
{
s=i-delay;
*(h+i)=(sin(wc1*s)+sin(PI*s)-sin(wc2*s))/(PI*s);
*(h+i)=*(h+i)*window(wn,n+1,i,beta);
*(h+n-i)=*(h+i);
}
if(mid==1) *(h+n/2)=(wc1+PI-wc2)/PI;
break;
}
}
return 0;
}
float window(int type,int n,int i,float beta)
{
int k;
float w=1.0;
switch(type)
{
case 1: //
{
w=1.0;
break;
}
case 2: //
{
k=(n-2)/10;
if(i<=k) w=0.5*(1.0-cos(i*PI/(k+1)));
if(i>n-k-2) w=0.5*(1.0-cos((n-i-1)*PI/(k+1)));
break;
}
case 3: //
{
w=1.0-fabs(1.0-2*i/(n-1.0));
break;
}
case 4: //
{
w=0.5*(1.0-cos(2*i*PI/(n-1.0)));
break;
}
case 5: //
{
w=0.54-0.46*cos(2*i*PI/(n-1.0));
break;
}
case 6: //
{
w=0.42-0.5*cos(2*i*PI/(n-1.0))+0.08*cos(4*i*PI/(n-1.0));
break;
}
case 7: //
{
w=kaiser(i,n,beta);
break;
}
}
return(w);
}
float kaiser(int i,int n,float beta) //
{
float a,w,a2,b1,b2,beta1;
b1=bessel0(beta);
a=2.0*i/(float)(n-1)-1.0;
a2=a*a;
beta1=beta*sqrt(1.0-a2);
b2=bessel0(beta1);
w=b2/b1;
return(w);
}
float bessel0(float x) //
{
int i;
float dd,y,d2,sum = 0;
y=x/2.0;
dd=1.0;
for(i=1;i<=25;i++)
{
dd=dd*y/i;
d2=dd*dd;
sum=sum+d2;
if(d2<sum*(1.0e-8)) break;
}
return(sum);
}
__global__ void PhiShiftFactorGen(cufftComplex *XNSS)
{
int bid = 0,tid = 0;
float tt = 0.0f;
float angle=0.0f;
float det[ARRAYNUM];
float MovePoints[ARRAYNUM];
bid = blockIdx.x;
tid = threadIdx.x;
angle=float(tid*PI/(BEAMNUM-1));
for(int i=0;i<ARRAYNUM;i++)
{
det[i]=i*d*cos(angle)/UWC;
MovePoints[i]=det[i]*FS/DOWNSAMPLE;
tt=MovePoints[i]*2*PI*bid/NFFT;
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].x = cos(tt);
XNSS[tid*ARRAYNUM*NFFT/2+i*NFFT/2+bid].y = sin(tt);
}
}
void findpeak(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
//
for(j=1;j<11;j++)
{
if ((i+j)>=dn)
{
a1=*(data+i+j-dn);
}
else
{
a1=*(data+i+j);
}
if (a0>a1)
{
acc=acc+1;
}
}
a0=*(data+i);
//
for(j=1;j<11;j++)
{
if ((i-j)<0)
{
a1=*(data+i-j+dn);
}
else
{
a1=*(data+i-j);
}
if (a0>a1)
{
acc1=acc1+1;
}
}
if ((acc==10) && (acc1==10))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
void findvalley(float *data, int *p,int dn)
{
int acc=0,acc1=0;
int i,j;
float a0=0.0,a1=0.0;
for(i=0;i<dn;i++)
{
a0=*(data+i);
//
for(j=1;j<6;j++)
{
if ((i+j)>=dn)
{
break;
}
else
{
a1=*(data+i+j);
}
if (a0<a1)
{
acc=acc+1;
}
}
if(j<5) //
{
acc = 5;
}
a0=*(data+i);
//
for(j=1;j<6;j++)
{
if ((i-j)<0)
{
break;
}
else
{
a1=*(data+i-j);
}
if (a0<a1)
{
acc1=acc1+1;
}
}
if(j<5) //
{
acc1 = 5;
}
if ((acc==5) && (acc1==5))
{
*(p+i)=1;
}
acc=0;
acc1=0;
}
}
bool peakdetection(int beamidx,float *be,int *valley,float threshold)
{
int index = 0,ll=0;
float pvr1 = 1.0,pvr2 = 1.0;
if(beamidx >= STARTBEAM && beamidx <= ENDBEAM)
{
for(ll=beamidx+1;ll<BEAMNUM;ll++)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll<=BEAMNUM-1)
{
pvr1 = be[beamidx] / be[index];
}
for(ll=beamidx-1;ll>=0;ll--)
{
if(valley[ll] == 1)
{
index = ll;
break;
}
}
if(ll>=0)
{
pvr2 = be[beamidx] / be[index];
}
if(pvr1 >= threshold && pvr2 >= threshold)
{
return true;
}
else
{
return false;
}
}
else
{
return false;
}
}
void rbub(float *p,int *idx,int n)
{
int m,k,j,i,xx;
float dd;
k=0;
m=n-1;
while (k<m)
{
j=m-1; m=0;
for(i=k; i<=j; i++)
{
if(p[i]<p[i+1])
{
dd=p[i];
p[i]=p[i+1];
p[i+1]=dd;
xx = idx[i];
idx[i] = idx[i+1];
idx[i+1] = xx;
m=i;
}
}
j=k+1;
k=0;
for (i=m; i>=j; i--)
{
if(p[i-1]<p[i])
{
dd=p[i];
p[i]=p[i-1];
p[i-1]=d;
xx = idx[i];
idx[i] = idx[i-1];
idx[i-1] = xx;
k=i;
}
}
}
return;
}
void MySmooth(float *datain,int nDataLen,float *paraA,int nParaLen,int nOrder,int nWindow,int nStep,float *dataout)
{
int nFrameNum,ii,jj,nFrameCnt,idx;
float rr[4]={0};
float fsmooth_tmp[SMOOTH_N]={0};
float fsmooth_tmp2[SMOOTH_N]={0};
nFrameNum=(nDataLen-nWindow)/nStep+1;
for (nFrameCnt=0;nFrameCnt<nFrameNum;nFrameCnt++)
{
if(nFrameCnt==0)
{
memcpy(fsmooth_tmp,datain,nWindow*sizeof(float));
}
else
{
memcpy(&fsmooth_tmp[nWindow-nStep],&datain[nWindow+(nFrameCnt-1)*nStep],nStep*sizeof(float));
}
for (ii=0;ii<nOrder;ii++)
{
rr[ii]=0.0;
for (jj=0;jj<nWindow;jj++)
{
rr[ii]+=fsmooth_tmp[jj]*fSmoothA[ii][jj];
}
}
memset(fsmooth_tmp2,0,SMOOTH_N*sizeof(float));
for (ii=0;ii<nWindow;ii++)
{
for (jj=0;jj<nOrder;jj++)
{
fsmooth_tmp2[ii]+=rr[jj]*fSmoothA[jj][ii];
}
}
memcpy(&dataout[nFrameCnt*nStep],fsmooth_tmp2,nStep*sizeof(float));
memcpy(fsmooth_tmp,&fsmooth_tmp2[nStep],(nWindow-nStep)*sizeof(float));
}//for (nFrameCnt=0;nFrameCnt<nFrameNum-1;nFrameCnt++)
if ((nFrameNum*nStep+nWindow)-nDataLen<nStep)
{
idx=(nFrameNum*nStep+nWindow)-nDataLen;
memcpy(fsmooth_tmp,&fsmooth_tmp2[nStep-idx],(nWindow-nStep+idx)*sizeof(float));
memcpy(&fsmooth_tmp[nWindow-nStep+idx],&datain[nWindow+(nFrameNum-1)*nStep],(nStep-idx)*sizeof(float));
for (ii=0;ii<nOrder;ii++)
{
rr[ii]=0.0;
for (jj=0;jj<nWindow;jj++)
{
rr[ii]+=fsmooth_tmp[jj]*fSmoothA[ii][jj];
}
}
memset(fsmooth_tmp2,0,SMOOTH_N*sizeof(float));
for (ii=0;ii<nWindow;ii++)
{
for (jj=0;jj<nOrder;jj++)
{
fsmooth_tmp2[ii]+=rr[jj]*fSmoothA[jj][ii];
}
}
memcpy(&dataout[nFrameNum*nStep],&fsmooth_tmp2[idx],(nWindow-idx)*sizeof(float));
}
else//if ((nFrameNum*nStep+nWindow)-nDataLen<nStep)
{
memcpy(&dataout[nFrameNum*nStep],&fsmooth_tmp2[nStep],(nWindow-nStep)*sizeof(float));
}//if ((nFrameNum*nStep+nWindow)-nDataLen<nStep)
}
void CalSmoothPara(float *para)
{
float fpara[4][SMOOTH_N];
float ftmp,ftmp2,ftmp3;
int ii,jj;
ftmp=sqrtf((float)(SMOOTH_N));
ftmp=1.0/ftmp;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[0][ii]=ftmp;
}
ftmp2=0;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[1][ii]=(float)(ii-(SMOOTH_N-1)/2);
fpara[2][ii]=fpara[1][ii]*fpara[1][ii];
ftmp2+=fpara[2][ii];
fpara[3][ii]=fpara[2][ii]*fpara[1][ii];
}
ftmp=1.0/sqrtf(ftmp2);
ftmp3=0;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[1][ii]=fpara[1][ii]*ftmp;
ftmp3+=fpara[1][ii]*fpara[3][ii];
}
ftmp=0;
ftmp2=ftmp2/(float)(SMOOTH_N);
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[2][ii]=fpara[2][ii]-ftmp2;
ftmp+=fpara[2][ii]*fpara[2][ii];
}
ftmp=1.0/sqrtf(ftmp);
ftmp2=0;
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[2][ii]=fpara[2][ii]*ftmp;
fpara[3][ii]=fpara[3][ii]-ftmp3*fpara[1][ii];
ftmp2+=fpara[3][ii]*fpara[3][ii];
}
ftmp=1.0/sqrtf(ftmp2);
for (ii=0;ii<SMOOTH_N;ii++)
{
fpara[3][ii]=fpara[3][ii]*ftmp;
}
memcpy(para,&fpara[0][0],sizeof(float)*4*SMOOTH_N);
}
__global__ void FD_Beamform(cufftComplex *dev_fft,cufftReal *dev_energy,cufftComplex *PhiArray,int nfl,int nfh)
{
__shared__ float Mabs[THREADNUMPERBLK];
float tempX=0.0f;
float tempY=0.0f;
cuComplex XNSS;
cuComplex XFFTafterPinYi;
float ax = 0.0f,ay=0.0f,bx=0.0f,by=0.0f;
float energyEachBoShu = 0.0f;
int bid = 0,tid = 0;
int beamidx = 0, freqidx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
beamidx = bid % BEAMNUM;
freqidx = bid / BEAMNUM*THREADNUMPERBLK+tid;
if(tid==0)
{
memset(Mabs,0,sizeof(float)*THREADNUMPERBLK);
}
__syncthreads();
//
tempX=0.0;
tempY=0.0;
for(int i=0;i<ARRAYNUM;i++)
{
XNSS.x=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].x;
XNSS.y=PhiArray[beamidx*ARRAYNUM*(NFFT/2)+i*(NFFT/2)+freqidx].y;
ax=dev_fft[i*(NFFT/2+1)+freqidx].x;
ay=dev_fft[i*(NFFT/2+1)+freqidx].y;
bx=XNSS.x;
by=XNSS.y;
if (freqidx>= nfl && freqidx<=nfh)
{
XFFTafterPinYi.x=ax*bx-ay*by;
XFFTafterPinYi.y=ax*by+bx*ay;
}
else
{
XFFTafterPinYi.x=0;
XFFTafterPinYi.y=0;
}
tempX=tempX+ XFFTafterPinYi.x;
tempY=tempY+ XFFTafterPinYi.y;
}
Mabs[tid]=tempX*tempX+tempY*tempY;
//
__syncthreads();
//
if(tid==0)
{
energyEachBoShu=0.0f;
for(int k=0;k<THREADNUMPERBLK;k++)
{
energyEachBoShu=energyEachBoShu+Mabs[k];
}
dev_energy[bid]= energyEachBoShu;
}
}
__global__ void MatrixSumRow(cufftReal *dev_energy,cufftReal *sum_energy,int nrow,int ncol)
{
int bid = 0;
int row = 0,col = 0;
float sum = 0.0;
bid = blockIdx.x;
row = nrow;
col = ncol;
for(int ii = 0;ii<row;ii++)
{
sum = sum+dev_energy[ii*col+bid];
}
sum_energy[bid] = sum;
}
__global__ void DownSamplingFilter(cufftComplex *dev_fft_sig,cufftComplex *dev_fft_filter,cufftComplex *dev_fft_yk,int FFTN)
{
int bid = 0,tid = 0;
cuComplex Sigk;
cuComplex Hk;
int chanIdx = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (CHANNUM*2);
freqIdx = bid / (CHANNUM*2)*THREADNUMPERBLK+tid;
Sigk.x = dev_fft_sig[chanIdx*FFTN+freqIdx].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+freqIdx].y;
Hk.x = dev_fft_filter[freqIdx].x;
Hk.y = dev_fft_filter[freqIdx].y;
dev_fft_yk[chanIdx*FFTN+freqIdx].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+freqIdx].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
if( bid/(CHANNUM*2)>= 255 && tid == THREADNUMPERBLK-1)
{
Sigk.x = dev_fft_sig[chanIdx*FFTN+FFTN/2].x;
Sigk.y = dev_fft_sig[chanIdx*FFTN+FFTN/2].y;
Hk.x = dev_fft_filter[FFTN/2].x;
Hk.y = dev_fft_filter[FFTN/2].y;
dev_fft_yk[chanIdx*FFTN+FFTN/2].x = Sigk.x*Hk.x-Sigk.y*Hk.y;
dev_fft_yk[chanIdx*FFTN+FFTN/2].y = Sigk.x*Hk.y+Sigk.y*Hk.x;
}
}
__global__ void IFFTNormalize(cufftReal *dev_fft_yout,cufftReal *dev_databuff,int FFTN)
{
int bid = 0,tid = 0;
int chanIdx = 0;
int timeIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
chanIdx = bid % (CHANNUM*2);
timeIdx = bid / (CHANNUM*2)*THREADNUMPERBLK+tid+FFTN/4;
//if(bid < CHANNUM*2 && tid == 0)
//{
// memcpy(dev_databuff+chanIdx*FFTN/DOWNSAMPLE,dev_databuff+chanIdx*FFTN/DOWNSAMPLE+FFTN/DOWNSAMPLE/2,FFTN/DOWNSAMPLE/2*sizeof(float));
//}
if(timeIdx % DOWNSAMPLE == 0)
{
dev_databuff[chanIdx*FFTN/DOWNSAMPLE + FFTN/DOWNSAMPLE/2 + (timeIdx-FFTN/4)/DOWNSAMPLE] = dev_fft_yout[chanIdx*FFTN+timeIdx] / FFTN;
}
}
__global__ void DelayFilterGen(float *h,int m,float theta,float *tau,int *dI)
{
int bid = 0,tid = 0;
int k=0;
float dfs = 0.0;
int DI = 0;
__shared__ float sum;
bid = blockIdx.x;
tid = threadIdx.x;
if(tid == 0)
{
sum = 0.0;
dfs = bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE);
DI = int(bid*d*cos(theta/180.0*PI)/UWC*(FS/DOWNSAMPLE)+0.5);
tau[bid] =dfs-DI;
dI[bid] = DI;
//printf("bid=%d,m=%d,theta = %.3f,dfs = %.3f,DI = %d\n",bid,m,theta,dfs,DI);
}
//
__syncthreads();
k = tid-m;
h[bid*(2*m+1)+tid] = sin(k*1.0*PI-tau[bid]*PI+0.000001)/(k*1.0*PI-tau[bid]*PI+0.000001);
//
__syncthreads();
if(tid == 0)
{
for(int k=0;k<2*m+1;k++)
{
sum = sum + h[bid*(2*m+1)+k];
}
}
__syncthreads();
h[bid*(2*m+1)+tid] = h[bid*(2*m+1)+tid]/sum;
}
__global__ void FineDelayFilter(cufftReal *dev_xin,cufftReal *dev_yout,cufftReal *delayfilter,int m)
{
int bid,tid;
float x=0.0,h=0.0;
float sum = 0.0;
bid = blockIdx.x;
tid = threadIdx.x;
__shared__ float y[2*M+1];
if(tid == 0)
{
for(int ii=0;ii<2*m;ii++)
{
y[ii] = 0.0;
}
}
if(bid-2*m+tid >= 0 && bid-2*m+tid < (FILTER_FRAME/DOWNSAMPLE))
{
x = dev_xin[bid-2*m+tid];
}
if(2*m-tid >=0)
{
h = delayfilter[2*m-tid];
}
y[tid] = x*h;
//if(bid == 24855)
//{
// printf("bid = %d,x=%.8f,h=%.8f,y=%.8f\n",bid,x,h,y);
//}
//
__syncthreads();
if(tid == 0)
{
sum = 0.0;
for(int jj=0;jj<2*m+1;jj++)
{
sum = sum + y[jj];
}
dev_yout[bid] = sum;
//if(bid == 24855)
//{
// printf("bid = %d,dev_yout=%.8f\n",bid,dev_yout[bid]);
//}
}
}
__global__ void Psd(cufftComplex *Xk,cufftReal *Xabs, int N)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
Xabs[freqIdx] = (Xk[freqIdx].x*Xk[freqIdx].x+Xk[freqIdx].y*Xk[freqIdx].y) / N;
}
__global__ void PsdAverage(cufftReal *Xabs,cufftReal *Xk_avg)
{
int bid = 0,tid = 0;
int freqIdx = 0;
float sum = 0.0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
for(int ii = 0;ii<PSD_AVG_NUM;ii++)
{
sum += Xabs[ii*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)+freqIdx] / PSD_AVG_NUM;
}
Xk_avg[freqIdx] = 10*log10((sum+EPS)/1e-12);
}
__global__ void PsdSub(cufftReal *Xk_avg,cufftReal *Xk_smooth,cufftReal *Xk_diff,int idx1,int idx2)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
if(freqIdx >= idx1 && freqIdx <= idx2)
{
Xk_diff[freqIdx] = Xk_avg[freqIdx] - Xk_smooth[freqIdx];
}
else
{
Xk_diff[freqIdx] = 0;
}
}
__global__ void FrequencyDomainFilter(cufftComplex *Xk,float deltaf,float StartFreq,float EndFreq)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
if(freqIdx * deltaf < StartFreq || freqIdx * deltaf > EndFreq)
{
Xk[freqIdx].x = 0;
Xk[freqIdx].y = 0;
}
}
__global__ void SignalSqr(cufftReal *X)
{
int bid = 0,tid = 0;
int sigIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
sigIdx = bid*THREADNUMPERBLK+tid;
X[sigIdx] = X[sigIdx]*X[sigIdx];
}
__global__ void DemonAdd(cufftComplex *Xk,cufftReal *Xabs, int N)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid*THREADNUMPERBLK+tid;
Xabs[freqIdx] += (Xk[freqIdx].x*Xk[freqIdx].x+Xk[freqIdx].y*Xk[freqIdx].y) / N;
}
__global__ void DemonSub(cufftReal *Xk_avg,cufftReal *Xk_smooth,cufftReal *Xk_diff)
{
int bid = 0,tid = 0;
int freqIdx = 0;
bid = blockIdx.x;
tid = threadIdx.x;
freqIdx = bid;
Xk_diff[freqIdx] = Xk_avg[freqIdx] - Xk_smooth[freqIdx];
if(Xk_diff[freqIdx] < 0)
{
Xk_diff[freqIdx] = 0;
}
}
float VectorThetSPF(cufftComplex P_f, cufftComplex Vx_f, cufftComplex Vy_f)
{
float fTheta=0.0;
float sina=-P_f.y*Vy_f.x+P_f.x*Vy_f.y;
float cosa=-P_f.y*Vx_f.x+P_f.x*Vx_f.y;
fTheta=atan2(sina, cosa)*180/PI;
return fTheta;
}
void *ArraySignalProcessing(void *lParam)
{
//int retval = -1;
int BUF_FLAG = 0;
int FrameNum = 0;
//-----------------Downsampling filter-------------------------------
float h[FIRORDER+1] = {0.0};
float fl = 100.0f,fh = 10e3f;
cudaError cudaStatus;
cufftReal *dev_x=NULL;
cufftReal *dev_h=NULL;
cufftComplex *dev_fft_x=NULL;
cufftComplex *dev_fft_h=NULL;
cufftComplex *dev_fft_y=NULL;
cufftReal *dev_y=NULL;
cufftReal *dev_chanbuff=NULL;
float *FilteredDataout = NULL;
float *DownSamplingData = NULL;
cufftHandle Hplan;
cufftHandle Xplan;
cufftHandle Yplan;
//----------------------------------------------------------------
//--------------------------Process Time Test---------------------
cudaEvent_t start1;
cudaEvent_t stop1;
float msecTotal = 0.0f;
//----------------------------------------------------------------
//--------------------------Beamforming and Tracing---------------
int nfl = (int)((2000.0/(FS/DOWNSAMPLE)*NFFT)+0.5);
int nfh = (int)((4000.0/(FS/DOWNSAMPLE)*NFFT)+0.5);
// int FreqbinPerThread = (int)((nfh-nfl+1)/(THREADNUMPERBLK*1.0) + 0.5);
int BlockRowNum = 0;
cufftComplex *dev_fft=NULL;
cufftReal *dev_energy=NULL;
cufftReal *sum_energy=NULL;
cufftComplex *PhiArray = NULL;
cufftHandle Beamplan;
float c[BEAMNUM]={0.0};
cufftComplex *sk=NULL;
float *debugvar = NULL;
int peak[BEAMNUM]={0};
int valley[BEAMNUM]={0};
// bool traced[BEAMNUM] = {false};
// int tracedbeamIdx = -1;
float pretracedtarget[BEAMNUM] = {0.0};
int pretracedtargetIdx[BEAMNUM] = {-1};
int pretracedtargetNum = 0;
int tracedtargetbeam[MAXTRACETARNUM][2];
// float *tracebeam = NULL;
// int beammatrix[5][BEAMNUM] = {0};
int i0,i1,i2;
float r0,r1,r2;
float delta_index = 0;
float tracedtargetangle[3] = {0.0};
cufftReal *dev_delayFilter = NULL;
cufftReal *dev_tau = NULL;
float delayfiltercoff[ARRAYNUM*(2*M+1)] = {0.0};
float delaytau[ARRAYNUM] = {0.0};
cufftReal *dev_delayfilterout = NULL;
cufftReal *dev_delayfilterbuf = NULL;
int *dev_dI = NULL;
int delaydI[ARRAYNUM] = {0};
float *sourcedata = NULL;
float *shiftdata = NULL;
float *delayfilteroutdata = NULL;
cufftReal *dev_delaychandata = NULL;
cufftReal *dev_beamdata = NULL;
float *beamdata = NULL;
//----------------------------------------------------------------
//----------------------------Psd and DEMON-----------------------
cufftReal *dev_tracebeam=NULL;
cufftComplex *dev_tracebeam_spec=NULL;
cufftReal *dev_tracebeam_psd=NULL;
cufftReal *dev_tracebeam_psd_avg = NULL;
cufftComplex *dev_tracebeam_demonspec=NULL;
cufftComplex *dev_tracebeam_demonspec_band=NULL;
cufftReal *dev_tracebeam_demon=NULL;
cufftReal *dev_tracebeam_demon_band_data=NULL;
cufftHandle PSDplan;
cufftHandle DEMONplan;
cufftHandle DEMONBandplan;
float *trace_beam_psd = NULL;
float fDf;
int idx1;
int idx2;
int idxLen;
float *trace_beam_psd_smooth = NULL;
cufftReal *dev_tracebeam_psd_S = NULL;
cufftReal *dev_tracebeam_psd_E = NULL;
float fPsdEVar=0.0;
float *trace_beam_demon = NULL;
float *trace_beam_demon_smooth = NULL;
cufftReal *dev_trace_beam_demon_cut = NULL;
cufftReal *dev_tracebeam_demon_S = NULL;
cufftReal *dev_tracebeam_demon_E = NULL;
float fDemonEVar=0.0;
    //----------------------- Vector-channel processing -----------------------------
cufftReal *dev_vector_p_buf=NULL;
cufftReal *dev_vector_x_buf=NULL;
cufftReal *dev_vector_y_buf=NULL;
cufftComplex *dev_vector_p_spec=NULL;
cufftComplex *dev_vector_x_spec=NULL;
cufftComplex *dev_vector_y_spec=NULL;
cufftReal *dev_vector_p_psd =NULL;
cufftReal *dev_vector_psd_avg=NULL;
float *vector_p_psd = NULL;
float *vector_p_psd_smooth = NULL;
cufftReal *dev_vector_p_psd_S = NULL;
cufftReal *dev_vector_p_psd_E = NULL;
float fVectorPsdEVar=0.0;
//----------------------------------------------------------------
if(DownSamplingDataBufA != NULL)
{
free(DownSamplingDataBufA);
DownSamplingDataBufA = NULL;
}
DownSamplingDataBufA = (float *)malloc(FILTER_FRAME*CHANNUM*2*sizeof(float));
memset(DownSamplingDataBufA,0,FILTER_FRAME*CHANNUM*2*sizeof(float));
if(DownSamplingDataBufB != NULL)
{
free(DownSamplingDataBufB);
DownSamplingDataBufB = NULL;
}
DownSamplingDataBufB = (float *)malloc(FILTER_FRAME*CHANNUM*2*sizeof(float));
memset(DownSamplingDataBufB,0,FILTER_FRAME*CHANNUM*2*sizeof(float));
//-------------------------------------------------------------
FilteredDataout = (float *)malloc(FILTER_FRAME/DOWNSAMPLE*sizeof(float));
memset(FilteredDataout,0,FILTER_FRAME/DOWNSAMPLE*sizeof(float));
DownSamplingData = (float *)malloc(FRAMELEN*sizeof(float));
memset(DownSamplingData,0,FRAMELEN*sizeof(float));
cufftComplex *Xk_real = NULL;
Xk_real = (cufftComplex *)malloc(FILTER_FRAME*sizeof(cufftComplex));
memset(Xk_real,0,FILTER_FRAME*sizeof(cufftComplex));
FILE *fp = NULL;
fp = fopen("BeamEng.bin","wb");
FILE *fplog = NULL;
fplog = fopen("ProcessLog.txt","w");
FILE *fpbeam = NULL;
fpbeam = fopen("Beam.bin","wb");
// int retvalprint = 0;
//------------------------------------------------------------
cufftPlan1d(&Hplan, FILTER_FRAME, CUFFT_R2C, 1);
cufftPlan1d(&Xplan, FILTER_FRAME, CUFFT_R2C, 1);
cufftPlan1d(&Yplan, FILTER_FRAME, CUFFT_C2R, 1);
cufftPlan1d(&PSDplan, PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), CUFFT_R2C, 1);
cufftPlan1d(&DEMONplan, PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), CUFFT_R2C, 1);
cufftPlan1d(&DEMONBandplan, PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), CUFFT_C2R, 1);
cudaStatus = cudaMalloc((void **)&dev_x, sizeof(cufftReal)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != cudaSuccess)
{
printf (" dev_x cudaMalloc Error! \n ");
}
    cudaMemset(dev_x,0,sizeof(cufftReal)*FILTER_FRAME*CHANNUM*2);  // memset the device buffer itself, not the host pointer's address
cudaStatus = cudaMalloc((void **)&dev_h, sizeof(cufftReal)*FILTER_FRAME);
if (cudaStatus != cudaSuccess)
{
printf ("dev_h cudaMalloc Error! \n ");
}
    cudaMemset(dev_h,0,sizeof(cufftReal)*FILTER_FRAME);
cudaStatus = cudaMalloc((void **)&dev_y, sizeof(cufftReal)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_y cudaMalloc Error! \n ");
}
    cudaMemset(dev_y,0,sizeof(cufftReal)*FILTER_FRAME*CHANNUM*2);
cudaStatus = cudaMalloc((void **)&dev_fft_x,sizeof(cufftComplex)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft_x cudaMalloc Error! \n ");
}
    cudaMemset(dev_fft_x,0,sizeof(cufftComplex)*FILTER_FRAME*CHANNUM*2);
cudaStatus = cudaMalloc((void **)&dev_fft_h,sizeof(cufftComplex)*FILTER_FRAME);
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft_h cudaMalloc Error! \n ");
}
    cudaMemset(dev_fft_h,0,sizeof(cufftComplex)*FILTER_FRAME);
cudaStatus = cudaMalloc((void **)&dev_fft_y,sizeof(cufftComplex)*FILTER_FRAME*CHANNUM*2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft_y cudaMalloc Error! \n ");
}
    cudaMemset(dev_fft_y,0,sizeof(cufftComplex)*FILTER_FRAME*CHANNUM*2);
cudaStatus = cudaMalloc((void **)&dev_chanbuff,sizeof(cufftReal)*FILTER_FRAME/DOWNSAMPLE*CHANNUM*2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_chanbuff cudaMalloc Error! \n ");
}
    cudaMemset(dev_chanbuff,0,sizeof(cufftReal)*FILTER_FRAME/DOWNSAMPLE*CHANNUM*2);
fir1(FIRORDER,3,fl,fh,FS,5,h);
    cudaMemcpy(dev_h,h,sizeof(cufftReal)*(FIRORDER+1),cudaMemcpyHostToDevice);  // fir1 fills FIRORDER+1 taps
cufftExecR2C(Hplan,(cufftReal *)&dev_h[0],(cufftComplex *)&dev_fft_h[0]);
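    // The band-pass FIR is applied by frequency-domain convolution: H(f) is
    // precomputed here once; each frame, DownSamplingFilter multiplies every
    // channel spectrum by H(f), and IFFTNormalize rescales the inverse FFT
    // and writes the decimated (by DOWNSAMPLE) samples into dev_chanbuff.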
BlockRowNum = NFFT/2/THREADNUMPERBLK;
cudaStatus = cudaMalloc((void**)&dev_energy,BEAMNUM*BlockRowNum*sizeof(cufftReal));
if (cudaStatus != cudaSuccess)
{
printf ("dev_energy cudaMalloc Error! \n ");
}
    cudaMemset(dev_energy,0,BEAMNUM*BlockRowNum*sizeof(cufftReal));
cudaStatus = cudaMalloc((void**)&sum_energy,BEAMNUM*sizeof(cufftReal));
if (cudaStatus != cudaSuccess)
{
printf ("sum_energy cudaMalloc Error! \n ");
}
    cudaMemset(sum_energy,0,BEAMNUM*sizeof(cufftReal));
cudaStatus = cudaMalloc((void**)&PhiArray,ARRAYNUM*BEAMNUM*(NFFT/2)*sizeof(cufftComplex));
if (cudaStatus != cudaSuccess)
{
printf ("PhiArray cudaMalloc Error! \n ");
}
    cudaMemset(PhiArray,0,ARRAYNUM*BEAMNUM*(NFFT/2)*sizeof(cufftComplex));
cudaStatus = cudaMalloc((void **)&dev_fft,sizeof(cufftComplex)*(NFFT/2+1)*ARRAYNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_fft cudaMalloc Error! \n ");
}
    cudaMemset(dev_fft,0,sizeof(cufftComplex)*(NFFT/2+1)*ARRAYNUM);
cufftPlan1d(&Beamplan,NFFT,CUFFT_R2C, 1);
PhiShiftFactorGen<<<NFFT/2,BEAMNUM>>>(PhiArray);
sk = (cufftComplex *)malloc(sizeof(cufftComplex)*(NFFT/2+1)*ARRAYNUM);
memset(sk,0,sizeof(cufftComplex)*(NFFT/2+1)*ARRAYNUM);
debugvar = (float *)malloc(sizeof(float)*BEAMNUM*BlockRowNum);
memset(debugvar,0, sizeof(float)*BEAMNUM*BlockRowNum);
for(int ii = 0;ii<MAXTRACETARNUM;ii++)
{
tracedtargetbeam[ii][0] = -1;
tracedtargetbeam[ii][1] = -1;
tracedtargetangle[ii] = -1.0f;
}
cudaStatus = cudaMalloc((void **)&dev_delayFilter,sizeof(cufftReal)*(2*M+1)*ARRAYNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_delayFilter cudaMalloc Error! \n ");
}
    cudaMemset(dev_delayFilter,0,sizeof(cufftReal)*(2*M+1)*ARRAYNUM);
cudaStatus = cudaMalloc((void **)&dev_tau,sizeof(cufftReal)*ARRAYNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tau cudaMalloc Error! \n ");
}
    cudaMemset(dev_tau,0,sizeof(cufftReal)*ARRAYNUM);
cudaStatus = cudaMalloc((void **)&dev_delayfilterout,sizeof(cufftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE+2*M));
if (cudaStatus != cudaSuccess)
{
printf ("dev_delayfilterout cudaMalloc Error! \n ");
}
    cudaMemset(dev_delayfilterout,0,sizeof(cufftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE+2*M));
cudaStatus = cudaMalloc((void **)&dev_delayfilterbuf,sizeof(cufftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE));
if (cudaStatus != cudaSuccess)
{
printf ("dev_delayfilterbuf cudaMalloc Error! \n ");
}
    cudaMemset(dev_delayfilterbuf,0,sizeof(cufftReal)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE));
cudaStatus = cudaMalloc((void **)&dev_dI,sizeof(int)*ARRAYNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_dI cudaMalloc Error! \n ");
}
    cudaMemset(dev_dI,0,sizeof(int)*ARRAYNUM);
cudaStatus = cudaMalloc((void **)&dev_delaychandata,sizeof(int)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_delaychandata cudaMalloc Error! \n ");
}
    cudaMemset(dev_delaychandata,0,sizeof(int)*ARRAYNUM*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_beamdata,sizeof(int)*MAXTRACETARNUM*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_beamdata cudaMalloc Error! \n ");
}
    cudaMemset(dev_beamdata,0,sizeof(int)*MAXTRACETARNUM*(FILTER_FRAME/DOWNSAMPLE/2));
sourcedata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
memset(sourcedata,0,(FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
shiftdata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
memset(shiftdata,0,(FILTER_FRAME/DOWNSAMPLE)*sizeof(float));
delayfilteroutdata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE+2*M)*sizeof(float));
memset(delayfilteroutdata,0,(FILTER_FRAME/DOWNSAMPLE+2*M)*sizeof(float));
beamdata = (float *)malloc((FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
memset(beamdata,0,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
cudaStatus = cudaMalloc((void **)&dev_tracebeam,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_spec,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_spec cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_spec,0,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_psd,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM*PSD_AVG_NUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_psd cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_psd,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM*PSD_AVG_NUM);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_psd_avg,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_psd_avg cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_psd_avg,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_demonspec,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_demonspec cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_demonspec,0,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_demonspec_band,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_demonspec_band cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_demonspec_band,0,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_tracebeam_demon,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_demon cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_demon,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*MAXTRACETARNUM);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_demon_band_data,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_demon_band_data cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_demon_band_data,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
trace_beam_psd = (float *)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(trace_beam_psd,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
trace_beam_psd_smooth = (float *)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(trace_beam_psd_smooth,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
cudaStatus = cudaMalloc((void **)&dev_tracebeam_psd_S,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_psd_S cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_psd_S,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_psd_E,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_psd_E cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_psd_E,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
trace_beam_demon = (float *)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
memset(trace_beam_demon,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float));
trace_beam_demon_smooth = (float *)malloc(DEM_RST_LEN*sizeof(float));
memset(trace_beam_demon_smooth,0,DEM_RST_LEN*sizeof(float));
cudaStatus = cudaMalloc((void **)&dev_tracebeam_demon_S,sizeof(cufftReal)*DEM_RST_LEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_demon_S cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_demon_S,0,sizeof(cufftReal)*DEM_RST_LEN);
cudaStatus = cudaMalloc((void **)&dev_tracebeam_demon_E,sizeof(cufftReal)*DEM_RST_LEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_tracebeam_demon_E cudaMalloc Error! \n ");
}
    cudaMemset(dev_tracebeam_demon_E,0,sizeof(cufftReal)*DEM_RST_LEN);
cudaStatus = cudaMalloc((void **)&dev_trace_beam_demon_cut,sizeof(cufftReal)*DEM_RST_LEN);
if (cudaStatus != cudaSuccess)
{
printf ("dev_trace_beam_demon_cut cudaMalloc Error! \n ");
}
    cudaMemset(dev_trace_beam_demon_cut,0,sizeof(cufftReal)*DEM_RST_LEN);
    //------------------------------ Vector-channel variables ------------------------------------------
cudaStatus = cudaMalloc((void **)&dev_vector_p_buf,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_p_buf cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_p_buf,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_vector_x_buf,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_x_buf cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_x_buf,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_vector_y_buf,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_y_buf cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_y_buf,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_vector_p_psd,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM);
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_p_psd cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_p_psd,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM);
cudaStatus = cudaMalloc((void **)&dev_vector_p_spec,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_p_spec cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_p_spec,0,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_vector_x_spec,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_x_spec cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_x_spec,0,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_vector_y_spec,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_y_spec cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_y_spec,0,sizeof(cufftComplex)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaStatus = cudaMalloc((void **)&dev_vector_psd_avg,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_psd_avg cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_psd_avg,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
vector_p_psd = (float*)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(vector_p_psd,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
vector_p_psd_smooth = (float*)malloc(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
memset(vector_p_psd_smooth,0,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float));
cudaStatus = cudaMalloc((void **)&dev_vector_p_psd_S,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_p_psd_S cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_p_psd_S,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
cudaStatus = cudaMalloc((void **)&dev_vector_p_psd_E,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
if (cudaStatus != cudaSuccess)
{
printf ("dev_vector_p_psd_E cudaMalloc Error! \n ");
}
    cudaMemset(dev_vector_p_psd_E,0,sizeof(cufftReal)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
//--------------------------------------------------------------------------------------------
fDf=FS/DOWNSAMPLE*1.0/(PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
idx1=(int)(10/fDf);
idx2=(int)(5000/fDf);
idxLen=idx2-idx1+1;
DemFreqBandNum = 4;
DemStartFreq[0] = 2000.0;
DemEndFreq[0] = 4000.0;
DemStartFreq[1] = 4000.0;
DemEndFreq[1] = 6000.0;
DemStartFreq[2] = 6000.0;
DemEndFreq[2] = 8000.0;
DemStartFreq[3] = 8000.0;
DemEndFreq[3] = 10000.0;
    // Generate the orthonormal basis vectors used by the sliding-window orthogonal-polynomial smoother
CalSmoothPara(&fSmoothA[0][0]);
//-----------------------------------------------------------------------------------------
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
while (1)
{
pthread_mutex_lock(&count_lock_FrameDataReady);
while (count_FrameDataReady == 0)
{
pthread_cond_wait(&cond_FrameDataReady,&count_lock_FrameDataReady);
}
count_FrameDataReady = count_FrameDataReady -1;
pthread_mutex_unlock(&count_lock_FrameDataReady);
FrameNum++;
if(BUF_FLAG == 0)
{
for(int ii=0;ii<CHANNUM*2;ii++)
{
memmove(DownSamplingDataBufA+ii*FILTER_FRAME,DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,FRAMELEN*sizeof(float));
memcpy(DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,ChannDataBufA+ii*FRAMELEN,FRAMELEN*sizeof(float));
}
cudaMemcpy(dev_x,DownSamplingDataBufA,sizeof(cufftReal)*FILTER_FRAME*CHANNUM*2,cudaMemcpyHostToDevice);
BUF_FLAG = 1;
}
else
{
for(int ii=0;ii<CHANNUM*2;ii++)
{
memmove(DownSamplingDataBufA+ii*FILTER_FRAME,DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,FRAMELEN*sizeof(float));
memcpy(DownSamplingDataBufA+ii*FILTER_FRAME+FRAMELEN,ChannDataBufB+ii*FRAMELEN,FRAMELEN*sizeof(float));
}
cudaMemcpy(dev_x,DownSamplingDataBufA,sizeof(cufftReal)*FILTER_FRAME*CHANNUM*2,cudaMemcpyHostToDevice);
BUF_FLAG = 0;
}
cudaEventRecord(start1,NULL);
        //-----------------------------------------(1) Signal filtering and downsampling---------------------------------------------------
for(int jj=0;jj<CHANNUM*2;jj++)
{
cufftExecR2C(Xplan,(cufftReal *)&dev_x[jj*FILTER_FRAME],(cufftComplex *)&dev_fft_x[jj*FILTER_FRAME]);
}
DownSamplingFilter<<<CHANNUM*2*(FILTER_FRAME/2/THREADNUMPERBLK),THREADNUMPERBLK>>>(dev_fft_x,dev_fft_h,dev_fft_y,FILTER_FRAME);
for(int jj=0;jj<CHANNUM*2;jj++)
{
cufftExecC2R(Yplan,(cufftComplex *)&dev_fft_y[jj*FILTER_FRAME],(cufftReal *)&dev_y[jj*FILTER_FRAME]);
cudaMemcpy(dev_chanbuff+jj*FILTER_FRAME/DOWNSAMPLE,dev_chanbuff+jj*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,FILTER_FRAME/DOWNSAMPLE/2*sizeof(float),cudaMemcpyDeviceToDevice);
}
IFFTNormalize<<<CHANNUM*2*(FILTER_FRAME/2/THREADNUMPERBLK),THREADNUMPERBLK>>>(dev_y,dev_chanbuff,FILTER_FRAME);
        //-----------------------------------------(1) Signal filtering and downsampling end---------------------------------------------------
        //-----------------------------------------(2) Frequency-domain beamforming---------------------------------------------------
for (int ii=0;ii<ARRAYNUM;ii++)
{
cufftExecR2C(Beamplan,(cufftReal *)&dev_chanbuff[ii*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2],(cufftComplex *)&dev_fft[ii*(NFFT/2+1)]);
}
FD_Beamform<<<BlockRowNum*BEAMNUM,THREADNUMPERBLK>>>(dev_fft,dev_energy,PhiArray,nfl,nfh);
MatrixSumRow<<<BEAMNUM,1>>>(dev_energy,sum_energy,BlockRowNum,BEAMNUM);
printf("success 0!\n");
cudaMemcpy(c,sum_energy,BEAMNUM*sizeof(float),cudaMemcpyDeviceToHost);
// fwrite(c,sizeof(float),BEAMNUM,fp);
        //-----------------------------------------(2) Frequency-domain beamforming end-----------------------------------------------
        //-----------------------------------------(3) Beam energy detection------------------------------------------
//
memset(peak,0,BEAMNUM*sizeof(int));
memset(valley,0,BEAMNUM*sizeof(int));
findpeak(c,peak,BEAMNUM);
findvalley(c,valley,BEAMNUM);
bool targetexist = false;
memset(pretracedtarget,0,sizeof(float)*BEAMNUM);
memset(pretracedtargetIdx,0,sizeof(int)*BEAMNUM);
pretracedtargetNum = 0;
for(int kk=0;kk<BEAMNUM;kk++)
{
if(peak[kk] == 1)
{
//
int jj=0;
for(jj=0;jj<MAXTRACETARNUM;jj++)
{
//
if(abs(tracedtargetbeam[jj][0]-kk)<6 && tracedtargetbeam[jj][0]>0)
{
break;
}
}
if(jj==MAXTRACETARNUM) //
{
targetexist = peakdetection(kk,c,valley,2.0);
}
else //
{
targetexist = peakdetection(kk,c,valley,1.2);
}
if(targetexist)
{
pretracedtarget[pretracedtargetNum] = c[kk];
pretracedtargetIdx[pretracedtargetNum] = kk;
pretracedtargetNum++;
}
}
}
rbub(pretracedtarget,pretracedtargetIdx,BEAMNUM);
        if(FrameNum == 115)
        {
            FrameNum = FrameNum;    // no-op kept as a debugger breakpoint anchor
        }
for(int kk=0;kk<pretracedtargetNum;kk++)
{
int jj=0;
for(jj=0;jj<MAXTRACETARNUM;jj++)
{
//
if(abs(tracedtargetbeam[jj][0]-pretracedtargetIdx[kk])<6 && tracedtargetbeam[jj][0]>0)
{
tracedtargetbeam[jj][0] = pretracedtargetIdx[kk];
tracedtargetbeam[jj][1] = FrameNum;
break;
}
}
if(jj==MAXTRACETARNUM) //
{
int ii = 0;
for(ii=0;ii<MAXTRACETARNUM;ii++)
{
//
if(tracedtargetbeam[ii][0] < 0)
{
break;
}
}
if(ii < MAXTRACETARNUM) //
{
tracedtargetbeam[ii][0] = pretracedtargetIdx[kk];
tracedtargetbeam[ii][1] = FrameNum;
}
}
}
//
for(int jj=0;jj<MAXTRACETARNUM;jj++)
{
if(tracedtargetbeam[jj][0] >0 && FrameNum - tracedtargetbeam[jj][1] >= 5)
{
tracedtargetbeam[jj][0] = -1;
tracedtargetbeam[jj][1] = -1;
tracedtargetangle[jj] = -1.0f;
}
}
        //-----------------------------------------(3) Beam energy detection end-------------------------------------
        //-----------------------------------------(4) Beam tracking and tracked-beam processing ------------------------------
for(int jj = 0;jj<MAXTRACETARNUM;jj++)
{
            if(tracedtargetbeam[jj][0] >0) // a target is currently tracked in this slot
{
                // Beam interpolation
i0 = tracedtargetbeam[jj][0]-1;
i1 = tracedtargetbeam[jj][0];
i2 = tracedtargetbeam[jj][0]+1;
r0 = c[i0];
r1 = c[i1];
r2 = c[i2];
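                // Three-point parabolic interpolation around the peak beam: the
                // fractional offset of the fitted maximum is
                //   delta = (r2 - r0) / (4*r1 - 2*r0 - 2*r2),
                // assuming the tracked beam index is interior (0 < i1 < BEAMNUM-1).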
delta_index = (r2-r0)/(4*r1-2*r0-2*r2);
tracedtargetangle[jj] = (i1+delta_index)*180.0/BEAMNUM;
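                // Time-delay beam steering at the interpolated bearing: each element's
                // delay is split into an integer sample shift dI plus a fractional part
                // tau, the latter realized by a (2*M+1)-tap interpolation filter.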
DelayFilterGen<<<ARRAYNUM,2*M+1>>>(dev_delayFilter,M,tracedtargetangle[jj],dev_tau,dev_dI);
//DelayFilterGen<<<ARRAYNUM,2*M+1>>>(dev_delayFilter,M,60.292690,dev_tau,dev_dI);
cudaMemcpy(delayfiltercoff,dev_delayFilter,sizeof(cufftReal)*ARRAYNUM*(2*M+1),cudaMemcpyDeviceToHost);
cudaMemcpy(delaytau,dev_tau,sizeof(cufftReal)*ARRAYNUM,cudaMemcpyDeviceToHost);
cudaMemcpy(delaydI,dev_dI,sizeof(int)*ARRAYNUM,cudaMemcpyDeviceToHost);
for(int kk = 0;kk<ARRAYNUM;kk++)
{
if(delaydI[kk] >= 0)
{
cudaMemcpy(dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE)+delaydI[kk],dev_chanbuff+kk*(FILTER_FRAME/DOWNSAMPLE),sizeof(cufftReal)*((FILTER_FRAME/DOWNSAMPLE)-delaydI[kk]),cudaMemcpyDeviceToDevice);
}
else
{
cudaMemcpy(dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE),dev_chanbuff+kk*(FILTER_FRAME/DOWNSAMPLE)-delaydI[kk],sizeof(cufftReal)*((FILTER_FRAME/DOWNSAMPLE)+delaydI[kk]),cudaMemcpyDeviceToDevice);
}
//cudaMemcpy(sourcedata,dev_chanbuff+kk*(FILTER_FRAME/DOWNSAMPLE),(FILTER_FRAME/DOWNSAMPLE)*sizeof(float),cudaMemcpyDeviceToHost);
//cudaMemcpy(shiftdata,dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE),(FILTER_FRAME/DOWNSAMPLE)*sizeof(float),cudaMemcpyDeviceToHost);
if(fabs(delaytau[kk]) > 0.0001)
{
FineDelayFilter<<<(FILTER_FRAME/DOWNSAMPLE+2*M),2*M+1>>>((cufftReal *)&dev_delayfilterbuf[kk*FILTER_FRAME/DOWNSAMPLE],(cufftReal *)&dev_delayfilterout[kk*(FILTER_FRAME/DOWNSAMPLE+2*M)],(cufftReal *)&dev_delayFilter[kk*(2*M+1)],M);
}
else
{
cudaMemcpy(dev_delayfilterout+kk*(FILTER_FRAME/DOWNSAMPLE+2*M)+M,dev_delayfilterbuf+kk*(FILTER_FRAME/DOWNSAMPLE),sizeof(cufftReal)*(FILTER_FRAME/DOWNSAMPLE),cudaMemcpyDeviceToDevice);
}
cudaMemcpy(dev_delaychandata+kk*(FILTER_FRAME/DOWNSAMPLE/2),dev_delayfilterout+kk*(FILTER_FRAME/DOWNSAMPLE+2*M)+M+FILTER_FRAME/DOWNSAMPLE/4,sizeof(cufftReal)*FILTER_FRAME/DOWNSAMPLE/2,cudaMemcpyDeviceToDevice);
}
MatrixSumRow<<<FILTER_FRAME/DOWNSAMPLE/2,1>>>(dev_delaychandata,dev_beamdata+jj*FILTER_FRAME/DOWNSAMPLE/2,ARRAYNUM,FILTER_FRAME/DOWNSAMPLE/2);
cudaMemcpy(beamdata,dev_beamdata+jj*FILTER_FRAME/DOWNSAMPLE/2,FILTER_FRAME/DOWNSAMPLE/2*sizeof(float),cudaMemcpyDeviceToHost);
//fwrite(beamdata,sizeof(float),FILTER_FRAME/DOWNSAMPLE/2,fpbeam);
printf("success 1!\n");
                // Power spectral density (PSD)
cudaMemcpy(dev_tracebeam+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)+(FILTER_FRAME/DOWNSAMPLE/2),(FILTER_FRAME/DOWNSAMPLE/2)*(PSD_LEN-1)*sizeof(cufftReal),cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_tracebeam+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_beamdata+jj*FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(cufftReal),cudaMemcpyDeviceToDevice);
                // Shift the PSD averaging buffer
cudaMemcpy(dev_tracebeam_psd+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM,dev_tracebeam_psd+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*PSD_AVG_NUM+PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(cufftReal),cudaMemcpyDeviceToDevice);
cufftExecR2C(PSDplan,(cufftReal *)&dev_tracebeam[jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)],(cufftComplex *)&dev_tracebeam_spec[jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)]);
Psd<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_tracebeam_spec+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam_psd+jj*PSD_LEN*PSD_AVG_NUM*(FILTER_FRAME/DOWNSAMPLE/2)+(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
                // PSD averaging
PsdAverage<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_tracebeam_psd+jj*PSD_LEN*PSD_AVG_NUM*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam_psd_avg+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
cudaMemcpy(trace_beam_psd,dev_tracebeam_psd_avg+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),cudaMemcpyDeviceToHost);
//fwrite(trace_beam_psd,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
                // PSD smoothing
MySmooth(trace_beam_psd+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 3, SMOOTH_N, 5, trace_beam_psd_smooth+idx1);
MySmooth(trace_beam_psd_smooth+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 2, SMOOTH_N, 5, trace_beam_psd_smooth+idx1);
cudaMemcpy(dev_tracebeam_psd_S,trace_beam_psd_smooth,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),cudaMemcpyHostToDevice);
                // Compute the difference (whitened) spectrum
PsdSub<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_tracebeam_psd_avg+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),dev_tracebeam_psd_S,dev_tracebeam_psd_E,idx1,idx2);
cudaMemcpy(trace_beam_psd_smooth,dev_tracebeam_psd_E,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),cudaMemcpyDeviceToHost);
//fwrite(trace_beam_psd_smooth,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
                // Compute the RMS of the difference spectrum
fPsdEVar=0.0;
for (int ii=idx1;ii<=idx2;ii++)
{
fPsdEVar+=trace_beam_psd_smooth[ii]*trace_beam_psd_smooth[ii];
}
fPsdEVar/=(float)(idx2-idx1+1);
fPsdEVar=sqrtf(fPsdEVar);
printf("success 2!\n");
                // DEMON spectrum
for(int ii =0;ii<DemFreqBandNum;ii++)
{
cudaMemcpy(dev_tracebeam_demonspec_band,dev_tracebeam_spec+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(cufftComplex),cudaMemcpyDeviceToDevice);
                    // Band-pass filtering in the frequency domain
FrequencyDomainFilter<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_tracebeam_demonspec_band,fDf,DemStartFreq[ii],DemEndFreq[ii]);
cufftExecC2R(DEMONBandplan,dev_tracebeam_demonspec_band,dev_tracebeam_demon_band_data);
SignalSqr<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_tracebeam_demon_band_data);
cufftExecR2C(DEMONplan,dev_tracebeam_demon_band_data,dev_tracebeam_demonspec);
DemonAdd<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_tracebeam_demonspec,dev_tracebeam_demon+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2), PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2));
}
cudaMemcpy(trace_beam_demon,dev_tracebeam_demon+jj*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),DEM_RST_LEN*sizeof(float),cudaMemcpyDeviceToHost);
                // Flatten the first six bins by copying the value at bin 6
for(int ii=0;ii<6;ii++)
{
trace_beam_demon[ii] = trace_beam_demon[6];
}
//fwrite(trace_beam_demon,sizeof(float),DEM_RST_LEN,fpbeam);
                // DEMON spectrum smoothing
MySmooth(trace_beam_demon, DEM_RST_LEN, &fSmoothA[0][0], SMOOTH_N, 3, SMOOTH_N, 5, trace_beam_demon_smooth);
MySmooth(trace_beam_demon_smooth, DEM_RST_LEN, &fSmoothA[0][0], SMOOTH_N, 2, SMOOTH_N, 5, trace_beam_demon_smooth);
//fwrite(trace_beam_demon_smooth,sizeof(float),DEM_RST_LEN,fpbeam);
cudaMemcpy(dev_trace_beam_demon_cut,trace_beam_demon,DEM_RST_LEN*sizeof(cufftReal),cudaMemcpyHostToDevice);
cudaMemcpy(dev_tracebeam_demon_S,trace_beam_demon_smooth,DEM_RST_LEN*sizeof(cufftReal),cudaMemcpyHostToDevice);
//fwrite(trace_beam_demon_smooth,sizeof(float),DEM_RST_LEN,fpbeam)
DemonSub<<<DEM_RST_LEN,1>>>(dev_trace_beam_demon_cut,dev_tracebeam_demon_S,dev_tracebeam_demon_E);
cudaMemcpy(trace_beam_demon_smooth,dev_tracebeam_demon_E,DEM_RST_LEN*sizeof(cufftReal),cudaMemcpyDeviceToHost);
//fwrite(trace_beam_demon_smooth,sizeof(float),DEM_RST_LEN,fpbeam);
fDemonEVar=0.0;
for (int ii=0;ii<DEM_RST_LEN;ii++)
{
fDemonEVar+=trace_beam_demon_smooth[ii]*trace_beam_demon_smooth[ii];
}
fDemonEVar/=(float)(DEM_RST_LEN);
fDemonEVar=sqrtf(fDemonEVar);
printf("success 3!\n");
                // Line-spectrum (tonal) extraction
int ll = 0;
if(FrameNum >= 8)
{
nPlineNum = 0;
memset(fPlineInfo,0,MAXTRACETARNUM*LINE_NUM*4*sizeof(float));
for(int ii=idx1;ii<=idx2;ii++)
{
if(trace_beam_psd_smooth[ii]>4.0*fPsdEVar && trace_beam_psd_smooth[ii]>trace_beam_psd_smooth[ii-1] && trace_beam_psd_smooth[ii]>trace_beam_psd_smooth[ii+1] )
{
if(nPlineNum<LINE_NUM)
{
                                // Merge nearby line-spectrum detections
for(ll = 0;ll<nPlineNum;ll++)
{
if(fabs(fPlineInfo[jj][ll][1]-(float)ii*fDf)<1.0)
{
break;
}
}
if(ll == nPlineNum)
{
                                    fPlineInfo[jj][nPlineNum][0] = trace_beam_psd_smooth[ii];    // SNR above the smoothed background
                                    fPlineInfo[jj][nPlineNum][1] = (float)ii*fDf;                // line frequency in Hz
fPlineInfo[jj][nPlineNum][2] = trace_beam_psd[ii];
fPlineInfo[jj][nPlineNum][3] = tracedtargetangle[jj];
if(fPlineInfo[jj][nPlineNum][3] > 180.0)
{
fPlineInfo[jj][nPlineNum][3] -= 360.0;
}
else if(fPlineInfo[jj][nPlineNum][3] < -180.0)
{
fPlineInfo[jj][nPlineNum][3] += 360.0;
}
nPlineNum++;
}
else if(trace_beam_psd_smooth[ii] > fPlineInfo[jj][ll][0])
{
fPlineInfo[jj][ll][0] = trace_beam_psd_smooth[ii];
fPlineInfo[jj][ll][1] = (float)ii*fDf;
fPlineInfo[jj][ll][2] = trace_beam_psd[ii];
                                    fPlineInfo[jj][ll][3] = tracedtargetangle[jj];
if(fPlineInfo[jj][ll][3] > 180.0)
{
fPlineInfo[jj][ll][3] -= 360.0;
}
else if(fPlineInfo[jj][ll][3] < -180.0)
{
fPlineInfo[jj][ll][3] += 360.0;
}
}
}
}
}
nDlineNum = 0;
memset(fDlineInfo,0,MAXTRACETARNUM*LINE_NUM*2*sizeof(float));
for(int ii = 4;ii<DEM_RST_LEN-1;ii++)
{
if(trace_beam_demon_smooth[ii]>6.0*fDemonEVar && trace_beam_demon_smooth[ii]>trace_beam_demon_smooth[ii-1] && trace_beam_demon_smooth[ii]>trace_beam_demon_smooth[ii+1])
{
if(nDlineNum<LINE_NUM)
{
                                fDlineInfo[jj][nDlineNum][0]=trace_beam_demon_smooth[ii];    // amplitude at the detected DEMON bin
fDlineInfo[jj][nDlineNum][1]=ii*fDf;
nDlineNum++;
}
}
}
//for(int ii = 0;ii<nDlineNum;ii++)
//{
// printf("%d:%.3f\n",ii+1,fDlineInfo[jj][ii][1]);
//}
}
}
}
printf("success 4!\n");
        //-----------------------------------------(4) Beam tracking and tracked-beam processing end ------------------------------------------
        //-----------------------------------------(5) Vector-sensor channel processing----------------------------------------------------
cudaMemcpy(dev_vector_p_buf,dev_vector_p_buf+(FILTER_FRAME/DOWNSAMPLE/2),(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vector_p_buf+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_chanbuff+VECTOR_P_IDX*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vector_x_buf,dev_vector_x_buf+(FILTER_FRAME/DOWNSAMPLE/2),(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vector_x_buf+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_chanbuff+VECTOR_X_IDX*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vector_y_buf,dev_vector_y_buf+(FILTER_FRAME/DOWNSAMPLE/2),(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),cudaMemcpyDeviceToDevice);
cudaMemcpy(dev_vector_y_buf+(PSD_LEN-1)*(FILTER_FRAME/DOWNSAMPLE/2),dev_chanbuff+VECTOR_Y_IDX*FILTER_FRAME/DOWNSAMPLE+FILTER_FRAME/DOWNSAMPLE/2,(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(float),cudaMemcpyDeviceToDevice);
cufftExecR2C(PSDplan,(cufftReal *)&dev_vector_p_buf[0],(cufftComplex *)&dev_vector_p_spec[0]);
cufftExecR2C(PSDplan,(cufftReal *)&dev_vector_x_buf[0],(cufftComplex *)&dev_vector_x_spec[0]);
cufftExecR2C(PSDplan,(cufftReal *)&dev_vector_y_buf[0],(cufftComplex *)&dev_vector_y_spec[0]);
cudaMemcpy(dev_vector_p_psd,dev_vector_p_psd+PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)*sizeof(cufftReal),cudaMemcpyDeviceToDevice);
Psd<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_vector_p_spec,dev_vector_p_psd+(PSD_AVG_NUM-1)*PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2);
PsdAverage<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_vector_p_psd,dev_vector_psd_avg);
cudaMemcpy(vector_p_psd,dev_vector_psd_avg,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),cudaMemcpyDeviceToHost);
fwrite(vector_p_psd,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
MySmooth(vector_p_psd+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 3, SMOOTH_N, 5, vector_p_psd_smooth+idx1);
MySmooth(vector_p_psd_smooth+idx1, idxLen, &fSmoothA[0][0], SMOOTH_N, 2, SMOOTH_N, 5, vector_p_psd_smooth+idx1);
cudaMemcpy(dev_vector_p_psd_S,vector_p_psd_smooth,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),cudaMemcpyHostToDevice);
PsdSub<<<PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2/THREADNUMPERBLK,THREADNUMPERBLK>>>(dev_vector_psd_avg,dev_vector_p_psd_S,dev_vector_p_psd_E,idx1,idx2);
cudaMemcpy(vector_p_psd_smooth,dev_vector_p_psd_E,PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2*sizeof(float),cudaMemcpyDeviceToHost);
//fwrite(vector_p_psd_smooth,sizeof(float),PSD_LEN*(FILTER_FRAME/DOWNSAMPLE/2)/2,fpbeam);
fVectorPsdEVar=0.0;
for (int ii=idx1;ii<=idx2;ii++)
{
fVectorPsdEVar+=vector_p_psd_smooth[ii]*vector_p_psd_smooth[ii];
}
fVectorPsdEVar/=(float)(idx2-idx1+1);
fVectorPsdEVar=sqrtf(fVectorPsdEVar);
printf("success 5!\n");
        // Line-spectrum extraction on the vector pressure channel
int ll = 0;
if(FrameNum >= 8)
{
nVectorPlineNum = 0;
memset(fVectorPlineInfo,0,LINE_NUM*4*sizeof(float));
for(int ii=idx1;ii<=idx2;ii++)
{
if(vector_p_psd_smooth[ii]>4.0*fVectorPsdEVar && vector_p_psd_smooth[ii]>vector_p_psd_smooth[ii-1] && vector_p_psd_smooth[ii]>vector_p_psd_smooth[ii+1] )
{
if(nVectorPlineNum<LINE_NUM)
{
                        // Merge nearby line-spectrum detections
for(ll = 0;ll<nVectorPlineNum;ll++)
{
if(fabs(fVectorPlineInfo[ll][1]-(float)ii*fDf)<1.0)
{
break;
}
}
if(ll == nVectorPlineNum)
{
                            fVectorPlineInfo[nVectorPlineNum][0] = vector_p_psd_smooth[ii];    // SNR above the smoothed background
                            fVectorPlineInfo[nVectorPlineNum][1] = (float)ii*fDf;              // line frequency in Hz
fVectorPlineInfo[nVectorPlineNum][2] = vector_p_psd[ii];
//fVectorPlineInfo[nVectorPlineNum][3] = tracedtargetangle[jj];
cufftComplex P_f,Vx_f,Vy_f;
cudaMemcpy(&P_f,dev_vector_p_spec+ii,sizeof(cufftComplex),cudaMemcpyDeviceToHost);
cudaMemcpy(&Vx_f,dev_vector_x_spec+ii,sizeof(cufftComplex),cudaMemcpyDeviceToHost);
cudaMemcpy(&Vy_f,dev_vector_y_spec+ii,sizeof(cufftComplex),cudaMemcpyDeviceToHost);
                            if(FrameNum == 20)
                            {
                                FrameNum = FrameNum;    // no-op kept as a debugger breakpoint anchor
                            }
fVectorPlineInfo[nVectorPlineNum][3] = VectorThetSPF(P_f, Vx_f, Vy_f);
if(fVectorPlineInfo[nVectorPlineNum][3] > 180.0)
{
fVectorPlineInfo[nVectorPlineNum][3] -= 360.0;
}
else if(fVectorPlineInfo[nVectorPlineNum][3] < -180.0)
{
fVectorPlineInfo[nVectorPlineNum][3] += 360.0;
}
nVectorPlineNum++;
}
else if(vector_p_psd_smooth[ii] > fVectorPlineInfo[ll][0])
{
fVectorPlineInfo[ll][0] = vector_p_psd_smooth[ii];
fVectorPlineInfo[ll][1] = (float)ii*fDf;
fVectorPlineInfo[ll][2] = vector_p_psd[ii];
cufftComplex P_f,Vx_f,Vy_f;
cudaMemcpy(&P_f,dev_vector_p_spec+ii,sizeof(cufftComplex),cudaMemcpyDeviceToHost);
cudaMemcpy(&Vx_f,dev_vector_x_spec+ii,sizeof(cufftComplex),cudaMemcpyDeviceToHost);
cudaMemcpy(&Vy_f,dev_vector_y_spec+ii,sizeof(cufftComplex),cudaMemcpyDeviceToHost);
                            fVectorPlineInfo[ll][3] = VectorThetSPF(P_f, Vx_f, Vy_f);    // update the existing entry ll
if(fVectorPlineInfo[ll][3] > 180.0)
{
fVectorPlineInfo[ll][3] -= 360.0;
}
else if(fVectorPlineInfo[ll][3] < -180.0)
{
fVectorPlineInfo[ll][3] += 360.0;
}
}
}
}
}
}
for(int ii = 0;ii<nVectorPlineNum;ii++)
{
printf("fVectorPlineInfo %d:%.3f\n",ii+1,fVectorPlineInfo[ii][3]);
}
//------------------------------------------------------------------------------------------------
cudaEventRecord(stop1,NULL);
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&msecTotal,start1,stop1);
printf("%d:%f;%d,%d;%d,%d;%d,%d\n",FrameNum,msecTotal,tracedtargetbeam[0][0],tracedtargetbeam[0][1],tracedtargetbeam[1][0],tracedtargetbeam[1][1],tracedtargetbeam[2][0],tracedtargetbeam[2][1]);
printf("\n");
fprintf(fplog,"%d:%f;%d,%d;%d,%d;%d,%d\n",FrameNum,msecTotal,tracedtargetbeam[0][0],tracedtargetbeam[0][1],tracedtargetbeam[1][0],tracedtargetbeam[1][1],tracedtargetbeam[2][0],tracedtargetbeam[2][1]);
fflush(fplog);
}
}
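// Formatting thread: converts each raw 32-bit word (carrying a 24-bit
// two's-complement ADC sample in its low three bytes) into a float in volts,
// interleaves both boards' channels into a ping-pong frame buffer, and
// signals FrameDataReady to wake the DSP thread.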
void *DataFormatting(void *lParam)
{
//int retval1 = -1;
//int retval2 = -1;
int BUF_FLAG = 0;
int temp = 0;
if(ChannDataBufA != NULL)
{
free(ChannDataBufA);
ChannDataBufA = NULL;
}
ChannDataBufA = (float *)malloc(FRAMELEN*CHANNUM*2*sizeof(float));
memset(ChannDataBufA,0,FRAMELEN*CHANNUM*2*sizeof(float));
if(ChannDataBufB != NULL)
{
free(ChannDataBufB);
ChannDataBufB = NULL;
}
ChannDataBufB = (float *)malloc(FRAMELEN*CHANNUM*2*sizeof(float));
memset(ChannDataBufB,0,FRAMELEN*CHANNUM*2*sizeof(float));
while (1)
{
//#if ONLINEMODE
// pthread_mutex_lock(&count_lock_BoardDataReady);
// while (count_BoardDataReady == 0)
// {
// pthread_cond_wait(&cond_BoardDataReady,&count_lock_BoardDataReady);
// }
// count_BoardDataReady = count_BoardDataReady -1;
// pthread_mutex_unlock(&count_lock_BoardDataReady);
//#endif
//#if FILEMODE
pthread_mutex_lock(&count_lock_Board1DataReady);
while (count_Board1DataReady == 0)
{
pthread_cond_wait(&cond_Board1DataReady,&count_lock_Board1DataReady);
}
count_Board1DataReady = count_Board1DataReady -1;
pthread_mutex_unlock(&count_lock_Board1DataReady);
pthread_mutex_lock(&count_lock_Board2DataReady);
while (count_Board2DataReady == 0)
{
pthread_cond_wait(&cond_Board2DataReady,&count_lock_Board2DataReady);
}
count_Board2DataReady = count_Board2DataReady -1;
pthread_mutex_unlock(&count_lock_Board2DataReady);
if(BUF_FLAG == 0)
{
for(int ii=0;ii<CHANNUM;ii++)
{
for(int jj=0;jj<FRAMELEN;jj++)
{
                    temp = DataBufA_B1[jj*CHANNUM+ii];
                    temp = temp<<8;    // move the 24-bit sample into the high bits,
                    temp = temp>>8;    // then arithmetic-shift back to sign-extend it
                    ChannDataBufA[ii*FRAMELEN+jj] = temp*1.0/pow(2.0,23) * 2.5;    // scale LSBs to volts (2.5 V full scale)
                    temp = DataBufA_B2[jj*CHANNUM+ii];
                    temp = temp<<8;
                    temp = temp>>8;
                    ChannDataBufA[ii*FRAMELEN+jj+FRAMELEN*CHANNUM] = temp*1.0/pow(2.0,23) * 2.5;
}
}
BUF_FLAG = 1;
printf("DataFormatting Finished!\n");
pthread_mutex_lock(&count_lock_FrameDataReady);
pthread_cond_signal(&cond_FrameDataReady);
count_FrameDataReady = count_FrameDataReady+1;
pthread_mutex_unlock(&count_lock_FrameDataReady);
}
else
{
for(int ii=0;ii<CHANNUM;ii++)
{
for(int jj=0;jj<FRAMELEN;jj++)
{
temp = DataBufB_B1[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
ChannDataBufB[ii*FRAMELEN+jj] = temp*1.0/pow(2.0,23) * 2.5;
temp = DataBufB_B2[jj*CHANNUM+ii];
temp = temp<<8;
temp = temp>>8;
ChannDataBufB[ii*FRAMELEN+jj+FRAMELEN*CHANNUM] = temp*1.0/pow(2.0,23) * 2.5;
}
}
BUF_FLAG = 0;
printf("DataFormatting Finished!\n");
pthread_mutex_lock(&count_lock_FrameDataReady);
pthread_cond_signal(&cond_FrameDataReady);
count_FrameDataReady = count_FrameDataReady+1;
pthread_mutex_unlock(&count_lock_FrameDataReady);
}
//#endif
}
}
void *ReceiveNetwork(void *lParam)
{
char errBuf[PCAP_ERRBUF_SIZE], *device;
pcap_t *handle;
bpf_u_int32 mask;
bpf_u_int32 net;
struct bpf_program filter;
    char filter_app[] = "udp dst port 0"; //BPF filter expression applied to captured packets
struct pcap_pkthdr packet;
const u_char *pktStr;
char packtype = 0;
short portnumber = 0;
char sourceid = 0;
char FramenumN1 = -1, FramenumN2 = -1;
char LastFramenumN1 = 0, LastFramenumN2 = 0;
int readbufb1[TL*CHANNUM+1],readbufb2[TL*CHANNUM+1];
    int BUF_FLAG_B1=0,BUF_FLAG_B2=0;
int *pBuf_B1 = NULL,*pBuf_B2 = NULL;
int *pCounter_B1 = NULL,*pCounter_B2 = NULL;
int CounterA_B1 = FRAMELEN,CounterB_B1 = FRAMELEN;
int CounterA_B2 = FRAMELEN,CounterB_B2 = FRAMELEN;
int temp = 0;
int FrameNum1 = 0,FrameNum2 = 0, FrameNum = 0;
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B1 != NULL)
{
free(DataBufB_B1);
DataBufB_B1 = NULL;
}
DataBufB_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B1,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufA_B2 != NULL)
{
free(DataBufA_B2);
DataBufA_B2 = NULL;
}
DataBufA_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B2,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B2 != NULL)
{
free(DataBufB_B2);
DataBufB_B2 = NULL;
}
DataBufB_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B2,0,FRAMELEN*CHANNUM*sizeof(int));
//get the name of the first device suitable for capture
device = pcap_lookupdev(errBuf);
if ( device )
{
printf("success: device: %s\n",device);
}
else
{
printf("error: %s\n",errBuf);
return 0;
}
//open network device for packet capture
handle = pcap_open_live(device,BUFSIZ,1,0,errBuf);
//look up into from the capture device
pcap_lookupnet(device,&net,&mask,errBuf);
printf("net=%x mask=%x\n",net,mask);
    //compile the filter expression into a BPF filter program
printf("compiles the filter expression into a bpf filter program\r\n");
pcap_compile(handle,&filter,filter_app,0,net);
//load the filter program into the packet capture device
printf("load the filter program into the packet capture device\r\n");
pcap_setfilter(handle,&filter);
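    // Capture loop: every UDP payload carries TL samples x CHANNUM channels from
    // one board. Samples accumulate into ping-pong frame buffers of FRAMELEN
    // samples each; a packet that straddles a frame boundary spills its tail into
    // the other buffer, and the completed buffer is signalled to DataFormatting.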
while (1)
{
//printf("before Received data!\n");
pktStr = pcap_next(handle,&packet);
//printf("Received data!\n");
if(pktStr != NULL)
{
//printf("Received data!\n");
            // Read the UDP destination port (bytes swapped from network order)
memcpy((char *)&portnumber,pktStr+37,sizeof(char));
memcpy((char *)&portnumber+1,pktStr+36,sizeof(char));
if (portnumber == DEST_PORT)
{
                // Read the packet type and source board ID
memcpy(&packtype,pktStr+45,sizeof(char));
memcpy(&sourceid,pktStr+43,sizeof(char));
if (packtype == 0x10) // if packet is ADC packet
{
if(sourceid == 1)
{
FrameNum1++;
memcpy(readbufb1,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
FramenumN1 = *(pktStr+44);
FramenumN1 = FramenumN1 >> 2;
if (FrameNum1 == 1)
{
LastFramenumN1 = FramenumN1;
}
else
{
if (FramenumN1 != LastFramenumN1+1 && FramenumN1+63 != LastFramenumN1)
{
printf("Lost Board1 data package!\n");
}
LastFramenumN1 = FramenumN1;
}
}
if(sourceid == 2)
{
FrameNum2++;
memcpy(readbufb2,pktStr+42,(TL*CHANNUM+1)*sizeof(int));
FramenumN2 = *(pktStr+44);
FramenumN2 = FramenumN2 >> 2;
if (FrameNum2 == 1)
{
LastFramenumN2 = FramenumN2;
}
else
{
if (FramenumN2 != LastFramenumN2+1 && FramenumN2+63 != LastFramenumN2)
{
printf("Lost Board2 data package!\n");
}
LastFramenumN2 = FramenumN2;
}
}
                if (FramenumN1 == FramenumN2 && FramenumN2 >= 0) // packets from both boards received for this frame number
{
//-----------------board1 data accumulate---------------------------
if(0 == BUF_FLAG_B1)
{
pBuf_B1 = DataBufA_B1;
pCounter_B1 = &CounterA_B1;
}
else
{
pBuf_B1 = DataBufB_B1;
pCounter_B1 = &CounterB_B1;
}
if(*(pCounter_B1)>=TL) //
{
memcpy(pBuf_B1+FRAMELEN*CHANNUM-(*(pCounter_B1))*CHANNUM,readbufb1+1,TL*CHANNUM*sizeof(int));
*(pCounter_B1) = *(pCounter_B1)-TL;
}
else
{
temp = TL - *(pCounter_B1);
memcpy(pBuf_B1+FRAMELEN*CHANNUM-(*(pCounter_B1))*CHANNUM,readbufb1+1,(*(pCounter_B1))*CHANNUM*sizeof(int));
*(pCounter_B1)= FRAMELEN;
if(0 == BUF_FLAG_B1)
{
memcpy(DataBufB_B1+FRAMELEN*CHANNUM-CounterB_B1*CHANNUM,readbufb1+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterB_B1 = CounterB_B1 - temp;
BUF_FLAG_B1 = 1;
}
else //
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA_B1*CHANNUM,readbufb1+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterA_B1 = CounterA_B1 - temp;
BUF_FLAG_B1 = 0;
}
pthread_mutex_lock(&count_lock_Board1DataReady);
pthread_cond_signal(&cond_Board1DataReady);
count_Board1DataReady = count_Board1DataReady+1;
pthread_mutex_unlock(&count_lock_Board1DataReady);
// printf("ReceiveNetworkData A Finished!\n");
}
//-----------------board2 data accumulate---------------------------
if(0 == BUF_FLAG_B2)
{
pBuf_B2 = DataBufA_B2;
pCounter_B2 = &CounterA_B2;
}
else
{
pBuf_B2 = DataBufB_B2;
pCounter_B2 = &CounterB_B2;
}
if(*(pCounter_B2)>=TL) //
{
memcpy(pBuf_B2+FRAMELEN*CHANNUM-(*(pCounter_B2))*CHANNUM,readbufb2+1,TL*CHANNUM*sizeof(int));
*(pCounter_B2) = *(pCounter_B2)-TL;
}
else
{
temp = TL - *(pCounter_B2);
memcpy(pBuf_B2+FRAMELEN*CHANNUM-(*(pCounter_B2))*CHANNUM,readbufb2+1,(*(pCounter_B2))*CHANNUM*sizeof(int));
*(pCounter_B2)= FRAMELEN;
if(0 == BUF_FLAG_B2)
{
memcpy(DataBufB_B2+FRAMELEN*CHANNUM-CounterB_B2*CHANNUM,readbufb2+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterB_B2 = CounterB_B2 - temp;
BUF_FLAG_B2 = 1;
}
else
{
memcpy(DataBufA_B2+FRAMELEN*CHANNUM-CounterA_B2*CHANNUM,readbufb2+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
CounterA_B2 = CounterA_B2 - temp;
BUF_FLAG_B2 = 0;
}
pthread_mutex_lock(&count_lock_Board2DataReady);
pthread_cond_signal(&cond_Board2DataReady);
count_Board2DataReady = count_Board2DataReady+1;
pthread_mutex_unlock(&count_lock_Board2DataReady);
// printf("ReceiveNetworkData B Finished!\n");
}
}
}
}
}
//printf("ReceiveNetworkData Finished!\n");
//pthread_mutex_lock(&count_lock_BoardDataReady);
//pthread_cond_signal(&cond_BoardDataReady);
//count_BoardDataReady = count_BoardDataReady+1;
//pthread_mutex_unlock(&count_lock_BoardDataReady);
}
}
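// Offline replay of board 1: reads recorded ADC packets from Board1_ADC_*.bin,
// sleeps TL/FS seconds per packet to approximate the real-time sample rate, and
// pushes the data through the same ping-pong frame buffers as the network path.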
void *ReadBoard1Data(void *lParam)
{
int fileindex = 0;
std::string FilePath = "/home/ubuntu/Desktop/GPU/uwrn/";
std::string FileNamePre = "Board1_ADC_";
std::string FileIdx = std::to_string(fileindex);
std::string FileNameSur = ".bin";
std::string FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
int DataFileNum = 18;
FILE *fp = NULL;
//int readbytes = 0;
int readbuf[TL*CHANNUM+1];
int BUF_FLAG=0;
int *pBuf = NULL;
int *pCounter = NULL;
int CounterA = FRAMELEN,CounterB = FRAMELEN;
int temp = 0;
if(DataBufA_B1 != NULL)
{
free(DataBufA_B1);
DataBufA_B1 = NULL;
}
DataBufA_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B1,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B1 != NULL)
{
free(DataBufB_B1);
DataBufB_B1 = NULL;
}
DataBufB_B1 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B1,0,FRAMELEN*CHANNUM*sizeof(int));
//
for(int ii=0;ii<DataFileNum;ii++)
{
fileindex = ii;
FileIdx = std::to_string(fileindex);
FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
if(fp != NULL)
{
fclose(fp);
fp = NULL;
}
fp = fopen(FileName.c_str(),"rb");
for(int jj=0;jj<8e4;jj++)
{
usleep(TL*1e6 / FS);
fread(readbuf,sizeof(int),TL*CHANNUM+1,fp);
if(0 == BUF_FLAG)
{
pBuf = DataBufA_B1;
pCounter = &CounterA;
}
else
{
pBuf = DataBufB_B1;
pCounter = &CounterB;
}
if(*(pCounter)>=TL) //
{
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,TL*CHANNUM*sizeof(int));
*(pCounter) = *(pCounter)-TL;
}
else
{
temp = TL - *(pCounter);
//
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,(*(pCounter))*CHANNUM*sizeof(int));
//
*(pCounter)= FRAMELEN;
//
if(0 == BUF_FLAG) //
{
memcpy(DataBufB_B1+FRAMELEN*CHANNUM-CounterB*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterB = CounterB - temp;
//
BUF_FLAG = 1;
}
else //
{
memcpy(DataBufA_B1+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterA = CounterA - temp;
//
BUF_FLAG = 0;
}
//
//SetEvent(g_hReadBoard1ThreadReadyEnvent);
pthread_mutex_lock(&count_lock_Board1DataReady);
pthread_cond_signal(&cond_Board1DataReady);
count_Board1DataReady = count_Board1DataReady+1;
pthread_mutex_unlock(&count_lock_Board1DataReady);
}
}
}
return NULL;
}
void *ReadBoard2Data(void *lParam)
{
int fileindex = 0;
std::string FilePath = "/home/ubuntu/Desktop/GPU/uwrn/";
std::string FileNamePre = "Board2_ADC_";
std::string FileIdx = std::to_string(fileindex);
std::string FileNameSur = ".bin";
std::string FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
int DataFileNum = 18;
FILE *fp = NULL;
//int readbytes = 0;
int readbuf[TL*CHANNUM+1];
int BUF_FLAG=0;
int *pBuf = NULL;
int *pCounter = NULL;
int CounterA = FRAMELEN,CounterB = FRAMELEN;
int temp = 0;
if(DataBufA_B2 != NULL)
{
free(DataBufA_B2);
DataBufA_B2 = NULL;
}
DataBufA_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufA_B2,0,FRAMELEN*CHANNUM*sizeof(int));
if(DataBufB_B2 != NULL)
{
free(DataBufB_B2);
DataBufB_B2 = NULL;
}
DataBufB_B2 = (int *)malloc(FRAMELEN*CHANNUM*sizeof(int));
memset(DataBufB_B2,0,FRAMELEN*CHANNUM*sizeof(int));
//
for(int ii=0;ii<DataFileNum;ii++)
{
fileindex = ii;
FileIdx = std::to_string(fileindex);
FileName = FilePath + FileNamePre + FileIdx + FileNameSur;
if(fp != NULL)
{
fclose(fp);
fp = NULL;
}
fp = fopen(FileName.c_str(),"rb");
for(int jj=0;jj<8e4;jj++)
{
usleep(TL*1e6 / FS);
fread(readbuf,sizeof(int),TL*CHANNUM+1,fp);
if(0 == BUF_FLAG)
{
pBuf = DataBufA_B2;
pCounter = &CounterA;
}
else
{
pBuf = DataBufB_B2;
pCounter = &CounterB;
}
if(*(pCounter)>=TL) //
{
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,TL*CHANNUM*sizeof(int));
*(pCounter) = *(pCounter)-TL;
}
else
{
temp = TL - *(pCounter);
//
memcpy(pBuf+FRAMELEN*CHANNUM-(*(pCounter))*CHANNUM,readbuf+1,(*(pCounter))*CHANNUM*sizeof(int));
//
*(pCounter)= FRAMELEN;
//
if(0 == BUF_FLAG) //
{
memcpy(DataBufB_B2+FRAMELEN*CHANNUM-CounterB*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterB = CounterB - temp;
//
BUF_FLAG = 1;
}
else //
{
memcpy(DataBufA_B2+FRAMELEN*CHANNUM-CounterA*CHANNUM,readbuf+(TL-temp)*CHANNUM+1,temp*CHANNUM*sizeof(int));
//
CounterA = CounterA - temp;
//
BUF_FLAG = 0;
}
//
//SetEvent(g_hReadBoard2ThreadReadyEnvent);
pthread_mutex_lock(&count_lock_Board2DataReady);
pthread_cond_signal(&cond_Board2DataReady);
count_Board2DataReady = count_Board2DataReady+1;
pthread_mutex_unlock(&count_lock_Board2DataReady);
}
}
}
return NULL;
}
|
f6ae419d882d075c6ef9af6317d56175e072727d.hip | // !!! This is a file automatically generated by hipify!!!
//Add GRID Vector Using GPU
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 50000000
#define MAX_ERR 1e-6
__global__ void vector_add_grid(float *out, float *a, float *b, int n){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//printf("Global = %d\n", tid);
//Handling arbitary vector size
if (tid < n){
out[tid] = a[tid] + b[tid];
}
}
int main(int argc, char **argv){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
    // Allocate host memory
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
    // Initialize the input arrays
for (int i = 0; i < N; i++){
a[i] = 29.0f;
b[i] = 57.0f;
}
    // Allocate device memory
hipMalloc((void**)&d_a, sizeof(float) * N);
hipMalloc((void**)&d_b, sizeof(float) * N);
hipMalloc((void**)&d_out, sizeof(float) * N);
    // Copy data from host memory to device memory
hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);
    // Launch the kernel
int block_size = 256;
    int grid_size = ((N + block_size - 1) / block_size);  // ceiling division so every element is covered
hipLaunchKernelGGL(( vector_add_grid) , dim3(grid_size), dim3(block_size), 0, 0, d_out, d_a, d_b, N);
    // Copy the result back to host memory
hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost);
//Verification
//for (int i = 0; i < N; i++){
// assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
//}
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
    // Free device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_out);
    // Free host memory
free(a);
free(b);
free(out);
return 0;
} | f6ae419d882d075c6ef9af6317d56175e072727d.cu | //Add GRID Vector Using GPU
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 50000000
#define MAX_ERR 1e-6
__global__ void vector_add_grid(float *out, float *a, float *b, int n){
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//printf("Global = %d\n", tid);
//Handling arbitary vector size
if (tid < n){
out[tid] = a[tid] + b[tid];
}
}
int main(int argc, char **argv){
float *a, *b, *out;
float *d_a, *d_b, *d_out;
    // Allocate host memory
a = (float*)malloc(sizeof(float) * N);
b = (float*)malloc(sizeof(float) * N);
out = (float*)malloc(sizeof(float) * N);
    // Initialize the input arrays
for (int i = 0; i < N; i++){
a[i] = 29.0f;
b[i] = 57.0f;
}
    // Allocate device memory
cudaMalloc((void**)&d_a, sizeof(float) * N);
cudaMalloc((void**)&d_b, sizeof(float) * N);
cudaMalloc((void**)&d_out, sizeof(float) * N);
    // Copy data from host memory to device memory
cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // Launch the kernel
int block_size = 256;
    int grid_size = ((N + block_size - 1) / block_size);  // ceiling division so every element is covered
vector_add_grid <<<grid_size, block_size>>> (d_out, d_a, d_b, N);
    // Copy the result back to host memory
cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
//Verification
//for (int i = 0; i < N; i++){
// assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
//}
printf("out[0] = %f\n", out[0]);
printf("PASSED\n");
    // Free device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_out);
//Dealokasi Host Memori
free(a);
free(b);
free(out);
return 0;
} |
66a3258cb053e92979c51eaa845be271e26a6428.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_utils.h"
//#include "kernel_correlation.h"
// input: points(b, n, npoints, 3), kernel(l, m, 3)
// output: outputs(b, l, n)
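// Each output is a Gaussian kernel correlation between a learned kernel point
// set and a local point neighborhood:
//   outputs(i, j) = (1/npoints) * sum_{k1=0..m-1} sum_{k2=0..npoints-1}
//                   exp(-||kernel[i][k1] - points[j][k2]||^2 / (2*sigma^2))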
__global__ void kernel_correlation_kernel(int b, int n, int npoints, int l, int m, float sigma,
const float *__restrict__ points,
const float *__restrict__ kernel,
float *__restrict__ outputs) {
const int batch_index = blockIdx.x;
points += batch_index * n * npoints * 3;
outputs += batch_index * l * n;
for (int i = threadIdx.y; i < l; i += blockDim.y) {
    const float *temp_k = kernel + i * m * 3;  // kernel layout is (l, m, 3) per the header comment, so the per-filter stride is m*3
for (int j = threadIdx.x; j < n; j += blockDim.x) {
const float *temp_p = points + j * npoints * 3;
float sum = 0.;
for (int k1 = 0; k1 < m; ++k1) {
float kx = temp_k[k1 * 3];
float ky = temp_k[k1 * 3 + 1];
float kz = temp_k[k1 * 3 + 2];
for (int k2 = 0; k2 < npoints; ++k2) {
float x = temp_p[k2 * 3];
float y = temp_p[k2 * 3 + 1];
float z = temp_p[k2 * 3 + 2];
float d = (kx - x) * (kx - x) + (ky - y) * (ky - y) + (kz - z) * (kz - z);
float kc = expf(-1 * d / (2 * sigma * sigma)); // kernel function
sum += kc;
}
}
outputs[i * n + j] = sum / npoints;
}
}
}
// input: grad_outputs(b, l, n), points(b, n, npoints, 3), kernel(l, m, 3)
// output: grad_inputs(l, m, 3)
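// Backward pass: with kc = exp(-||k - p||^2 / (2*sigma^2)), the derivative is
//   d kc / d k = ((p - k) / sigma^2) * kc,
// so each kernel point accumulates grad_output * (p - k) * kc over all point
// sets, scaled by coef = 1/(npoints*sigma^2); atomicAdd combines the
// per-batch-block contributions.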
__global__ void kernel_correlation_grad_kernel(int b, int n, int npoints, int l, int m, float sigma,
const float *__restrict__ grad_outputs,
const float *__restrict__ points,
const float *__restrict__ kernel,
float *__restrict__ grad_inputs) {
const int batch_index = blockIdx.x;
points += batch_index * n * npoints * 3;
grad_outputs += batch_index * l * n;
float coef = 1 / (npoints * sigma * sigma);
for (int i = threadIdx.y; i < l; i += blockDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
float kx = kernel[(i * m + j) * 3];
float ky = kernel[(i * m + j) * 3 + 1];
float kz = kernel[(i * m + j) * 3 + 2];
float gx = 0.;
float gy = 0.;
float gz = 0.;
for (int k1 = 0; k1 < n; ++k1) {
const float *temp_p = points + k1 * npoints * 3;
float sum1 = 0.;
float sum2 = 0.;
float sum3 = 0.;
for (int k2 = 0; k2 < npoints; ++k2) {
float x = temp_p[k2 * 3];
float y = temp_p[k2 * 3 + 1];
float z = temp_p[k2 * 3 + 2];
float d = (kx - x) * (kx - x) + (ky - y) * (ky - y) + (kz - z) * (kz - z);
float kc = expf(- d / (2 * sigma * sigma));
sum1 += (x - kx) * kc;
sum2 += (y - ky) * kc;
sum3 += (z - kz) * kc;
}
gx += grad_outputs[i * n + k1] * sum1;
gy += grad_outputs[i * n + k1] * sum2;
gz += grad_outputs[i * n + k1] * sum3;
}
atomicAdd(grad_inputs + (i * m + j) * 3, gx * coef);
atomicAdd(grad_inputs + (i * m + j) * 3 + 1, gy * coef);
atomicAdd(grad_inputs + (i * m + j) * 3 + 2, gz * coef);
}
}
}
void kernel_correlation_kernel_wrapper(int b, int n, int npoints, int l, int m, float sigma,
const float *points, const float *kernel,
float *outputs) {
hipError_t err;
hipLaunchKernelGGL(( kernel_correlation_kernel), dim3(b), dim3(opt_block_config(n, l)), 0, 0, b, n, npoints, l, m, sigma, points, kernel, outputs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
void kernel_correlation_grad_kernel_wrapper(int b, int n, int npoints, int l, int m, float sigma,
const float *grad_outputs, const float *points, const float *kernel,
float *grad_inputs) {
hipError_t err;
hipLaunchKernelGGL(( kernel_correlation_grad_kernel), dim3(b), dim3(opt_block_config(m, l)), 0, 0,
b, n, npoints, l, m, sigma, grad_outputs, points, kernel, grad_inputs);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
| 66a3258cb053e92979c51eaa845be271e26a6428.cu | #include "cuda_utils.h"
//#include "kernel_correlation.h"
// input: points(b, n, npoints, 3), kernel(l, m, 3)
// output: outputs(b, l, n)
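// Each output is a Gaussian kernel correlation between a learned kernel point
// set and a local point neighborhood:
//   outputs(i, j) = (1/npoints) * sum_{k1=0..m-1} sum_{k2=0..npoints-1}
//                   exp(-||kernel[i][k1] - points[j][k2]||^2 / (2*sigma^2))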
__global__ void kernel_correlation_kernel(int b, int n, int npoints, int l, int m, float sigma,
const float *__restrict__ points,
const float *__restrict__ kernel,
float *__restrict__ outputs) {
const int batch_index = blockIdx.x;
points += batch_index * n * npoints * 3;
outputs += batch_index * l * n;
for (int i = threadIdx.y; i < l; i += blockDim.y) {
    const float *temp_k = kernel + i * m * 3;  // kernel has shape (l, m, 3), so the per-channel stride is m * 3, not npoints * 3
for (int j = threadIdx.x; j < n; j += blockDim.x) {
const float *temp_p = points + j * npoints * 3;
float sum = 0.;
for (int k1 = 0; k1 < m; ++k1) {
float kx = temp_k[k1 * 3];
float ky = temp_k[k1 * 3 + 1];
float kz = temp_k[k1 * 3 + 2];
for (int k2 = 0; k2 < npoints; ++k2) {
float x = temp_p[k2 * 3];
float y = temp_p[k2 * 3 + 1];
float z = temp_p[k2 * 3 + 2];
float d = (kx - x) * (kx - x) + (ky - y) * (ky - y) + (kz - z) * (kz - z);
float kc = expf(-1 * d / (2 * sigma * sigma)); // kernel function
sum += kc;
}
}
outputs[i * n + j] = sum / npoints;
}
}
}
// input: grad_outputs(b, l, n), points(b, n, npoints, 3), kernel(l, m, 3)
// output: grad_inputs(l, m, 3)
__global__ void kernel_correlation_grad_kernel(int b, int n, int npoints, int l, int m, float sigma,
const float *__restrict__ grad_outputs,
const float *__restrict__ points,
const float *__restrict__ kernel,
float *__restrict__ grad_inputs) {
const int batch_index = blockIdx.x;
points += batch_index * n * npoints * 3;
grad_outputs += batch_index * l * n;
float coef = 1 / (npoints * sigma * sigma);
for (int i = threadIdx.y; i < l; i += blockDim.y) {
for (int j = threadIdx.x; j < m; j += blockDim.x) {
float kx = kernel[(i * m + j) * 3];
float ky = kernel[(i * m + j) * 3 + 1];
float kz = kernel[(i * m + j) * 3 + 2];
float gx = 0.;
float gy = 0.;
float gz = 0.;
for (int k1 = 0; k1 < n; ++k1) {
const float *temp_p = points + k1 * npoints * 3;
float sum1 = 0.;
float sum2 = 0.;
float sum3 = 0.;
for (int k2 = 0; k2 < npoints; ++k2) {
float x = temp_p[k2 * 3];
float y = temp_p[k2 * 3 + 1];
float z = temp_p[k2 * 3 + 2];
float d = (kx - x) * (kx - x) + (ky - y) * (ky - y) + (kz - z) * (kz - z);
float kc = expf(- d / (2 * sigma * sigma));
sum1 += (x - kx) * kc;
sum2 += (y - ky) * kc;
sum3 += (z - kz) * kc;
}
gx += grad_outputs[i * n + k1] * sum1;
gy += grad_outputs[i * n + k1] * sum2;
gz += grad_outputs[i * n + k1] * sum3;
}
atomicAdd(grad_inputs + (i * m + j) * 3, gx * coef);
atomicAdd(grad_inputs + (i * m + j) * 3 + 1, gy * coef);
atomicAdd(grad_inputs + (i * m + j) * 3 + 2, gz * coef);
}
}
}
void kernel_correlation_kernel_wrapper(int b, int n, int npoints, int l, int m, float sigma,
const float *points, const float *kernel,
float *outputs) {
cudaError_t err;
kernel_correlation_kernel<<<b, opt_block_config(n, l)>>>(b, n, npoints, l, m, sigma, points, kernel, outputs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
void kernel_correlation_grad_kernel_wrapper(int b, int n, int npoints, int l, int m, float sigma,
const float *grad_outputs, const float *points, const float *kernel,
float *grad_inputs) {
cudaError_t err;
kernel_correlation_grad_kernel<<<b, opt_block_config(m, l)>>>(
b, n, npoints, l, m, sigma, grad_outputs, points, kernel, grad_inputs);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
c2488b2814aed385007ad41647e68d32a057e2d9.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <memory>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/spatial/knn/epsilon_neighborhood.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace spatial {
namespace knn {
template <typename T, typename IdxT>
struct EpsInputs {
IdxT n_row, n_col, n_centers, n_batches;
T eps;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p)
{
return os;
}
template <typename T, typename IdxT>
class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> {
protected:
EpsNeighTest()
: data(0, resource::get_cuda_stream(handle)),
adj(0, resource::get_cuda_stream(handle)),
labels(0, resource::get_cuda_stream(handle)),
vd(0, resource::get_cuda_stream(handle))
{
}
void SetUp() override
{
auto stream = resource::get_cuda_stream(handle);
param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam();
data.resize(param.n_row * param.n_col, stream);
labels.resize(param.n_row, stream);
batchSize = param.n_row / param.n_batches;
adj.resize(param.n_row * batchSize, stream);
vd.resize(batchSize + 1, stream);
RAFT_CUDA_TRY(hipMemsetAsync(vd.data(), 0, vd.size() * sizeof(IdxT), stream));
random::make_blobs<T, IdxT>(data.data(),
labels.data(),
param.n_row,
param.n_col,
param.n_centers,
stream,
true,
nullptr,
nullptr,
T(0.01),
false);
}
const raft::resources handle;
EpsInputs<T, IdxT> param;
hipStream_t stream = 0;
rmm::device_uvector<T> data;
rmm::device_uvector<bool> adj;
rmm::device_uvector<IdxT> labels, vd;
IdxT batchSize;
}; // class EpsNeighTest
const std::vector<EpsInputs<float, int>> inputsfi = {
{15000, 16, 5, 1, 2.f},
{14000, 16, 5, 1, 2.f},
{15000, 17, 5, 1, 2.f},
{14000, 17, 5, 1, 2.f},
{15000, 18, 5, 1, 2.f},
{14000, 18, 5, 1, 2.f},
{15000, 32, 5, 1, 2.f},
{14000, 32, 5, 1, 2.f},
{20000, 10000, 10, 1, 2.f},
{20000, 10000, 10, 2, 2.f},
};
typedef EpsNeighTest<float, int> EpsNeighTestFI;
TEST_P(EpsNeighTestFI, Result)
{
for (int i = 0; i < param.n_batches; ++i) {
RAFT_CUDA_TRY(hipMemsetAsync(adj.data(), 0, sizeof(bool) * param.n_row * batchSize, stream));
RAFT_CUDA_TRY(hipMemsetAsync(vd.data(), 0, sizeof(int) * (batchSize + 1), stream));
auto adj_view = make_device_matrix_view<bool, int>(adj.data(), param.n_row, batchSize);
auto vd_view = make_device_vector_view<int, int>(vd.data(), batchSize + 1);
auto x_view = make_device_matrix_view<float, int>(data.data(), param.n_row, param.n_col);
auto y_view = make_device_matrix_view<float, int>(
data.data() + (i * batchSize * param.n_col), batchSize, param.n_col);
eps_neighbors_l2sq<float, int, int>(
handle, x_view, y_view, adj_view, vd_view, param.eps * param.eps);
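    // The `_l2sq` suffix suggests the API works in squared L2 distance, which
    // would be why the radius is passed as eps * eps above. make_blobs in
    // SetUp generates n_centers tight clusters (stddev 0.01) of
    // n_row / n_centers points each, so with eps = 2 each point's
    // epsilon-neighborhood should be exactly its own cluster -- hence the
    // expected count in the check below.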
ASSERT_TRUE(raft::devArrMatch(
param.n_row / param.n_centers, vd.data(), batchSize, raft::Compare<int>(), stream));
}
}
INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI, ::testing::ValuesIn(inputsfi));
}; // namespace knn
}; // namespace spatial
}; // namespace raft
| c2488b2814aed385007ad41647e68d32a057e2d9.cu | /*
* Copyright (c) 2020-2023, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../test_utils.cuh"
#include <gtest/gtest.h>
#include <memory>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/random/make_blobs.cuh>
#include <raft/spatial/knn/epsilon_neighborhood.cuh>
#include <raft/util/cudart_utils.hpp>
#include <rmm/device_uvector.hpp>
namespace raft {
namespace spatial {
namespace knn {
template <typename T, typename IdxT>
struct EpsInputs {
IdxT n_row, n_col, n_centers, n_batches;
T eps;
};
template <typename T, typename IdxT>
::std::ostream& operator<<(::std::ostream& os, const EpsInputs<T, IdxT>& p)
{
return os;
}
template <typename T, typename IdxT>
class EpsNeighTest : public ::testing::TestWithParam<EpsInputs<T, IdxT>> {
protected:
EpsNeighTest()
: data(0, resource::get_cuda_stream(handle)),
adj(0, resource::get_cuda_stream(handle)),
labels(0, resource::get_cuda_stream(handle)),
vd(0, resource::get_cuda_stream(handle))
{
}
void SetUp() override
{
auto stream = resource::get_cuda_stream(handle);
param = ::testing::TestWithParam<EpsInputs<T, IdxT>>::GetParam();
data.resize(param.n_row * param.n_col, stream);
labels.resize(param.n_row, stream);
batchSize = param.n_row / param.n_batches;
adj.resize(param.n_row * batchSize, stream);
vd.resize(batchSize + 1, stream);
RAFT_CUDA_TRY(cudaMemsetAsync(vd.data(), 0, vd.size() * sizeof(IdxT), stream));
random::make_blobs<T, IdxT>(data.data(),
labels.data(),
param.n_row,
param.n_col,
param.n_centers,
stream,
true,
nullptr,
nullptr,
T(0.01),
false);
}
const raft::resources handle;
EpsInputs<T, IdxT> param;
cudaStream_t stream = 0;
rmm::device_uvector<T> data;
rmm::device_uvector<bool> adj;
rmm::device_uvector<IdxT> labels, vd;
IdxT batchSize;
}; // class EpsNeighTest
const std::vector<EpsInputs<float, int>> inputsfi = {
{15000, 16, 5, 1, 2.f},
{14000, 16, 5, 1, 2.f},
{15000, 17, 5, 1, 2.f},
{14000, 17, 5, 1, 2.f},
{15000, 18, 5, 1, 2.f},
{14000, 18, 5, 1, 2.f},
{15000, 32, 5, 1, 2.f},
{14000, 32, 5, 1, 2.f},
{20000, 10000, 10, 1, 2.f},
{20000, 10000, 10, 2, 2.f},
};
typedef EpsNeighTest<float, int> EpsNeighTestFI;
TEST_P(EpsNeighTestFI, Result)
{
for (int i = 0; i < param.n_batches; ++i) {
RAFT_CUDA_TRY(cudaMemsetAsync(adj.data(), 0, sizeof(bool) * param.n_row * batchSize, stream));
RAFT_CUDA_TRY(cudaMemsetAsync(vd.data(), 0, sizeof(int) * (batchSize + 1), stream));
auto adj_view = make_device_matrix_view<bool, int>(adj.data(), param.n_row, batchSize);
auto vd_view = make_device_vector_view<int, int>(vd.data(), batchSize + 1);
auto x_view = make_device_matrix_view<float, int>(data.data(), param.n_row, param.n_col);
auto y_view = make_device_matrix_view<float, int>(
data.data() + (i * batchSize * param.n_col), batchSize, param.n_col);
eps_neighbors_l2sq<float, int, int>(
handle, x_view, y_view, adj_view, vd_view, param.eps * param.eps);
ASSERT_TRUE(raft::devArrMatch(
param.n_row / param.n_centers, vd.data(), batchSize, raft::Compare<int>(), stream));
}
}
INSTANTIATE_TEST_CASE_P(EpsNeighTests, EpsNeighTestFI, ::testing::ValuesIn(inputsfi));
}; // namespace knn
}; // namespace spatial
}; // namespace raft
|
797eb0aec1741d18f87964e892a727284749987c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/maxouting.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void KernelMaxOut(const int nthreads, const T* input_data,
const int channels, const int input_height,
const int input_width, int groups,
T* output_data) {
const int size = input_height * input_width * channels / groups;
const int feat_len = input_height * input_width;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
int batch_idx = i / size;
int batch_offset = i % size;
int channel_idx = batch_offset / feat_len;
int feat_idx = batch_offset % feat_len;
int data_idx =
(batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
T ele = static_cast<T>(-FLT_MAX);
for (int g = 0; g < groups; ++g) {
T x = input_data[data_idx + g * feat_len];
ele = ele > x ? ele : x;
}
output_data[i] = ele;
}
}
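// Maxout (Goodfellow et al., 2013): each output channel is the element-wise
// maximum over `groups` consecutive input channels; the backward kernel below
// therefore routes the gradient only to the input element that attained the
// maximum.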
template <typename T>
__global__ void KernelMaxoutGrad(const int nthreads, const T* input_data,
const T* output_data, const T* output_grad,
T* input_grad, const int channels,
const int input_height, const int input_width,
int groups) {
const int size = input_height * input_width * channels / groups;
const int feat_len = input_height * input_width;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
int batch_idx = i / size;
int batch_offset = i % size;
int channel_idx = batch_offset / feat_len;
int feat_idx = batch_offset % feat_len;
int data_idx =
(batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
int max_index = -1;
bool continue_match = true;
for (int g = 0; g < groups && continue_match; ++g) {
if (input_data[data_idx + g * feat_len] == output_data[i]) {
max_index = data_idx + g * feat_len;
continue_match = false;
break;
}
}
if (max_index != -1) {
      input_grad[max_index] += output_grad[i];  // use the grid-stride index i, not the initial thread index
}
}
}
/*
* All tensors are in NCHW format.
*/
template <typename T>
class MaxOutFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor* output,
int groups) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = output->numel();
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxOut<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(), nthreads, input_data, input_channels,
input_height, input_width, groups, output_data);
}
};
/*
* All tensors are in NCHW format.
*/
template <typename T>
class MaxOutGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor* input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, int groups) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = output.numel();
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
hipLaunchKernelGGL(( KernelMaxoutGrad<
T>), dim3(grid), dim3(threads), 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream(), nthreads, input_data, output_data,
output_grad_data, input_grad_data, input_channels,
input_height, input_width, groups);
}
};
template class MaxOutGradFunctor<platform::GPUPlace, float>;
template class MaxOutGradFunctor<platform::GPUPlace, double>;
template class MaxOutFunctor<platform::GPUPlace, float>;
template class MaxOutFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 797eb0aec1741d18f87964e892a727284749987c.cu | /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/maxouting.h"
#include "paddle/platform/cuda_helper.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
__global__ void KernelMaxOut(const int nthreads, const T* input_data,
const int channels, const int input_height,
const int input_width, int groups,
T* output_data) {
const int size = input_height * input_width * channels / groups;
const int feat_len = input_height * input_width;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
int batch_idx = i / size;
int batch_offset = i % size;
int channel_idx = batch_offset / feat_len;
int feat_idx = batch_offset % feat_len;
int data_idx =
(batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
T ele = static_cast<T>(-FLT_MAX);
for (int g = 0; g < groups; ++g) {
T x = input_data[data_idx + g * feat_len];
ele = ele > x ? ele : x;
}
output_data[i] = ele;
}
}
template <typename T>
__global__ void KernelMaxoutGrad(const int nthreads, const T* input_data,
const T* output_data, const T* output_grad,
T* input_grad, const int channels,
const int input_height, const int input_width,
int groups) {
const int size = input_height * input_width * channels / groups;
const int feat_len = input_height * input_width;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
for (int i = index; i < nthreads; i += offset) {
int batch_idx = i / size;
int batch_offset = i % size;
int channel_idx = batch_offset / feat_len;
int feat_idx = batch_offset % feat_len;
int data_idx =
(batch_idx * size + channel_idx * feat_len) * groups + feat_idx;
int max_index = -1;
bool continue_match = true;
for (int g = 0; g < groups && continue_match; ++g) {
if (input_data[data_idx + g * feat_len] == output_data[i]) {
max_index = data_idx + g * feat_len;
continue_match = false;
break;
}
}
if (max_index != -1) {
      input_grad[max_index] += output_grad[i];  // use the grid-stride index i, not the initial thread index
}
}
}
/*
* All tensors are in NCHW format.
*/
template <typename T>
class MaxOutFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor* output,
int groups) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output->dims()[1];
const int output_height = output->dims()[2];
const int output_width = output->dims()[3];
const T* input_data = input.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
int nthreads = output->numel();
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxOut<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(nthreads, input_data, input_channels,
input_height, input_width, groups, output_data);
}
};
/*
* All tensors are in NCHW format.
*/
template <typename T>
class MaxOutGradFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& input, framework::Tensor* input_grad,
const framework::Tensor& output,
const framework::Tensor& output_grad, int groups) {
const int batch_size = input.dims()[0];
const int input_channels = input.dims()[1];
const int input_height = input.dims()[2];
const int input_width = input.dims()[3];
const int output_channels = output.dims()[1];
const int output_height = output.dims()[2];
const int output_width = output.dims()[3];
const T* input_data = input.data<T>();
const T* output_data = output.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
int nthreads = output.numel();
int blocks = (nthreads + 1024 - 1) / 1024;
dim3 threads(1024, 1);
dim3 grid(blocks, 1);
KernelMaxoutGrad<
T><<<grid, threads, 0,
reinterpret_cast<const platform::CUDADeviceContext&>(context)
.stream()>>>(nthreads, input_data, output_data,
output_grad_data, input_grad_data, input_channels,
input_height, input_width, groups);
}
};
template class MaxOutGradFunctor<platform::GPUPlace, float>;
template class MaxOutGradFunctor<platform::GPUPlace, double>;
template class MaxOutFunctor<platform::GPUPlace, float>;
template class MaxOutFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
77b2ed885ec383c639a71028f8d02baa9dc2cbc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Header.h"
#define BLOCKSIZE 512
void __global__ kernel_1(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int stride = blockDim.x;
for (int j = stride; j > 0; j /= 2)
{
if (tx + j >= N)
{
d_A[bx*blockDim.x + tx] = d_A[bx*blockDim.x + tx];
}
else{
d_A[bx*blockDim.x + tx] += d_A[bx*blockDim.x + tx + j];
}
__syncthreads();
}
d_B[bx] = d_A[bx*blockDim.x];
}
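// NOTE: kernel_1 indexes each row as bx * blockDim.x, so it is only correct
// when N == blockDim.x; the first iteration (j == blockDim.x) is turned into
// a no-op by the tx + j >= N guard. It is exercised only through the
// commented-out kernelLaunch_1 path.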
void __global__ kernel_2_1_global(DATA_TYPE *d_A, DATA_TYPE *d_temp, unsigned int M, unsigned int N, unsigned int blockofRow)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * blockDim.x + tx; //col
int y = by * blockDim.y + ty; //row
int stride = blockDim.x/2;
for (int j = stride; j > 0; j /= 2)
{
if( tx < j && x + j < N)
{
//printf("(x,y)=(%d,%d), j=%d, y*N + x + j=%d , (tx,ty)=(%d,%d), (bx,by)=(%d,%d), \n", x, y,j, (y*N + x + j), tx, ty, bx, by);
d_A[y*N + x] += d_A[y*N + x + j];
}
__syncthreads();
}
if(tx==0 && ty==0)
d_temp[by*blockofRow + bx] = d_A[y*N + x];
//if (bx == 156 && by == 190 && tx == 0)
// printf("d_temp[by*blockofRow + bx]=%f\n", d_temp[by*blockofRow + bx]);
}
void __global__ kernel_2_2(DATA_TYPE *d_temp, DATA_TYPE *d_B, unsigned int M, unsigned int blockofRow)
{
int tx = threadIdx.x;
if(tx<M)
{
double temp = 0.0;
for (int i = 0; i < blockofRow; i++)
{
temp += d_temp[tx*blockofRow + i];
}
d_B[tx] = temp;
}
}
void __global__ kernel_3(DATA_TYPE *d_A, DATA_TYPE *d_temp, unsigned int M, unsigned int N, unsigned int S_NUM)
{
int tx = threadIdx.x;
int ty = threadIdx.y; // ty=0
int bx = blockIdx.x;
int by = blockIdx.y;
int x = tx + bx * blockDim.x;
//__shared__ float d_s[TILE_WIDTH];
extern __shared__ DATA_TYPE d_s[];
int n = by * N + bx * blockDim.x + tx;
d_s[tx] = x < N ? d_A[n] : 0.0;
__syncthreads();
int stride = blockDim.x / 2;
for (int j = stride; j > 0; j /= 2)
{
if (tx < j && tx + j < blockDim.x)
{
d_s[tx] += d_s[tx + j];
}
__syncthreads();
}
if (tx == 0)
{
d_temp[by*S_NUM + bx] = d_s[0];
}
}
__device__ void warpReduce( volatile DATA_TYPE *d_s, unsigned int tx)
{
d_s[tx] += d_s[tx + 32];
d_s[tx] += d_s[tx + 16];
d_s[tx] += d_s[tx + 8];
d_s[tx] += d_s[tx + 4];
d_s[tx] += d_s[tx + 2];
d_s[tx] += d_s[tx + 1];
}
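// Note on warpReduce above: it is the classic warp-synchronous reduction that
// relies on the threads of a warp running in lockstep, with `volatile`
// preventing the compiler from caching d_s in registers. On GPUs with
// independent thread scheduling (Volta and later) this pattern additionally
// requires __syncwarp() or warp shuffle intrinsics to be safe.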
__global__ void kernel_3_1(DATA_TYPE *d_A, DATA_TYPE *d_temp, unsigned int M, unsigned int N, unsigned int S_NUM)
{
int tx = threadIdx.x;
int ty = threadIdx.y; // ty=0
int bx = blockIdx.x;
int by = blockIdx.y;
int x = tx + bx * blockDim.x*2;
int y = by;
//__shared__ float d_s[BLOCKSIZE];
extern __shared__ DATA_TYPE d_s[];
  // guard each of the two loads separately so the tail of a row whose length
  // is not a multiple of 2 * blockDim.x is not dropped
  d_s[tx] = (x < N ? d_A[y * N + x] : 0.0) + (x + blockDim.x < N ? d_A[y * N + x + blockDim.x] : 0.0);
__syncthreads();
if (BLOCKSIZE >= 512)
{
if(tx<256) d_s[tx] += d_s[tx + 256];
__syncthreads();
}
if (BLOCKSIZE >= 256)
{
if (tx < 128) d_s[tx] += d_s[tx + 128];
__syncthreads();
}
if (BLOCKSIZE >= 128)
{
if (tx < 64) d_s[tx] += d_s[tx + 64];
__syncthreads();
}
if (tx < 32)
warpReduce(d_s, tx);
if (tx == 0)
d_temp[by*S_NUM + bx] = d_s[0];
}
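// kernel_3_1 unrolls the last reduction steps against the compile-time
// BLOCKSIZE macro while addressing with the runtime blockDim.x, so it is only
// correct when the kernel is launched with exactly BLOCKSIZE threads per
// block (as kernelLaunch_3 does).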
void kernelLaunch_1(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N);
void kernelLaunch_2(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N);
void kernelLaunch_3(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N);
//void kernelLaunch_4(float *d_A, float *d_B, int M, int N);
extern "C" void cudaReduce(DATA_TYPE *h_A, DATA_TYPE *h_B, unsigned int M, unsigned int N)
{
////printf("=======================kernelLaunch_1:reduce a row in one block(N<=2048)================\n");
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
CHECK(hipEventRecord(start, 0));
hipEventQuery(start);
unsigned int SIZEA = M * N * sizeof(DATA_TYPE);
unsigned int SIZEB = M * sizeof(DATA_TYPE);
DATA_TYPE *d_A;
DATA_TYPE *d_B;
checkCudaErrors(hipMalloc((void**)&d_A, SIZEA));
checkCudaErrors(hipMalloc((void**)&d_B, SIZEB));
checkCudaErrors(hipMemset(d_B, 0, SIZEB));
checkCudaErrors(hipMemcpy(d_A, h_A, SIZEA, hipMemcpyDefault));
////reduce a row in one block(N<=2048):
//kernelLaunch_1(d_A, d_B, M, N);
//printf("=======================kernelLaunch_2: reduce a row in multiple blocks(Global memory) and reduce again at CPU/GPU================\n");
//reduce a row in multiple blocks of Global memory and reduce again at CPU
//kernelLaunch_2(d_A, d_B, M, N);
//printf("=======================kernelLaunch_3: reduce in shared memory ================\n");
kernelLaunch_3(d_A, d_B, M, N);
checkCudaErrors(hipMemcpy(h_B, d_B, SIZEB, hipMemcpyDefault));
CHECK(hipDeviceSynchronize());
//time end
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(hipEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("GPU time=%f ms\n\n", elapsedTime_cuda);
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
}
void kernelLaunch_3(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
dim3 block(BLOCKSIZE);
dim3 grid((N + block.x - 1) / block.x, M);
//shared memory
unsigned int S_SIZE = block.x * sizeof(DATA_TYPE);
//temp
DATA_TYPE * d_temp;
unsigned int S_NUM = (N + block.x - 1) / block.x;
unsigned int SIZE_T = S_NUM * M * sizeof(DATA_TYPE);
CHECK(hipMalloc((void**)&d_temp, SIZE_T));
CHECK(hipMemset(d_temp, 0, SIZE_T));
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
CHECK(hipEventRecord(start, 0));
hipEventQuery(start);
//shared memory
//kernel_3 <<<grid,block, S_SIZE >>> (d_A, d_temp,M,N, S_NUM);
// Fully expand warp
kernel_3_1 << <grid, block, S_SIZE >> > (d_A, d_temp, M, N, S_NUM);
	CHECK(hipDeviceSynchronize()); // cudaThreadSynchronize() was deprecated in the original CUDA source; hipDeviceSynchronize() is its replacement here
//time end
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(hipEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("kernel time=%f ms\n\n", elapsedTime_cuda);
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
//second reduce on CPU
DATA_TYPE *h_temp = (DATA_TYPE*)malloc(SIZE_T);
checkCudaErrors(hipMemcpy(h_temp, d_temp, SIZE_T, hipMemcpyDefault));
DATA_TYPE *h_cpureduce= (DATA_TYPE*)malloc(M* sizeof(DATA_TYPE));
for (int i = 0; i < M; i++)
{
double temp = 0.0f;
for (int j = 0; j < S_NUM; j++)
{
temp +=h_temp[i*S_NUM +j];
}
h_cpureduce[i] = temp;
//printf("h_cpureduce[%d]:%f\n", i, h_cpureduce[i]);
}
}
void kernelLaunch_2(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
//first level reduce d_A to d_temp
DATA_TYPE *d_temp;
dim3 block(1024);
int blockofRow = (N + block.x - 1) / block.x;
dim3 grid(blockofRow,M);
CHECK(hipMalloc((void**)&d_temp, M*blockofRow * sizeof(DATA_TYPE)));
hipEvent_t start, stop;
CHECK(hipEventCreate(&start));
CHECK(hipEventCreate(&stop));
CHECK(hipEventRecord(start, 0));
hipEventQuery(start);
kernel_2_1_global << <grid, block >> > (d_A, d_temp, M, N, blockofRow); //use global memory to reduce
CHECK(hipDeviceSynchronize());
//time end
CHECK(hipEventRecord(stop, 0));
CHECK(hipEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(hipEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("kernel_2_1_global time=%f ms\n\n", elapsedTime_cuda);
CHECK(hipEventDestroy(start));
CHECK(hipEventDestroy(stop));
////second reduce d_temp to d_B, used for M<=1024
int block_size_tmp = 32*((M+31)/32);
int block_size = block_size_tmp > 1024 ? 1024 : block_size_tmp;
dim3 block_num(block_size);
kernel_2_2 << <1, block_num >> > (d_temp, d_B, M, blockofRow);
//CHECK(hipGetLastError());
//second reduce on CPU
//float *h_temp = (float*)malloc(M*blockofRow * sizeof(float));
//checkCudaErrors(hipMemcpy(h_temp, d_temp, M*blockofRow * sizeof(float), hipMemcpyDefault));
////printf("h_temp result: \n");
//for (int i = 0; i < M; i++)
//{
// for (int j = 0; j < blockofRow; j++)
// {
// printf("h_temp[%d]=%f\t", i*blockofRow +j, h_temp[i*blockofRow +j]);
// }
// printf("\n");
//}
//float *h_cpureduce= (float*)malloc(M* sizeof(float));
//for (int i = 0; i < M; i++)
//{
// float temp = 0.0f;
// for (int j = 0; j < blockofRow; j++)
// {
// temp +=h_temp[i*blockofRow+j];
// }
// h_cpureduce[i] = temp;
// //printf("h_cpureduce[%d]:%f\n", i, h_cpureduce[i]);
//}
CHECK(hipFree(d_temp));
}
void kernelLaunch_1(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
dim3 block(1024);
dim3 grid(M);
kernel_1 << <grid, block >> > (d_A, d_B, M, N);
CHECK(hipGetLastError());
}
| 77b2ed885ec383c639a71028f8d02baa9dc2cbc2.cu | #include "Header.h"
#define BLOCKSIZE 512
void __global__ kernel_1(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
int tx = threadIdx.x;
int bx = blockIdx.x;
int stride = blockDim.x;
for (int j = stride; j > 0; j /= 2)
{
if (tx + j >= N)
{
d_A[bx*blockDim.x + tx] = d_A[bx*blockDim.x + tx];
}
else{
d_A[bx*blockDim.x + tx] += d_A[bx*blockDim.x + tx + j];
}
__syncthreads();
}
d_B[bx] = d_A[bx*blockDim.x];
}
void __global__ kernel_2_1_global(DATA_TYPE *d_A, DATA_TYPE *d_temp, unsigned int M, unsigned int N, unsigned int blockofRow)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int x = bx * blockDim.x + tx; //col
int y = by * blockDim.y + ty; //row
int stride = blockDim.x/2;
for (int j = stride; j > 0; j /= 2)
{
if( tx < j && x + j < N)
{
//printf("(x,y)=(%d,%d), j=%d, y*N + x + j=%d , (tx,ty)=(%d,%d), (bx,by)=(%d,%d), \n", x, y,j, (y*N + x + j), tx, ty, bx, by);
d_A[y*N + x] += d_A[y*N + x + j];
}
__syncthreads();
}
if(tx==0 && ty==0)
d_temp[by*blockofRow + bx] = d_A[y*N + x];
//if (bx == 156 && by == 190 && tx == 0)
// printf("d_temp[by*blockofRow + bx]=%f\n", d_temp[by*blockofRow + bx]);
}
void __global__ kernel_2_2(DATA_TYPE *d_temp, DATA_TYPE *d_B, unsigned int M, unsigned int blockofRow)
{
int tx = threadIdx.x;
if(tx<M)
{
double temp = 0.0;
for (int i = 0; i < blockofRow; i++)
{
temp += d_temp[tx*blockofRow + i];
}
d_B[tx] = temp;
}
}
void __global__ kernel_3(DATA_TYPE *d_A, DATA_TYPE *d_temp, unsigned int M, unsigned int N, unsigned int S_NUM)
{
int tx = threadIdx.x;
int ty = threadIdx.y; // ty=0
int bx = blockIdx.x;
int by = blockIdx.y;
int x = tx + bx * blockDim.x;
//__shared__ float d_s[TILE_WIDTH];
extern __shared__ DATA_TYPE d_s[];
int n = by * N + bx * blockDim.x + tx;
d_s[tx] = x < N ? d_A[n] : 0.0;
__syncthreads();
int stride = blockDim.x / 2;
for (int j = stride; j > 0; j /= 2)
{
if (tx < j && tx + j < blockDim.x)
{
d_s[tx] += d_s[tx + j];
}
__syncthreads();
}
if (tx == 0)
{
d_temp[by*S_NUM + bx] = d_s[0];
}
}
__device__ void warpReduce( volatile DATA_TYPE *d_s, unsigned int tx)
{
d_s[tx] += d_s[tx + 32];
d_s[tx] += d_s[tx + 16];
d_s[tx] += d_s[tx + 8];
d_s[tx] += d_s[tx + 4];
d_s[tx] += d_s[tx + 2];
d_s[tx] += d_s[tx + 1];
}
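// Note on warpReduce above: it is the classic warp-synchronous reduction that
// relies on the threads of a warp running in lockstep, with `volatile`
// preventing the compiler from caching d_s in registers. On GPUs with
// independent thread scheduling (Volta and later) this pattern additionally
// requires __syncwarp() or warp shuffle intrinsics to be safe.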
__global__ void kernel_3_1(DATA_TYPE *d_A, DATA_TYPE *d_temp, unsigned int M, unsigned int N, unsigned int S_NUM)
{
int tx = threadIdx.x;
int ty = threadIdx.y; // ty=0
int bx = blockIdx.x;
int by = blockIdx.y;
int x = tx + bx * blockDim.x*2;
int y = by;
//__shared__ float d_s[BLOCKSIZE];
extern __shared__ DATA_TYPE d_s[];
  // guard each of the two loads separately so the tail of a row whose length
  // is not a multiple of 2 * blockDim.x is not dropped
  d_s[tx] = (x < N ? d_A[y * N + x] : 0.0) + (x + blockDim.x < N ? d_A[y * N + x + blockDim.x] : 0.0);
__syncthreads();
if (BLOCKSIZE >= 512)
{
if(tx<256) d_s[tx] += d_s[tx + 256];
__syncthreads();
}
if (BLOCKSIZE >= 256)
{
if (tx < 128) d_s[tx] += d_s[tx + 128];
__syncthreads();
}
if (BLOCKSIZE >= 128)
{
if (tx < 64) d_s[tx] += d_s[tx + 64];
__syncthreads();
}
if (tx < 32)
warpReduce(d_s, tx);
if (tx == 0)
d_temp[by*S_NUM + bx] = d_s[0];
}
void kernelLaunch_1(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N);
void kernelLaunch_2(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N);
void kernelLaunch_3(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N);
//void kernelLaunch_4(float *d_A, float *d_B, int M, int N);
extern "C" void cudaReduce(DATA_TYPE *h_A, DATA_TYPE *h_B, unsigned int M, unsigned int N)
{
////printf("=======================kernelLaunch_1:reduce a row in one block(N<=2048)================\n");
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
CHECK(cudaEventRecord(start, 0));
cudaEventQuery(start);
unsigned int SIZEA = M * N * sizeof(DATA_TYPE);
unsigned int SIZEB = M * sizeof(DATA_TYPE);
DATA_TYPE *d_A;
DATA_TYPE *d_B;
checkCudaErrors(cudaMalloc((void**)&d_A, SIZEA));
checkCudaErrors(cudaMalloc((void**)&d_B, SIZEB));
checkCudaErrors(cudaMemset(d_B, 0, SIZEB));
checkCudaErrors(cudaMemcpy(d_A, h_A, SIZEA, cudaMemcpyDefault));
////reduce a row in one block(N<=2048):
//kernelLaunch_1(d_A, d_B, M, N);
//printf("=======================kernelLaunch_2: reduce a row in multiple blocks(Global memory) and reduce again at CPU/GPU================\n");
//reduce a row in multiple blocks of Global memory and reduce again at CPU
//kernelLaunch_2(d_A, d_B, M, N);
//printf("=======================kernelLaunch_3: reduce in shared memory ================\n");
kernelLaunch_3(d_A, d_B, M, N);
checkCudaErrors(cudaMemcpy(h_B, d_B, SIZEB, cudaMemcpyDefault));
CHECK(cudaDeviceSynchronize());
//time end
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(cudaEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("GPU time=%f ms\n\n", elapsedTime_cuda);
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
}
void kernelLaunch_3(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
dim3 block(BLOCKSIZE);
dim3 grid((N + block.x - 1) / block.x, M);
//shared memory
unsigned int S_SIZE = block.x * sizeof(DATA_TYPE);
//temp
DATA_TYPE * d_temp;
unsigned int S_NUM = (N + block.x - 1) / block.x;
unsigned int SIZE_T = S_NUM * M * sizeof(DATA_TYPE);
CHECK(cudaMalloc((void**)&d_temp, SIZE_T));
CHECK(cudaMemset(d_temp, 0, SIZE_T));
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
CHECK(cudaEventRecord(start, 0));
cudaEventQuery(start);
//shared memory
//kernel_3 <<<grid,block, S_SIZE >>> (d_A, d_temp,M,N, S_NUM);
// Fully expand warp
kernel_3_1 << <grid, block, S_SIZE >> > (d_A, d_temp, M, N, S_NUM);
CHECK(cudaDeviceSynchronize()); //cudaThreadSynchronize() is deprecated
//time end
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(cudaEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("kernel time=%f ms\n\n", elapsedTime_cuda);
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
//second reduce on CPU
DATA_TYPE *h_temp = (DATA_TYPE*)malloc(SIZE_T);
checkCudaErrors(cudaMemcpy(h_temp, d_temp, SIZE_T, cudaMemcpyDefault));
DATA_TYPE *h_cpureduce= (DATA_TYPE*)malloc(M* sizeof(DATA_TYPE));
for (int i = 0; i < M; i++)
{
double temp = 0.0f;
for (int j = 0; j < S_NUM; j++)
{
temp +=h_temp[i*S_NUM +j];
}
h_cpureduce[i] = temp;
//printf("h_cpureduce[%d]:%f\n", i, h_cpureduce[i]);
}
}
void kernelLaunch_2(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
//first level reduce d_A to d_temp
DATA_TYPE *d_temp;
dim3 block(1024);
int blockofRow = (N + block.x - 1) / block.x;
dim3 grid(blockofRow,M);
CHECK(cudaMalloc((void**)&d_temp, M*blockofRow * sizeof(DATA_TYPE)));
cudaEvent_t start, stop;
CHECK(cudaEventCreate(&start));
CHECK(cudaEventCreate(&stop));
CHECK(cudaEventRecord(start, 0));
cudaEventQuery(start);
kernel_2_1_global << <grid, block >> > (d_A, d_temp, M, N, blockofRow); //use global memory to reduce
CHECK(cudaDeviceSynchronize());
//time end
CHECK(cudaEventRecord(stop, 0));
CHECK(cudaEventSynchronize(stop));
float elapsedTime_cuda;
CHECK(cudaEventElapsedTime(&elapsedTime_cuda, start, stop));
printf("kernel_2_1_global time=%f ms\n\n", elapsedTime_cuda);
CHECK(cudaEventDestroy(start));
CHECK(cudaEventDestroy(stop));
////second reduce d_temp to d_B, used for M<=1024
int block_size_tmp = 32*((M+31)/32);
int block_size = block_size_tmp > 1024 ? 1024 : block_size_tmp;
dim3 block_num(block_size);
kernel_2_2 << <1, block_num >> > (d_temp, d_B, M, blockofRow);
//CHECK(cudaGetLastError());
//second reduce on CPU
//float *h_temp = (float*)malloc(M*blockofRow * sizeof(float));
//checkCudaErrors(cudaMemcpy(h_temp, d_temp, M*blockofRow * sizeof(float), cudaMemcpyDefault));
////printf("h_temp result: \n");
//for (int i = 0; i < M; i++)
//{
// for (int j = 0; j < blockofRow; j++)
// {
// printf("h_temp[%d]=%f\t", i*blockofRow +j, h_temp[i*blockofRow +j]);
// }
// printf("\n");
//}
//float *h_cpureduce= (float*)malloc(M* sizeof(float));
//for (int i = 0; i < M; i++)
//{
// float temp = 0.0f;
// for (int j = 0; j < blockofRow; j++)
// {
// temp +=h_temp[i*blockofRow+j];
// }
// h_cpureduce[i] = temp;
// //printf("h_cpureduce[%d]:%f\n", i, h_cpureduce[i]);
//}
CHECK(cudaFree(d_temp));
}
void kernelLaunch_1(DATA_TYPE *d_A, DATA_TYPE *d_B, unsigned int M, unsigned int N)
{
dim3 block(1024);
dim3 grid(M);
kernel_1 << <grid, block >> > (d_A, d_B, M, N);
CHECK(cudaGetLastError());
}
|
b3c841e17a0d3a7eb16cd7bb9e0c22033f64fc7b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
        int d = x + k; // this is where the filter tap lands; we check below whether d falls outside the image bounds
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
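// Separability note: applying the same 1D filter along rows and then along
// columns is equivalent to convolving with the 2D outer-product filter, at a
// cost of O(2 * FILTER_LENGTH) operations per pixel instead of
// O(FILTER_LENGTH^2) for a direct 2D convolution.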
__global__ void convolutionRowGPU(double *h_Dst, double *h_Src, double *h_Filter, int imageW, int imageH, int filterR){
int k;
double sum = 0;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
for (k = -filterR; k <= filterR; k++) {
      int d = ix + k; // this is where the filter tap lands; we check below whether d falls outside the image bounds
if (d >= 0 && d < imageW) {
sum += h_Src[iy * imageW + d] * h_Filter[filterR - k];
}
h_Dst[iy * imageW + ix] = sum;
}
}
__global__ void convolutionColumnGPU(double *h_Dst, double *h_Src, double *h_Filter, int imageW, int imageH, int filterR){
int k;
double sum = 0;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
for (k = -filterR; k <= filterR; k++) {
int d = iy + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + ix] * h_Filter[filterR - k];
}
h_Dst[iy * imageW + ix] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
double
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*dev_Filter,
*dev_Input,
*dev_Buffer,
*dev_OutputGPU,
*h_apotelesmata;
clock_t start, startGPU, endGPU, end;
double cpu_time_used,gpu_time_used;
int imageW;
int imageH;
int block_Dim, grid_Dim;
unsigned int i;
FILE * fp;
FILE * fp1;
fp = fopen("Xronoi_Ekteleshs_CPU_double_steady_image_size.txt", "a");
fp1 = fopen("Xronoi_Ekteleshs_GPU_double_steady_image_size.txt", "a");
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
fprintf(fp,"%d\n", filter_radius);
fprintf(fp1,"%d\n", filter_radius);
  // The user supplies imageW and imageH and we assume they are equal,
  // i.e. imageW = imageH = N, where N is given by the user.
  // For simplicity we assume square images.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
//scanf("%d", &imageW);
imageH = imageW = 8192;
if (imageW > 32){
block_Dim = 32;
grid_Dim = imageW / 32;
}
else{
block_Dim = imageW;
grid_Dim = 1;
}
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
  // It would be a good idea to also check the result of each malloc...
h_Filter = (double *)malloc(FILTER_LENGTH * sizeof(double));
h_Input = (double *)malloc(imageW * imageH * sizeof(double));
h_Buffer = (double *)malloc(imageW * imageH * sizeof(double));
h_OutputCPU = (double *)malloc(imageW * imageH * sizeof(double));
  h_apotelesmata = (double *)malloc(imageW * imageH * sizeof(double)); // for returning the result from the GPU
if ((h_Filter == 0 )||(h_Input == 0) || (h_Buffer == 0) || (h_OutputCPU == 0)){
printf("Failure in memory allocation\n");
exit (0);
}
//Device allocation
int size = imageW * imageH * sizeof(double);
hipMalloc((void**)&dev_Filter, FILTER_LENGTH * sizeof(double));
hipMalloc((void**)&dev_Input, size);
hipMalloc((void**)&dev_Buffer, size);
hipMalloc((void**)&dev_OutputGPU, size);
if ((dev_Filter == 0 )||(dev_Input == 0) || (dev_Buffer == 0) || (dev_OutputGPU == 0)){
printf("Failure in Gpu memory allocation\n");
exit (0);
}
  // zero-initialize just in case
hipMemset(dev_Filter, 0 , FILTER_LENGTH * sizeof(double));
hipMemset(dev_Input, 0, size);
hipMemset(dev_Buffer, 0, size);
hipMemset(dev_OutputGPU, 0, size);
  // 'h_Filter' is the filter with which the convolution is performed and is
  // initialized randomly. 'h_Input' is the image on which the convolution is
  // performed, also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (double)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (double)rand() / ((double)RAND_MAX / 255) + (double)rand() / (double)RAND_MAX;
}
  // The part below runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
start = clock();
  convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
  convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
  //for (i = 0; i < imageW * imageH; i++) { // printing disabled: it takes forever to show every value
// printf("%f\n",h_OutputCPU[i]);
//}
  //memcpy once the arrays (images) have been filled
startGPU = clock();
hipMemcpy(dev_Filter, h_Filter, FILTER_LENGTH * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_Input, h_Input, size, hipMemcpyHostToDevice);
dim3 dimGrid(grid_Dim, grid_Dim);
dim3 dimBlock(block_Dim, block_Dim);
  // Compare the GPU and CPU results; if even a single value exceeds the
  // accuracy we have defined, we have an error and may terminate the program
printf("GPU computation...\n");
hipLaunchKernelGGL(( convolutionRowGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_Buffer, dev_Input, dev_Filter, imageW, imageH, filter_radius);
  // check for errors (kept in to see why the second error fires)
hipError_t error = hipGetLastError();
if(error != hipSuccess){
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error));
}
hipLaunchKernelGGL(( convolutionColumnGPU), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_OutputGPU, dev_Buffer, dev_Filter, imageW, imageH, filter_radius);
// check for error
hipError_t error1 = hipGetLastError();
if(error1 != hipSuccess){
// print the CUDA error message and exit
printf("CUDA error: %s\n", hipGetErrorString(error1));
}
hipMemcpy(h_apotelesmata, dev_OutputGPU , size, hipMemcpyDeviceToHost);
endGPU = clock();
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
//for (i = 0; i < imageW * imageH; i++) {
// printf("%f\n",h_apotelesmata[i]);
//}
double diafora = 0;
double Max_acc = -1;
for (i = 0; i < imageW * imageH; i++) {
diafora = h_OutputCPU[i] - h_apotelesmata[i];
diafora = ABS(diafora);
if (diafora > Max_acc){
Max_acc = diafora;
}
}
printf ("to grid einia %d kai to block einia %d\n", grid_Dim, block_Dim);
printf ("The Max_accuracy is %f\n", Max_acc);
//fprintf(fp,"the Max accuracy is %32f\n",Max_acc);
printf("CPU time used in seconds is %f\n", cpu_time_used);
fprintf(fp,"%g\n", cpu_time_used);
fprintf(fp1,"%g\n", gpu_time_used);
printf("GPU time used in seconds is %f\n", gpu_time_used);
if(Max_acc > accuracy){
printf("CPU and GPU results don't match and program is gonna terminate\n");
exit(0);
}
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
free(h_apotelesmata);
hipFree(dev_Filter);
hipFree(dev_Input);
hipFree(dev_Buffer);
hipFree(dev_OutputGPU);
  // Do a device reset just in case... Remove the comment once you implement the CUDA part
hipDeviceReset();
return 0;
}
| b3c841e17a0d3a7eb16cd7bb9e0c22033f64fc7b.cu | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
        int d = x + k; // this is where the filter tap lands; we check below whether d falls outside the image bounds
if (d >= 0 && d < imageW) {
sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH, int filterR) {
int x, y, k;
for (y = 0; y < imageH; y++) {
for (x = 0; x < imageW; x++) {
double sum = 0;
for (k = -filterR; k <= filterR; k++) {
int d = y + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
}
h_Dst[y * imageW + x] = sum;
}
}
}
}
__global__ void convolutionRowGPU(double *h_Dst, double *h_Src, double *h_Filter, int imageW, int imageH, int filterR){
int k;
double sum = 0;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
for (k = -filterR; k <= filterR; k++) {
      int d = ix + k; // this is where the filter tap lands; we check below whether d falls outside the image bounds
if (d >= 0 && d < imageW) {
sum += h_Src[iy * imageW + d] * h_Filter[filterR - k];
}
h_Dst[iy * imageW + ix] = sum;
}
}
__global__ void convolutionColumnGPU(double *h_Dst, double *h_Src, double *h_Filter, int imageW, int imageH, int filterR){
int k;
double sum = 0;
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
for (k = -filterR; k <= filterR; k++) {
int d = iy + k;
if (d >= 0 && d < imageH) {
sum += h_Src[d * imageW + ix] * h_Filter[filterR - k];
}
h_Dst[iy * imageW + ix] = sum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
double
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*dev_Filter,
*dev_Input,
*dev_Buffer,
*dev_OutputGPU,
*h_apotelesmata;
clock_t start, startGPU, endGPU, end;
double cpu_time_used,gpu_time_used;
int imageW;
int imageH;
int block_Dim, grid_Dim;
unsigned int i;
FILE * fp;
FILE * fp1;
fp = fopen("Xronoi_Ekteleshs_CPU_double_steady_image_size.txt", "a");
fp1 = fopen("Xronoi_Ekteleshs_GPU_double_steady_image_size.txt", "a");
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
fprintf(fp,"%d\n", filter_radius);
fprintf(fp1,"%d\n", filter_radius);
  // The user supplies imageW and imageH and we assume they are equal,
  // i.e. imageW = imageH = N, where N is given by the user.
  // For simplicity we assume square images.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
//scanf("%d", &imageW);
imageH = imageW = 8192;
if (imageW > 32){
block_Dim = 32;
grid_Dim = imageW / 32;
}
else{
block_Dim = imageW;
grid_Dim = 1;
}
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
  // It would be a good idea to also check the result of each malloc...
h_Filter = (double *)malloc(FILTER_LENGTH * sizeof(double));
h_Input = (double *)malloc(imageW * imageH * sizeof(double));
h_Buffer = (double *)malloc(imageW * imageH * sizeof(double));
h_OutputCPU = (double *)malloc(imageW * imageH * sizeof(double));
  h_apotelesmata = (double *)malloc(imageW * imageH * sizeof(double)); // for returning the result from the GPU
if ((h_Filter == 0 )||(h_Input == 0) || (h_Buffer == 0) || (h_OutputCPU == 0)){
printf("Failure in memory allocation\n");
exit (0);
}
//Device allocation
int size = imageW * imageH * sizeof(double);
cudaMalloc((void**)&dev_Filter, FILTER_LENGTH * sizeof(double));
cudaMalloc((void**)&dev_Input, size);
cudaMalloc((void**)&dev_Buffer, size);
cudaMalloc((void**)&dev_OutputGPU, size);
if ((dev_Filter == 0 )||(dev_Input == 0) || (dev_Buffer == 0) || (dev_OutputGPU == 0)){
printf("Failure in Gpu memory allocation\n");
exit (0);
}
  // zero-initialize just in case
cudaMemset(dev_Filter, 0 , FILTER_LENGTH * sizeof(double));
cudaMemset(dev_Input, 0, size);
cudaMemset(dev_Buffer, 0, size);
cudaMemset(dev_OutputGPU, 0, size);
  // 'h_Filter' is the filter with which the convolution is performed and is
  // initialized randomly. 'h_Input' is the image on which the convolution is
  // performed, also initialized randomly.
srand(200);
for (i = 0; i < FILTER_LENGTH; i++) {
h_Filter[i] = (double)(rand() % 16);
}
for (i = 0; i < imageW * imageH; i++) {
h_Input[i] = (double)rand() / ((double)RAND_MAX / 255) + (double)rand() / (double)RAND_MAX;
}
  // The part below runs on the CPU; the GPU results must be compared against it.
printf("CPU computation...\n");
start = clock();
  convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // row-wise convolution
  convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // column-wise convolution
end = clock();
cpu_time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
  //for (i = 0; i < imageW * imageH; i++) { // printing disabled: it takes forever to show every value
// printf("%f\n",h_OutputCPU[i]);
//}
  //memcpy once the arrays (images) have been filled
startGPU = clock();
cudaMemcpy(dev_Filter, h_Filter, FILTER_LENGTH * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Input, h_Input, size, cudaMemcpyHostToDevice);
dim3 dimGrid(grid_Dim, grid_Dim);
dim3 dimBlock(block_Dim, block_Dim);
  // Compare the GPU and CPU results; if even a single value exceeds the
  // accuracy we have defined, we have an error and may terminate the program
printf("GPU computation...\n");
convolutionRowGPU<<<dimGrid, dimBlock>>>(dev_Buffer, dev_Input, dev_Filter, imageW, imageH, filter_radius);
  // check for errors (kept in to see why the second error fires)
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess){
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error));
}
convolutionColumnGPU<<<dimGrid, dimBlock>>>(dev_OutputGPU, dev_Buffer, dev_Filter, imageW, imageH, filter_radius);
// check for error
cudaError_t error1 = cudaGetLastError();
if(error1 != cudaSuccess){
// print the CUDA error message and exit
printf("CUDA error: %s\n", cudaGetErrorString(error1));
}
cudaMemcpy(h_apotelesmata, dev_OutputGPU , size, cudaMemcpyDeviceToHost);
endGPU = clock();
gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
//for (i = 0; i < imageW * imageH; i++) {
// printf("%f\n",h_apotelesmata[i]);
//}
double diafora = 0;
double Max_acc = -1;
for (i = 0; i < imageW * imageH; i++) {
diafora = h_OutputCPU[i] - h_apotelesmata[i];
diafora = ABS(diafora);
if (diafora > Max_acc){
Max_acc = diafora;
}
}
printf ("to grid einia %d kai to block einia %d\n", grid_Dim, block_Dim);
printf ("The Max_accuracy is %f\n", Max_acc);
//fprintf(fp,"the Max accuracy is %32f\n",Max_acc);
printf("CPU time used in seconds is %f\n", cpu_time_used);
fprintf(fp,"%g\n", cpu_time_used);
fprintf(fp1,"%g\n", gpu_time_used);
printf("GPU time used in seconds is %f\n", gpu_time_used);
if(Max_acc > accuracy){
printf("CPU and GPU results don't match and program is gonna terminate\n");
exit(0);
}
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
free(h_apotelesmata);
cudaFree(dev_Filter);
cudaFree(dev_Input);
cudaFree(dev_Buffer);
cudaFree(dev_OutputGPU);
  // Do a device reset just in case... Remove the comment once you implement the CUDA part
cudaDeviceReset();
return 0;
}
|
f991a0e5aea2bdb24a8b436fbe625a7e7e3cab50.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void foo(int *inp, int *out)
{
if(inp[0] == 1) {
if(inp[2] == 2) {
out[3] = 3;
}
else {
__syncthreads();
}
}
else {
if(inp[3] == 4) {
out[0] = 4;
__syncthreads();
}
else {
if(inp[4] == 5) {
out[5] = 5;
}
else if(inp[5] == 5) {
out[4] = inp[5] + 4;
if(inp[14] == 66) {
out[44] = 5;
}
else {
out[32] = 5;
}
__syncthreads();
}
else {
__syncthreads();
out[4] = 5;
}
}
}
}
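// This kernel appears to be a barrier-divergence test case (e.g. for a GPU
// verifier such as GPUVerify): __syncthreads() is reached under
// data-dependent branches, which is undefined behavior in CUDA/HIP unless all
// threads of a block follow the same path to the barrier.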
| f991a0e5aea2bdb24a8b436fbe625a7e7e3cab50.cu | __global__ void foo(int *inp, int *out)
{
if(inp[0] == 1) {
if(inp[2] == 2) {
out[3] = 3;
}
else {
__syncthreads();
}
}
else {
if(inp[3] == 4) {
out[0] = 4;
__syncthreads();
}
else {
if(inp[4] == 5) {
out[5] = 5;
}
else if(inp[5] == 5) {
out[4] = inp[5] + 4;
if(inp[14] == 66) {
out[44] = 5;
}
else {
out[32] = 5;
}
__syncthreads();
}
else {
__syncthreads();
out[4] = 5;
}
}
}
}
|
fe9dc4be785ce3c69148af5a3aa387e689fb8458.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <fstream>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
const int FILTER_WIDTH = 7;
const int BLOCK_SIZE = 256;
int FILTER[FILTER_WIDTH*FILTER_WIDTH] = {
1,4,7,10,7,4,1,
4,12,26,33,26,12,4,
7,26,55,71,55,26,7,
10,33,71,91,71,33,10,
7,26,55,71,55,26,7,
4,12,26,33,26,12,4,
1,4,7,10,7,4,1
};
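// The 7x7 filter above is an integer approximation of a Gaussian smoothing
// kernel (symmetric, peaking at 91 in the center). The convolution below
// divides by the accumulated sum of the weights, so no explicit normalization
// constant is needed here.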
// Display the first and last 10 items
void displayResult(const int original[], const int result[], int size) {
cout << "Display result: ";
cout << "(original -> result)\n";
for (int i = 0; i < 10; i++) {
cout << original[i] << " -> " << result[i] << "\n";
}
cout << ".\n.\n.\n";
for (int i = size - 10; i < size; i++) {
cout << original[i] << " -> " << result[i] << "\n";
}
}
void initData(string file, int **data, int *sizeX, int *sizeY) {
int x;
int y;
long long i = 0;
cout << "Reading "<< file << "... \n";
ifstream myfile(file);
if (myfile.is_open()) {
myfile >> x;
myfile >> y;
int *temp = new int[x * y];
for( i=0; i < x * y; i++){
myfile >> temp[(int)i];
}
myfile.close();
*data = temp;
*sizeX = x;
*sizeY = y;
}
else {
cout << "ERROR: File " << file << " not found!\n";
exit(0);
}
cout << i << " entries imported\n";
}
void saveResult(string file, int data[], int sizeX, int sizeY) {
long long i = 0;
cout << "Saving data to "<< file <<"... \n";
ofstream myfile(file, std::ofstream::out);
if (myfile.is_open()) {
myfile << sizeX << "\n";
myfile << sizeY << "\n";
for (i = 0; i < sizeX * sizeY; i++){
myfile << data[i] << "\n";
}
myfile.close();
}
else {
cout << "ERROR: Cannot save to " << file << "!\n";
exit(0);
}
cout << i << " entries saved\n";
}
// Kernel function for 2D smoothing in GPU
__global__
void calculateResult(int sizeX, int sizeY, int *data, int *result, int *filter){
int halfFilterWidth = FILTER_WIDTH/2;
//int indexX = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//int indexY = blockIdx.y * blockDim.y + threadIdx.y;
int strideY = blockDim.y * gridDim.y;
// start from last column in image
for(int x = blockIdx.x * blockDim.x + threadIdx.x; x < sizeX ; x += stride){
// start from last row in image
for(int y = blockIdx.y * blockDim.y + threadIdx.y; y < sizeY; y += strideY){
// store numerator and denominator for convolution calculation
int numerator = 0;
int denominator = 0;
// traverse the filter in the x-direction
for(int filterX = FILTER_WIDTH - 1; filterX >= 0; filterX--){
// traverse the filter in the y-direction
for(int filterY = FILTER_WIDTH -1; filterY >= 0; filterY--){
int xPos = x + filterX -halfFilterWidth;
int yPos = y + filterY - halfFilterWidth;
// adjust xPos to accommodate edges in grid
if(xPos < 0){
xPos = 0;
}
else if(xPos < sizeX){
}
else{
xPos = sizeX - 1;
}
// adjust yPos to accommodate edges in grid
if(yPos < 0){
yPos = 0;
}
else if(yPos < sizeY){
}
else{
yPos = sizeY - 1;
}
// adjust numerator and denominator
numerator += data[yPos * sizeX + xPos] * filter[filterY * FILTER_WIDTH + filterX];
denominator += filter[filterY * FILTER_WIDTH + filterX];
}
}
// store result
result[y * sizeX + x] = numerator/denominator;
}
}
}
// GPU implementation
void GPU_Test(int data[], int result[], int sizeX, int sizeY) {
// input:
// int data[] - int array holding the flattened original image
// int sizeX - the width of the image
// int sizeY - the height of the image
// output:
// int result[] - int array holding the smoothed image
int size = sizeX * sizeY;
// Allocate device memory for result[], data[] and FILTER[] and copy data onto the device
int *r, *d, *f;
hipMalloc((void**)&r, size*sizeof(int));
hipMalloc((void**)&d, size*sizeof(int));
hipMalloc((void**)&f, FILTER_WIDTH*FILTER_WIDTH*sizeof(int));
hipMemcpy(r, result, size*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d, data, size*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(f, FILTER, FILTER_WIDTH*FILTER_WIDTH*sizeof(int), hipMemcpyHostToDevice);
// Start timer for kernel
auto startKernel = chrono::steady_clock::now();
int numBlocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
// Call the kernel function
hipLaunchKernelGGL(( calculateResult), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, sizeX, sizeY, d, r,f);
// End timer for kernel and display kernel time
hipDeviceSynchronize(); // <- DO NOT REMOVE
auto endKernel = chrono::steady_clock::now();
cout << "Kernel Elapsed time: " << chrono::duration <double, milli>(endKernel - startKernel).count() << "ms\n";
// Copy result from device to host
hipMemcpy(result, r, size*sizeof(int), hipMemcpyDeviceToHost);
// Free device memory
hipFree(d);
hipFree(r);
hipFree(f);
}
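/*
 * Launch-shape note: calculateResult() is written as a 2D grid-stride loop,
 * but the launch above is 1D, so blockDim.y == gridDim.y == 1 and each
 * thread walks every row itself. That is correct, just y-serial; a 2D
 * configuration covers both axes directly. Sketch only -- the block shape
 * is our choice, not taken from the original code:
 *
 *   dim3 block2d(16, 16);
 *   dim3 grid2d((sizeX + 15) / 16, (sizeY + 15) / 16);
 *   hipLaunchKernelGGL((calculateResult), grid2d, block2d, 0, 0, sizeX, sizeY, d, r, f);
 */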
// CPU implementation
void CPU_Test(int data[], int result[], int sizeX, int sizeY) {
// input:
// int data[] - int array holding the flattened original image
// int sizeX - the width of the image
// int sizeY - the height of the image
// output:
// int result[] - int array holding the smoothed image
// Smooth the image with filter size = FILTER_WIDTH
// apply partial filter for the border
int halfFilterWidth = FILTER_WIDTH/2;
// start from last column in image
for(int x = sizeX -1; x >= 0; x--){
// start from last row in image
for(int y = sizeY - 1; y >= 0; y--){
// store numerator and denominator for convolution calculation
int numerator = 0;
int denominator = 0;
// traverse the filter in the x-direction
for(int filterX = FILTER_WIDTH - 1; filterX >= 0; filterX--){
// traverse the filter in the y-direction
for(int filterY = FILTER_WIDTH -1; filterY >= 0; filterY--){
int xPos = x + filterX -halfFilterWidth;
int yPos = y + filterY - halfFilterWidth;
// adjust xPos to accommodate edges in grid
if(xPos < 0){
xPos = 0;
}
else if(xPos < sizeX){
}
else{
xPos = sizeX - 1;
}
// adjust yPos to accommodate edges in grid
if(yPos < 0){
yPos = 0;
}
else if(yPos < sizeY){
}
else{
yPos = sizeY - 1;
}
// adjust numerator and denominator
numerator += data[yPos * sizeX + xPos] * FILTER[filterY * FILTER_WIDTH + filterX];
denominator += FILTER[filterY * FILTER_WIDTH + filterX];
}
}
// store result
result[y * sizeX + x] = numerator/denominator;
}
}
}
// The input is a 2D grayscale image
// The image is flattened into a text file of pixel values.
int main(int argc, char *argv[]) {
string inputFile = (argc == 1) ? "image2D.txt" : argv[1];
int sizeX;
int sizeY;
int *dataForCPUTest;
int *dataForGPUTest;
initData(inputFile, &dataForCPUTest, &sizeX, &sizeY);
initData(inputFile, &dataForGPUTest, &sizeX, &sizeY);
int size = sizeX * sizeY;
int *resultForCPUTest = new int[size];
int *resultForGPUTest = new int[size];
cout << "\n";
cout << "CPU Implementation\n";
auto startCPU = chrono::steady_clock::now();
CPU_Test(dataForCPUTest, resultForCPUTest, sizeX, sizeY);
auto endCPU = chrono::steady_clock::now();
cout << "Elapsed time: " << chrono::duration <double, milli>(endCPU - startCPU).count() << "ms\n";
displayResult(dataForCPUTest, resultForCPUTest, size);
saveResult("2D_result_CPU.txt",resultForCPUTest, sizeX, sizeY);
cout << "\n";
cout << "GPU Implementation\n";
auto startGPU = chrono::steady_clock::now();
GPU_Test(dataForGPUTest, resultForGPUTest, sizeX, sizeY);
auto endGPU = chrono::steady_clock::now();
cout << "Elapsed time: " << chrono::duration <double, milli>(endGPU - startGPU).count() << "ms\n";
displayResult(dataForGPUTest, resultForGPUTest, size);
saveResult("2D_result_GPU.txt",resultForGPUTest, sizeX, sizeY);
return 0;
}
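/*
 * For reference, initData() expects a plain-text file: width, height, then
 * width*height pixel values, one per line. The helper below is a minimal
 * sketch of a generator for such a file; it is not part of the original
 * program and the makeTestImage name is ours. It relies on the <fstream>
 * and <string> headers already included above.
 */
static void makeTestImage(const string &file, int sizeX, int sizeY) {
ofstream myfile(file);
myfile << sizeX << "\n" << sizeY << "\n";
for (int i = 0; i < sizeX * sizeY; i++) {
myfile << (i % 256) << "\n"; // simple repeating gradient
}
}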
| fe9dc4be785ce3c69148af5a3aa387e689fb8458.cu | #include <iostream>
#include <string>
#include <fstream>
#include <chrono>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
const int FILTER_WIDTH = 7;
const int BLOCK_SIZE = 256;
int FILTER[FILTER_WIDTH*FILTER_WIDTH] = {
1,4,7,10,7,4,1,
4,12,26,33,26,12,4,
7,26,55,71,55,26,7,
10,33,71,91,71,33,10,
7,26,55,71,55,26,7,
4,12,26,33,26,12,4,
1,4,7,10,7,4,1
};
// Display the first and last 10 items
void displayResult(const int original[], const int result[], int size) {
cout << "Display result: ";
cout << "(original -> result)\n";
for (int i = 0; i < 10; i++) {
cout << original[i] << " -> " << result[i] << "\n";
}
cout << ".\n.\n.\n";
for (int i = size - 10; i < size; i++) {
cout << original[i] << " -> " << result[i] << "\n";
}
}
void initData(string file, int **data, int *sizeX, int *sizeY) {
int x;
int y;
long long i = 0;
cout << "Reading "<< file << "... \n";
ifstream myfile(file);
if (myfile.is_open()) {
myfile >> x;
myfile >> y;
int *temp = new int[x * y];
for( i=0; i < x * y; i++){
myfile >> temp[(int)i];
}
myfile.close();
*data = temp;
*sizeX = x;
*sizeY = y;
}
else {
cout << "ERROR: File " << file << " not found!\n";
exit(0);
}
cout << i << " entries imported\n";
}
void saveResult(string file, int data[], int sizeX, int sizeY) {
long long i = 0;
cout << "Saving data to "<< file <<"... \n";
ofstream myfile(file, std::ofstream::out);
if (myfile.is_open()) {
myfile << sizeX << "\n";
myfile << sizeY << "\n";
for (i = 0; i < sizeX * sizeY; i++){
myfile << data[i] << "\n";
}
myfile.close();
}
else {
cout << "ERROR: Cannot save to " << file << "!\n";
exit(0);
}
cout << i << " entries saved\n";
}
// Kernel function for 2D smoothing in GPU
__global__
void calculateResult(int sizeX, int sizeY, int *data, int *result, int *filter){
int halfFilterWidth = FILTER_WIDTH/2;
//int indexX = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//int indexY = blockIdx.y * blockDim.y + threadIdx.y;
int strideY = blockDim.y * gridDim.y;
// start from last column in image
for(int x = blockIdx.x * blockDim.x + threadIdx.x; x < sizeX ; x += stride){
// start from last row in image
for(int y = blockIdx.y * blockDim.y + threadIdx.y; y < sizeY; y += strideY){
// store numerator and denominator for convolution calculation
int numerator = 0;
int denominator = 0;
// traverse the filter in the x-direction
for(int filterX = FILTER_WIDTH - 1; filterX >= 0; filterX--){
// traverse the filter in the y-direction
for(int filterY = FILTER_WIDTH -1; filterY >= 0; filterY--){
int xPos = x + filterX -halfFilterWidth;
int yPos = y + filterY - halfFilterWidth;
// adjust xPos to accommodate edges in grid
if(xPos < 0){
xPos = 0;
}
else if(xPos < sizeX){
}
else{
xPos = sizeX - 1;
}
// adjust yPos to accommodate edges in grid
if(yPos < 0){
yPos = 0;
}
else if(yPos < sizeY){
}
else{
yPos = sizeY - 1;
}
// adjust numerator and denominator
numerator += data[yPos * sizeX + xPos] * filter[filterY * FILTER_WIDTH + filterX];
denominator += filter[filterY * FILTER_WIDTH + filterX];
}
}
// store result
result[y * sizeX + x] = numerator/denominator;
}
}
}
// GPU implementation
void GPU_Test(int data[], int result[], int sizeX, int sizeY) {
// input:
// int data[] - int array holding the flattened original image
// int sizeX - the width of the image
// int sizeY - the height of the image
// output:
// int result[] - int array holding the smoothed image
int size = sizeX * sizeY;
// Allocate device memory for result[], data[] and FILTER[] and copy data onto the device
int *r, *d, *f;
cudaMalloc((void**)&r, size*sizeof(int));
cudaMalloc((void**)&d, size*sizeof(int));
cudaMalloc((void**)&f, FILTER_WIDTH*FILTER_WIDTH*sizeof(int));
cudaMemcpy(r, result, size*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d, data, size*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(f, FILTER, FILTER_WIDTH*FILTER_WIDTH*sizeof(int), cudaMemcpyHostToDevice);
// Start timer for kernel
auto startKernel = chrono::steady_clock::now();
int numBlocks = (size + BLOCK_SIZE - 1) / BLOCK_SIZE;
// Call the kernel function
calculateResult<<<numBlocks, BLOCK_SIZE>>>(sizeX, sizeY, d, r,f);
// End timer for kernel and display kernel time
cudaDeviceSynchronize(); // <- DO NOT REMOVE
auto endKernel = chrono::steady_clock::now();
cout << "Kernel Elapsed time: " << chrono::duration <double, milli>(endKernel - startKernel).count() << "ms\n";
// Copy result from device to host
cudaMemcpy(result, r, size*sizeof(int), cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d);
cudaFree(r);
cudaFree(f);
}
// CPU implementation
void CPU_Test(int data[], int result[], int sizeX, int sizeY) {
// input:
// int data[] - int array holding the flattened original image
// int sizeX - the width of the image
// int sizeY - the height of the image
// output:
// int result[] - int array holding the smoothed image
// Smooth the image with filter size = FILTER_WIDTH
// apply partial filter for the border
int halfFilterWidth = FILTER_WIDTH/2;
// start from last column in image
for(int x = sizeX -1; x >= 0; x--){
// start from last row in image
for(int y = sizeY - 1; y >= 0; y--){
// store numerator and denominator for convolution calculation
int numerator = 0;
int denominator = 0;
// traverse the filter in the x-direction
for(int filterX = FILTER_WIDTH - 1; filterX >= 0; filterX--){
// traverse the filter in the y-direction
for(int filterY = FILTER_WIDTH -1; filterY >= 0; filterY--){
int xPos = x + filterX -halfFilterWidth;
int yPos = y + filterY - halfFilterWidth;
// adjust xPos to accommodate edges in grid
if(xPos < 0){
xPos = 0;
}
else if(xPos < sizeX){
}
else{
xPos = sizeX - 1;
}
// adjust yPos to accommodate edges in grid
if(yPos < 0){
yPos = 0;
}
else if(yPos < sizeY){
}
else{
yPos = sizeY - 1;
}
// adjust numerator and denominator
numerator += data[yPos * sizeX + xPos] * FILTER[filterY * FILTER_WIDTH + filterX];
denominator += FILTER[filterY * FILTER_WIDTH + filterX];
}
}
// store result
result[y * sizeX + x] = numerator/denominator;
}
}
}
// The input is a 2D grayscale image
// The image is flattened into a text file of pixel values.
int main(int argc, char *argv[]) {
string inputFile = (argc == 1) ? "image2D.txt" : argv[1];
int sizeX;
int sizeY;
int *dataForCPUTest;
int *dataForGPUTest;
initData(inputFile, &dataForCPUTest, &sizeX, &sizeY);
initData(inputFile, &dataForGPUTest, &sizeX, &sizeY);
int size = sizeX * sizeY;
int *resultForCPUTest = new int[size];
int *resultForGPUTest = new int[size];
cout << "\n";
cout << "CPU Implementation\n";
auto startCPU = chrono::steady_clock::now();
CPU_Test(dataForCPUTest, resultForCPUTest, sizeX, sizeY);
auto endCPU = chrono::steady_clock::now();
cout << "Elapsed time: " << chrono::duration <double, milli>(endCPU - startCPU).count() << "ms\n";
displayResult(dataForCPUTest, resultForCPUTest, size);
saveResult("2D_result_CPU.txt",resultForCPUTest, sizeX, sizeY);
cout << "\n";
cout << "GPU Implementation\n";
auto startGPU = chrono::steady_clock::now();
GPU_Test(dataForGPUTest, resultForGPUTest, sizeX, sizeY);
auto endGPU = chrono::steady_clock::now();
cout << "Elapsed time: " << chrono::duration <double, milli>(endGPU - startGPU).count() << "ms\n";
displayResult(dataForGPUTest, resultForGPUTest, size);
saveResult("2D_result_GPU.txt",resultForGPUTest, sizeX, sizeY);
return 0;
}
|
e0850bd3a6c66506c5e2425a58f541109e4de6a4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* kernels.cu
*
* Created on: 12/12/2018
* Author: minterciso
*/
#include "kernels.h"
#include "ga.h"
__global__ void fitness(char s_dest[LEN_SIZE], individual *pop)
{
unsigned int pop_idx = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int str_idx = threadIdx.y + blockDim.y * blockIdx.y;
individual *ind = NULL;
if(pop_idx < POP_SIZE && str_idx < LEN_SIZE)
{
ind = &pop[pop_idx];
unsigned int l_fit = abs( (int)ind->s[str_idx] - (int)s_dest[str_idx]);
atomicAdd(&ind->fitness, l_fit);
}
}
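/*
 * Host-side launch sketch for the kernel above: one thread per (individual,
 * character) pair, i.e. a 2D grid over POP_SIZE x LEN_SIZE. The block shape
 * is an assumption, not taken from the original project, and each
 * individual's fitness must be zeroed beforehand since the kernel
 * accumulates with atomicAdd:
 *
 *   dim3 block(16, 16);
 *   dim3 grid((POP_SIZE + 15) / 16, (LEN_SIZE + 15) / 16);
 *   hipLaunchKernelGGL((fitness), grid, block, 0, 0, d_dest, d_pop);
 */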
| e0850bd3a6c66506c5e2425a58f541109e4de6a4.cu | /*
* kernels.cu
*
* Created on: 12/12/2018
* Author: minterciso
*/
#include "kernels.h"
#include "ga.h"
__global__ void fitness(char s_dest[LEN_SIZE], individual *pop)
{
unsigned int pop_idx = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int str_idx = threadIdx.y + blockDim.y * blockIdx.y;
individual *ind = NULL;
if(pop_idx < POP_SIZE && str_idx < LEN_SIZE)
{
ind = &pop[pop_idx];
unsigned int l_fit = abs( (int)ind->s[str_idx] - (int)s_dest[str_idx]);
atomicAdd(&ind->fitness, l_fit);
}
}
|
87e84900501a34226ad8301794a39a0abdc51a59.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MINMAX.h"
#include "common.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include <cfloat>
__global__ void cuda_MinMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 2> thresholds,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = blockIdx.z % output.getSize(1); // output frame/time
int slice = blockIdx.z / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int minColumn = 0;
int minRow = 0;
int minFrame = 0;
float min = FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < kW; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
float threshold_lower = thresholds[0][frame];
float threshold_upper = thresholds[1][frame];
if ( val < threshold_lower || val > threshold_upper || frame == kT-1)
{
min = val;
minColumn = column;
minRow = row;
minFrame = frame;
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min == FLT_MAX)
{
min = 0;
}
output[slice][oFrame][oRow][oColumn] = min;
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = minFrame;
((unsigned char*)(idx))[1] = minRow;
((unsigned char*)(idx))[2] = minColumn;
((unsigned char*)(idx))[3] = 0;
}
}
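/*
 * Index packing used above: the four bytes of each float in `indices` act as
 * a tiny struct -- byte 0 is the frame offset, byte 1 the row offset, byte 2
 * the column offset, byte 3 unused -- and updateGradInput() below decodes
 * them the same way. A sketch of the round trip with hypothetical helper
 * names (packIdx/unpackIdx are not part of this file):
 *
 *   __device__ void packIdx(float *slot, int f, int r, int c) {
 *       unsigned char *b = (unsigned char *)slot;
 *       b[0] = f; b[1] = r; b[2] = c; b[3] = 0;
 *   }
 *   __device__ void unpackIdx(const float *slot, int &f, int &r, int &c) {
 *       const unsigned char *b = (const unsigned char *)slot;
 *       f = b[0]; r = b[1]; c = b[2];
 *   }
 */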
template <int KERNEL_WIDTH>
__global__ void cuda_MinMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 2> thresholds,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = blockIdx.z % output.getSize(1); // output frame/time
int slice = blockIdx.z / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int minColumn = 0;
int minRow = 0;
int minFrame = 0;
float min = FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < KERNEL_WIDTH; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
float threshold_lower = thresholds[0][frame];
float threshold_upper = thresholds[1][frame];
if ( val < threshold_lower || val > threshold_upper || frame == kT-1)
{
min = val;
minColumn = column;
minRow = row;
minFrame = frame;
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min == FLT_MAX)
{
min = 0;
}
output[slice][oFrame][oRow][oColumn] = min;
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = minFrame;
((unsigned char*)(idx))[1] = minRow;
((unsigned char*)(idx))[2] = minColumn;
((unsigned char*)(idx))[3] = 0;
}
}
#define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
hipLaunchKernelGGL(( cuda_MinMaxPooling_updateOutput<KW>), dim3(grid), dim3(block), \
0, THCState_getCurrentStream(state), \
cudaInput, cudaThresholds, cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW); \
break
void THNN_CudaMinMaxPooling_updateOutput(
THCState *state, THCudaTensor *input, THCudaTensor *thresholds,
THCudaTensor *mask, THCudaTensor *output, THCudaTensor *indices,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceilMode)
{
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
MINMAX_assertSameGPU(state, 5, input, thresholds, mask, indices, output);
if (THCudaTensor_nDimension(state, input) == 4)
{
THArgCheck(
THCudaTensor_size(state, input, 1) >= kT &&
THCudaTensor_size(state, input, 2) >= kH &&
THCudaTensor_size(state, input, 3) >= kW, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
inputTime = THCudaTensor_size(state, input, 1);
inputHeight = THCudaTensor_size(state, input, 2);
inputWidth = THCudaTensor_size(state, input, 3);
}
else if (THCudaTensor_nDimension(state, input) == 5)
{
THArgCheck(
THCudaTensor_size(state, input, 4) >= kW &&
THCudaTensor_size(state, input, 3) >= kH &&
THCudaTensor_size(state, input, 2) >= kT, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
inputTime = THCudaTensor_size(state, input, 2);
inputHeight = THCudaTensor_size(state, input, 3);
inputWidth = THCudaTensor_size(state, input, 4);
}
else
{
THArgCheck(false, 2, "4D or 5D tensor expected");
}
THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size"
);
if (ceilMode)
{
outputTime = (int)(ceil((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
else
{
outputTime = (int)(floor((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
if (padT || padW || padH)
{
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (input->nDimension == 4) /* 4D */
{
/* resize output */
THCudaTensor_resize4d(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
/* indices pack t,i,j locations for each output point as uchars into
each float of the tensor */
THCudaTensor_resize4d(state, indices, inputSlices,
outputTime, outputHeight, outputWidth);
}
else
{ /* 5D */
THCudaTensor_resize5d(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
// Index tensor packs index offsets as uchars into floats
THCudaTensor_resize5d(state, indices, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCudaTensor_newContiguous(state, input);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaInput;
THCDeviceTensor<float, 2> cudaThresholds;
// THCDeviceTensor<float, 4> cudaMask;
THCDeviceTensor<float, 4> cudaOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaInput = toDeviceTensor<float, 4>(state, input);
cudaThresholds = toDeviceTensor<float, 2>(state, thresholds);
cudaOutput = toDeviceTensor<float, 4>(state, output);
}
else
{
cudaInput = toDeviceTensor<float, 5>(state, input).downcastOuter<4>();
cudaThresholds = toDeviceTensor<float, 2>(state, thresholds);
cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>();
}
// copy indices tensor
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices),
indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
dim3 block(32, 8);
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
outputTime * inputSlices * batchSize);
switch (kW)
{
UPDATE_OUTPUT_KERNEL_WIDTH(1);
UPDATE_OUTPUT_KERNEL_WIDTH(2);
UPDATE_OUTPUT_KERNEL_WIDTH(3);
UPDATE_OUTPUT_KERNEL_WIDTH(4);
UPDATE_OUTPUT_KERNEL_WIDTH(5);
UPDATE_OUTPUT_KERNEL_WIDTH(6);
UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
hipLaunchKernelGGL(( cuda_MinMaxPooling_updateOutput), dim3(grid), dim3(block),
0, THCState_getCurrentStream(state),
cudaInput, cudaThresholds, cudaIndices, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW);
}
THCudaTensor_free(state, input);
THCudaTensor_free(state, indices1);
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
__global__ void cuda_MinMaxPooling_updateGradInput(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> gradInput,
int dT, int dH, int dW,
int padT, int padH, int padW)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = blockIdx.z % gradOutput.getSize(1); // output frame/time
int slice = blockIdx.z / gradOutput.getSize(1); // output slice/feature
if (oRow < gradOutput.getSize(2) && oColumn < gradOutput.getSize(3))
{
float *idx = &indices[slice][oFrame][oRow][oColumn];
int iFrame = ((unsigned char*)(idx))[0] + oFrame * dT - padT;
int iRow = ((unsigned char*)(idx))[1] + oRow * dH - padH;
int iColumn = ((unsigned char*)(idx))[2] + oColumn * dW - padW;
atomicAdd(&gradInput[slice][iFrame][iRow][iColumn],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
void THNN_CudaMinMaxPooling_updateGradInput(
THCState *state,
THCudaTensor *input, THCudaTensor *mask,
THCudaTensor *gradOutput, THCudaTensor *gradInput,
THCudaTensor *indices,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
// Resize and initialize result tensor.
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
int batchSize;
int inputSlices;
int outputTime;
int outputHeight;
int outputWidth;
MINMAX_assertSameGPU(state, 5, input, mask, indices, gradOutput, gradInput);
if (THCudaTensor_nDimension(state, input) == 4) /* 4D */
{
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
outputTime = THCudaTensor_size(state, gradOutput, 1);
outputHeight = THCudaTensor_size(state, gradOutput, 2);
outputWidth = THCudaTensor_size(state, gradOutput, 3);
}
else
{
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
outputTime = THCudaTensor_size(state, gradOutput, 2);
outputHeight = THCudaTensor_size(state, gradOutput, 3);
outputWidth = THCudaTensor_size(state, gradOutput, 4);
}
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaGradInput;
// THCDeviceTensor<float, 4> cudaMask;
THCDeviceTensor<float, 4> cudaGradOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaGradInput = toDeviceTensor<float, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
}
else
{
cudaGradInput =
toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>();
}
// copy indices tensor
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices), indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
dim3 block(32, 8);
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
outputTime * inputSlices * batchSize);
hipLaunchKernelGGL(( cuda_MinMaxPooling_updateGradInput), dim3(grid), dim3(block),
0, THCState_getCurrentStream(state),
cudaGradOutput,
cudaIndices,
cudaGradInput,
dT, dH, dW,
padT, padH, padW);
// cleanup
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, indices1);
}
| 87e84900501a34226ad8301794a39a0abdc51a59.cu | #include "MINMAX.h"
#include "common.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include <cfloat>
__global__ void cuda_MinMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 2> thresholds,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH, int kW,
int dT, int dH, int dW,
int padT, int padH, int padW)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = blockIdx.z % output.getSize(1); // output frame/time
int slice = blockIdx.z / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int minColumn = 0;
int minRow = 0;
int minFrame = 0;
float min = FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < kW; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
float threshold_lower = thresholds[0][frame];
float threshold_upper = thresholds[1][frame];
if ( val < threshold_lower || val > threshold_upper || frame == kT-1)
{
min = val;
minColumn = column;
minRow = row;
minFrame = frame;
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min == FLT_MAX)
{
min = 0;
}
output[slice][oFrame][oRow][oColumn] = min;
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = minFrame;
((unsigned char*)(idx))[1] = minRow;
((unsigned char*)(idx))[2] = minColumn;
((unsigned char*)(idx))[3] = 0;
}
}
template <int KERNEL_WIDTH>
__global__ void cuda_MinMaxPooling_updateOutput(
THCDeviceTensor<float, 4> input,
THCDeviceTensor<float, 2> thresholds,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> output,
int kT, int kH,
int dT, int dH, int dW,
int padT, int padH, int padW)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = blockIdx.z % output.getSize(1); // output frame/time
int slice = blockIdx.z / output.getSize(1); // output slice/feature
if (oRow < output.getSize(2) && oColumn < output.getSize(3))
{
int iColumn = oColumn * dW - padW;
int iRow = oRow * dH - padH;
int iFrame = oFrame * dT - padT;
int minColumn = 0;
int minRow = 0;
int minFrame = 0;
float min = FLT_MAX;
for (int frame = 0; frame < kT; ++frame)
{
if (iFrame + frame < input.getSize(1) && iFrame + frame >= 0)
{
for (int row = 0; row < kH; ++row)
{
if (iRow + row < input.getSize(2) && iRow + row >= 0)
{
for (int column = 0; column < KERNEL_WIDTH; ++column)
{
if (iColumn + column < input.getSize(3) && iColumn + column >= 0)
{
float val = input[slice][iFrame + frame][iRow + row][iColumn + column];
float threshold_lower = thresholds[0][frame];
float threshold_upper = thresholds[1][frame];
if ( val < threshold_lower || val > threshold_upper || frame == kT-1)
{
min = val;
minColumn = column;
minRow = row;
minFrame = frame;
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min < FLT_MAX)
{
break;
}
}
}
if (min == FLT_MAX)
{
min = 0;
}
output[slice][oFrame][oRow][oColumn] = min;
float *idx = &indices[slice][oFrame][oRow][oColumn];
((unsigned char*)(idx))[0] = minFrame;
((unsigned char*)(idx))[1] = minRow;
((unsigned char*)(idx))[2] = minColumn;
((unsigned char*)(idx))[3] = 0;
}
}
#define UPDATE_OUTPUT_KERNEL_WIDTH(KW) case KW: \
cuda_MinMaxPooling_updateOutput<KW><<<grid, block, \
0, THCState_getCurrentStream(state)>>>( \
cudaInput, cudaThresholds, cudaIndices, cudaOutput, kT, kH, dT, dH, dW, padT, padH, padW); \
break
void THNN_CudaMinMaxPooling_updateOutput(
THCState *state, THCudaTensor *input, THCudaTensor *thresholds,
THCudaTensor *mask, THCudaTensor *output, THCudaTensor *indices,
int kT, int kW, int kH,
int dT, int dW, int dH,
int padT, int padW, int padH,
bool ceilMode)
{
int batchSize;
int inputSlices;
int inputTime;
int inputHeight;
int inputWidth;
int outputTime;
int outputHeight;
int outputWidth;
MINMAX_assertSameGPU(state, 5, input, thresholds, mask, indices, output);
if (THCudaTensor_nDimension(state, input) == 4)
{
THArgCheck(
THCudaTensor_size(state, input, 1) >= kT &&
THCudaTensor_size(state, input, 2) >= kH &&
THCudaTensor_size(state, input, 3) >= kW, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
inputTime = THCudaTensor_size(state, input, 1);
inputHeight = THCudaTensor_size(state, input, 2);
inputWidth = THCudaTensor_size(state, input, 3);
}
else if (THCudaTensor_nDimension(state, input) == 5)
{
THArgCheck(
THCudaTensor_size(state, input, 4) >= kW &&
THCudaTensor_size(state, input, 3) >= kH &&
THCudaTensor_size(state, input, 2) >= kT, 2,
"input image smaller than kernel size"
);
/* sizes */
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
inputTime = THCudaTensor_size(state, input, 2);
inputHeight = THCudaTensor_size(state, input, 3);
inputWidth = THCudaTensor_size(state, input, 4);
}
else
{
THArgCheck(false, 2, "4D or 5D tensor expected");
}
THArgCheck(kT/2 >= padT && kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size"
);
if (ceilMode)
{
outputTime = (int)(ceil((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(ceil((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(ceil((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
else
{
outputTime = (int)(floor((float)(inputTime - kT + 2*padT) / dT)) + 1;
outputHeight = (int)(floor((float)(inputHeight - kH + 2*padH) / dH)) + 1;
outputWidth = (int)(floor((float)(inputWidth - kW + 2*padW) / dW)) + 1;
}
if (padT || padW || padH)
{
if ((outputTime - 1)*dT >= inputTime + padT)
--outputTime;
if ((outputHeight - 1)*dH >= inputHeight + padH)
--outputHeight;
if ((outputWidth - 1)*dW >= inputWidth + padW)
--outputWidth;
}
if (input->nDimension == 4) /* 4D */
{
/* resize output */
THCudaTensor_resize4d(state, output, inputSlices,
outputTime, outputHeight, outputWidth);
/* indices pack t,i,j locations for each output point as uchars into
each float of the tensor */
THCudaTensor_resize4d(state, indices, inputSlices,
outputTime, outputHeight, outputWidth);
}
else
{ /* 5D */
THCudaTensor_resize5d(state, output, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
// Index tensor packs index offsets as uchars into floats
THCudaTensor_resize5d(state, indices, batchSize, inputSlices,
outputTime, outputHeight, outputWidth);
}
input = THCudaTensor_newContiguous(state, input);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaInput;
THCDeviceTensor<float, 2> cudaThresholds;
// THCDeviceTensor<float, 4> cudaMask;
THCDeviceTensor<float, 4> cudaOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaInput = toDeviceTensor<float, 4>(state, input);
cudaThresholds = toDeviceTensor<float, 2>(state, thresholds);
cudaOutput = toDeviceTensor<float, 4>(state, output);
}
else
{
cudaInput = toDeviceTensor<float, 5>(state, input).downcastOuter<4>();
cudaThresholds = toDeviceTensor<float, 2>(state, thresholds);
cudaOutput = toDeviceTensor<float, 5>(state, output).downcastOuter<4>();
}
// copy indices tensor
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices),
indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
dim3 block(32, 8);
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
outputTime * inputSlices * batchSize);
switch (kW)
{
UPDATE_OUTPUT_KERNEL_WIDTH(1);
UPDATE_OUTPUT_KERNEL_WIDTH(2);
UPDATE_OUTPUT_KERNEL_WIDTH(3);
UPDATE_OUTPUT_KERNEL_WIDTH(4);
UPDATE_OUTPUT_KERNEL_WIDTH(5);
UPDATE_OUTPUT_KERNEL_WIDTH(6);
UPDATE_OUTPUT_KERNEL_WIDTH(7);
default:
cuda_MinMaxPooling_updateOutput<<<grid, block,
0, THCState_getCurrentStream(state)>>>(
cudaInput, cudaThresholds, cudaIndices, cudaOutput, kT, kH, kW, dT, dH, dW, padT, padH, padW);
}
THCudaTensor_free(state, input);
THCudaTensor_free(state, indices1);
}
#undef UPDATE_OUTPUT_KERNEL_WIDTH
__global__ void cuda_MinMaxPooling_updateGradInput(
THCDeviceTensor<float, 4> gradOutput,
THCDeviceTensor<float, 4> indices,
THCDeviceTensor<float, 4> gradInput,
int dT, int dH, int dW,
int padT, int padH, int padW)
{
int oColumn = blockIdx.x * blockDim.x + threadIdx.x;
int oRow = blockIdx.y * blockDim.y + threadIdx.y;
int oFrame = blockIdx.z % gradOutput.getSize(1); // output frame/time
int slice = blockIdx.z / gradOutput.getSize(1); // output slice/feature
if (oRow < gradOutput.getSize(2) && oColumn < gradOutput.getSize(3))
{
float *idx = &indices[slice][oFrame][oRow][oColumn];
int iFrame = ((unsigned char*)(idx))[0] + oFrame * dT - padT;
int iRow = ((unsigned char*)(idx))[1] + oRow * dH - padH;
int iColumn = ((unsigned char*)(idx))[2] + oColumn * dW - padW;
atomicAdd(&gradInput[slice][iFrame][iRow][iColumn],
gradOutput[slice][oFrame][oRow][oColumn]);
}
}
void THNN_CudaMinMaxPooling_updateGradInput(
THCState *state,
THCudaTensor *input, THCudaTensor *mask,
THCudaTensor *gradOutput, THCudaTensor *gradInput,
THCudaTensor *indices,
int dT, int dW, int dH,
int padT, int padW, int padH)
{
// Resize and initialize result tensor.
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
int batchSize;
int inputSlices;
int outputTime;
int outputHeight;
int outputWidth;
MINMAX_assertSameGPU(state, 5, input, mask, indices, gradOutput, gradInput);
if (THCudaTensor_nDimension(state, input) == 4) /* 4D */
{
batchSize = 1;
inputSlices = THCudaTensor_size(state, input, 0);
outputTime = THCudaTensor_size(state, gradOutput, 1);
outputHeight = THCudaTensor_size(state, gradOutput, 2);
outputWidth = THCudaTensor_size(state, gradOutput, 3);
}
else
{
batchSize = THCudaTensor_size(state, input, 0);
inputSlices = THCudaTensor_size(state, input, 1);
outputTime = THCudaTensor_size(state, gradOutput, 2);
outputHeight = THCudaTensor_size(state, gradOutput, 3);
outputWidth = THCudaTensor_size(state, gradOutput, 4);
}
gradOutput = THCudaTensor_newContiguous(state, gradOutput);
// Collapse batch and feature dimensions
THCDeviceTensor<float, 4> cudaGradInput;
// THCDeviceTensor<float, 4> cudaMask;
THCDeviceTensor<float, 4> cudaGradOutput;
if (THCudaTensor_nDimension(state, input) == 4)
{
cudaGradInput = toDeviceTensor<float, 4>(state, gradInput);
cudaGradOutput = toDeviceTensor<float, 4>(state, gradOutput);
}
else
{
cudaGradInput =
toDeviceTensor<float, 5>(state, gradInput).downcastOuter<4>();
cudaGradOutput =
toDeviceTensor<float, 5>(state, gradOutput).downcastOuter<4>();
}
// copy indices tensor
THLongStorage *indicesSize = THLongStorage_newWithSize(4);
long indicesSizeRaw[4] = { batchSize * inputSlices,
outputTime, outputHeight, outputWidth };
THLongStorage_rawCopy(indicesSize, indicesSizeRaw);
THCudaTensor *indices1 = THCudaTensor_newWithStorage(
state, THCudaTensor_storage(state, indices),
THCudaTensor_storageOffset(state, indices), indicesSize, NULL);
THLongStorage_free(indicesSize);
THCDeviceTensor<float, 4> cudaIndices =
toDeviceTensor<float, 4>(state, indices1);
dim3 block(32, 8);
dim3 grid(THCCeilDiv(outputWidth, static_cast<int>(block.x)),
THCCeilDiv(outputHeight, static_cast<int>(block.y)),
outputTime * inputSlices * batchSize);
cuda_MinMaxPooling_updateGradInput<<<grid, block,
0, THCState_getCurrentStream(state)>>>(
cudaGradOutput,
cudaIndices,
cudaGradInput,
dT, dH, dW,
padT, padH, padW);
// cleanup
THCudaTensor_free(state, gradOutput);
THCudaTensor_free(state, indices1);
}
|
1afd6b43fbea17a501ac4a9bef1ca49ecc2b03e4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "RayTracing.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include <assert.h>
#include "length_cm.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void raytracingGM(uchar4* ptrTabPixels, uint w, uint h, float t, Sphere *ptrSphere, int nbSphere);
extern __global__ void raytracingCM(uchar4* ptrTabPixels, uint w, uint h, float t, Sphere *ptrSphere, int nbSphere);
extern __global__ void raytracingSM(uchar4* ptrTabPixels, uint w, uint h, float t, Sphere *ptrSphere, int nbSphere);
extern __host__ void uploadGPUCM(Sphere* tabValue);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
RayTracing::RayTracing(const Grid &grid, uint w, uint h, float dt, int nbSphere) :
Animable_I<uchar4>(grid, w, h, "Raytracing_Cuda")
{
assert(nbSphere==LENGTH_CM);
// time
this->t = 0;
this->dt = dt;
this->nbSphere = nbSphere;
SphereCreator sphereCreator = SphereCreator(nbSphere, w, h,100);
Sphere* ptrTabSphere = sphereCreator.getTabSphere();
//MemoryManagement
this->sizeOctetSpheres = nbSphere * sizeof(Sphere);
Device::malloc(&ptrDevTabSphere, sizeOctetSpheres);
Device::memclear(ptrDevTabSphere, sizeOctetSpheres);
Device::memcpyHToD(ptrDevTabSphere, ptrTabSphere, sizeOctetSpheres);
uploadGPUCM(ptrTabSphere);
}
RayTracing::~RayTracing()
{
Device::free(ptrDevTabSphere);
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note : domaineMath is not used because the view is not zoomable
*/
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
//Device::lastCudaError("raytracing rgba uchar4 (before kernel)"); // facultatif, for debug only, remove for release
static int i=1 ;
if (i%3 == 0)
{
hipLaunchKernelGGL(( raytracingGM), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t, ptrDevTabSphere, nbSphere);
}
else if (i%3 == 1)
{
hipLaunchKernelGGL(( raytracingCM), dim3(dg),dim3(db), 0, 0, ptrDevPixels,w,h,t, ptrDevTabSphere, nbSphere);
}
else if (i%3 == 2)
{
hipLaunchKernelGGL(( raytracingSM), dim3(dg),dim3(db),sizeOctetSpheres, 0, ptrDevPixels,w,h,t, ptrDevTabSphere, nbSphere);
}
i++; // advance the counter so the three kernel variants alternate between frames
//Device::lastCudaError("raytracing rgba uchar4 (after kernel)"); // facultatif, for debug only, remove for release
}
/**
* Override
* Called periodically by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
| 1afd6b43fbea17a501ac4a9bef1ca49ecc2b03e4.cu | #include "RayTracing.h"
#include <iostream>
#include <assert.h>
#include "Device.h"
#include <assert.h>
#include "length_cm.h"
using std::cout;
using std::endl;
/*----------------------------------------------------------------------*\
|* Declaration *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Imported *|
\*-------------------------------------*/
extern __global__ void raytracingGM(uchar4* ptrTabPixels, uint w, uint h, float t, Sphere *ptrSphere, int nbSphere);
extern __global__ void raytracingCM(uchar4* ptrTabPixels, uint w, uint h, float t, Sphere *ptrSphere, int nbSphere);
extern __global__ void raytracingSM(uchar4* ptrTabPixels, uint w, uint h, float t, Sphere *ptrSphere, int nbSphere);
extern __host__ void uploadGPUCM(Sphere* tabValue);
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* Implementation *|
\*---------------------------------------------------------------------*/
/*--------------------------------------*\
|* Public *|
\*-------------------------------------*/
/*-------------------------*\
|* Constructor *|
\*-------------------------*/
RayTracing::RayTracing(const Grid &grid, uint w, uint h, float dt, int nbSphere) :
Animable_I<uchar4>(grid, w, h, "Raytracing_Cuda")
{
assert(nbSphere==LENGTH_CM);
// time
this->t = 0;
this->dt = dt;
this->nbSphere = nbSphere;
SphereCreator sphereCreator = SphereCreator(nbSphere, w, h,100);
Sphere* ptrTabSphere = sphereCreator.getTabSphere();
//MemoryManagement
this->sizeOctetSpheres = nbSphere * sizeof(Sphere);
Device::malloc(&ptrDevTabSphere, sizeOctetSpheres);
Device::memclear(ptrDevTabSphere, sizeOctetSpheres);
Device::memcpyHToD(ptrDevTabSphere, ptrTabSphere, sizeOctetSpheres);
uploadGPUCM(ptrTabSphere);
}
RayTracing::~RayTracing()
{
Device::free(ptrDevTabSphere);
}
/*-------------------------*\
|* Methode *|
\*-------------------------*/
/**
* Override
* Called periodically by the API
*
* Note : domaineMath is not used because the view is not zoomable
*/
void RayTracing::process(uchar4* ptrDevPixels, uint w, uint h, const DomaineMath& domaineMath)
{
//Device::lastCudaError("raytracing rgba uchar4 (before kernel)"); // facultatif, for debug only, remove for release
static int i=1 ;
if (i%3 == 0)
{
raytracingGM<<<dg,db>>>(ptrDevPixels,w,h,t, ptrDevTabSphere, nbSphere);
}
else if (i%3 == 1)
{
raytracingCM<<<dg,db>>>(ptrDevPixels,w,h,t, ptrDevTabSphere, nbSphere);
}
else if (i%3 == 2)
{
raytracingSM<<<dg,db,sizeOctetSpheres>>>(ptrDevPixels,w,h,t, ptrDevTabSphere, nbSphere);
}
i++; // advance the counter so the three kernel variants alternate between frames
//Device::lastCudaError("raytracing rgba uchar4 (after kernel)"); // facultatif, for debug only, remove for release
}
/**
* Override
* Called periodically by the API
*/
void RayTracing::animationStep()
{
t += dt;
}
/*--------------------------------------*\
|* Private *|
\*-------------------------------------*/
/*----------------------------------------------------------------------*\
|* End *|
\*---------------------------------------------------------------------*/
|
ed32b2db2f7edfbbc12cf61518f1afe31ef854b0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
//int stride = blockDim.x * gridDim.x;
if(index < N)
{
result[index] = a[index] +b[index];
}
/*
*for(int i = index; i < N; i += stride)
*{
* result[i] = a[i] + b[i];
*}
*/
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
hipMallocManaged(&a, size);
hipMallocManaged(&b, size);
hipMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nsys should register performance changes when execution configuration
* is updated.
*/
int deviceId;
hipGetDevice(&deviceId);
hipDeviceProp_t props;
hipGetDeviceProperties(&props, deviceId);
threadsPerBlock = 256;
//Calculating the number of Blocks needed
int BlockNum = (N + threadsPerBlock - 1) / threadsPerBlock;
//Round the block count up to the next multiple of the number of streaming multiprocessors
numberOfBlocks = (((BlockNum - 1) / props.multiProcessorCount) + 1) * props.multiProcessorCount;
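// Worked example (assuming an 80-SM device -- the SM count is ours, not a
// value from the program): N = 2<<24 = 33554432, so BlockNum = 33554432/256
// = 131072, and rounding up to a multiple of 80 gives numberOfBlocks =
// ((131071/80)+1)*80 = 1639*80 = 131120.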
hipError_t addVectorsErr;
hipError_t asyncErr;
//Prefetching a
hipMemPrefetchAsync(a, size, deviceId);
//result: number of memory operations on device is now 7815 and on host is 768.
//Time to do the kernal is now 84872244 nanoseconds.
//Prefetching b after a
hipMemPrefetchAsync(b, size, deviceId);
//result: number of memory operations on device is now 4683 and on host is 768.
//Time to do the kernal is now 45627368 nanoseconds.
//Prefetching c after a and b
hipMemPrefetchAsync(c, size, deviceId);
//result: number of memory operations on device is now 192 and on host is 768.
//Time to do the kernal is now 495873 nanoseconds.
hipLaunchKernelGGL(( addVectorsInto), dim3(numberOfBlocks), dim3(threadsPerBlock), 0, 0, c, a, b, N);
addVectorsErr = hipGetLastError();
if(addVectorsErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(addVectorsErr));
asyncErr = hipDeviceSynchronize();
if(asyncErr != hipSuccess) printf("Error: %s\n", hipGetErrorString(asyncErr));
checkElementsAre(7, c, N);
hipFree(a);
hipFree(b);
hipFree(c);
}
| ed32b2db2f7edfbbc12cf61518f1afe31ef854b0.cu | #include <stdio.h>
/*
* Host function to initialize vector elements. This function
* simply initializes each element to equal its index in the
* vector.
*/
void initWith(float num, float *a, int N)
{
for(int i = 0; i < N; ++i)
{
a[i] = num;
}
}
/*
* Device kernel stores into `result` the sum of each
* same-indexed value of `a` and `b`.
*/
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
//int stride = blockDim.x * gridDim.x;
if(index < N)
{
result[index] = a[index] +b[index];
}
/*
*for(int i = index; i < N; i += stride)
*{
* result[i] = a[i] + b[i];
*}
*/
}
/*
* Host function to confirm values in `vector`. This function
* assumes all values are the same `target` value.
*/
void checkElementsAre(float target, float *vector, int N)
{
for(int i = 0; i < N; i++)
{
if(vector[i] != target)
{
printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", i, vector[i], target);
exit(1);
}
}
printf("Success! All values calculated correctly.\n");
}
int main()
{
const int N = 2<<24;
size_t size = N * sizeof(float);
float *a;
float *b;
float *c;
cudaMallocManaged(&a, size);
cudaMallocManaged(&b, size);
cudaMallocManaged(&c, size);
initWith(3, a, N);
initWith(4, b, N);
initWith(0, c, N);
size_t threadsPerBlock;
size_t numberOfBlocks;
/*
* nsys should register performance changes when execution configuration
* is updated.
*/
int deviceId;
cudaGetDevice(&deviceId);
cudaDeviceProp props;
cudaGetDeviceProperties(&props, deviceId);
threadsPerBlock = 256;
//Calculating the number of Blocks needed
int BlockNum = (N + threadsPerBlock - 1) / threadsPerBlock;
//Round the block count up to the next multiple of the number of streaming multiprocessors
numberOfBlocks = (((BlockNum - 1) / props.multiProcessorCount) + 1) * props.multiProcessorCount;
cudaError_t addVectorsErr;
cudaError_t asyncErr;
//Prefetching a
cudaMemPrefetchAsync(a, size, deviceId);
//result: number of memory operations on device is now 7815 and on host is 768.
//Time to do the kernal is now 84872244 nanoseconds.
//Prefetching b after a
cudaMemPrefetchAsync(b, size, deviceId);
//result: number of memory operations on device is now 4683 and on host is 768.
//Time to do the kernal is now 45627368 nanoseconds.
//Prefetching c after a and b
cudaMemPrefetchAsync(c, size, deviceId);
//result: number of memory operations on device is now 192 and on host is 768.
//Time to do the kernal is now 495873 nanoseconds.
addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
addVectorsErr = cudaGetLastError();
if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
asyncErr = cudaDeviceSynchronize();
if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
checkElementsAre(7, c, N);
cudaFree(a);
cudaFree(b);
cudaFree(c);
}
|
136fdeb399204f25749ee61dc4df670e7d556509.hip | // !!! This is a file automatically generated by hipify!!!
#include "MemLeakDebug.h"
#include "CameraCalibration.h"
sCMOS_Calibration::sCMOS_Calibration(int2 imageSize, const float * offset, const float * gain, const float * variance)
: d_invgain(imageSize.x, imageSize.y),
d_vargain2(imageSize.x, imageSize.y),
d_offset(offset, imageSize.x, imageSize.y)
{
float* h_vargain2 = new float[d_vargain2.NumPixels()];
for (int i = 0; i < d_vargain2.NumPixels(); i++)
h_vargain2[i] = variance[i] / (gain[i] * gain[i]);
d_vargain2.CopyFromHost(h_vargain2);
delete[] h_vargain2;
float* invgain = new float[imageSize.x*imageSize.y];
for (int i=0;i<imageSize.x*imageSize.y;i++)
invgain[i] = 1.0f / gain[i];
this->d_invgain.CopyFromHost(invgain);
delete[] invgain;
}
void sCMOS_Calibration::ProcessImage(DeviceImage<float>& sample,hipStream_t stream)
{
auto smp = sample.GetIndexer();
auto offset = this->d_offset.GetIndexer();
auto invgain = this->d_invgain.GetIndexer();
auto vargain2 = this->d_vargain2.GetIndexer();
LaunchKernel(sample.width, sample.height, [=]__device__(int x, int y) {
smp(x, y) = (smp(x, y) - offset(x, y)) * invgain(x, y);// +vargain2(x, y); var/gain^2 is now added during optimization
}, 0, stream);
}
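/*
 * Model note: the per-pixel sCMOS correction applied above is
 * corrected = (raw - offset) / gain; the read-noise term var/gain^2
 * (precomputed into d_vargain2 by the constructor) is deliberately left out
 * here and, per the kernel comment, added later during optimization.
 */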
SampleOffset_sCMOS<float> sCMOS_Calibration::GetSampleOffset()
{
SampleOffset_sCMOS<float> offset;
offset.d_vargain2 = d_vargain2.data;
offset.pitch = d_vargain2.PitchInPixels();
return offset;
}
void GainOffsetCalibration::ProcessImage(DeviceImage<float>& img, hipStream_t stream)
{
float invgain = 1.0f / this->gain, offset = this->offset;
auto indexer = img.GetIndexer();
LaunchKernel(img.height, img.width, [=]__device__(int y, int x) {
float v = (indexer(x, y) - offset) * invgain;
if (v < 0.0f) v = 0.0f;
indexer(x, y) = v;
}, 0, stream);
}
GainOffsetImageCalibration::GainOffsetImageCalibration(int2 imgsize, const float * gain, const float * offset) :
invGain(imgsize), offset(imgsize)
{
float* h_invGain = new float[imgsize.x*imgsize.y];
for (int i = 0; i < imgsize.x*imgsize.y; i++)
h_invGain[i] = 1.0f / gain[i];
this->invGain.CopyFromHost(h_invGain);
delete[] h_invGain; // release the host staging buffer
this->offset.CopyFromHost(offset);
}
void GainOffsetImageCalibration::ProcessImage(DeviceImage<float>& img, hipStream_t stream)
{
auto invGain_ = invGain.GetConstIndexer();
auto offset_ = offset.GetConstIndexer();
auto img_ = img.GetIndexer();
LaunchKernel(img.height, img.width, [=]__device__(int y, int x) {
float v = (img_(x, y) - offset_(x, y))*invGain_(x, y);
if (v < 0.0f) v = 0.0f;
img_(x, y) = v;
}, 0, stream);
}
CDLL_EXPORT sCMOS_Calibration * sCMOS_Calib_Create(int w, int h, const float * offset, const float * gain, const float * variance, Context* ctx)
{
auto* r = new sCMOS_Calibration({ w,h }, offset, gain, variance);
if (ctx) r->SetContext(ctx);
return r;
}
CDLL_EXPORT GainOffsetCalibration * GainOffsetCalib_Create(float gain, float offset, Context* ctx)
{
auto* r = new GainOffsetCalibration(gain, offset);
if (ctx) r->SetContext(ctx);
return r;
}
CDLL_EXPORT GainOffsetImageCalibration * GainOffsetImageCalib_Create(int width,int height, const float *gain, const float* offset, Context* ctx)
{
auto* r = new GainOffsetImageCalibration({ width,height },gain, offset);
if (ctx) r->SetContext(ctx);
return r;
}
| 136fdeb399204f25749ee61dc4df670e7d556509.cu | #include "MemLeakDebug.h"
#include "CameraCalibration.h"
sCMOS_Calibration::sCMOS_Calibration(int2 imageSize, const float * offset, const float * gain, const float * variance)
: d_invgain(imageSize.x, imageSize.y),
d_vargain2(imageSize.x, imageSize.y),
d_offset(offset, imageSize.x, imageSize.y)
{
float* h_vargain2 = new float[d_vargain2.NumPixels()];
for (int i = 0; i < d_vargain2.NumPixels(); i++)
h_vargain2[i] = variance[i] / (gain[i] * gain[i]);
d_vargain2.CopyFromHost(h_vargain2);
delete[] h_vargain2;
float* invgain = new float[imageSize.x*imageSize.y];
for (int i=0;i<imageSize.x*imageSize.y;i++)
invgain[i] = 1.0f / gain[i];
this->d_invgain.CopyFromHost(invgain);
delete[] invgain;
}
void sCMOS_Calibration::ProcessImage(DeviceImage<float>& sample,cudaStream_t stream)
{
auto smp = sample.GetIndexer();
auto offset = this->d_offset.GetIndexer();
auto invgain = this->d_invgain.GetIndexer();
auto vargain2 = this->d_vargain2.GetIndexer();
LaunchKernel(sample.width, sample.height, [=]__device__(int x, int y) {
smp(x, y) = (smp(x, y) - offset(x, y)) * invgain(x, y);// +vargain2(x, y); var/gain^2 is now added during optimization
}, 0, stream);
}
SampleOffset_sCMOS<float> sCMOS_Calibration::GetSampleOffset()
{
SampleOffset_sCMOS<float> offset;
offset.d_vargain2 = d_vargain2.data;
offset.pitch = d_vargain2.PitchInPixels();
return offset;
}
void GainOffsetCalibration::ProcessImage(DeviceImage<float>& img, cudaStream_t stream)
{
float invgain = 1.0f / this->gain, offset = this->offset;
auto indexer = img.GetIndexer();
LaunchKernel(img.height, img.width, [=]__device__(int y, int x) {
float v = (indexer(x, y) - offset) * invgain;
if (v < 0.0f) v = 0.0f;
indexer(x, y) = v;
}, 0, stream);
}
GainOffsetImageCalibration::GainOffsetImageCalibration(int2 imgsize, const float * gain, const float * offset) :
invGain(imgsize), offset(imgsize)
{
float* invGain = new float[imgsize.x*imgsize.y];
for (int i = 0; i < imgsize.x*imgsize.y; i++)
invGain[i] = 1.0f / gain[i];
this->invGain.CopyFromHost(invGain);
delete[] invGain; // free the temporary host buffer, as sCMOS_Calibration does (the local invGain shadows the member, hence this-> above)
this->offset.CopyFromHost(offset);
}
void GainOffsetImageCalibration::ProcessImage(DeviceImage<float>& img, cudaStream_t stream)
{
auto invGain_ = invGain.GetConstIndexer();
auto offset_ = offset.GetConstIndexer();
auto img_ = img.GetIndexer();
LaunchKernel(img.height, img.width, [=]__device__(int y, int x) {
float v = (img_(x, y) - offset_(x, y))*invGain_(x, y);
if (v < 0.0f) v = 0.0f;
img_(x, y) = v;
}, 0, stream);
}
CDLL_EXPORT sCMOS_Calibration * sCMOS_Calib_Create(int w, int h, const float * offset, const float * gain, const float * variance, Context* ctx)
{
auto* r = new sCMOS_Calibration({ w,h }, offset, gain, variance);
if (ctx) r->SetContext(ctx);
return r;
}
CDLL_EXPORT GainOffsetCalibration * GainOffsetCalib_Create(float gain, float offset, Context* ctx)
{
auto* r = new GainOffsetCalibration(gain, offset);
if (ctx) r->SetContext(ctx);
return r;
}
CDLL_EXPORT GainOffsetImageCalibration * GainOffsetImageCalib_Create(int width,int height, const float *gain, const float* offset, Context* ctx)
{
auto* r = new GainOffsetImageCalibration({ width,height },gain, offset);
if (ctx) r->SetContext(ctx);
return r;
}
|
531943534f821451138ac6c4ffdb9f502d99df1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "global_cuda.h"
//#include <thrust\extrema.h>
//#include <thrust\device_ptr.h>
#include "cuda_host_util.h"
#define ZMAX_APPROX_PREC (1000.0f)
/*
struct opp{
__host__ __device__ bool operator()(const cell_pos_set c1, const cell_pos_set c2){
return c1.z < c2.z;
};
};
float calc_zzmax(cell_pos_set* pos, int ncell){
thrust::device_ptr<cell_pos_set> pos_dptr(pos+10);
cell_pos_set temp;
//*thrust::max_element(pos_dptr, pos_dptr + 1, opp())
return pos_dptr[0].operator cell_pos_set().z;
}
__global__ void calc_zzmax_fool_impl(float* out, cell_pos_set* cs, int ncell){
//extern __shared__
int idx = 0;
for (int i = 1; i < ncell; i++){
if (cs[idx].z < cs[i].z)idx = i;
}
*out = cs[idx].z;
}
float calc_zzmax_2(DeviceData*d){
calc_zzmax_fool_impl<<<1,1>>>(d->zzmax, d->c_pos_d[d->current], d->ncell);
float mine;
hipMemcpy(&mine, d->zzmax, sizeof(float), hipMemcpyDeviceToHost);
return mine;
}
*/
__global__ void calc_zzmax_approx_pass1(int* out, cell_pos_set* cs, int ncell){
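// Approximate float max via integer atomics: z is scaled by ZMAX_APPROX_PREC and truncated,
// so the integer atomicMax can be used (there is no native float atomicMax); pass 2 divides
// the winning value back down, giving a result exact to about 1/1000.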
__shared__ int thread_max;
if (threadIdx.x == 0){
thread_max = 0;
}
__syncthreads();
int index = blockDim.x*blockIdx.x + threadIdx.x;
if (index < ncell){
int tmp = (int)(cs[index].z*ZMAX_APPROX_PREC);
atomicMax(&thread_max, tmp);
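// Caution: the __syncthreads() below runs inside the `index < ncell` branch, which is
// only well-defined when every thread in the block takes the branch, i.e. when ncell is
// a multiple of blockDim.x (assumption).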
__syncthreads();
if (threadIdx.x == 0){
out[blockIdx.x] = thread_max;
}
}
}
__global__ void calc_zzmax_approx_pass2(float* out, int* block_max){
__shared__ int final_max;
if (threadIdx.x == 0){
final_max = 0;
}
__syncthreads();
atomicMax(&final_max, block_max[threadIdx.x]);
__syncthreads();
if (threadIdx.x == 0){
*out = ((float)final_max) / ZMAX_APPROX_PREC;
}
}
float calc_zzmax_approx(DeviceData*d){
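// Two-pass reduction: pass 1 writes one per-block integer max into block_max_store
// (512 entries); pass 2 reduces those 512 block maxima in a single block and rescales
// the result back to float.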
hipMemset(d->block_max_store, 0, sizeof(int) * 512);
//hipMemset(d->zzmax, 0, sizeof(float));
calc_zzmax_approx_pass1 << <512, 128 >> >(d->block_max_store, d->c_pos_d[d->current], d->ncell);
hipDeviceSynchronize();
calc_zzmax_approx_pass2 << <1, 512 >> >(d->zzmax, d->block_max_store);
//hipDeviceSynchronize();
float mine;
hipMemcpy(&mine, d->zzmax, sizeof(float), hipMemcpyDeviceToHost); //sync
return mine;
} | 531943534f821451138ac6c4ffdb9f502d99df1c.cu | #include "global_cuda.h"
//#include <thrust\extrema.h>
//#include <thrust\device_ptr.h>
#include "cuda_host_util.h"
#define ZMAX_APPROX_PREC (1000.0f)
/*
struct opp{
__host__ __device__ bool operator()(const cell_pos_set c1, const cell_pos_set c2){
return c1.z < c2.z;
};
};
float calc_zzmax(cell_pos_set* pos, int ncell){
thrust::device_ptr<cell_pos_set> pos_dptr(pos+10);
cell_pos_set temp;
//*thrust::max_element(pos_dptr, pos_dptr + 1, opp())
return pos_dptr[0].operator cell_pos_set().z;
}
__global__ void calc_zzmax_fool_impl(float* out, cell_pos_set* cs, int ncell){
//extern __shared__
int idx = 0;
for (int i = 1; i < ncell; i++){
if (cs[idx].z < cs[i].z)idx = i;
}
*out = cs[idx].z;
}
float calc_zzmax_2(DeviceData*d){
calc_zzmax_fool_impl<<<1,1>>>(d->zzmax, d->c_pos_d[d->current], d->ncell);
float mine;
cudaMemcpy(&mine, d->zzmax, sizeof(float), cudaMemcpyDeviceToHost);
return mine;
}
*/
__global__ void calc_zzmax_approx_pass1(int* out, cell_pos_set* cs, int ncell){
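// Approximate float max via integer atomics: z is scaled by ZMAX_APPROX_PREC and truncated,
// so the integer atomicMax can be used (there is no native float atomicMax); pass 2 divides
// the winning value back down, giving a result exact to about 1/1000.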
__shared__ int thread_max;
if (threadIdx.x == 0){
thread_max = 0;
}
__syncthreads();
int index = blockDim.x*blockIdx.x + threadIdx.x;
if (index < ncell){
int tmp = (int)(cs[index].z*ZMAX_APPROX_PREC);
atomicMax(&thread_max, tmp);
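// Caution: the __syncthreads() below runs inside the `index < ncell` branch, which is
// only well-defined when every thread in the block takes the branch, i.e. when ncell is
// a multiple of blockDim.x (assumption).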
__syncthreads();
if (threadIdx.x == 0){
out[blockIdx.x] = thread_max;
}
}
}
__global__ void calc_zzmax_approx_pass2(float* out, int* block_max){
__shared__ int final_max;
if (threadIdx.x == 0){
final_max = 0;
}
__syncthreads();
atomicMax(&final_max, block_max[threadIdx.x]);
__syncthreads();
if (threadIdx.x == 0){
*out = ((float)final_max) / ZMAX_APPROX_PREC;
}
}
float calc_zzmax_approx(DeviceData*d){
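// Two-pass reduction: pass 1 writes one per-block integer max into block_max_store
// (512 entries); pass 2 reduces those 512 block maxima in a single block and rescales
// the result back to float.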
cudaMemset(d->block_max_store, 0, sizeof(int) * 512);
//cudaMemset(d->zzmax, 0, sizeof(float));
calc_zzmax_approx_pass1 << <512, 128 >> >(d->block_max_store, d->c_pos_d[d->current], d->ncell);
cudaThreadSynchronize();
calc_zzmax_approx_pass2 << <1, 512 >> >(d->zzmax, d->block_max_store);
//cudaThreadSynchronize();
float mine;
cudaMemcpy(&mine, d->zzmax, sizeof(float), cudaMemcpyDeviceToHost); //sync
return mine;
} |
df2126a8d56c4d4be6bff2fe4eaaef7024e7e996.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include "common/Error.h"
#include "common/GpuTimer.h"
#include "common/Vector.h"
#define N (32 * 1024)
#define THREADS 256
#define BLOCKS 32
const int ARRAY_BYTES = N * sizeof(float);
const int P_ARRAY_BYTES = BLOCKS * sizeof(float);
__global__ void dotKernel(Vector<float> d_a,
Vector<float> d_b,
Vector<float> d_c) {
__shared__ float cache[THREADS];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
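// Grid-stride loop: each thread accumulates products at stride blockDim.x * gridDim.x,
// so N may be larger than the total number of threads launched.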
while (tid < N) {
temp += d_a.getElement(tid) * d_b.getElement(tid);
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x / 2;
while (i != 0) {
__syncthreads();
if (cacheIndex < i) {
cache[cacheIndex] += cache[cacheIndex + i];
}
i /= 2;
}
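// Thread 0 publishes this block's partial sum; the host adds the BLOCKS partials in test().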
if (cacheIndex == 0)
d_c.setElement(blockIdx.x, cache[0]);
}
int randomNumbersGenerator(Vector<float> d_data, int n) {
hiprandGenerator_t gen;
/* Create pseudo-random number generator */
HANDLER_CURAND(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT));
/* Set seed 1234ULL = unsigned long long */
srand48(time(NULL));
HANDLER_CURAND(hiprandSetPseudoRandomGeneratorSeed(gen, lrand48()));
/* Set seed 1234ULL = unsigned long long
Use this to generate the same random numbers*/
// HANDLER_CURAND(hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL));
/* Generate n floats on device */
HANDLER_CURAND(hiprandGenerateUniform(gen, d_data.elements, n));
/* Cleanup */
HANDLER_CURAND(hiprandDestroyGenerator(gen));
return EXIT_SUCCESS;
}
void onDevice(Vector<float> h_c) {
Vector<float> d_a, d_b, d_c;
d_a.length = N;
d_b.length = N;
d_c.length = BLOCKS;
// start timer
GpuTimer timer;
timer.Start();
// allocate memory on the GPU
HANDLER_ERROR_ERR(hipMalloc((void**)&d_a.elements, ARRAY_BYTES));
HANDLER_ERROR_ERR(hipMalloc((void**)&d_b.elements, ARRAY_BYTES));
HANDLER_ERROR_ERR(hipMalloc((void**)&d_c.elements, P_ARRAY_BYTES));
randomNumbersGenerator(d_a, N);
randomNumbersGenerator(d_b, N);
hipLaunchKernelGGL(( dotKernel), dim3(BLOCKS), dim3(THREADS), 0, 0, d_a, d_b, d_c);
HANDLER_ERROR_MSG("kernel panic!!!");
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(hipMemcpy(h_c.elements, d_c.elements, P_ARRAY_BYTES,
hipMemcpyDeviceToHost));
// stop timer
timer.Stop();
// print time
printf("Time : %f ms\n", timer.Elapsed());
// free GPU memory
HANDLER_ERROR_ERR(hipFree(d_a.elements));
HANDLER_ERROR_ERR(hipFree(d_b.elements));
HANDLER_ERROR_ERR(hipFree(d_c.elements));
}
void test() {
Vector<float> h_c;
// declare vector to store results
h_c.length = BLOCKS;
h_c.elements = (float*)malloc(P_ARRAY_BYTES);
// call device configuration
onDevice(h_c);
float finalValue = 0.0;
// verify that the GPU did the work we requested
for (int i = 0; i < BLOCKS; i++) {
finalValue += h_c.getElement(i);
}
printf("Dot result = %f \n", finalValue);
printf("-: successful execution :-\n");
free(h_c.elements);
}
int main(void) {
test();
return 0;
} | df2126a8d56c4d4be6bff2fe4eaaef7024e7e996.cu | #include <curand.h>
#include "common/Error.h"
#include "common/GpuTimer.h"
#include "common/Vector.h"
#define N (32 * 1024)
#define THREADS 256
#define BLOCKS 32
const int ARRAY_BYTES = N * sizeof(float);
const int P_ARRAY_BYTES = BLOCKS * sizeof(float);
__global__ void dotKernel(Vector<float> d_a,
Vector<float> d_b,
Vector<float> d_c) {
__shared__ float cache[THREADS];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;
float temp = 0;
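// Grid-stride loop: each thread accumulates products at stride blockDim.x * gridDim.x,
// so N may be larger than the total number of threads launched.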
while (tid < N) {
temp += d_a.getElement(tid) * d_b.getElement(tid);
tid += blockDim.x * gridDim.x;
}
// set the cache values
cache[cacheIndex] = temp;
// synchronize threads in this block
__syncthreads();
// for reductions, threadsPerBlock must be a power of 2
// because of the following code
int i = blockDim.x / 2;
while (i != 0) {
__syncthreads();
if (cacheIndex < i) {
cache[cacheIndex] += cache[cacheIndex + i];
}
i /= 2;
}
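// Thread 0 publishes this block's partial sum; the host adds the BLOCKS partials in test().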
if (cacheIndex == 0)
d_c.setElement(blockIdx.x, cache[0]);
}
int randomNumbersGenerator(Vector<float> d_data, int n) {
curandGenerator_t gen;
/* Create pseudo-random number generator */
HANDLER_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
/* Set seed 1234ULL = unsigned long long */
srand48(time(NULL));
HANDLER_CURAND(curandSetPseudoRandomGeneratorSeed(gen, lrand48()));
/* Set seed 1234ULL = unsigned long long
Use this to generate the same random numbers*/
// HANDLER_CURAND(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL));
/* Generate n floats on device */
HANDLER_CURAND(curandGenerateUniform(gen, d_data.elements, n));
/* Cleanup */
HANDLER_CURAND(curandDestroyGenerator(gen));
return EXIT_SUCCESS;
}
void onDevice(Vector<float> h_c) {
Vector<float> d_a, d_b, d_c;
d_a.length = N;
d_b.length = N;
d_c.length = BLOCKS;
// start timer
GpuTimer timer;
timer.Start();
// allocate memory on the GPU
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a.elements, ARRAY_BYTES));
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b.elements, ARRAY_BYTES));
HANDLER_ERROR_ERR(cudaMalloc((void**)&d_c.elements, P_ARRAY_BYTES));
randomNumbersGenerator(d_a, N);
randomNumbersGenerator(d_b, N);
dotKernel<<<BLOCKS, THREADS>>>(d_a, d_b, d_c);
HANDLER_ERROR_MSG("kernel panic!!!");
// copy data back from the GPU to the CPU
HANDLER_ERROR_ERR(cudaMemcpy(h_c.elements, d_c.elements, P_ARRAY_BYTES,
cudaMemcpyDeviceToHost));
// stop timer
timer.Stop();
// print time
printf("Time : %f ms\n", timer.Elapsed());
// free GPU memory
HANDLER_ERROR_ERR(cudaFree(d_a.elements));
HANDLER_ERROR_ERR(cudaFree(d_b.elements));
HANDLER_ERROR_ERR(cudaFree(d_c.elements));
}
void test() {
Vector<float> h_c;
// declare vector to store results
h_c.length = BLOCKS;
h_c.elements = (float*)malloc(P_ARRAY_BYTES);
// call device configuration
onDevice(h_c);
float finalValue = 0.0;
// verify that the GPU did the work we requested
for (int i = 0; i < BLOCKS; i++) {
finalValue += h_c.getElement(i);
}
printf("Dot result = %f \n", finalValue);
printf("-: successful execution :-\n");
free(h_c.elements);
}
int main(void) {
test();
return 0;
} |
22c9d812adcf50e4b468b958d63259b9f1697b14.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <hip/hip_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o linearcuda_bishal linear_bishal_cuda.cu -lm
*
* To run:
* ./linearcuda_bishal
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t{
double x;
double y;
}point_t;
int n_data = 1000;
__device__ int d_n_data =1000;
point_t data[] = {
{67.50,117.63},{65.33,126.07},{82.95,145.73},{76.19,113.32},
{87.53,145.91},{73.30,132.50},{76.57,134.90},{68.72,115.55},
{73.32,140.31},{78.84,143.44},{71.68,120.91},{92.42,138.04},
{76.05,128.45},{34.36,75.04},{91.02,154.71},{98.33,178.86},
{75.34,134.84},{84.94,143.54},{34.55,73.14},{22.78,36.84},
{11.34,37.95},{77.02,121.28},{20.48,54.11},{85.81,158.12},
{98.14,169.75},{61.93,126.98},{44.68,77.35},{38.15,83.43},
{30.16,67.27},{76.68,133.30},{86.62,141.45},{60.09,116.95},
{26.91,70.86},{ 9.10,37.19},{11.23,36.15},{26.50,59.09},
{41.44,93.35},{67.47,131.65},{89.46,161.35},{19.81,38.73},
{51.34,93.22},{97.28,174.58},{33.38,68.25},{19.98,40.87},
{44.04,85.03},{70.68,125.28},{84.78,128.46},{63.73,98.19},
{16.48,31.39},{91.67,169.18},{13.59,31.83},{69.96,133.01},
{15.59,53.61},{53.87,116.25},{57.95,119.53},{88.77,156.53},
{ 4.54,19.19},{60.18,111.69},{76.51,143.61},{21.62,54.08},
{53.82,107.72},{28.55,79.61},{51.32,89.52},{60.46,135.12},
{68.14,124.70},{13.20,32.38},{94.61,160.20},{57.63,99.17},
{80.81,143.84},{92.81,143.31},{91.60,164.46},{13.32,40.64},
{93.33,174.11},{50.18,102.21},{ 0.11,28.39},{69.56,119.09},
{55.02,110.04},{74.47,146.21},{91.14,163.71},{65.21,125.47},
{58.83,100.80},{10.02,46.51},{94.36,158.71},{51.22,103.94},
{ 9.25,34.11},{12.64,44.32},{50.24,104.77},{15.73,37.63},
{53.03,101.19},{30.42,72.53},{47.90,90.79},{25.89,68.64},
{75.24,126.35},{80.87,133.87},{ 5.86,31.16},{62.88,112.89},
{81.94,145.67},{51.81,88.24},{82.74,122.52},{80.97,139.14},
{ 0.28, 1.97},{62.20,111.18},{55.95,100.36},{29.17,61.74},
{71.13,120.95},{11.42,42.50},{38.60,70.96},{47.24,88.20},
{ 4.25,26.16},{13.53,50.16},{23.30,64.38},{96.18,162.57},
{97.45,167.05},{86.09,139.37},{19.61,40.67},{75.10,137.54},
{61.73,124.08},{ 7.59,27.78},{ 5.53,13.80},{59.76,116.05},
{19.08,55.39},{41.68,74.96},{16.33,42.42},{96.25,161.59},
{69.83,121.89},{ 5.65,37.87},{42.46,86.94},{79.37,151.11},
{48.34,97.43},{57.96,111.54},{22.31,63.95},{ 6.03,14.45},
{38.59,72.82},{91.91,166.06},{77.34,149.68},{20.95,49.40},
{18.24,44.16},{46.33,85.77},{87.69,162.06},{ 5.63,33.09},
{25.64,62.39},{78.37,129.15},{90.63,162.90},{59.07,108.46},
{ 3.73, 9.62},{73.31,127.30},{85.44,148.06},{62.86,111.04},
{27.61,66.72},{97.81,162.18},{76.86,141.77},{65.90,142.09},
{89.34,144.60},{ 9.42,48.00},{51.54,104.84},{11.47,42.53},
{42.31,79.72},{62.70,111.95},{15.81,44.71},{51.03,101.97},
{50.54,98.74},{84.62,138.28},{95.25,169.94},{ 3.97,31.21},
{10.63, 9.82},{ 1.47,32.91},{67.16,129.22},{10.14,26.05},
{52.42,103.57},{41.93,90.91},{96.18,166.50},{ 0.25,16.19},
{20.73,49.87},{34.86,70.58},{39.49,83.99},{93.26,153.09},
{89.43,147.70},{46.72,90.16},{30.27,50.94},{ 7.73,40.77},
{47.24,89.70},{60.71,110.70},{10.25,35.87},{87.93,176.16},
{81.83,132.92},{47.52,95.67},{ 8.22,30.97},{ 0.16,19.43},
{ 7.67,39.19},{25.22,46.59},{37.39,94.24},{23.87,54.68},
{53.00,94.78},{55.12,113.11},{ 0.39,17.41},{12.25,42.86},
{24.12,67.60},{40.49,92.29},{52.77,87.06},{12.23,46.57},
{67.85,125.89},{42.67,89.64},{34.42,61.02},{ 1.94,18.44},
{53.40,111.16},{89.61,164.56},{ 3.82, 3.73},{96.24,158.21},
{77.04,135.04},{97.05,148.24},{26.71,51.44},{95.02,163.28},
{34.29,61.81},{ 1.62,21.43},{67.74,107.75},{98.19,159.80},
{17.62,54.87},{85.72,146.11},{23.67,53.85},{49.02,101.01},
{93.66,161.56},{44.72,86.99},{72.81,113.39},{60.91,112.51},
{24.17,61.50},{49.89,89.80},{ 8.97,45.83},{26.67,59.28},
{62.50,111.35},{11.07,25.58},{37.01,63.82},{18.94,46.54},
{61.63,108.22},{28.93,50.01},{55.36,99.90},{92.64,173.42},
{28.57,52.10},{ 9.61,30.76},{19.82,52.09},{47.92,90.78},
{28.85,70.65},{33.80,38.50},{29.53,66.71},{42.50,89.01},
{34.95,92.60},{83.24,150.06},{94.97,158.22},{63.79,123.66},
{94.60,157.04},{79.72,136.29},{63.38,116.51},{16.22,41.53},
{40.06,65.75},{54.36,89.16},{65.52,130.07},{19.95,52.07},
{78.01,121.11},{32.30,71.77},{84.85,139.15},{50.25,98.61},
{72.77,124.69},{59.41,100.91},{89.09,168.89},{76.82,142.52},
{26.18,56.42},{10.95,52.42},{62.40,111.33},{62.71,102.03},
{ 2.35,13.42},{ 7.19,41.90},{62.53,123.58},{15.54,52.27},
{ 0.80,20.28},{ 5.30,26.03},{13.01,57.51},{19.16,35.04},
{59.74,133.37},{33.93,83.73},{ 4.54,17.74},{18.61,48.23},
{72.71,133.09},{51.18,90.42},{51.26,104.57},{58.02,83.55},
{68.78,148.00},{29.56,70.05},{10.44,22.10},{90.96,154.32},
{13.82,28.56},{88.00,150.89},{51.69,92.07},{54.30,90.90},
{57.44,101.77},{80.12,149.10},{10.78,39.64},{11.95,64.56},
{97.71,178.99},{37.08,69.88},{ 0.47,17.14},{65.62,109.62},
{99.78,179.04},{81.77,158.12},{11.90,33.54},{85.24,151.30},
{49.38,78.83},{63.79,114.06},{32.79,57.50},{31.96,88.52},
{84.21,144.62},{49.77,102.25},{49.55,88.18},{ 8.26,38.38},
{47.36,90.74},{97.88,170.49},{44.11,78.79},{38.23,88.99},
{40.69,69.98},{ 7.36,36.33},{56.85,111.52},{13.64,36.87},
{53.35,116.88},{72.47,124.76},{46.64,107.91},{94.73,151.85},
{66.61,117.51},{12.35,61.26},{79.14,159.92},{79.02,140.10},
{45.42,94.75},{16.54,29.85},{91.82,153.33},{28.86,55.11},
{50.75,79.96},{10.11,36.95},{32.04,68.39},{71.95,131.62},
{57.09,109.51},{10.91,27.38},{62.49,115.05},{20.30,46.80},
{93.11,159.57},{71.85,130.18},{53.28,108.81},{55.38,111.22},
{85.54,151.96},{22.64,52.50},{56.67,98.37},{ 0.97,21.47},
{72.50,138.03},{26.98,45.21},{96.25,167.19},{16.31,40.83},
{58.79,87.57},{47.38,89.11},{90.04,157.08},{32.23,62.24},
{11.57,34.79},{23.99,51.20},{64.23,105.49},{72.15,107.38},
{37.45,77.37},{73.55,128.18},{36.90,78.88},{45.26,95.74},
{37.99,74.96},{63.67,123.99},{68.51,129.23},{13.85,35.04},
{59.04,93.67},{54.42,102.56},{89.89,148.97},{76.40,139.33},
{15.26,37.71},{61.79,114.18},{31.03,61.43},{96.81,157.36},
{41.43,93.08},{59.88,107.00},{75.72,122.81},{47.51,113.65},
{39.71,81.28},{73.15,145.81},{13.27,27.44},{73.94,130.58},
{48.11,95.15},{91.97,147.09},{29.24,56.59},{88.10,143.34},
{83.07,136.67},{ 1.60,25.57},{83.37,132.98},{32.81,81.72},
{32.76,61.42},{26.69,62.44},{34.24,70.93},{75.68,125.27},
{96.68,165.04},{95.66,168.80},{79.86,144.53},{74.34,121.30},
{57.43,94.75},{56.67,79.08},{54.07,88.83},{99.94,171.14},
{66.96,110.58},{77.27,141.31},{68.77,120.16},{27.42,77.06},
{ 3.47,33.83},{22.31,49.66},{56.78,101.75},{96.06,157.01},
{ 1.29,25.47},{ 2.97,42.15},{66.51,105.60},{37.81,72.23},
{ 3.07,33.29},{37.37,92.70},{ 7.52,32.65},{43.43,75.38},
{63.53,120.10},{55.30,106.01},{65.04,118.04},{ 5.91,21.90},
{65.28,121.06},{29.55,51.16},{41.39,88.10},{35.63,81.24},
{86.27,136.99},{15.92,72.35},{75.93,120.09},{91.92,160.74},
{97.55,169.39},{70.19,117.49},{16.28,38.79},{44.36,81.43},
{87.91,149.02},{ 3.52,38.16},{59.12,120.72},{ 1.90, 0.73},
{83.31,156.42},{44.25,74.81},{36.88,57.45},{80.37,157.35},
{66.99,138.80},{79.54,145.55},{18.33,45.70},{64.15,122.52},
{34.89,69.76},{46.89,93.34},{14.47,48.95},{ 4.47,11.21},
{42.32,86.99},{31.84,63.03},{33.34,81.26},{ 4.88,25.36},
{79.82,133.64},{40.63,100.56},{63.46,121.03},{96.80,151.04},
{92.72,156.50},{90.13,156.67},{87.25,150.80},{63.02,122.96},
{17.30,47.83},{24.10,53.74},{55.24,105.56},{49.54,106.29},
{50.18,92.64},{28.50,73.07},{75.82,141.86},{43.76,88.26},
{33.55,61.23},{66.59,98.81},{25.78,64.50},{ 5.19,31.93},
{32.05,72.33},{61.50,119.08},{39.73,91.92},{80.39,146.69},
{73.53,149.32},{40.57,62.81},{91.25,166.56},{63.33,112.85},
{ 1.32,13.80},{87.01,143.92},{84.90,132.20},{36.73,88.35},
{81.82,127.95},{77.33,143.68},{ 4.44,17.14},{71.90,134.73},
{59.09,106.07},{83.32,145.03},{56.43,87.15},{55.72,118.37},
{35.02,93.87},{76.13,111.18},{43.98,75.47},{92.99,165.88},
{31.66,59.37},{28.52,59.74},{82.09,144.05},{26.09,49.24},
{70.97,117.20},{ 7.68,37.90},{70.42,123.06},{40.47,82.04},
{73.52,133.29},{21.29,62.15},{74.56,121.04},{76.26,137.21},
{10.29,56.09},{28.54,78.38},{21.19,63.67},{40.37,88.01},
{ 9.97,60.42},{59.83,106.32},{36.88,81.58},{64.00,122.44},
{44.79,60.82},{25.61,52.42},{32.59,72.08},{65.16,118.02},
{13.14,39.55},{75.40,123.94},{45.15,97.24},{53.90,113.09},
{75.55,129.32},{ 0.43,21.46},{52.76,92.05},{90.01,148.61},
{26.95,57.55},{30.46,68.83},{39.15,81.42},{58.32,98.73},
{70.37,115.08},{ 5.94,21.53},{ 3.43,33.83},{32.38,68.35},
{59.53,111.46},{37.94,108.20},{24.71,63.30},{96.93,166.78},
{87.47,146.91},{33.94,100.63},{76.73,141.16},{31.78,71.95},
{85.03,155.23},{ 2.52,39.44},{44.84,95.65},{77.68,131.95},
{41.72,86.46},{18.32,57.93},{69.89,120.19},{54.70,86.01},
{54.99,104.64},{48.59,95.15},{24.36,53.97},{51.98,96.80},
{60.23,100.55},{59.09,85.63},{33.81,67.74},{12.22,41.13},
{26.38,65.33},{ 7.09,30.43},{24.85,50.55},{99.52,170.23},
{84.73,129.42},{39.71,92.69},{57.91,105.37},{33.52,75.23},
{33.93,65.91},{27.34,52.79},{58.75,104.12},{60.52,110.72},
{ 2.81,12.48},{ 8.02,27.71},{64.73,120.96},{82.03,159.82},
{22.60,38.52},{24.08,61.92},{66.05,102.86},{19.42,49.76},
{48.04,97.54},{46.20,96.45},{ 1.17,17.39},{63.69,129.79},
{29.84,75.40},{26.53,45.12},{95.19,149.02},{90.77,157.73},
{41.81,86.87},{74.43,110.80},{49.39,97.73},{22.62,49.26},
{ 4.87,18.08},{19.41,58.94},{42.62,107.88},{77.24,159.90},
{80.67,133.41},{44.37,89.30},{51.39,91.86},{25.27,57.14},
{10.84,16.20},{99.73,182.30},{85.08,167.49},{16.49,38.24},
{48.48,98.37},{30.56,50.30},{45.38,97.80},{33.13,73.18},
{39.58,86.47},{56.27,115.05},{18.85,48.41},{51.63,99.71},
{ 7.00,29.08},{32.17,71.87},{44.00,94.70},{ 3.73,38.62},
{72.17,111.87},{29.35,54.28},{50.13,94.46},{91.52,170.01},
{40.05,72.34},{46.87,67.83},{76.24,138.98},{26.75,63.90},
{63.87,105.49},{13.12,23.17},{12.58,53.66},{ 8.20,43.82},
{14.36,32.76},{32.84,51.21},{11.45,24.07},{93.59,140.71},
{58.09,85.90},{52.69,102.77},{38.38,85.50},{98.36,158.74},
{74.87,125.72},{32.47,73.67},{55.48,122.80},{42.12,87.03},
{75.24,144.54},{71.66,134.49},{34.01,66.08},{58.69,105.94},
{35.47,72.45},{51.46,100.28},{87.79,150.58},{10.86,27.33},
{68.38,133.79},{38.57,86.54},{64.01,109.90},{17.09,63.00},
{ 9.34,35.52},{66.20,127.61},{22.82,52.08},{79.23,148.39},
{19.50,45.48},{ 4.76,14.25},{ 0.11,24.33},{55.86,91.16},
{43.58,90.07},{14.59,50.39},{39.88,99.03},{41.04,85.30},
{87.44,169.74},{55.54,98.60},{ 2.07, 1.75},{29.04,64.38},
{41.45,92.95},{73.41,124.41},{78.49,152.32},{33.64,87.75},
{67.48,139.43},{87.13,144.84},{59.65,100.97},{45.11,87.31},
{76.40,139.82},{62.21,124.75},{78.60,163.67},{20.57,49.21},
{80.06,138.88},{60.51,108.48},{ 2.05,29.92},{11.23,23.36},
{10.61,39.17},{30.63,63.71},{ 5.13,41.33},{74.37,123.26},
{14.03,38.39},{ 6.31,36.58},{ 9.16,36.90},{75.16,138.63},
{88.12,149.50},{ 1.78,31.54},{28.88,64.20},{79.20,136.08},
{27.98,48.89},{89.12,158.04},{ 9.51,11.76},{10.45,40.24},
{22.73,61.87},{73.97,124.05},{ 7.09,10.69},{11.73,32.78},
{90.67,166.68},{88.17,167.73},{97.82,164.53},{63.81,103.31},
{74.11,137.22},{71.03,119.75},{43.78,85.30},{84.66,148.37},
{12.33,30.33},{83.29,138.56},{21.34,71.07},{40.14,68.00},
{73.05,119.85},{ 7.44,29.55},{89.02,151.86},{17.24,61.99},
{41.66,73.47},{50.62,99.48},{60.53,111.85},{12.70,17.62},
{66.84,110.12},{52.27,89.56},{98.72,178.46},{79.92,113.48},
{23.55,43.25},{38.26,96.94},{56.52,118.31},{53.04,96.75},
{35.73,72.29},{60.43,109.43},{77.67,137.73},{45.78,98.97},
{32.36,67.11},{23.89,68.74},{24.53,45.00},{97.28,162.74},
{27.73,50.67},{90.85,165.35},{93.94,153.83},{ 6.63,43.74},
{93.38,150.59},{43.87,77.99},{49.91,86.07},{82.99,151.00},
{ 7.00,40.39},{46.17,89.39},{28.87,66.05},{72.85,141.73},
{27.21,58.82},{42.02,79.42},{95.29,149.89},{ 7.03,21.47},
{80.55,133.93},{75.29,147.77},{32.44,69.31},{29.14,61.10},
{94.21,157.98},{48.51,115.01},{ 9.76,32.67},{ 6.69,20.71},
{14.30,44.18},{98.57,173.85},{ 4.01,24.74},{34.46,60.56},
{19.21,46.64},{89.60,166.71},{27.93,53.40},{22.10,65.12},
{20.30,42.75},{95.02,166.30},{76.91,138.66},{ 0.28,32.32},
{62.29,108.93},{18.53,44.52},{58.50,118.40},{79.87,133.47},
{ 1.06,31.67},{43.28,75.77},{34.13,84.84},{71.34,142.31},
{94.14,172.56},{18.77,37.09},{ 3.58,15.15},{34.71,49.88},
{15.87,25.31},{40.55,70.94},{63.57,116.94},{33.01,78.49},
{12.21,36.69},{83.80,139.29},{15.41,38.32},{23.53,70.10},
{19.25,53.57},{32.17,40.06},{80.00,133.35},{15.29,51.71},
{43.63,81.51},{70.07,126.99},{44.69,85.99},{89.03,158.09},
{36.23,60.18},{ 2.37, 1.33},{28.27,71.44},{37.81,80.29},
{74.61,114.15},{32.45,63.47},{76.90,145.43},{45.78,89.56},
{43.76,90.34},{72.40,121.11},{80.03,158.07},{89.76,159.97},
{ 0.79,30.07},{74.50,132.38},{46.19,76.00},{98.40,166.43},
{83.71,152.87},{69.45,138.18},{20.09,57.62},{10.82,44.42},
{94.90,161.52},{56.24,105.19},{25.80,45.99},{78.59,144.32},
{41.90,95.14},{88.38,158.28},{72.22,136.40},{98.04,151.63},
{ 3.44,35.78},{18.58,59.71},{58.74,112.02},{43.90,84.81},
{59.96,131.25},{55.08,113.52},{11.76,36.25},{75.05,134.27},
{18.62,45.25},{49.76,101.82},{80.57,154.63},{93.50,167.65},
{70.39,126.65},{53.57,107.27},{36.88,59.79},{10.52,25.86},
{64.89,100.31},{35.21,90.41},{ 6.23,33.90},{93.30,143.70},
{63.45,129.25},{10.07,36.79},{28.01,58.59},{59.22,100.12},
{46.14,75.11},{51.65,78.56},{42.40,66.31},{99.08,164.34},
{ 8.14,35.13},{61.88,118.50},{39.24,88.28},{37.84,82.29},
{77.53,154.65},{ 3.52,12.20},{94.10,150.41},{52.95,90.29},
{33.45,63.79},{59.77,97.17},{37.34,66.25},{62.51,101.43},
{58.38,123.37},{85.57,146.57},{59.50,110.36},{64.77,113.77},
{52.31,86.72},{74.08,119.62},{20.13,55.40},{70.01,137.11},
{73.03,141.72},{72.90,116.95},{ 9.77,18.64},{77.91,120.62},
{35.13,81.81},{94.76,163.60},{84.97,153.65},{50.99,97.73},
{76.95,139.73},{95.14,165.88},{53.85,91.54},{11.67,32.28},
{74.95,128.36},{62.48,122.55},{52.39,104.02},{84.64,137.02},
{60.79,90.69},{10.88,42.09},{89.36,155.24},{42.14,99.07},
{10.47,24.63},{81.53,125.43},{83.23,156.18},{21.79,42.60},
{22.12,42.96},{84.10,145.52},{ 7.28,19.37},{45.70,87.18},
{68.93,116.49},{44.33,92.72},{83.48,164.04},{36.29,59.75},
{56.87,105.36},{10.77,32.58},{37.26,72.49},{81.52,151.25},
{20.22,51.77},{ 0.53,13.54},{70.22,141.70},{86.98,153.36},
{86.88,155.08},{95.61,163.24},{10.92,46.94},{52.02,86.13},
{79.54,145.77},{45.72,80.50},{23.64,54.82},{40.59,76.38},
{10.51,24.25},{88.39,154.46},{96.15,153.89},{52.43,104.17},
{56.14,93.00},{14.86,52.67},{17.22,45.09},{65.58,106.79},
{37.27,49.60},{21.86,54.55},{30.77,65.57},{18.91,46.54},
{99.20,188.44},{64.15,127.79},{53.69,114.35},{80.75,129.07},
{20.46,42.99},{43.95,89.80},{11.86,34.56},{76.24,137.26},
{60.32,123.89},{13.10,47.27},{ 3.21,27.37},{56.46,123.20},
{28.08,60.38},{62.73,112.94},{56.62,118.19},{ 7.11,21.06},
{35.00,74.47},{99.39,182.65},{31.10,63.43},{18.34,55.60},
{63.21,119.43},{96.73,152.88},{85.87,131.41},{85.13,150.34},
{58.50,106.92},{ 9.39,25.13},{32.07,64.76},{70.15,104.89},
{85.64,126.01},{ 5.71,31.30},{10.14,34.51},{55.14,97.21},
{40.93,71.15},{91.84,166.86},{11.77,33.90},{58.69,95.90},
{32.25,88.75},{79.19,149.50},{38.70,81.86},{23.71,55.47},
{58.19,95.57},{60.07,101.54},{20.08,56.31},{ 5.15,21.22},
{63.36,118.68},{58.66,97.64},{99.72,167.67},{55.95,108.87},
{83.51,155.14},{20.52,56.46},{62.20,126.56},{62.36,108.09},
{25.79,51.49},{10.73,31.13},{40.02,89.61},{ 0.96,19.08}
};
double residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
__device__ double d_residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
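// One thread per data point: stores the squared residual for point i.
// There is no bounds check, so the 100x10 launch below must cover exactly n_data points.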
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0){
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(){
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
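// om/oc hold the 8 unit step directions (N, NE, E, ..., NW) taken around the current
// (m,c) base estimate in m-c space.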
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
hipError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be= rms_error(bm,bc);
error=hipMalloc(&d_dm,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_dm returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_dc,(sizeof(double) * 8));
if(error){
fprintf(stderr,"hipMalloc on d_dc returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"hipMalloc on d_error_sum_arr returned %d %s\n",error, //371
hipGetErrorString(error));
exit(1);
}
error=hipMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"hipMalloc on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i]= bc + (oc[i] * step);
}
error = hipMemcpy(d_dm,dm,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_dm returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_dc,dc,(sizeof(double)*8), hipMemcpyHostToDevice);
if(error){
fprintf(stderr,"hipMemcpy to d_dc returned %d %s\n",error,
hipGetErrorString(error));
}
error = hipMemcpy(d_data, data,sizeof(data), hipMemcpyHostToDevice); //401
if(error){
fprintf(stderr,"hipMemcpy to d_data returned %d %s\n",error,
hipGetErrorString(error));
}
for(i=0;i<8;i++){
double h_error_sum_arr[1000];
double error_sum_total = 0; // accumulated below, so it must start at zero
double error_sum_mean;
hipLaunchKernelGGL(d_rms_error, dim3(100), dim3(10), 0, 0, &d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
hipDeviceSynchronize();
error =hipMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000),
hipMemcpyDeviceToHost);
if(error){
fprintf(stderr,"hipMemcpy to error_sum returned %d %s\n",error,
hipGetErrorString(error));
}
for(int j=0;j<n_data;j++){
error_sum_total+= h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error){
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0;
}
if(best_error <be){
be=best_error;
bm =dm[best_error_i];
bc= dc[best_error_i];
}else {
minimum_found = 1;
}
}
error = hipFree(d_dm);
if(error){
fprintf(stderr,"hipFree on d_dm returned %d %s\n",error,
hipGetErrorString(error)); //453
exit(1);
}
error = hipFree(d_dc);
if(error){
fprintf(stderr,"hipFree on d_dc returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_data);
if(error){
fprintf(stderr,"hipFree on d_data returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
error = hipFree(d_error_sum_arr);
if(error){
fprintf(stderr,"hipFree on d_error_sum_arr returned %d %s\n",error,
hipGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
;
| 22c9d812adcf50e4b468b958d63259b9f1697b14.cu | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o linearcuda_bishal linear_bishal_cuda.cu -lm
*
* To run:
* ./linearcuda_bishal
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t{
double x;
double y;
}point_t;
int n_data = 1000;
__device__ int d_n_data =1000;
point_t data[] = {
{67.50,117.63},{65.33,126.07},{82.95,145.73},{76.19,113.32},
{87.53,145.91},{73.30,132.50},{76.57,134.90},{68.72,115.55},
{73.32,140.31},{78.84,143.44},{71.68,120.91},{92.42,138.04},
{76.05,128.45},{34.36,75.04},{91.02,154.71},{98.33,178.86},
{75.34,134.84},{84.94,143.54},{34.55,73.14},{22.78,36.84},
{11.34,37.95},{77.02,121.28},{20.48,54.11},{85.81,158.12},
{98.14,169.75},{61.93,126.98},{44.68,77.35},{38.15,83.43},
{30.16,67.27},{76.68,133.30},{86.62,141.45},{60.09,116.95},
{26.91,70.86},{ 9.10,37.19},{11.23,36.15},{26.50,59.09},
{41.44,93.35},{67.47,131.65},{89.46,161.35},{19.81,38.73},
{51.34,93.22},{97.28,174.58},{33.38,68.25},{19.98,40.87},
{44.04,85.03},{70.68,125.28},{84.78,128.46},{63.73,98.19},
{16.48,31.39},{91.67,169.18},{13.59,31.83},{69.96,133.01},
{15.59,53.61},{53.87,116.25},{57.95,119.53},{88.77,156.53},
{ 4.54,19.19},{60.18,111.69},{76.51,143.61},{21.62,54.08},
{53.82,107.72},{28.55,79.61},{51.32,89.52},{60.46,135.12},
{68.14,124.70},{13.20,32.38},{94.61,160.20},{57.63,99.17},
{80.81,143.84},{92.81,143.31},{91.60,164.46},{13.32,40.64},
{93.33,174.11},{50.18,102.21},{ 0.11,28.39},{69.56,119.09},
{55.02,110.04},{74.47,146.21},{91.14,163.71},{65.21,125.47},
{58.83,100.80},{10.02,46.51},{94.36,158.71},{51.22,103.94},
{ 9.25,34.11},{12.64,44.32},{50.24,104.77},{15.73,37.63},
{53.03,101.19},{30.42,72.53},{47.90,90.79},{25.89,68.64},
{75.24,126.35},{80.87,133.87},{ 5.86,31.16},{62.88,112.89},
{81.94,145.67},{51.81,88.24},{82.74,122.52},{80.97,139.14},
{ 0.28, 1.97},{62.20,111.18},{55.95,100.36},{29.17,61.74},
{71.13,120.95},{11.42,42.50},{38.60,70.96},{47.24,88.20},
{ 4.25,26.16},{13.53,50.16},{23.30,64.38},{96.18,162.57},
{97.45,167.05},{86.09,139.37},{19.61,40.67},{75.10,137.54},
{61.73,124.08},{ 7.59,27.78},{ 5.53,13.80},{59.76,116.05},
{19.08,55.39},{41.68,74.96},{16.33,42.42},{96.25,161.59},
{69.83,121.89},{ 5.65,37.87},{42.46,86.94},{79.37,151.11},
{48.34,97.43},{57.96,111.54},{22.31,63.95},{ 6.03,14.45},
{38.59,72.82},{91.91,166.06},{77.34,149.68},{20.95,49.40},
{18.24,44.16},{46.33,85.77},{87.69,162.06},{ 5.63,33.09},
{25.64,62.39},{78.37,129.15},{90.63,162.90},{59.07,108.46},
{ 3.73, 9.62},{73.31,127.30},{85.44,148.06},{62.86,111.04},
{27.61,66.72},{97.81,162.18},{76.86,141.77},{65.90,142.09},
{89.34,144.60},{ 9.42,48.00},{51.54,104.84},{11.47,42.53},
{42.31,79.72},{62.70,111.95},{15.81,44.71},{51.03,101.97},
{50.54,98.74},{84.62,138.28},{95.25,169.94},{ 3.97,31.21},
{10.63, 9.82},{ 1.47,32.91},{67.16,129.22},{10.14,26.05},
{52.42,103.57},{41.93,90.91},{96.18,166.50},{ 0.25,16.19},
{20.73,49.87},{34.86,70.58},{39.49,83.99},{93.26,153.09},
{89.43,147.70},{46.72,90.16},{30.27,50.94},{ 7.73,40.77},
{47.24,89.70},{60.71,110.70},{10.25,35.87},{87.93,176.16},
{81.83,132.92},{47.52,95.67},{ 8.22,30.97},{ 0.16,19.43},
{ 7.67,39.19},{25.22,46.59},{37.39,94.24},{23.87,54.68},
{53.00,94.78},{55.12,113.11},{ 0.39,17.41},{12.25,42.86},
{24.12,67.60},{40.49,92.29},{52.77,87.06},{12.23,46.57},
{67.85,125.89},{42.67,89.64},{34.42,61.02},{ 1.94,18.44},
{53.40,111.16},{89.61,164.56},{ 3.82, 3.73},{96.24,158.21},
{77.04,135.04},{97.05,148.24},{26.71,51.44},{95.02,163.28},
{34.29,61.81},{ 1.62,21.43},{67.74,107.75},{98.19,159.80},
{17.62,54.87},{85.72,146.11},{23.67,53.85},{49.02,101.01},
{93.66,161.56},{44.72,86.99},{72.81,113.39},{60.91,112.51},
{24.17,61.50},{49.89,89.80},{ 8.97,45.83},{26.67,59.28},
{62.50,111.35},{11.07,25.58},{37.01,63.82},{18.94,46.54},
{61.63,108.22},{28.93,50.01},{55.36,99.90},{92.64,173.42},
{28.57,52.10},{ 9.61,30.76},{19.82,52.09},{47.92,90.78},
{28.85,70.65},{33.80,38.50},{29.53,66.71},{42.50,89.01},
{34.95,92.60},{83.24,150.06},{94.97,158.22},{63.79,123.66},
{94.60,157.04},{79.72,136.29},{63.38,116.51},{16.22,41.53},
{40.06,65.75},{54.36,89.16},{65.52,130.07},{19.95,52.07},
{78.01,121.11},{32.30,71.77},{84.85,139.15},{50.25,98.61},
{72.77,124.69},{59.41,100.91},{89.09,168.89},{76.82,142.52},
{26.18,56.42},{10.95,52.42},{62.40,111.33},{62.71,102.03},
{ 2.35,13.42},{ 7.19,41.90},{62.53,123.58},{15.54,52.27},
{ 0.80,20.28},{ 5.30,26.03},{13.01,57.51},{19.16,35.04},
{59.74,133.37},{33.93,83.73},{ 4.54,17.74},{18.61,48.23},
{72.71,133.09},{51.18,90.42},{51.26,104.57},{58.02,83.55},
{68.78,148.00},{29.56,70.05},{10.44,22.10},{90.96,154.32},
{13.82,28.56},{88.00,150.89},{51.69,92.07},{54.30,90.90},
{57.44,101.77},{80.12,149.10},{10.78,39.64},{11.95,64.56},
{97.71,178.99},{37.08,69.88},{ 0.47,17.14},{65.62,109.62},
{99.78,179.04},{81.77,158.12},{11.90,33.54},{85.24,151.30},
{49.38,78.83},{63.79,114.06},{32.79,57.50},{31.96,88.52},
{84.21,144.62},{49.77,102.25},{49.55,88.18},{ 8.26,38.38},
{47.36,90.74},{97.88,170.49},{44.11,78.79},{38.23,88.99},
{40.69,69.98},{ 7.36,36.33},{56.85,111.52},{13.64,36.87},
{53.35,116.88},{72.47,124.76},{46.64,107.91},{94.73,151.85},
{66.61,117.51},{12.35,61.26},{79.14,159.92},{79.02,140.10},
{45.42,94.75},{16.54,29.85},{91.82,153.33},{28.86,55.11},
{50.75,79.96},{10.11,36.95},{32.04,68.39},{71.95,131.62},
{57.09,109.51},{10.91,27.38},{62.49,115.05},{20.30,46.80},
{93.11,159.57},{71.85,130.18},{53.28,108.81},{55.38,111.22},
{85.54,151.96},{22.64,52.50},{56.67,98.37},{ 0.97,21.47},
{72.50,138.03},{26.98,45.21},{96.25,167.19},{16.31,40.83},
{58.79,87.57},{47.38,89.11},{90.04,157.08},{32.23,62.24},
{11.57,34.79},{23.99,51.20},{64.23,105.49},{72.15,107.38},
{37.45,77.37},{73.55,128.18},{36.90,78.88},{45.26,95.74},
{37.99,74.96},{63.67,123.99},{68.51,129.23},{13.85,35.04},
{59.04,93.67},{54.42,102.56},{89.89,148.97},{76.40,139.33},
{15.26,37.71},{61.79,114.18},{31.03,61.43},{96.81,157.36},
{41.43,93.08},{59.88,107.00},{75.72,122.81},{47.51,113.65},
{39.71,81.28},{73.15,145.81},{13.27,27.44},{73.94,130.58},
{48.11,95.15},{91.97,147.09},{29.24,56.59},{88.10,143.34},
{83.07,136.67},{ 1.60,25.57},{83.37,132.98},{32.81,81.72},
{32.76,61.42},{26.69,62.44},{34.24,70.93},{75.68,125.27},
{96.68,165.04},{95.66,168.80},{79.86,144.53},{74.34,121.30},
{57.43,94.75},{56.67,79.08},{54.07,88.83},{99.94,171.14},
{66.96,110.58},{77.27,141.31},{68.77,120.16},{27.42,77.06},
{ 3.47,33.83},{22.31,49.66},{56.78,101.75},{96.06,157.01},
{ 1.29,25.47},{ 2.97,42.15},{66.51,105.60},{37.81,72.23},
{ 3.07,33.29},{37.37,92.70},{ 7.52,32.65},{43.43,75.38},
{63.53,120.10},{55.30,106.01},{65.04,118.04},{ 5.91,21.90},
{65.28,121.06},{29.55,51.16},{41.39,88.10},{35.63,81.24},
{86.27,136.99},{15.92,72.35},{75.93,120.09},{91.92,160.74},
{97.55,169.39},{70.19,117.49},{16.28,38.79},{44.36,81.43},
{87.91,149.02},{ 3.52,38.16},{59.12,120.72},{ 1.90, 0.73},
{83.31,156.42},{44.25,74.81},{36.88,57.45},{80.37,157.35},
{66.99,138.80},{79.54,145.55},{18.33,45.70},{64.15,122.52},
{34.89,69.76},{46.89,93.34},{14.47,48.95},{ 4.47,11.21},
{42.32,86.99},{31.84,63.03},{33.34,81.26},{ 4.88,25.36},
{79.82,133.64},{40.63,100.56},{63.46,121.03},{96.80,151.04},
{92.72,156.50},{90.13,156.67},{87.25,150.80},{63.02,122.96},
{17.30,47.83},{24.10,53.74},{55.24,105.56},{49.54,106.29},
{50.18,92.64},{28.50,73.07},{75.82,141.86},{43.76,88.26},
{33.55,61.23},{66.59,98.81},{25.78,64.50},{ 5.19,31.93},
{32.05,72.33},{61.50,119.08},{39.73,91.92},{80.39,146.69},
{73.53,149.32},{40.57,62.81},{91.25,166.56},{63.33,112.85},
{ 1.32,13.80},{87.01,143.92},{84.90,132.20},{36.73,88.35},
{81.82,127.95},{77.33,143.68},{ 4.44,17.14},{71.90,134.73},
{59.09,106.07},{83.32,145.03},{56.43,87.15},{55.72,118.37},
{35.02,93.87},{76.13,111.18},{43.98,75.47},{92.99,165.88},
{31.66,59.37},{28.52,59.74},{82.09,144.05},{26.09,49.24},
{70.97,117.20},{ 7.68,37.90},{70.42,123.06},{40.47,82.04},
{73.52,133.29},{21.29,62.15},{74.56,121.04},{76.26,137.21},
{10.29,56.09},{28.54,78.38},{21.19,63.67},{40.37,88.01},
{ 9.97,60.42},{59.83,106.32},{36.88,81.58},{64.00,122.44},
{44.79,60.82},{25.61,52.42},{32.59,72.08},{65.16,118.02},
{13.14,39.55},{75.40,123.94},{45.15,97.24},{53.90,113.09},
{75.55,129.32},{ 0.43,21.46},{52.76,92.05},{90.01,148.61},
{26.95,57.55},{30.46,68.83},{39.15,81.42},{58.32,98.73},
{70.37,115.08},{ 5.94,21.53},{ 3.43,33.83},{32.38,68.35},
{59.53,111.46},{37.94,108.20},{24.71,63.30},{96.93,166.78},
{87.47,146.91},{33.94,100.63},{76.73,141.16},{31.78,71.95},
{85.03,155.23},{ 2.52,39.44},{44.84,95.65},{77.68,131.95},
{41.72,86.46},{18.32,57.93},{69.89,120.19},{54.70,86.01},
{54.99,104.64},{48.59,95.15},{24.36,53.97},{51.98,96.80},
{60.23,100.55},{59.09,85.63},{33.81,67.74},{12.22,41.13},
{26.38,65.33},{ 7.09,30.43},{24.85,50.55},{99.52,170.23},
{84.73,129.42},{39.71,92.69},{57.91,105.37},{33.52,75.23},
{33.93,65.91},{27.34,52.79},{58.75,104.12},{60.52,110.72},
{ 2.81,12.48},{ 8.02,27.71},{64.73,120.96},{82.03,159.82},
{22.60,38.52},{24.08,61.92},{66.05,102.86},{19.42,49.76},
{48.04,97.54},{46.20,96.45},{ 1.17,17.39},{63.69,129.79},
{29.84,75.40},{26.53,45.12},{95.19,149.02},{90.77,157.73},
{41.81,86.87},{74.43,110.80},{49.39,97.73},{22.62,49.26},
{ 4.87,18.08},{19.41,58.94},{42.62,107.88},{77.24,159.90},
{80.67,133.41},{44.37,89.30},{51.39,91.86},{25.27,57.14},
{10.84,16.20},{99.73,182.30},{85.08,167.49},{16.49,38.24},
{48.48,98.37},{30.56,50.30},{45.38,97.80},{33.13,73.18},
{39.58,86.47},{56.27,115.05},{18.85,48.41},{51.63,99.71},
{ 7.00,29.08},{32.17,71.87},{44.00,94.70},{ 3.73,38.62},
{72.17,111.87},{29.35,54.28},{50.13,94.46},{91.52,170.01},
{40.05,72.34},{46.87,67.83},{76.24,138.98},{26.75,63.90},
{63.87,105.49},{13.12,23.17},{12.58,53.66},{ 8.20,43.82},
{14.36,32.76},{32.84,51.21},{11.45,24.07},{93.59,140.71},
{58.09,85.90},{52.69,102.77},{38.38,85.50},{98.36,158.74},
{74.87,125.72},{32.47,73.67},{55.48,122.80},{42.12,87.03},
{75.24,144.54},{71.66,134.49},{34.01,66.08},{58.69,105.94},
{35.47,72.45},{51.46,100.28},{87.79,150.58},{10.86,27.33},
{68.38,133.79},{38.57,86.54},{64.01,109.90},{17.09,63.00},
{ 9.34,35.52},{66.20,127.61},{22.82,52.08},{79.23,148.39},
{19.50,45.48},{ 4.76,14.25},{ 0.11,24.33},{55.86,91.16},
{43.58,90.07},{14.59,50.39},{39.88,99.03},{41.04,85.30},
{87.44,169.74},{55.54,98.60},{ 2.07, 1.75},{29.04,64.38},
{41.45,92.95},{73.41,124.41},{78.49,152.32},{33.64,87.75},
{67.48,139.43},{87.13,144.84},{59.65,100.97},{45.11,87.31},
{76.40,139.82},{62.21,124.75},{78.60,163.67},{20.57,49.21},
{80.06,138.88},{60.51,108.48},{ 2.05,29.92},{11.23,23.36},
{10.61,39.17},{30.63,63.71},{ 5.13,41.33},{74.37,123.26},
{14.03,38.39},{ 6.31,36.58},{ 9.16,36.90},{75.16,138.63},
{88.12,149.50},{ 1.78,31.54},{28.88,64.20},{79.20,136.08},
{27.98,48.89},{89.12,158.04},{ 9.51,11.76},{10.45,40.24},
{22.73,61.87},{73.97,124.05},{ 7.09,10.69},{11.73,32.78},
{90.67,166.68},{88.17,167.73},{97.82,164.53},{63.81,103.31},
{74.11,137.22},{71.03,119.75},{43.78,85.30},{84.66,148.37},
{12.33,30.33},{83.29,138.56},{21.34,71.07},{40.14,68.00},
{73.05,119.85},{ 7.44,29.55},{89.02,151.86},{17.24,61.99},
{41.66,73.47},{50.62,99.48},{60.53,111.85},{12.70,17.62},
{66.84,110.12},{52.27,89.56},{98.72,178.46},{79.92,113.48},
{23.55,43.25},{38.26,96.94},{56.52,118.31},{53.04,96.75},
{35.73,72.29},{60.43,109.43},{77.67,137.73},{45.78,98.97},
{32.36,67.11},{23.89,68.74},{24.53,45.00},{97.28,162.74},
{27.73,50.67},{90.85,165.35},{93.94,153.83},{ 6.63,43.74},
{93.38,150.59},{43.87,77.99},{49.91,86.07},{82.99,151.00},
{ 7.00,40.39},{46.17,89.39},{28.87,66.05},{72.85,141.73},
{27.21,58.82},{42.02,79.42},{95.29,149.89},{ 7.03,21.47},
{80.55,133.93},{75.29,147.77},{32.44,69.31},{29.14,61.10},
{94.21,157.98},{48.51,115.01},{ 9.76,32.67},{ 6.69,20.71},
{14.30,44.18},{98.57,173.85},{ 4.01,24.74},{34.46,60.56},
{19.21,46.64},{89.60,166.71},{27.93,53.40},{22.10,65.12},
{20.30,42.75},{95.02,166.30},{76.91,138.66},{ 0.28,32.32},
{62.29,108.93},{18.53,44.52},{58.50,118.40},{79.87,133.47},
{ 1.06,31.67},{43.28,75.77},{34.13,84.84},{71.34,142.31},
{94.14,172.56},{18.77,37.09},{ 3.58,15.15},{34.71,49.88},
{15.87,25.31},{40.55,70.94},{63.57,116.94},{33.01,78.49},
{12.21,36.69},{83.80,139.29},{15.41,38.32},{23.53,70.10},
{19.25,53.57},{32.17,40.06},{80.00,133.35},{15.29,51.71},
{43.63,81.51},{70.07,126.99},{44.69,85.99},{89.03,158.09},
{36.23,60.18},{ 2.37, 1.33},{28.27,71.44},{37.81,80.29},
{74.61,114.15},{32.45,63.47},{76.90,145.43},{45.78,89.56},
{43.76,90.34},{72.40,121.11},{80.03,158.07},{89.76,159.97},
{ 0.79,30.07},{74.50,132.38},{46.19,76.00},{98.40,166.43},
{83.71,152.87},{69.45,138.18},{20.09,57.62},{10.82,44.42},
{94.90,161.52},{56.24,105.19},{25.80,45.99},{78.59,144.32},
{41.90,95.14},{88.38,158.28},{72.22,136.40},{98.04,151.63},
{ 3.44,35.78},{18.58,59.71},{58.74,112.02},{43.90,84.81},
{59.96,131.25},{55.08,113.52},{11.76,36.25},{75.05,134.27},
{18.62,45.25},{49.76,101.82},{80.57,154.63},{93.50,167.65},
{70.39,126.65},{53.57,107.27},{36.88,59.79},{10.52,25.86},
{64.89,100.31},{35.21,90.41},{ 6.23,33.90},{93.30,143.70},
{63.45,129.25},{10.07,36.79},{28.01,58.59},{59.22,100.12},
{46.14,75.11},{51.65,78.56},{42.40,66.31},{99.08,164.34},
{ 8.14,35.13},{61.88,118.50},{39.24,88.28},{37.84,82.29},
{77.53,154.65},{ 3.52,12.20},{94.10,150.41},{52.95,90.29},
{33.45,63.79},{59.77,97.17},{37.34,66.25},{62.51,101.43},
{58.38,123.37},{85.57,146.57},{59.50,110.36},{64.77,113.77},
{52.31,86.72},{74.08,119.62},{20.13,55.40},{70.01,137.11},
{73.03,141.72},{72.90,116.95},{ 9.77,18.64},{77.91,120.62},
{35.13,81.81},{94.76,163.60},{84.97,153.65},{50.99,97.73},
{76.95,139.73},{95.14,165.88},{53.85,91.54},{11.67,32.28},
{74.95,128.36},{62.48,122.55},{52.39,104.02},{84.64,137.02},
{60.79,90.69},{10.88,42.09},{89.36,155.24},{42.14,99.07},
{10.47,24.63},{81.53,125.43},{83.23,156.18},{21.79,42.60},
{22.12,42.96},{84.10,145.52},{ 7.28,19.37},{45.70,87.18},
{68.93,116.49},{44.33,92.72},{83.48,164.04},{36.29,59.75},
{56.87,105.36},{10.77,32.58},{37.26,72.49},{81.52,151.25},
{20.22,51.77},{ 0.53,13.54},{70.22,141.70},{86.98,153.36},
{86.88,155.08},{95.61,163.24},{10.92,46.94},{52.02,86.13},
{79.54,145.77},{45.72,80.50},{23.64,54.82},{40.59,76.38},
{10.51,24.25},{88.39,154.46},{96.15,153.89},{52.43,104.17},
{56.14,93.00},{14.86,52.67},{17.22,45.09},{65.58,106.79},
{37.27,49.60},{21.86,54.55},{30.77,65.57},{18.91,46.54},
{99.20,188.44},{64.15,127.79},{53.69,114.35},{80.75,129.07},
{20.46,42.99},{43.95,89.80},{11.86,34.56},{76.24,137.26},
{60.32,123.89},{13.10,47.27},{ 3.21,27.37},{56.46,123.20},
{28.08,60.38},{62.73,112.94},{56.62,118.19},{ 7.11,21.06},
{35.00,74.47},{99.39,182.65},{31.10,63.43},{18.34,55.60},
{63.21,119.43},{96.73,152.88},{85.87,131.41},{85.13,150.34},
{58.50,106.92},{ 9.39,25.13},{32.07,64.76},{70.15,104.89},
{85.64,126.01},{ 5.71,31.30},{10.14,34.51},{55.14,97.21},
{40.93,71.15},{91.84,166.86},{11.77,33.90},{58.69,95.90},
{32.25,88.75},{79.19,149.50},{38.70,81.86},{23.71,55.47},
{58.19,95.57},{60.07,101.54},{20.08,56.31},{ 5.15,21.22},
{63.36,118.68},{58.66,97.64},{99.72,167.67},{55.95,108.87},
{83.51,155.14},{20.52,56.46},{62.20,126.56},{62.36,108.09},
{25.79,51.49},{10.73,31.13},{40.02,89.61},{ 0.96,19.08}
};
double residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
__device__ double d_residual_error(double r, double a, double m, double c) {
double e = (m * r) + c - a;
return e * e;
}
double rms_error(double m, double c) {
int i;
double mean;
double error_sum = 0;
for(i=0; i<n_data; i++) {
error_sum += residual_error(data[i].x, data[i].y, m, c);
}
mean = error_sum / n_data;
return sqrt(mean);
}
__global__ void d_rms_error(double *m, double *c,double *error_sum_arr,point_t *d_data) {
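// One thread per data point: stores the squared residual for point i.
// There is no bounds check, so the 100x10 launch below must cover exactly n_data points.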
int i = threadIdx.x + blockIdx.x *blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x,d_data[i].y, *m, *c);
}
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
long long int ds = finish->tv_sec - start->tv_sec;
long long int dn = finish->tv_nsec - start->tv_nsec;
if(dn < 0){
ds--;
dn += 1000000000;
}
*difference = ds * 1000000000 + dn;
return !(*difference > 0);
}
int main(){
int i;
double bm = 1.3;
double bc = 10;
double be;
double dm[8];
double dc[8];
double e[8];
double step = 0.01;
double best_error = 999999999;
int best_error_i;
int minimum_found = 0;
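// om/oc hold the 8 unit step directions (N, NE, E, ..., NW) taken around the current
// (m,c) base estimate in m-c space.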
double om[] = {0,1,1, 1, 0,-1,-1,-1};
double oc[] = {1,1,0,-1,-1,-1, 0, 1};
struct timespec start, finish;
long long int time_elapsed;
clock_gettime(CLOCK_MONOTONIC, &start);
cudaError_t error;
double *d_dm;
double *d_dc;
double *d_error_sum_arr;
point_t *d_data;
be= rms_error(bm,bc);
error=cudaMalloc(&d_dm,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_dm returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_dc,(sizeof(double) * 8));
if(error){
fprintf(stderr,"cudaMalloc on d_dc returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_error_sum_arr,(sizeof(double) * 1000));
if(error){
fprintf(stderr,"cudaMalloc on d_error_sum_arr returned %d %s\n",error, //371
cudaGetErrorString(error));
exit(1);
}
error=cudaMalloc(&d_data,sizeof(data)); //376
if(error){
fprintf(stderr,"cudaMalloc on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
while(!minimum_found) {
for(i=0;i<8;i++) {
dm[i] = bm + (om[i] * step);
dc[i]= bc + (oc[i] * step);
}
error = cudaMemcpy(d_dm,dm,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_dm returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_dc,dc,(sizeof(double)*8), cudaMemcpyHostToDevice);
if(error){
fprintf(stderr,"cudaMemcpy to d_dc returned %d %s\n",error,
cudaGetErrorString(error));
}
error = cudaMemcpy(d_data, data,sizeof(data), cudaMemcpyHostToDevice); //401
if(error){
fprintf(stderr,"cudaMemcpy to d_data returned %d %s\n",error,
cudaGetErrorString(error));
}
for(i=0;i<8;i++){
double h_error_sum_arr[1000];
double error_sum_total = 0; // accumulated below, so it must start at zero
double error_sum_mean;
d_rms_error <<<100,10>>>(&d_dm[i],&d_dc[i],d_error_sum_arr,d_data);
cudaDeviceSynchronize();
error =cudaMemcpy(&h_error_sum_arr,d_error_sum_arr,(sizeof(double) *1000),
cudaMemcpyDeviceToHost);
if(error){
fprintf(stderr,"cudaMemcpy to error_sum returned %d %s\n",error,
cudaGetErrorString(error));
}
for(int j=0;j<n_data;j++){
error_sum_total+= h_error_sum_arr[j];
}
error_sum_mean = error_sum_total / n_data;
e[i] = sqrt(error_sum_mean);
if(e[i] < best_error){
best_error = e[i];
best_error_i = i;
}
error_sum_total = 0;
}
if(best_error <be){
be=best_error;
bm =dm[best_error_i];
bc= dc[best_error_i];
}else {
minimum_found = 1;
}
}
error = cudaFree(d_dm);
if(error){
fprintf(stderr,"cudaFree on d_dm returned %d %s\n",error,
cudaGetErrorString(error)); //453
exit(1);
}
error = cudaFree(d_dc);
if(error){
fprintf(stderr,"cudaFree on d_dc returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_data);
if(error){
fprintf(stderr,"cudaFree on d_data returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
error = cudaFree(d_error_sum_arr);
if(error){
fprintf(stderr,"cudaFree on d_error_sum_arr returned %d %s\n",error,
cudaGetErrorString(error));
exit(1);
}
printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
clock_gettime(CLOCK_MONOTONIC, &finish);
time_difference(&start, &finish, &time_elapsed);
printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
(time_elapsed/1.0e9));
return 0;
}
;
|
df48bce65345c608ec936247badfb2dbb7d31be9.hip | // !!! This is a file automatically generated by hipify!!!
///-------------------------------------------------------------------------------------------------
// file: kmeansraw.cu
//
// summary: kmeans implementation over extents of floats (no underlying point/vector struct)
///-------------------------------------------------------------------------------------------------
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <vector>
#include <set>
#include <map>
#include <string>
#include <hip/hip_runtime.h>
#include <cutil_inline.h>
#include <shrUtils.h>
#include <shrQATest.h>
#include <sdkHelper.h> // helper for shared that are common to CUDA SDK samples
typedef double (*LPFNKMEANS)(const int nSteps,
void * h_Points,
void * h_Centers,
const int nPoints,
const int nCenters,
StopWatchInterface * hTimer,
bool bVerify,
bool bVerbose);
typedef void (*LPFNBNC)(char * szFile,
LPFNKMEANS lpfn,
int nSteps,
int nSeed,
StopWatchInterface * hTimer,
bool bVerify,
bool bVerbose);
#include "kmeansraw.cu.h"
#include "testsuitedecl.h"
// declare_testsuite(4, 16);
// declare_testsuite(4, 32);
// declare_testsuite(4, 64);
// declare_testsuite(4, 128);
// declare_testsuite(4, 256);
// declare_testsuite(4, 512);
//
declare_testsuite(16, 16);
declare_testsuite(16, 32);
declare_testsuite(16, 64);
declare_testsuite(16, 128);
declare_testsuite(16, 256);
declare_testsuite_lg(16, 512);
//
// declare_testsuite(24, 16);
// declare_testsuite(24, 32);
// declare_testsuite(24, 64);
// declare_testsuite(24, 128);
// declare_testsuite_lg(24, 256);
// declare_testsuite_lg(24, 512);
//
// declare_testsuite(32, 16);
// declare_testsuite(32, 32);
// declare_testsuite(32, 64);
// declare_testsuite(32, 128);
// declare_testsuite_lg(32, 256);
// declare_testsuite_lg(32, 512);
//
// declare_testsuite(64, 16);
// declare_testsuite(64, 32);
// declare_testsuite(64, 64);
// declare_testsuite(64, 128);
// declare_testsuite_lg(64, 256);
// declare_testsuite_lg(64, 512);
//
declare_testsuite(128, 16);
declare_testsuite(128, 32);
declare_testsuite(128, 64);
declare_testsuite_lg(128, 128);
declare_testsuite_lg(128, 256);
declare_testsuite_lg(128, 512);
| df48bce65345c608ec936247badfb2dbb7d31be9.cu | ///-------------------------------------------------------------------------------------------------
// file: kmeansraw.cu
//
// summary: kmeans implementation over extents of floats (no underlying point/vector struct)
///-------------------------------------------------------------------------------------------------
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <vector>
#include <set>
#include <map>
#include <string>
#include <cuda_runtime.h>
#include <cutil_inline.h>
#include <shrUtils.h>
#include <shrQATest.h>
#include <sdkHelper.h> // helper for shared that are common to CUDA SDK samples
typedef double (*LPFNKMEANS)(const int nSteps,
void * h_Points,
void * h_Centers,
const int nPoints,
const int nCenters,
StopWatchInterface * hTimer,
bool bVerify,
bool bVerbose);
typedef void (*LPFNBNC)(char * szFile,
LPFNKMEANS lpfn,
int nSteps,
int nSeed,
StopWatchInterface * hTimer,
bool bVerify,
bool bVerbose);
#include "kmeansraw.cu.h"
#include "testsuitedecl.h"
// declare_testsuite(4, 16);
// declare_testsuite(4, 32);
// declare_testsuite(4, 64);
// declare_testsuite(4, 128);
// declare_testsuite(4, 256);
// declare_testsuite(4, 512);
//
declare_testsuite(16, 16);
declare_testsuite(16, 32);
declare_testsuite(16, 64);
declare_testsuite(16, 128);
declare_testsuite(16, 256);
declare_testsuite_lg(16, 512);
//
// declare_testsuite(24, 16);
// declare_testsuite(24, 32);
// declare_testsuite(24, 64);
// declare_testsuite(24, 128);
// declare_testsuite_lg(24, 256);
// declare_testsuite_lg(24, 512);
//
// declare_testsuite(32, 16);
// declare_testsuite(32, 32);
// declare_testsuite(32, 64);
// declare_testsuite(32, 128);
// declare_testsuite_lg(32, 256);
// declare_testsuite_lg(32, 512);
//
// declare_testsuite(64, 16);
// declare_testsuite(64, 32);
// declare_testsuite(64, 64);
// declare_testsuite(64, 128);
// declare_testsuite_lg(64, 256);
// declare_testsuite_lg(64, 512);
//
declare_testsuite(128, 16);
declare_testsuite(128, 32);
declare_testsuite(128, 64);
declare_testsuite_lg(128, 128);
declare_testsuite_lg(128, 256);
declare_testsuite_lg(128, 512);
|
f83ad11574c0a4d64ca6f9d9afdac42d260df2de.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <ctime>
#include <math.h>
using namespace std;
void properties(){
hipDeviceProp_t dev;
int dev_cnt = 0;
hipGetDeviceCount(&dev_cnt);
cout << dev_cnt << " dispositivos" << endl;
for(int i = 0; i < dev_cnt; i++){
hipGetDeviceProperties(&dev, i);
cout << "Device: " << i << endl;
cout << "name:" << dev.name << endl;
cout << "Compute capability " << dev.major << "." << dev.minor << endl;
cout << "total global memory(KB): " << dev.totalGlobalMem/1024 << endl;
cout << "shared mem per block: " << dev.sharedMemPerBlock << endl;
cout << "regs per block: " << dev.regsPerBlock << endl;
cout << "warp size: " << dev.warpSize << endl;
cout << "max threads per block: " << dev.maxThreadsPerBlock << endl;
cout << "max thread dim z:" << dev.maxThreadsDim[0] << " y:" << dev.maxThreadsDim[1] << " x:" << dev.maxThreadsDim[2] << endl;
cout << "max grid size z:" << dev.maxGridSize[0] << " y:" << dev.maxGridSize[1] << " x:" << dev.maxGridSize[2] << endl;
cout << "clock rate(KHz):" << dev.clockRate << endl;
cout << "total constant memory (bytes): " << dev.totalConstMem << endl;
cout << "multiprocessor count " << dev.multiProcessorCount << endl;
cout << "integrated: " << dev.integrated << endl;
cout << "async engine count: " << dev.asyncEngineCount << endl;
cout << "memory bus width: " << dev.memoryBusWidth << endl;
cout << "memory clock rate (KHz): " << dev.memoryClockRate << endl;
cout << "L2 cache size (bytes): " << dev.l2CacheSize << endl;
cout << "max threads per SM: " << dev.maxThreadsPerMultiProcessor << endl;
cout << endl;
}
}
void readfile(vector<float> &vec, string filename){
ifstream file;
file.open(filename);
if(!file.is_open()){
cerr << "No se ha podido abrir el archivo " << filename << endl;
exit(-1);
}
for(string line; getline(file, line);){
vec.push_back(stof(line));
}
vec.erase(vec.begin());
file.close();
}
// Kernel function to add the elements of two arrays
__global__
void add(float *x, float *y, float *result, int *size){
int i = threadIdx.x + blockDim.x * blockIdx.x;
float sumaparcial=0;
if(i < (*size)){
result[i] = x[i] + y[i];
sumaparcial = result[i];
for(int j = 1; j < 1000; j++){
sumaparcial = sumaparcial + j;
}
result[i] += sumaparcial;
}
}
int main(void){
clock_t begin, end;
vector<float> vec;
int mem_vec_size, *gpu_vec_size, *mem_pointer_size;
float *memoria_x, *memoria_y;
float *gpu_x, *gpu_y;
float *memoria_result, *gpu_result;
ofstream file;
memoria_x = NULL;
memoria_y = NULL;
gpu_x = NULL;
gpu_y = NULL;
memoria_x = NULL;
gpu_y = NULL;
mem_pointer_size = (int*)malloc(sizeof(int));
// Imprimir caractersticas
properties();
readfile(vec, "data/9/input0.raw");
mem_vec_size = vec.size();
// Reservar memoria para el primer array
memoria_x = (float *)malloc(sizeof(float)*mem_vec_size);
hipMallocManaged(&gpu_x, mem_vec_size*sizeof(float));
for(int i = 0; i < vec.size(); i++){
memoria_x[i] = vec[i];
}
vec.clear();
readfile(vec, "data/9/input1.raw");
mem_vec_size = vec.size();
// Reservar memoria para el segundo array
//hipMallocManaged(&memoria_y, mem_vec_size*sizeof(float));
memoria_y = (float *)malloc(sizeof(float)*mem_vec_size);
hipMallocManaged(&gpu_y, mem_vec_size*sizeof(float));
for(int i = 0; i < vec.size(); i++){
memoria_y[i] = vec[i];
}
// Reservar memoria para el array resultante
memoria_result = (float *)malloc(sizeof(float)*mem_vec_size);
hipMallocManaged(&gpu_result, mem_vec_size*sizeof(float));
// Reservar memoria para el dato del tamao del vector
hipMallocManaged(&gpu_vec_size, sizeof(int));
begin = clock();
*mem_pointer_size = mem_vec_size;
// Copiar los datos en la GPU
hipMemcpy(gpu_x, memoria_x, sizeof(float)*mem_vec_size, hipMemcpyHostToDevice);
hipMemcpy(gpu_y, memoria_y, sizeof(float)*mem_vec_size, hipMemcpyHostToDevice);
hipMemcpy(gpu_result, memoria_result, sizeof(float)*mem_vec_size, hipMemcpyHostToDevice);
hipMemcpy(gpu_vec_size, mem_pointer_size, sizeof(int), hipMemcpyHostToDevice);
// Llamar al kernel
// <<< Nmero de bloques, nmero de hebras >>>
dim3 unBloque(64,1,1);
dim3 bloques((mem_vec_size/64)+1, 1, 1);
hipLaunchKernelGGL(( add), dim3(bloques), dim3(unBloque), 0, 0, gpu_x, gpu_y, gpu_result, gpu_vec_size);
// Esperar a que la GPU termine
hipDeviceSynchronize();
// Copiar los resultados en memoria
hipMemcpy(memoria_result, gpu_result, sizeof(float)*mem_vec_size, hipMemcpyDeviceToHost);
end = clock();
file.open("result.raw");
file << mem_vec_size << endl;
for(int i = 0; i < mem_vec_size; i++){
file << memoria_result[i] << endl;
}
file.close();
cout << "Tiempo: " << double(end - begin) / CLOCKS_PER_SEC << " segundos" << endl;
// Free memory
hipFree(memoria_x);
hipFree(memoria_y);
hipFree(memoria_result);
hipFree(gpu_x);
hipFree(gpu_y);
hipFree(gpu_result);
hipFree(gpu_vec_size);
free(mem_pointer_size);
return 0;
}
| f83ad11574c0a4d64ca6f9d9afdac42d260df2de.cu | #include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <ctime>
#include <math.h>
using namespace std;
void properties(){
cudaDeviceProp dev;
int dev_cnt = 0;
cudaGetDeviceCount(&dev_cnt);
cout << dev_cnt << " dispositivos" << endl;
for(int i = 0; i < dev_cnt; i++){
cudaGetDeviceProperties(&dev, i);
cout << "Device: " << i << endl;
cout << "name:" << dev.name << endl;
cout << "Compute capability " << dev.major << "." << dev.minor << endl;
cout << "total global memory(KB): " << dev.totalGlobalMem/1024 << endl;
cout << "shared mem per block: " << dev.sharedMemPerBlock << endl;
cout << "regs per block: " << dev.regsPerBlock << endl;
cout << "warp size: " << dev.warpSize << endl;
cout << "max threads per block: " << dev.maxThreadsPerBlock << endl;
cout << "max thread dim z:" << dev.maxThreadsDim[0] << " y:" << dev.maxThreadsDim[1] << " x:" << dev.maxThreadsDim[2] << endl;
cout << "max grid size z:" << dev.maxGridSize[0] << " y:" << dev.maxGridSize[1] << " x:" << dev.maxGridSize[2] << endl;
cout << "clock rate(KHz):" << dev.clockRate << endl;
cout << "total constant memory (bytes): " << dev.totalConstMem << endl;
cout << "multiprocessor count " << dev.multiProcessorCount << endl;
cout << "integrated: " << dev.integrated << endl;
cout << "async engine count: " << dev.asyncEngineCount << endl;
cout << "memory bus width: " << dev.memoryBusWidth << endl;
cout << "memory clock rate (KHz): " << dev.memoryClockRate << endl;
cout << "L2 cache size (bytes): " << dev.l2CacheSize << endl;
cout << "max threads per SM: " << dev.maxThreadsPerMultiProcessor << endl;
cout << endl;
}
}
void readfile(vector<float> &vec, string filename){
ifstream file;
file.open(filename);
if(!file.is_open()){
cerr << "No se ha podido abrir el archivo " << filename << endl;
exit(-1);
}
for(string line; getline(file, line);){
vec.push_back(stof(line));
}
vec.erase(vec.begin());
file.close();
}
// Kernel function to add the elements of two arrays
__global__
void add(float *x, float *y, float *result, int *size){
int i = threadIdx.x + blockDim.x * blockIdx.x;
float sumaparcial=0;
if(i < (*size)){
result[i] = x[i] + y[i];
sumaparcial = result[i];
for(int j = 1; j < 1000; j++){
sumaparcial = sumaparcial + j;
}
result[i] += sumaparcial;
}
}
int main(void){
clock_t begin, end;
vector<float> vec;
int mem_vec_size, *gpu_vec_size, *mem_pointer_size;
float *memoria_x, *memoria_y;
float *gpu_x, *gpu_y;
float *memoria_result, *gpu_result;
ofstream file;
memoria_x = NULL;
memoria_y = NULL;
gpu_x = NULL;
gpu_y = NULL;
memoria_x = NULL;
gpu_y = NULL;
mem_pointer_size = (int*)malloc(sizeof(int));
// Imprimir características
properties();
readfile(vec, "data/9/input0.raw");
mem_vec_size = vec.size();
// Reservar memoria para el primer array
memoria_x = (float *)malloc(sizeof(float)*mem_vec_size);
cudaMallocManaged(&gpu_x, mem_vec_size*sizeof(float));
for(int i = 0; i < vec.size(); i++){
memoria_x[i] = vec[i];
}
vec.clear();
readfile(vec, "data/9/input1.raw");
mem_vec_size = vec.size();
// Reservar memoria para el segundo array
//cudaMallocManaged(&memoria_y, mem_vec_size*sizeof(float));
memoria_y = (float *)malloc(sizeof(float)*mem_vec_size);
cudaMallocManaged(&gpu_y, mem_vec_size*sizeof(float));
for(int i = 0; i < vec.size(); i++){
memoria_y[i] = vec[i];
}
// Reservar memoria para el array resultante
memoria_result = (float *)malloc(sizeof(float)*mem_vec_size);
cudaMallocManaged(&gpu_result, mem_vec_size*sizeof(float));
// Reservar memoria para el dato del tamaño del vector
cudaMallocManaged(&gpu_vec_size, sizeof(int));
begin = clock();
*mem_pointer_size = mem_vec_size;
// Copiar los datos en la GPU
cudaMemcpy(gpu_x, memoria_x, sizeof(float)*mem_vec_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_y, memoria_y, sizeof(float)*mem_vec_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_result, memoria_result, sizeof(float)*mem_vec_size, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_vec_size, mem_pointer_size, sizeof(int), cudaMemcpyHostToDevice);
// Llamar al kernel
// <<< Número de bloques, número de hebras >>>
dim3 unBloque(64,1,1);
dim3 bloques((mem_vec_size/64)+1, 1, 1);
add<<< bloques, unBloque>>>(gpu_x, gpu_y, gpu_result, gpu_vec_size);
// Esperar a que la GPU termine
cudaDeviceSynchronize();
// Copiar los resultados en memoria
cudaMemcpy(memoria_result, gpu_result, sizeof(float)*mem_vec_size, cudaMemcpyDeviceToHost);
end = clock();
file.open("result.raw");
file << mem_vec_size << endl;
for(int i = 0; i < mem_vec_size; i++){
file << memoria_result[i] << endl;
}
file.close();
cout << "Tiempo: " << double(end - begin) / CLOCKS_PER_SEC << " segundos" << endl;
// Free memory
cudaFree(memoria_x);
cudaFree(memoria_y);
cudaFree(memoria_result);
cudaFree(gpu_x);
cudaFree(gpu_y);
cudaFree(gpu_result);
cudaFree(gpu_vec_size);
free(mem_pointer_size);
return 0;
}
|
d2e6cf1f16b0910f3ab8706e86846ec3df9377d9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define wbCheck(stmt) \
do { \
hipError_t err = stmt; \
if (err != hipSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
//@@ Load a segment of the input vector into shared memory
//@@ Traverse the reduction tree
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
__shared__ float partialSum[2*BLOCK_SIZE];
int tx = threadIdx.x;
int bx = blockIdx.x;
int start = 2 * bx * blockDim.x;
if(start + tx < len){
partialSum[tx] = input[start + tx];
}
else{
partialSum[tx] = 0.0;
}
if(start + blockDim.x + tx < len){
partialSum[tx + blockDim.x] = input[start + blockDim.x + tx];
}
else{
partialSum[tx + blockDim.x] = 0.0;
}
for(int stride = blockDim.x; stride >= 1; stride >>= 1){
__syncthreads();
if(tx < stride){
partialSum[tx] += partialSum[tx + stride];
}
}
if(tx == 0){
output[bx] = partialSum[0];
}
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numInputElements);
wbLog(TRACE, "The number of output elements in the input is ",
numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
hipMalloc((void**)&deviceInput, numInputElements*sizeof(float));
hipMalloc((void**)&deviceOutput, numOutputElements*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
hipMemcpy(deviceInput, hostInput, numInputElements*sizeof(float), hipMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
int real_block_size = BLOCK_SIZE;
int half_input = ceil(numInputElements / 2.0);
int num_of_blocks = (half_input) / real_block_size;
if((half_input)% real_block_size != 0){
num_of_blocks += 1;
}
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
hipLaunchKernelGGL(( total), dim3(num_of_blocks), dim3(real_block_size), 0, 0, deviceInput, deviceOutput, numInputElements);
hipDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
hipMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), hipMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
hipFree(deviceInput);
hipFree(deviceOutput);
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
| d2e6cf1f16b0910f3ab8706e86846ec3df9377d9.cu |
#define wbCheck(stmt) \
do { \
cudaError_t err = stmt; \
if (err != cudaSuccess) { \
wbLog(ERROR, "Failed to run stmt ", #stmt); \
wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err)); \
return -1; \
} \
} while (0)
__global__ void total(float *input, float *output, int len) {
//@@ Load a segment of the input vector into shared memory
//@@ Traverse the reduction tree
//@@ Write the computed sum of the block to the output vector at the
//@@ correct index
__shared__ float partialSum[2*BLOCK_SIZE];
int tx = threadIdx.x;
int bx = blockIdx.x;
int start = 2 * bx * blockDim.x;
if(start + tx < len){
partialSum[tx] = input[start + tx];
}
else{
partialSum[tx] = 0.0;
}
if(start + blockDim.x + tx < len){
partialSum[tx + blockDim.x] = input[start + blockDim.x + tx];
}
else{
partialSum[tx + blockDim.x] = 0.0;
}
for(int stride = blockDim.x; stride >= 1; stride >>= 1){
__syncthreads();
if(tx < stride){
partialSum[tx] += partialSum[tx + stride];
}
}
if(tx == 0){
output[bx] = partialSum[0];
}
}
int main(int argc, char **argv) {
int ii;
wbArg_t args;
float *hostInput; // The input 1D list
float *hostOutput; // The output list
float *deviceInput;
float *deviceOutput;
int numInputElements; // number of elements in the input list
int numOutputElements; // number of elements in the output list
args = wbArg_read(argc, argv);
wbTime_start(Generic, "Importing data and creating memory on host");
hostInput =
(float *)wbImport(wbArg_getInputFile(args, 0), &numInputElements);
numOutputElements = numInputElements / (BLOCK_SIZE << 1);
if (numInputElements % (BLOCK_SIZE << 1)) {
numOutputElements++;
}
hostOutput = (float *)malloc(numOutputElements * sizeof(float));
wbTime_stop(Generic, "Importing data and creating memory on host");
wbLog(TRACE, "The number of input elements in the input is ",
numInputElements);
wbLog(TRACE, "The number of output elements in the input is ",
numOutputElements);
wbTime_start(GPU, "Allocating GPU memory.");
//@@ Allocate GPU memory here
cudaMalloc((void**)&deviceInput, numInputElements*sizeof(float));
cudaMalloc((void**)&deviceOutput, numOutputElements*sizeof(float));
wbTime_stop(GPU, "Allocating GPU memory.");
wbTime_start(GPU, "Copying input memory to the GPU.");
//@@ Copy memory to the GPU here
cudaMemcpy(deviceInput, hostInput, numInputElements*sizeof(float), cudaMemcpyHostToDevice);
wbTime_stop(GPU, "Copying input memory to the GPU.");
//@@ Initialize the grid and block dimensions here
int real_block_size = BLOCK_SIZE;
int half_input = ceil(numInputElements / 2.0);
int num_of_blocks = (half_input) / real_block_size;
if((half_input)% real_block_size != 0){
num_of_blocks += 1;
}
wbTime_start(Compute, "Performing CUDA computation");
//@@ Launch the GPU Kernel here
total<<<num_of_blocks, real_block_size>>>(deviceInput, deviceOutput, numInputElements);
cudaDeviceSynchronize();
wbTime_stop(Compute, "Performing CUDA computation");
wbTime_start(Copy, "Copying output memory to the CPU");
//@@ Copy the GPU memory back to the CPU here
cudaMemcpy(hostOutput, deviceOutput, numOutputElements * sizeof(float), cudaMemcpyDeviceToHost);
wbTime_stop(Copy, "Copying output memory to the CPU");
/********************************************************************
* Reduce output vector on the host
* NOTE: One could also perform the reduction of the output vector
* recursively and support any size input. For simplicity, we do not
* require that for this lab.
********************************************************************/
for (ii = 1; ii < numOutputElements; ii++) {
hostOutput[0] += hostOutput[ii];
}
wbTime_start(GPU, "Freeing GPU Memory");
//@@ Free the GPU memory here
wbTime_stop(GPU, "Freeing GPU Memory");
cudaFree(deviceInput);
cudaFree(deviceOutput);
wbSolution(args, hostOutput, 1);
free(hostInput);
free(hostOutput);
return 0;
}
|
440262e2bb42905f11301215f323b1374ab456d8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel1;
int xdim0_reset_field_kernel1_h = -1;
__constant__ int ydim0_reset_field_kernel1;
int ydim0_reset_field_kernel1_h = -1;
__constant__ int xdim1_reset_field_kernel1;
int xdim1_reset_field_kernel1_h = -1;
__constant__ int ydim1_reset_field_kernel1;
int ydim1_reset_field_kernel1_h = -1;
__constant__ int xdim2_reset_field_kernel1;
int xdim2_reset_field_kernel1_h = -1;
__constant__ int ydim2_reset_field_kernel1;
int ydim2_reset_field_kernel1_h = -1;
__constant__ int xdim3_reset_field_kernel1;
int xdim3_reset_field_kernel1_h = -1;
__constant__ int ydim3_reset_field_kernel1;
int ydim3_reset_field_kernel1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x,y,z) (x+xdim0_reset_field_kernel1*(y)+xdim0_reset_field_kernel1*ydim0_reset_field_kernel1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_reset_field_kernel1*(y)+xdim1_reset_field_kernel1*ydim1_reset_field_kernel1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_reset_field_kernel1*(y)+xdim2_reset_field_kernel1*ydim2_reset_field_kernel1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_reset_field_kernel1*(y)+xdim3_reset_field_kernel1*ydim3_reset_field_kernel1*(z))
//user function
__device__
void reset_field_kernel1_gpu( double *density0, const double *density1,
double *energy0, const double *energy1) {
density0[OPS_ACC0(0,0,0)] = density1[OPS_ACC1(0,0,0)] ;
energy0[OPS_ACC2(0,0,0)] = energy1[OPS_ACC3(0,0,0)] ;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_reset_field_kernel1(
double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_reset_field_kernel1 + idx_z * 1*1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_reset_field_kernel1 + idx_z * 1*1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_reset_field_kernel1 + idx_z * 1*1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_reset_field_kernel1 + idx_z * 1*1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel1_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_reset_field_kernel1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,138)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(138,"reset_field_kernel1");
OPS_kernels[138].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel1_h || ydim0 != ydim0_reset_field_kernel1_h || xdim1 != xdim1_reset_field_kernel1_h || ydim1 != ydim1_reset_field_kernel1_h || xdim2 != xdim2_reset_field_kernel1_h || ydim2 != ydim2_reset_field_kernel1_h || xdim3 != xdim3_reset_field_kernel1_h || ydim3 != ydim3_reset_field_kernel1_h) {
hipMemcpyToSymbol( xdim0_reset_field_kernel1, &xdim0, sizeof(int) );
xdim0_reset_field_kernel1_h = xdim0;
hipMemcpyToSymbol( ydim0_reset_field_kernel1, &ydim0, sizeof(int) );
ydim0_reset_field_kernel1_h = ydim0;
hipMemcpyToSymbol( xdim1_reset_field_kernel1, &xdim1, sizeof(int) );
xdim1_reset_field_kernel1_h = xdim1;
hipMemcpyToSymbol( ydim1_reset_field_kernel1, &ydim1, sizeof(int) );
ydim1_reset_field_kernel1_h = ydim1;
hipMemcpyToSymbol( xdim2_reset_field_kernel1, &xdim2, sizeof(int) );
xdim2_reset_field_kernel1_h = xdim2;
hipMemcpyToSymbol( ydim2_reset_field_kernel1, &ydim2, sizeof(int) );
ydim2_reset_field_kernel1_h = ydim2;
hipMemcpyToSymbol( xdim3_reset_field_kernel1, &xdim3, sizeof(int) );
xdim3_reset_field_kernel1_h = xdim3;
hipMemcpyToSymbol( ydim3_reset_field_kernel1, &ydim3, sizeof(int) );
ydim3_reset_field_kernel1_h = ydim3;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[138].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_reset_field_kernel1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[138].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[138].mpi_time += t2-t1;
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 138;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 138;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_reset_field_kernel1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(138,"reset_field_kernel1");
}
ops_enqueue_kernel(desc);
}
#endif
| 440262e2bb42905f11301215f323b1374ab456d8.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_reset_field_kernel1;
int xdim0_reset_field_kernel1_h = -1;
__constant__ int ydim0_reset_field_kernel1;
int ydim0_reset_field_kernel1_h = -1;
__constant__ int xdim1_reset_field_kernel1;
int xdim1_reset_field_kernel1_h = -1;
__constant__ int ydim1_reset_field_kernel1;
int ydim1_reset_field_kernel1_h = -1;
__constant__ int xdim2_reset_field_kernel1;
int xdim2_reset_field_kernel1_h = -1;
__constant__ int ydim2_reset_field_kernel1;
int ydim2_reset_field_kernel1_h = -1;
__constant__ int xdim3_reset_field_kernel1;
int xdim3_reset_field_kernel1_h = -1;
__constant__ int ydim3_reset_field_kernel1;
int ydim3_reset_field_kernel1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#define OPS_ACC0(x,y,z) (x+xdim0_reset_field_kernel1*(y)+xdim0_reset_field_kernel1*ydim0_reset_field_kernel1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_reset_field_kernel1*(y)+xdim1_reset_field_kernel1*ydim1_reset_field_kernel1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_reset_field_kernel1*(y)+xdim2_reset_field_kernel1*ydim2_reset_field_kernel1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_reset_field_kernel1*(y)+xdim3_reset_field_kernel1*ydim3_reset_field_kernel1*(z))
//user function
__device__
void reset_field_kernel1_gpu( double *density0, const double *density1,
double *energy0, const double *energy1) {
density0[OPS_ACC0(0,0,0)] = density1[OPS_ACC1(0,0,0)] ;
energy0[OPS_ACC2(0,0,0)] = energy1[OPS_ACC3(0,0,0)] ;
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_reset_field_kernel1(
double* __restrict arg0,
const double* __restrict arg1,
double* __restrict arg2,
const double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_reset_field_kernel1 + idx_z * 1*1 * xdim0_reset_field_kernel1 * ydim0_reset_field_kernel1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_reset_field_kernel1 + idx_z * 1*1 * xdim1_reset_field_kernel1 * ydim1_reset_field_kernel1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_reset_field_kernel1 + idx_z * 1*1 * xdim2_reset_field_kernel1 * ydim2_reset_field_kernel1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_reset_field_kernel1 + idx_z * 1*1 * xdim3_reset_field_kernel1 * ydim3_reset_field_kernel1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
reset_field_kernel1_gpu(arg0, arg1, arg2, arg3);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
#else
void ops_par_loop_reset_field_kernel1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[4] = { arg0, arg1, arg2, arg3};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,4,range,138)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(138,"reset_field_kernel1");
OPS_kernels[138].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
if (xdim0 != xdim0_reset_field_kernel1_h || ydim0 != ydim0_reset_field_kernel1_h || xdim1 != xdim1_reset_field_kernel1_h || ydim1 != ydim1_reset_field_kernel1_h || xdim2 != xdim2_reset_field_kernel1_h || ydim2 != ydim2_reset_field_kernel1_h || xdim3 != xdim3_reset_field_kernel1_h || ydim3 != ydim3_reset_field_kernel1_h) {
cudaMemcpyToSymbol( xdim0_reset_field_kernel1, &xdim0, sizeof(int) );
xdim0_reset_field_kernel1_h = xdim0;
cudaMemcpyToSymbol( ydim0_reset_field_kernel1, &ydim0, sizeof(int) );
ydim0_reset_field_kernel1_h = ydim0;
cudaMemcpyToSymbol( xdim1_reset_field_kernel1, &xdim1, sizeof(int) );
xdim1_reset_field_kernel1_h = xdim1;
cudaMemcpyToSymbol( ydim1_reset_field_kernel1, &ydim1, sizeof(int) );
ydim1_reset_field_kernel1_h = ydim1;
cudaMemcpyToSymbol( xdim2_reset_field_kernel1, &xdim2, sizeof(int) );
xdim2_reset_field_kernel1_h = xdim2;
cudaMemcpyToSymbol( ydim2_reset_field_kernel1, &ydim2, sizeof(int) );
ydim2_reset_field_kernel1_h = ydim2;
cudaMemcpyToSymbol( xdim3_reset_field_kernel1, &xdim3, sizeof(int) );
xdim3_reset_field_kernel1_h = xdim3;
cudaMemcpyToSymbol( ydim3_reset_field_kernel1, &ydim3, sizeof(int) );
ydim3_reset_field_kernel1_h = ydim3;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
char *p_a[4];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[138].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_reset_field_kernel1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[138].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[2],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[138].mpi_time += t2-t1;
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[138].transfer += ops_compute_transfer(dim, start, end, &arg3);
}
}
#ifdef OPS_LAZY
void ops_par_loop_reset_field_kernel1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 138;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 138;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 4;
desc->args = (ops_arg*)malloc(4*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->function = ops_par_loop_reset_field_kernel1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(138,"reset_field_kernel1");
}
ops_enqueue_kernel(desc);
}
#endif
|
9b58bd6a2bed85b3412d4d64d80b90436961234a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void xMinDeltaIntegralKernel( const float *intData, const int intDataStrideChannel, float *tmpArray, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
*tmpArray = -delta;
}
} | 9b58bd6a2bed85b3412d4d64d80b90436961234a.cu | #include "includes.h"
__global__ void xMinDeltaIntegralKernel( const float *intData, const int intDataStrideChannel, float *tmpArray, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now is now the current global input plane number
intData += id * intDataStrideChannel;
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
// const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
*tmpArray = -delta;
}
} |
21e85f11dfc5e32842c5b3037623b767ed28f391.hip | // !!! This is a file automatically generated by hipify!!!
//general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#define GROUP 1
void sample_0_benchmark_cuFFT_single(bool file_output, FILE* output, int device_id)
{
const int num_runs = 3;
if (file_output)
fprintf(output, "0 - cuFFT FFT + iFFT C2C benchmark 1D batched in single precision\n");
printf("0 - cuFFT FFT + iFFT C2C benchmark 1D batched in single precision\n");
hipSetDevice(device_id);
double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples
hipfftComplex* inputC = (hipfftComplex*)malloc((uint64_t)sizeof(hipfftComplex)*pow(2, 27));
for (uint64_t i = 0; i < pow(2, 27); i++) {
inputC[i].x = 2 * ((float)rand()) / RAND_MAX - 1.0;
inputC[i].y = 2 * ((float)rand()) / RAND_MAX - 1.0;
}
for (int n = 0; n < 26; n++) {
double run_time[num_runs][2];
for (int r = 0; r < num_runs; r++) {
hipfftHandle planC2C;
hipfftComplex* dataC;
uint64_t dims[3];
dims[0] = 4 * pow(2, n); //Multidimensional FFT dimensions sizes (default 1). For best performance (and stability), order dimensions in descendant size order as: x>y>z.
if (n == 0) dims[0] = 4096;
dims[1] = 64* 32 * pow(2, 16)/dims[0];
//dims[1] = (dims[1] > 32768) ? 32768 : dims[1];
if (dims[1] == 0) dims[1] = 1;
dims[2] = 1;
hipMalloc((void**)&dataC, sizeof(hipfftComplex) * dims[0] * dims[1] * dims[2]);
hipMemcpy(dataC, inputC, sizeof(hipfftComplex) * dims[0] * dims[1] * dims[2], hipMemcpyHostToDevice);
if (hipGetLastError() != hipSuccess) {
fprintf(stderr, "Cuda error: Failed to allocate\n");
return;
}
uint64_t sizeCUDA;
switch (1) {
case 1:
hipfftPlan1d(&planC2C, dims[0], HIPFFT_C2C, dims[1]);
hipfftGetSize1d(planC2C, dims[0], HIPFFT_C2C, dims[1], (size_t*)&sizeCUDA);
break;
case 2:
hipfftPlan2d(&planC2C, dims[1], dims[0], HIPFFT_C2C);
break;
case 3:
hipfftPlan3d(&planC2C, dims[2], dims[1], dims[0], HIPFFT_C2C);
break;
}
float totTime = 0;
uint64_t cuBufferSize = sizeof(float) * 2 * dims[0] * dims[1] * dims[2];
uint64_t num_iter = ((3*4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 1000 : (3*4096 * 1024.0 * 1024.0) / cuBufferSize;
if (num_iter == 0) num_iter = 1;
std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now();
for (int i = 0; i < num_iter; i++) {
hipfftExecC2C(planC2C, dataC, dataC, -1);
hipfftExecC2C(planC2C, dataC, dataC, 1);
}
hipDeviceSynchronize();
std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now();
totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter;
run_time[r][0] = totTime;
if (n > 0) {
if (r == num_runs - 1) {
double std_error = 0;
double avg_time = 0;
for (uint64_t t = 0; t < num_runs; t++) {
avg_time += run_time[t][0];
}
avg_time /= num_runs;
for (uint64_t t = 0; t < num_runs; t++) {
std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time);
}
std_error = sqrt(std_error / num_runs);
if (file_output)
fprintf(output, "cuFFT System: %" PRIu64 " %" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", (uint64_t)log2(dims[0]), dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
printf("cuFFT System: %" PRIu64 " %" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", (uint64_t)log2(dims[0]), dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
benchmark_result[0] += ((double)cuBufferSize / 1024) / avg_time;
}
}
hipfftDestroy(planC2C);
hipFree(dataC);
hipDeviceSynchronize();
//hipfftComplex* output_cuFFT = (hipfftComplex*)(malloc(sizeof(hipfftComplex) * dims[0] * dims[1] * dims[2]));
//hipMemcpy(output_cuFFT, dataC, sizeof(hipfftComplex) * dims[0] * dims[1] * dims[2], hipMemcpyDeviceToHost);
//hipDeviceSynchronize();
}
}
free(inputC);
benchmark_result[0] /= (26 - 1);
if (file_output)
fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
}
| 21e85f11dfc5e32842c5b3037623b767ed28f391.cu | //general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#define GROUP 1
void sample_0_benchmark_cuFFT_single(bool file_output, FILE* output, int device_id)
{
const int num_runs = 3;
if (file_output)
fprintf(output, "0 - cuFFT FFT + iFFT C2C benchmark 1D batched in single precision\n");
printf("0 - cuFFT FFT + iFFT C2C benchmark 1D batched in single precision\n");
cudaSetDevice(device_id);
double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples
cufftComplex* inputC = (cufftComplex*)malloc((uint64_t)sizeof(cufftComplex)*pow(2, 27));
for (uint64_t i = 0; i < pow(2, 27); i++) {
inputC[i].x = 2 * ((float)rand()) / RAND_MAX - 1.0;
inputC[i].y = 2 * ((float)rand()) / RAND_MAX - 1.0;
}
for (int n = 0; n < 26; n++) {
double run_time[num_runs][2];
for (int r = 0; r < num_runs; r++) {
cufftHandle planC2C;
cufftComplex* dataC;
uint64_t dims[3];
dims[0] = 4 * pow(2, n); //Multidimensional FFT dimensions sizes (default 1). For best performance (and stability), order dimensions in descendant size order as: x>y>z.
if (n == 0) dims[0] = 4096;
dims[1] = 64* 32 * pow(2, 16)/dims[0];
//dims[1] = (dims[1] > 32768) ? 32768 : dims[1];
if (dims[1] == 0) dims[1] = 1;
dims[2] = 1;
cudaMalloc((void**)&dataC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2]);
cudaMemcpy(dataC, inputC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyHostToDevice);
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Cuda error: Failed to allocate\n");
return;
}
uint64_t sizeCUDA;
switch (1) {
case 1:
cufftPlan1d(&planC2C, dims[0], CUFFT_C2C, dims[1]);
cufftGetSize1d(planC2C, dims[0], CUFFT_C2C, dims[1], (size_t*)&sizeCUDA);
break;
case 2:
cufftPlan2d(&planC2C, dims[1], dims[0], CUFFT_C2C);
break;
case 3:
cufftPlan3d(&planC2C, dims[2], dims[1], dims[0], CUFFT_C2C);
break;
}
float totTime = 0;
uint64_t cuBufferSize = sizeof(float) * 2 * dims[0] * dims[1] * dims[2];
uint64_t num_iter = ((3*4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 1000 : (3*4096 * 1024.0 * 1024.0) / cuBufferSize;
if (num_iter == 0) num_iter = 1;
std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now();
for (int i = 0; i < num_iter; i++) {
cufftExecC2C(planC2C, dataC, dataC, -1);
cufftExecC2C(planC2C, dataC, dataC, 1);
}
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now();
totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter;
run_time[r][0] = totTime;
if (n > 0) {
if (r == num_runs - 1) {
double std_error = 0;
double avg_time = 0;
for (uint64_t t = 0; t < num_runs; t++) {
avg_time += run_time[t][0];
}
avg_time /= num_runs;
for (uint64_t t = 0; t < num_runs; t++) {
std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time);
}
std_error = sqrt(std_error / num_runs);
if (file_output)
fprintf(output, "cuFFT System: %" PRIu64 " %" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", (uint64_t)log2(dims[0]), dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
printf("cuFFT System: %" PRIu64 " %" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", (uint64_t)log2(dims[0]), dims[0], dims[1], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
benchmark_result[0] += ((double)cuBufferSize / 1024) / avg_time;
}
}
cufftDestroy(planC2C);
cudaFree(dataC);
cudaDeviceSynchronize();
//cufftComplex* output_cuFFT = (cufftComplex*)(malloc(sizeof(cufftComplex) * dims[0] * dims[1] * dims[2]));
//cudaMemcpy(output_cuFFT, dataC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
}
}
free(inputC);
benchmark_result[0] /= (26 - 1);
if (file_output)
fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
}
|
fb0f96ee7c3e3d71b8e1d889e9a4aa6ad3381203.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
// kernel
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
hipLaunchKernelGGL(( cube), dim3(1), dim3(ARRAY_SIZE), 0, 0, d_out, d_in);
// copy back the result array to the CPU
hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| fb0f96ee7c3e3d71b8e1d889e9a4aa6ad3381203.cu | #include <stdio.h>
#include <cuda.h>
// kernel
__global__ void cube(float * d_out, float * d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f * f * f;
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
// copy back the result array to the CPU
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
4b66ad37a0a61c186e52679cca40b6103a7f2601.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <fstream>
#include <iostream>
#include <sstream>
#include "Timer.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
using namespace std;
// leftrotate function definition
#define LEFTROTATE(x, c) (((x) << (c)) | ((x) >> (32 - (c))))
#define F(x,y,z) ((x & y) | ((~x) & z))
#define G(x,y,z) ((x & z) | ((~z) & y))
#define H(x,y,z) (x ^ y ^ z)
#define I(x,y,z) (y ^ (x | (~z)))
// This version of the round shifts the interpretation of a,b,c,d by one
// and must be called with v/x/y/z in a matching shuffle pattern.
// Every four Rounds, a,b,c,d are back to their original interpretation,
// thogh, so it all works out in the end (we have 64 rounds per block).
#define ROUND_INPLACE_VIA_SHIFT(w, r, k, v, x, y, z, func) \
{ \
v += func(x,y,z) + w + k; \
v = x + LEFTROTATE(v, r); \
}
// This version ignores the mapping of a/b/c/d to v/x/y/z and simply
// uses a temporary variable to keep the interpretation of a/b/c/d
// consistent. Whether this one or the previous one performs better
// probably depends on the compiler....
#define ROUND_USING_TEMP_VARS(w, r, k, v, x, y, z, func) \
{ \
a = a + func(b,c,d) + k + w; \
unsigned int temp = d; \
d = c; \
c = b; \
b = b + LEFTROTATE(a, r); \
a = temp; \
}
// Here, we pick which style of ROUND we use.
#define ROUND ROUND_USING_TEMP_VARS
//#define ROUND ROUND_INPLACE_VIA_SHIFT
/// NOTE: this really only allows a length up to 7 bytes, not 8, because
/// we need to start the padding in the first byte following the message,
/// and we only have two words to work with here....
/// It also assumes words[] has all zero bits except the chars of interest.
__host__ __device__ inline void md5_2words(unsigned int *words,
unsigned int len,
unsigned int *digest)
{
// For any block but the first one, these should be passed in, not
// initialized, but we are assuming we only operate on a single block.
unsigned int h0 = 0x67452301;
unsigned int h1 = 0xefcdab89;
unsigned int h2 = 0x98badcfe;
unsigned int h3 = 0x10325476;
unsigned int a = h0;
unsigned int b = h1;
unsigned int c = h2;
unsigned int d = h3;
unsigned int WL = len * 8;
unsigned int W0 = words[0];
unsigned int W1 = words[1];
switch (len)
{
case 0: W0 |= 0x00000080; break;
case 1: W0 |= 0x00008000; break;
case 2: W0 |= 0x00800000; break;
case 3: W0 |= 0x80000000; break;
case 4: W1 |= 0x00000080; break;
case 5: W1 |= 0x00008000; break;
case 6: W1 |= 0x00800000; break;
case 7: W1 |= 0x80000000; break;
}
// args: word data, per-round shift amt, constant, 4 vars, function macro
ROUND(W0, 7, 0xd76aa478, a, b, c, d, F);
ROUND(W1, 12, 0xe8c7b756, d, a, b, c, F);
ROUND(0, 17, 0x242070db, c, d, a, b, F);
ROUND(0, 22, 0xc1bdceee, b, c, d, a, F);
ROUND(0, 7, 0xf57c0faf, a, b, c, d, F);
ROUND(0, 12, 0x4787c62a, d, a, b, c, F);
ROUND(0, 17, 0xa8304613, c, d, a, b, F);
ROUND(0, 22, 0xfd469501, b, c, d, a, F);
ROUND(0, 7, 0x698098d8, a, b, c, d, F);
ROUND(0, 12, 0x8b44f7af, d, a, b, c, F);
ROUND(0, 17, 0xffff5bb1, c, d, a, b, F);
ROUND(0, 22, 0x895cd7be, b, c, d, a, F);
ROUND(0, 7, 0x6b901122, a, b, c, d, F);
ROUND(0, 12, 0xfd987193, d, a, b, c, F);
ROUND(WL, 17, 0xa679438e, c, d, a, b, F);
ROUND(0, 22, 0x49b40821, b, c, d, a, F);
ROUND(W1, 5, 0xf61e2562, a, b, c, d, G);
ROUND(0, 9, 0xc040b340, d, a, b, c, G);
ROUND(0, 14, 0x265e5a51, c, d, a, b, G);
ROUND(W0, 20, 0xe9b6c7aa, b, c, d, a, G);
ROUND(0, 5, 0xd62f105d, a, b, c, d, G);
ROUND(0, 9, 0x02441453, d, a, b, c, G);
ROUND(0, 14, 0xd8a1e681, c, d, a, b, G);
ROUND(0, 20, 0xe7d3fbc8, b, c, d, a, G);
ROUND(0, 5, 0x21e1cde6, a, b, c, d, G);
ROUND(WL, 9, 0xc33707d6, d, a, b, c, G);
ROUND(0, 14, 0xf4d50d87, c, d, a, b, G);
ROUND(0, 20, 0x455a14ed, b, c, d, a, G);
ROUND(0, 5, 0xa9e3e905, a, b, c, d, G);
ROUND(0, 9, 0xfcefa3f8, d, a, b, c, G);
ROUND(0, 14, 0x676f02d9, c, d, a, b, G);
ROUND(0, 20, 0x8d2a4c8a, b, c, d, a, G);
ROUND(0, 4, 0xfffa3942, a, b, c, d, H);
ROUND(0, 11, 0x8771f681, d, a, b, c, H);
ROUND(0, 16, 0x6d9d6122, c, d, a, b, H);
ROUND(WL, 23, 0xfde5380c, b, c, d, a, H);
ROUND(W1, 4, 0xa4beea44, a, b, c, d, H);
ROUND(0, 11, 0x4bdecfa9, d, a, b, c, H);
ROUND(0, 16, 0xf6bb4b60, c, d, a, b, H);
ROUND(0, 23, 0xbebfbc70, b, c, d, a, H);
ROUND(0, 4, 0x289b7ec6, a, b, c, d, H);
ROUND(W0, 11, 0xeaa127fa, d, a, b, c, H);
ROUND(0, 16, 0xd4ef3085, c, d, a, b, H);
ROUND(0, 23, 0x04881d05, b, c, d, a, H);
ROUND(0, 4, 0xd9d4d039, a, b, c, d, H);
ROUND(0, 11, 0xe6db99e5, d, a, b, c, H);
ROUND(0, 16, 0x1fa27cf8, c, d, a, b, H);
ROUND(0, 23, 0xc4ac5665, b, c, d, a, H);
ROUND(W0, 6, 0xf4292244, a, b, c, d, I);
ROUND(0, 10, 0x432aff97, d, a, b, c, I);
ROUND(WL, 15, 0xab9423a7, c, d, a, b, I);
ROUND(0, 21, 0xfc93a039, b, c, d, a, I);
ROUND(0, 6, 0x655b59c3, a, b, c, d, I);
ROUND(0, 10, 0x8f0ccc92, d, a, b, c, I);
ROUND(0, 15, 0xffeff47d, c, d, a, b, I);
ROUND(W1, 21, 0x85845dd1, b, c, d, a, I);
ROUND(0, 6, 0x6fa87e4f, a, b, c, d, I);
ROUND(0, 10, 0xfe2ce6e0, d, a, b, c, I);
ROUND(0, 15, 0xa3014314, c, d, a, b, I);
ROUND(0, 21, 0x4e0811a1, b, c, d, a, I);
ROUND(0, 6, 0xf7537e82, a, b, c, d, I);
ROUND(0, 10, 0xbd3af235, d, a, b, c, I);
ROUND(0, 15, 0x2ad7d2bb, c, d, a, b, I);
ROUND(0, 21, 0xeb86d391, b, c, d, a, I);
h0 += a;
h1 += b;
h2 += c;
h3 += d;
// write the final result out
digest[0] = h0;
digest[1] = h1;
digest[2] = h2;
digest[3] = h3;
}
// ****************************************************************************
// Function: FindKeyspaceSize
//
// Purpose:
/// Multiply valsPerByte by itself byteLength times (i.e. compute
/// valsPerByte^byteLength) to find the total size of the key space,
/// with error checking.
//
// Arguments:
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
__host__ __device__ int FindKeyspaceSize(int byteLength, int valsPerByte)
{
int keyspace = 1;
for (int i=0; i<byteLength; ++i)
{
if (keyspace >= 0x7fffffff / valsPerByte)
{
// error, we're about to overflow a signed int
return -1;
}
keyspace *= valsPerByte;
}
return keyspace;
}
// ****************************************************************************
// Function: IndexToKey
//
// Purpose:
/// For a given index in the keyspace, find the actual key string
/// which is at that index.
//
// Arguments:
// index index in key space
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// vals output key string
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
__host__ __device__ void IndexToKey(unsigned int index,
int byteLength, int valsPerByte,
unsigned char vals[8])
{
// loop pointlessly unrolled to avoid CUDA compiler complaints
// about unaligned accesses (!?) on older compute capabilities
vals[0] = index % valsPerByte;
index /= valsPerByte;
vals[1] = index % valsPerByte;
index /= valsPerByte;
vals[2] = index % valsPerByte;
index /= valsPerByte;
vals[3] = index % valsPerByte;
index /= valsPerByte;
vals[4] = index % valsPerByte;
index /= valsPerByte;
vals[5] = index % valsPerByte;
index /= valsPerByte;
vals[6] = index % valsPerByte;
index /= valsPerByte;
vals[7] = index % valsPerByte;
index /= valsPerByte;
}
// ****************************************************************************
// Function: AsHex
//
// Purpose:
/// For a given key string, return the raw hex string for its bytes.
//
// Arguments:
// vals key string
// len length of key string
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
std::string AsHex(unsigned char *vals, int len)
{
ostringstream out;
char tmp[256];
for (int i=0; i<len; ++i)
{
sprintf(tmp, "%2.2X", vals[i]);
out << tmp;
}
return out.str();
}
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
//
// ****************************************************************************
void
addBenchmarkSpecOptions(OptionParser &op)
{
}
// ****************************************************************************
// Function: FindKeyWithDigest_CPU
//
// Purpose:
/// On the CPU, search the key space to find a key with the given digest.
//
// Arguments:
// searchDigest the digest to search for
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// foundIndex output - the index of the found key (if found)
// foundKey output - the string of the found key (if found)
// foundDigest output - the digest of the found key (if found)
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
double FindKeyWithDigest_CPU(const unsigned int searchDigest[4],
const int byteLength,
const int valsPerByte,
int *foundIndex,
unsigned char foundKey[8],
unsigned int foundDigest[4])
{
int timer = Timer::Start();
int keyspace = FindKeyspaceSize(byteLength, valsPerByte);
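    // the outer loop strides by valsPerByte and re-derives the key via
    // IndexToKey; the inner loop then only increments the lowest-order byte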
for (int i=0; i<keyspace; i += valsPerByte)
{
unsigned char key[8] = {0,0,0,0,0,0,0,0};
IndexToKey(i, byteLength, valsPerByte, key);
for (int j=0; j < valsPerByte; ++j)
{
unsigned int digest[4];
md5_2words((unsigned int*)key, byteLength, digest);
if (digest[0] == searchDigest[0] &&
digest[1] == searchDigest[1] &&
digest[2] == searchDigest[2] &&
digest[3] == searchDigest[3])
{
*foundIndex = i + j;
foundKey[0] = key[0];
foundKey[1] = key[1];
foundKey[2] = key[2];
foundKey[3] = key[3];
foundKey[4] = key[4];
foundKey[5] = key[5];
foundKey[6] = key[6];
foundKey[7] = key[7];
foundDigest[0] = digest[0];
foundDigest[1] = digest[1];
foundDigest[2] = digest[2];
foundDigest[3] = digest[3];
}
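            // only the lowest-order byte changes within one outer-loop pass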
++key[0];
}
}
double runtime = Timer::Stop(timer, "md5 runtime");
return runtime;
}
// ****************************************************************************
// Function: FindKeyWithDigest_Kernel
//
// Purpose:
/// Within each thread of a GPU, search part of the key space
/// to find a key with the given digest.
//
// Arguments:
// searchDigest the digest to search for
// keyspace the size of the key space to search
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// foundIndex output - the index of the found key (if found)
// foundKey output - the string of the found key (if found)
// foundDigest output - the digest of the found key (if found)
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
__global__ void FindKeyWithDigest_Kernel(unsigned int searchDigest0,
unsigned int searchDigest1,
unsigned int searchDigest2,
unsigned int searchDigest3,
int keyspace,
int byteLength, int valsPerByte,
int *foundIndex,
unsigned char *foundKey,
unsigned int *foundDigest)
{
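    // each thread owns a contiguous chunk of valsPerByte key indices, so
    // only key[0] needs to change inside the loop below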
int threadid = blockIdx.x*blockDim.x + threadIdx.x;
int startindex = threadid * valsPerByte;
unsigned char key[8] = {0,0,0,0, 0,0,0,0};
IndexToKey(startindex, byteLength, valsPerByte, key);
for (int j=0; j < valsPerByte && startindex+j < keyspace; ++j)
{
unsigned int digest[4];
md5_2words((unsigned int*)key, byteLength, digest);
if (digest[0] == searchDigest0 &&
digest[1] == searchDigest1 &&
digest[2] == searchDigest2 &&
digest[3] == searchDigest3)
{
*foundIndex = startindex + j;
foundKey[0] = key[0];
foundKey[1] = key[1];
foundKey[2] = key[2];
foundKey[3] = key[3];
foundKey[4] = key[4];
foundKey[5] = key[5];
foundKey[6] = key[6];
foundKey[7] = key[7];
foundDigest[0] = digest[0];
foundDigest[1] = digest[1];
foundDigest[2] = digest[2];
foundDigest[3] = digest[3];
}
++key[0];
}
}
// ****************************************************************************
// Function: FindKeyWithDigest_GPU
//
// Purpose:
/// On the GPU, search the key space to find a key with the given digest.
//
// Arguments:
// searchDigest the digest to search for
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// foundIndex output - the index of the found key (if found)
// foundKey output - the string of the found key (if found)
// foundDigest output - the digest of the found key (if found)
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
double FindKeyWithDigest_GPU(const unsigned int searchDigest[4],
const int byteLength,
const int valsPerByte,
int *foundIndex,
unsigned char foundKey[8],
unsigned int foundDigest[4])
{
int keyspace = FindKeyspaceSize(byteLength, valsPerByte);
//
// allocate output buffers
//
int *d_foundIndex;
hipMalloc((void**)&d_foundIndex, sizeof(int) * 1);
CHECK_CUDA_ERROR();
unsigned char *d_foundKey;
hipMalloc((void**)&d_foundKey, 8);
CHECK_CUDA_ERROR();
unsigned int *d_foundDigest;
hipMalloc((void**)&d_foundDigest, sizeof(unsigned int) * 4);
CHECK_CUDA_ERROR();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
CHECK_CUDA_ERROR();
//
// calculate work thread shape
//
int nthreads = 384;
size_t nblocks = ceil((double(keyspace) / double(valsPerByte)) / double(nthreads));
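    // one thread per valsPerByte-sized chunk of the keyspace; the kernel
    // bounds-checks against keyspace, so rounding up here is safe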
//
// run the kernel
//
hipEventRecord(start, 0);
hipLaunchKernelGGL(( FindKeyWithDigest_Kernel), dim3(nblocks), dim3(nthreads), 0, 0, searchDigest[0],
searchDigest[1],
searchDigest[2],
searchDigest[3],
keyspace,
byteLength, valsPerByte,
d_foundIndex,
d_foundKey,
d_foundDigest);
CHECK_CUDA_ERROR();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
CHECK_CUDA_ERROR();
//
// get the timing/rate info
//
float millisec = 0;
hipEventElapsedTime(&millisec, start, stop);
//
// read the (presumably) found key
//
hipMemcpy(foundIndex, d_foundIndex, sizeof(int) * 1, hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR();
hipMemcpy(foundKey, d_foundKey, 8, hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR();
hipMemcpy(foundDigest, d_foundDigest, sizeof(unsigned int) * 4, hipMemcpyDeviceToHost);
CHECK_CUDA_ERROR();
//
// free device memory
//
hipFree(d_foundIndex);
CHECK_CUDA_ERROR();
hipFree(d_foundKey);
CHECK_CUDA_ERROR();
hipFree(d_foundDigest);
CHECK_CUDA_ERROR();
//
// return the runtime in seconds
//
return millisec / 1.e3;
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the MD5 Hash benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
//
// ****************************************************************************
void
RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
bool verbose = op.getOptionBool("verbose");
int size = op.getOptionInt("size");
if (size < 1 || size > 4)
{
cerr << "ERROR: Invalid size parameter\n";
return;
}
//
// Determine the shape/size of key space
//
const int sizes_byteLength[] = { 7, 5, 6, 5};
const int sizes_valsPerByte[] = {10, 36, 26, 70};
const int byteLength = sizes_byteLength[size-1];
const int valsPerByte = sizes_valsPerByte[size-1];
char atts[1024];
sprintf(atts, "%dx%d", byteLength, valsPerByte);
if (verbose)
cout << "Searching keys of length " << byteLength << " bytes "
<< "and " << valsPerByte << " values per byte" << endl;
const int keyspace = FindKeyspaceSize(byteLength, valsPerByte);
if (keyspace < 0)
{
cerr << "Error: more than 2^31 bits of entropy is unsupported.\n";
return;
}
if (byteLength > 7)
{
cerr << "Error: more than 7 byte key length is unsupported.\n";
return;
}
if (verbose)
cout << "|keyspace| = " << keyspace << " ("<<int(keyspace/1e6)<<"M)" << endl;
//
// Choose a random key from the keyspace, and calculate its hash.
//
//srandom(12345);
srandom(time(NULL));
int passes = op.getOptionInt("passes");
for (int pass = 0 ; pass < passes ; ++pass)
{
        int randomIndex = random() % keyspace;
unsigned char randomKey[8] = {0,0,0,0, 0,0,0,0};
unsigned int randomDigest[4];
IndexToKey(randomIndex, byteLength, valsPerByte, randomKey);
md5_2words((unsigned int*)randomKey, byteLength, randomDigest);
if (verbose)
{
cout << endl;
cout << "--- pass " << pass << " ---" << endl;
cout << "Looking for random key:" << endl;
cout << " randomIndex = " << randomIndex << endl;
cout << " randomKey = 0x" << AsHex(randomKey, 8/*byteLength*/) << endl;
cout << " randomDigest= " << AsHex((unsigned char*)randomDigest, 16) << endl;
}
//
// Use the GPU to brute force search the keyspace for this key.
//
unsigned int foundDigest[4] = {0,0,0,0};
int foundIndex = -1;
unsigned char foundKey[8] = {0,0,0,0, 0,0,0,0};
double t; // in seconds
if (false)
{
t = FindKeyWithDigest_CPU(randomDigest, byteLength, valsPerByte,
&foundIndex, foundKey, foundDigest);
}
else
{
t = FindKeyWithDigest_GPU(randomDigest, byteLength, valsPerByte,
&foundIndex, foundKey, foundDigest);
}
//
// Calculate the rate and add it to the results
//
double rate = (double(keyspace) / double(t)) / 1.e9;
if (verbose)
{
cout << "time = " << t << " sec, rate = " << rate << " GHash/sec\n";
}
//
// Double check everything matches (index, key, hash).
//
if (foundIndex != randomIndex)
{
cerr << "\nERROR: mismatch in key index found.\n";
rate = FLT_MAX;
}
else if (foundKey[0] != randomKey[0] ||
foundKey[1] != randomKey[1] ||
foundKey[2] != randomKey[2] ||
foundKey[3] != randomKey[3] ||
foundKey[4] != randomKey[4] ||
foundKey[5] != randomKey[5] ||
foundKey[6] != randomKey[6] ||
foundKey[7] != randomKey[7])
{
cerr << "\nERROR: mismatch in key value found.\n";
rate = FLT_MAX;
}
else if (foundDigest[0] != randomDigest[0] ||
foundDigest[1] != randomDigest[1] ||
foundDigest[2] != randomDigest[2] ||
foundDigest[3] != randomDigest[3])
{
cerr << "\nERROR: mismatch in digest of key.\n";
rate = FLT_MAX;
}
else
{
if (verbose)
cout << endl << "Successfully found match (index, key, hash):" << endl;
}
//
        // Add the calculated performance to the results
//
resultDB.AddResult("MD5Hash", atts, "GHash/s", rate);
if (verbose)
{
cout << " foundIndex = " << foundIndex << endl;
cout << " foundKey = 0x" << AsHex(foundKey, 8/*byteLength*/) << endl;
cout << " foundDigest = " << AsHex((unsigned char*)foundDigest, 16) << endl;
cout << endl;
}
}
return;
}
| 4b66ad37a0a61c186e52679cca40b6103a7f2601.cu | #include <math.h>
#include <stdlib.h>
#include <string.h>
#include <cassert>
#include <fstream>
#include <iostream>
#include <sstream>
#include "Timer.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include "OptionParser.h"
#include "ResultDatabase.h"
#include "cudacommon.h"
using namespace std;
// leftrotate function definition
#define LEFTROTATE(x, c) (((x) << (c)) | ((x) >> (32 - (c))))
#define F(x,y,z) ((x & y) | ((~x) & z))
#define G(x,y,z) ((x & z) | ((~z) & y))
#define H(x,y,z) (x ^ y ^ z)
#define I(x,y,z) (y ^ (x | (~z)))
// This version of the round shifts the interpretation of a,b,c,d by one
// and must be called with v/x/y/z in a matching shuffle pattern.
// Every four Rounds, a,b,c,d are back to their original interpretation,
// though, so it all works out in the end (we have 64 rounds per block).
#define ROUND_INPLACE_VIA_SHIFT(w, r, k, v, x, y, z, func) \
{ \
v += func(x,y,z) + w + k; \
v = x + LEFTROTATE(v, r); \
}
// This version ignores the mapping of a/b/c/d to v/x/y/z and simply
// uses a temporary variable to keep the interpretation of a/b/c/d
// consistent. Whether this one or the previous one performs better
// probably depends on the compiler....
#define ROUND_USING_TEMP_VARS(w, r, k, v, x, y, z, func) \
{ \
a = a + func(b,c,d) + k + w; \
unsigned int temp = d; \
d = c; \
c = b; \
b = b + LEFTROTATE(a, r); \
a = temp; \
}
// Here, we pick which style of ROUND we use.
#define ROUND ROUND_USING_TEMP_VARS
//#define ROUND ROUND_INPLACE_VIA_SHIFT
/// NOTE: this really only allows a length up to 7 bytes, not 8, because
/// we need to start the padding in the first byte following the message,
/// and we only have two words to work with here....
/// It also assumes words[] has all zero bits except the chars of interest.
__host__ __device__ inline void md5_2words(unsigned int *words,
unsigned int len,
unsigned int *digest)
{
// For any block but the first one, these should be passed in, not
// initialized, but we are assuming we only operate on a single block.
unsigned int h0 = 0x67452301;
unsigned int h1 = 0xefcdab89;
unsigned int h2 = 0x98badcfe;
unsigned int h3 = 0x10325476;
unsigned int a = h0;
unsigned int b = h1;
unsigned int c = h2;
unsigned int d = h3;
unsigned int WL = len * 8;
unsigned int W0 = words[0];
unsigned int W1 = words[1];
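    // set the mandatory MD5 0x80 padding bit in the byte slot immediately
    // after the message (len is at most 7, so it always lands in W0 or W1)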
switch (len)
{
case 0: W0 |= 0x00000080; break;
case 1: W0 |= 0x00008000; break;
case 2: W0 |= 0x00800000; break;
case 3: W0 |= 0x80000000; break;
case 4: W1 |= 0x00000080; break;
case 5: W1 |= 0x00008000; break;
case 6: W1 |= 0x00800000; break;
case 7: W1 |= 0x80000000; break;
}
// args: word data, per-round shift amt, constant, 4 vars, function macro
ROUND(W0, 7, 0xd76aa478, a, b, c, d, F);
ROUND(W1, 12, 0xe8c7b756, d, a, b, c, F);
ROUND(0, 17, 0x242070db, c, d, a, b, F);
ROUND(0, 22, 0xc1bdceee, b, c, d, a, F);
ROUND(0, 7, 0xf57c0faf, a, b, c, d, F);
ROUND(0, 12, 0x4787c62a, d, a, b, c, F);
ROUND(0, 17, 0xa8304613, c, d, a, b, F);
ROUND(0, 22, 0xfd469501, b, c, d, a, F);
ROUND(0, 7, 0x698098d8, a, b, c, d, F);
ROUND(0, 12, 0x8b44f7af, d, a, b, c, F);
ROUND(0, 17, 0xffff5bb1, c, d, a, b, F);
ROUND(0, 22, 0x895cd7be, b, c, d, a, F);
ROUND(0, 7, 0x6b901122, a, b, c, d, F);
ROUND(0, 12, 0xfd987193, d, a, b, c, F);
ROUND(WL, 17, 0xa679438e, c, d, a, b, F);
ROUND(0, 22, 0x49b40821, b, c, d, a, F);
ROUND(W1, 5, 0xf61e2562, a, b, c, d, G);
ROUND(0, 9, 0xc040b340, d, a, b, c, G);
ROUND(0, 14, 0x265e5a51, c, d, a, b, G);
ROUND(W0, 20, 0xe9b6c7aa, b, c, d, a, G);
ROUND(0, 5, 0xd62f105d, a, b, c, d, G);
ROUND(0, 9, 0x02441453, d, a, b, c, G);
ROUND(0, 14, 0xd8a1e681, c, d, a, b, G);
ROUND(0, 20, 0xe7d3fbc8, b, c, d, a, G);
ROUND(0, 5, 0x21e1cde6, a, b, c, d, G);
ROUND(WL, 9, 0xc33707d6, d, a, b, c, G);
ROUND(0, 14, 0xf4d50d87, c, d, a, b, G);
ROUND(0, 20, 0x455a14ed, b, c, d, a, G);
ROUND(0, 5, 0xa9e3e905, a, b, c, d, G);
ROUND(0, 9, 0xfcefa3f8, d, a, b, c, G);
ROUND(0, 14, 0x676f02d9, c, d, a, b, G);
ROUND(0, 20, 0x8d2a4c8a, b, c, d, a, G);
ROUND(0, 4, 0xfffa3942, a, b, c, d, H);
ROUND(0, 11, 0x8771f681, d, a, b, c, H);
ROUND(0, 16, 0x6d9d6122, c, d, a, b, H);
ROUND(WL, 23, 0xfde5380c, b, c, d, a, H);
ROUND(W1, 4, 0xa4beea44, a, b, c, d, H);
ROUND(0, 11, 0x4bdecfa9, d, a, b, c, H);
ROUND(0, 16, 0xf6bb4b60, c, d, a, b, H);
ROUND(0, 23, 0xbebfbc70, b, c, d, a, H);
ROUND(0, 4, 0x289b7ec6, a, b, c, d, H);
ROUND(W0, 11, 0xeaa127fa, d, a, b, c, H);
ROUND(0, 16, 0xd4ef3085, c, d, a, b, H);
ROUND(0, 23, 0x04881d05, b, c, d, a, H);
ROUND(0, 4, 0xd9d4d039, a, b, c, d, H);
ROUND(0, 11, 0xe6db99e5, d, a, b, c, H);
ROUND(0, 16, 0x1fa27cf8, c, d, a, b, H);
ROUND(0, 23, 0xc4ac5665, b, c, d, a, H);
ROUND(W0, 6, 0xf4292244, a, b, c, d, I);
ROUND(0, 10, 0x432aff97, d, a, b, c, I);
ROUND(WL, 15, 0xab9423a7, c, d, a, b, I);
ROUND(0, 21, 0xfc93a039, b, c, d, a, I);
ROUND(0, 6, 0x655b59c3, a, b, c, d, I);
ROUND(0, 10, 0x8f0ccc92, d, a, b, c, I);
ROUND(0, 15, 0xffeff47d, c, d, a, b, I);
ROUND(W1, 21, 0x85845dd1, b, c, d, a, I);
ROUND(0, 6, 0x6fa87e4f, a, b, c, d, I);
ROUND(0, 10, 0xfe2ce6e0, d, a, b, c, I);
ROUND(0, 15, 0xa3014314, c, d, a, b, I);
ROUND(0, 21, 0x4e0811a1, b, c, d, a, I);
ROUND(0, 6, 0xf7537e82, a, b, c, d, I);
ROUND(0, 10, 0xbd3af235, d, a, b, c, I);
ROUND(0, 15, 0x2ad7d2bb, c, d, a, b, I);
ROUND(0, 21, 0xeb86d391, b, c, d, a, I);
h0 += a;
h1 += b;
h2 += c;
h3 += d;
// write the final result out
digest[0] = h0;
digest[1] = h1;
digest[2] = h2;
digest[3] = h3;
}
// ****************************************************************************
// Function: FindKeyspaceSize
//
// Purpose:
/// Multiply valsPerByte by itself byteLength times (i.e. compute
/// valsPerByte^byteLength) to find the total size of the key space,
/// with error checking.
//
// Arguments:
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
__host__ __device__ int FindKeyspaceSize(int byteLength, int valsPerByte)
{
int keyspace = 1;
for (int i=0; i<byteLength; ++i)
{
if (keyspace >= 0x7fffffff / valsPerByte)
{
// error, we're about to overflow a signed int
return -1;
}
keyspace *= valsPerByte;
}
return keyspace;
}
// ****************************************************************************
// Function: IndexToKey
//
// Purpose:
/// For a given index in the keyspace, find the actual key string
/// which is at that index.
//
// Arguments:
// index index in key space
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// vals output key string
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
__host__ __device__ void IndexToKey(unsigned int index,
int byteLength, int valsPerByte,
unsigned char vals[8])
{
// loop pointlessly unrolled to avoid CUDA compiler complaints
// about unaligned accesses (!?) on older compute capabilities
vals[0] = index % valsPerByte;
index /= valsPerByte;
vals[1] = index % valsPerByte;
index /= valsPerByte;
vals[2] = index % valsPerByte;
index /= valsPerByte;
vals[3] = index % valsPerByte;
index /= valsPerByte;
vals[4] = index % valsPerByte;
index /= valsPerByte;
vals[5] = index % valsPerByte;
index /= valsPerByte;
vals[6] = index % valsPerByte;
index /= valsPerByte;
vals[7] = index % valsPerByte;
index /= valsPerByte;
}
// ****************************************************************************
// Function: AsHex
//
// Purpose:
/// For a given key string, return the raw hex string for its bytes.
//
// Arguments:
// vals key string
// len length of key string
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
std::string AsHex(unsigned char *vals, int len)
{
ostringstream out;
char tmp[256];
for (int i=0; i<len; ++i)
{
sprintf(tmp, "%2.2X", vals[i]);
out << tmp;
}
return out.str();
}
// ****************************************************************************
// Function: addBenchmarkSpecOptions
//
// Purpose:
// Add benchmark specific options parsing
//
// Arguments:
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
//
// ****************************************************************************
void
addBenchmarkSpecOptions(OptionParser &op)
{
}
// ****************************************************************************
// Function: FindKeyWithDigest_CPU
//
// Purpose:
/// On the CPU, search the key space to find a key with the given digest.
//
// Arguments:
// searchDigest the digest to search for
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// foundIndex output - the index of the found key (if found)
// foundKey output - the string of the found key (if found)
// foundDigest output - the digest of the found key (if found)
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
double FindKeyWithDigest_CPU(const unsigned int searchDigest[4],
const int byteLength,
const int valsPerByte,
int *foundIndex,
unsigned char foundKey[8],
unsigned int foundDigest[4])
{
int timer = Timer::Start();
int keyspace = FindKeyspaceSize(byteLength, valsPerByte);
for (int i=0; i<keyspace; i += valsPerByte)
{
unsigned char key[8] = {0,0,0,0,0,0,0,0};
IndexToKey(i, byteLength, valsPerByte, key);
for (int j=0; j < valsPerByte; ++j)
{
unsigned int digest[4];
md5_2words((unsigned int*)key, byteLength, digest);
if (digest[0] == searchDigest[0] &&
digest[1] == searchDigest[1] &&
digest[2] == searchDigest[2] &&
digest[3] == searchDigest[3])
{
*foundIndex = i + j;
foundKey[0] = key[0];
foundKey[1] = key[1];
foundKey[2] = key[2];
foundKey[3] = key[3];
foundKey[4] = key[4];
foundKey[5] = key[5];
foundKey[6] = key[6];
foundKey[7] = key[7];
foundDigest[0] = digest[0];
foundDigest[1] = digest[1];
foundDigest[2] = digest[2];
foundDigest[3] = digest[3];
}
++key[0];
}
}
double runtime = Timer::Stop(timer, "md5 runtime");
return runtime;
}
// ****************************************************************************
// Function: FindKeyWithDigest_Kernel
//
// Purpose:
/// Within each thread of a GPU, search part of the key space
/// to find a key with the given digest.
//
// Arguments:
// searchDigest the digest to search for
// keyspace the size of the key space to search
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// foundIndex output - the index of the found key (if found)
// foundKey output - the string of the found key (if found)
// foundDigest output - the digest of the found key (if found)
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
__global__ void FindKeyWithDigest_Kernel(unsigned int searchDigest0,
unsigned int searchDigest1,
unsigned int searchDigest2,
unsigned int searchDigest3,
int keyspace,
int byteLength, int valsPerByte,
int *foundIndex,
unsigned char *foundKey,
unsigned int *foundDigest)
{
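    // each thread searches a contiguous chunk of valsPerByte key indices,
    // so only key[0] needs to change inside the loop below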
int threadid = blockIdx.x*blockDim.x + threadIdx.x;
int startindex = threadid * valsPerByte;
unsigned char key[8] = {0,0,0,0, 0,0,0,0};
IndexToKey(startindex, byteLength, valsPerByte, key);
for (int j=0; j < valsPerByte && startindex+j < keyspace; ++j)
{
unsigned int digest[4];
md5_2words((unsigned int*)key, byteLength, digest);
if (digest[0] == searchDigest0 &&
digest[1] == searchDigest1 &&
digest[2] == searchDigest2 &&
digest[3] == searchDigest3)
{
*foundIndex = startindex + j;
foundKey[0] = key[0];
foundKey[1] = key[1];
foundKey[2] = key[2];
foundKey[3] = key[3];
foundKey[4] = key[4];
foundKey[5] = key[5];
foundKey[6] = key[6];
foundKey[7] = key[7];
foundDigest[0] = digest[0];
foundDigest[1] = digest[1];
foundDigest[2] = digest[2];
foundDigest[3] = digest[3];
}
++key[0];
}
}
// ****************************************************************************
// Function: FindKeyWithDigest_GPU
//
// Purpose:
/// On the GPU, search the key space to find a key with the given digest.
//
// Arguments:
// searchDigest the digest to search for
// byteLength number of bytes in a key
// valsPerByte number of values each byte can take on
// foundIndex output - the index of the found key (if found)
// foundKey output - the string of the found key (if found)
// foundDigest output - the digest of the found key (if found)
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
// ****************************************************************************
double FindKeyWithDigest_GPU(const unsigned int searchDigest[4],
const int byteLength,
const int valsPerByte,
int *foundIndex,
unsigned char foundKey[8],
unsigned int foundDigest[4])
{
int keyspace = FindKeyspaceSize(byteLength, valsPerByte);
//
// allocate output buffers
//
int *d_foundIndex;
cudaMalloc((void**)&d_foundIndex, sizeof(int) * 1);
CHECK_CUDA_ERROR();
unsigned char *d_foundKey;
cudaMalloc((void**)&d_foundKey, 8);
CHECK_CUDA_ERROR();
unsigned int *d_foundDigest;
cudaMalloc((void**)&d_foundDigest, sizeof(unsigned int) * 4);
CHECK_CUDA_ERROR();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
CHECK_CUDA_ERROR();
//
// calculate work thread shape
//
int nthreads = 384;
size_t nblocks = ceil((double(keyspace) / double(valsPerByte)) / double(nthreads));
//
// run the kernel
//
cudaEventRecord(start, 0);
FindKeyWithDigest_Kernel<<<nblocks, nthreads>>>(searchDigest[0],
searchDigest[1],
searchDigest[2],
searchDigest[3],
keyspace,
byteLength, valsPerByte,
d_foundIndex,
d_foundKey,
d_foundDigest);
CHECK_CUDA_ERROR();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
CHECK_CUDA_ERROR();
//
// get the timing/rate info
//
float millisec = 0;
cudaEventElapsedTime(&millisec, start, stop);
//
// read the (presumably) found key
//
cudaMemcpy(foundIndex, d_foundIndex, sizeof(int) * 1, cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR();
cudaMemcpy(foundKey, d_foundKey, 8, cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR();
cudaMemcpy(foundDigest, d_foundDigest, sizeof(unsigned int) * 4, cudaMemcpyDeviceToHost);
CHECK_CUDA_ERROR();
//
// free device memory
//
cudaFree(d_foundIndex);
CHECK_CUDA_ERROR();
cudaFree(d_foundKey);
CHECK_CUDA_ERROR();
cudaFree(d_foundDigest);
CHECK_CUDA_ERROR();
//
// return the runtime in seconds
//
return millisec / 1.e3;
}
// ****************************************************************************
// Function: RunBenchmark
//
// Purpose:
// Executes the MD5 Hash benchmark
//
// Arguments:
// resultDB: results from the benchmark are stored in this db
// op: the options parser / parameter database
//
// Returns: nothing
//
// Programmer: Jeremy Meredith
// Creation: July 23, 2014
//
// Modifications:
//
// ****************************************************************************
void
RunBenchmark(ResultDatabase &resultDB, OptionParser &op)
{
bool verbose = op.getOptionBool("verbose");
int size = op.getOptionInt("size");
if (size < 1 || size > 4)
{
cerr << "ERROR: Invalid size parameter\n";
return;
}
//
// Determine the shape/size of key space
//
const int sizes_byteLength[] = { 7, 5, 6, 5};
const int sizes_valsPerByte[] = {10, 36, 26, 70};
const int byteLength = sizes_byteLength[size-1];
const int valsPerByte = sizes_valsPerByte[size-1];
char atts[1024];
sprintf(atts, "%dx%d", byteLength, valsPerByte);
if (verbose)
cout << "Searching keys of length " << byteLength << " bytes "
<< "and " << valsPerByte << " values per byte" << endl;
const int keyspace = FindKeyspaceSize(byteLength, valsPerByte);
if (keyspace < 0)
{
cerr << "Error: more than 2^31 bits of entropy is unsupported.\n";
return;
}
if (byteLength > 7)
{
cerr << "Error: more than 7 byte key length is unsupported.\n";
return;
}
if (verbose)
cout << "|keyspace| = " << keyspace << " ("<<int(keyspace/1e6)<<"M)" << endl;
//
// Choose a random key from the keyspace, and calculate its hash.
//
//srandom(12345);
srandom(time(NULL));
int passes = op.getOptionInt("passes");
for (int pass = 0 ; pass < passes ; ++pass)
{
        int randomIndex = random() % keyspace;
unsigned char randomKey[8] = {0,0,0,0, 0,0,0,0};
unsigned int randomDigest[4];
IndexToKey(randomIndex, byteLength, valsPerByte, randomKey);
md5_2words((unsigned int*)randomKey, byteLength, randomDigest);
if (verbose)
{
cout << endl;
cout << "--- pass " << pass << " ---" << endl;
cout << "Looking for random key:" << endl;
cout << " randomIndex = " << randomIndex << endl;
cout << " randomKey = 0x" << AsHex(randomKey, 8/*byteLength*/) << endl;
cout << " randomDigest= " << AsHex((unsigned char*)randomDigest, 16) << endl;
}
//
// Use the GPU to brute force search the keyspace for this key.
//
unsigned int foundDigest[4] = {0,0,0,0};
int foundIndex = -1;
unsigned char foundKey[8] = {0,0,0,0, 0,0,0,0};
double t; // in seconds
if (false)
{
t = FindKeyWithDigest_CPU(randomDigest, byteLength, valsPerByte,
&foundIndex, foundKey, foundDigest);
}
else
{
t = FindKeyWithDigest_GPU(randomDigest, byteLength, valsPerByte,
&foundIndex, foundKey, foundDigest);
}
//
// Calculate the rate and add it to the results
//
double rate = (double(keyspace) / double(t)) / 1.e9;
if (verbose)
{
cout << "time = " << t << " sec, rate = " << rate << " GHash/sec\n";
}
//
// Double check everything matches (index, key, hash).
//
if (foundIndex != randomIndex)
{
cerr << "\nERROR: mismatch in key index found.\n";
rate = FLT_MAX;
}
else if (foundKey[0] != randomKey[0] ||
foundKey[1] != randomKey[1] ||
foundKey[2] != randomKey[2] ||
foundKey[3] != randomKey[3] ||
foundKey[4] != randomKey[4] ||
foundKey[5] != randomKey[5] ||
foundKey[6] != randomKey[6] ||
foundKey[7] != randomKey[7])
{
cerr << "\nERROR: mismatch in key value found.\n";
rate = FLT_MAX;
}
else if (foundDigest[0] != randomDigest[0] ||
foundDigest[1] != randomDigest[1] ||
foundDigest[2] != randomDigest[2] ||
foundDigest[3] != randomDigest[3])
{
cerr << "\nERROR: mismatch in digest of key.\n";
rate = FLT_MAX;
}
else
{
if (verbose)
cout << endl << "Successfully found match (index, key, hash):" << endl;
}
//
        // Add the calculated performance to the results
//
resultDB.AddResult("MD5Hash", atts, "GHash/s", rate);
if (verbose)
{
cout << " foundIndex = " << foundIndex << endl;
cout << " foundKey = 0x" << AsHex(foundKey, 8/*byteLength*/) << endl;
cout << " foundDigest = " << AsHex((unsigned char*)foundDigest, 16) << endl;
cout << endl;
}
}
return;
}
|
0c5c66612aa60ae110e70a5a9c55700e5b3e1bea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h"
#include "test_common.h"
using namespace Eigen;
using Matrix5d = Matrix<double, 5, 5>;
__host__ __device__ void eigenValues(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
#if TEST_DEBUG
printf("Matrix(0,0): %f\n", (*m)(0, 0));
printf("Matrix(1,1): %f\n", (*m)(1, 1));
printf("Matrix(2,2): %f\n", (*m)(2, 2));
#endif
SelfAdjointEigenSolver<Matrix3d> es;
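  // computeDirect uses Eigen's closed-form analytic solver for 3x3
  // self-adjoint matrices, so it is callable from device code as well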
es.computeDirect(*m);
(*ret) = es.eigenvalues();
return;
}
__global__ void kernel(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
eigenValues(m, ret);
}
__global__ void kernelInverse3x3(Matrix3d *in, Matrix3d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse4x4(Matrix4d *in, Matrix4d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse5x5(Matrix5d *in, Matrix5d *out) { (*out) = in->inverse(); }
template <typename M1, typename M2, typename M3>
__global__ void kernelMultiply(M1 *J, M2 *C, M3 *result) {
// Map<M3> res(result->data());
#if TEST_DEBUG
printf("*** GPU IN ***\n");
#endif
printIt(J);
printIt(C);
// res.noalias() = (*J) * (*C);
// printIt(&res);
(*result) = (*J) * (*C);
#if TEST_DEBUG
printf("*** GPU OUT ***\n");
#endif
return;
}
template <int row1, int col1, int row2, int col2>
void testMultiply() {
std::cout << "TEST MULTIPLY" << std::endl;
std::cout << "Product of type " << row1 << "x" << col1 << " * " << row2 << "x" << col2 << std::endl;
Eigen::Matrix<double, row1, col1> J;
fillMatrix(J);
Eigen::Matrix<double, row2, col2> C;
fillMatrix(C);
Eigen::Matrix<double, row1, col2> multiply_result = J * C;
#if TEST_DEBUG
std::cout << "Input J:" << std::endl;
printIt(&J);
std::cout << "Input C:" << std::endl;
printIt(&C);
std::cout << "Output:" << std::endl;
printIt(&multiply_result);
#endif
// GPU
Eigen::Matrix<double, row1, col1> *JGPU = nullptr;
Eigen::Matrix<double, row2, col2> *CGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPUret = new Eigen::Matrix<double, row1, col2>();
hipMalloc((void **)&JGPU, sizeof(Eigen::Matrix<double, row1, col1>));
hipMalloc((void **)&CGPU, sizeof(Eigen::Matrix<double, row2, col2>));
hipMalloc((void **)&multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>));
hipMemcpy(JGPU, &J, sizeof(Eigen::Matrix<double, row1, col1>), hipMemcpyHostToDevice);
hipMemcpy(CGPU, &C, sizeof(Eigen::Matrix<double, row2, col2>), hipMemcpyHostToDevice);
hipMemcpy(multiply_resultGPU, &multiply_result, sizeof(Eigen::Matrix<double, row1, col2>), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelMultiply), dim3(1), dim3(1), 0, 0, JGPU, CGPU, multiply_resultGPU);
hipDeviceSynchronize();
hipMemcpy(
multiply_resultGPUret, multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>), hipMemcpyDeviceToHost);
printIt(multiply_resultGPUret);
assert(isEqualFuzzy(multiply_result, (*multiply_resultGPUret)));
}
void testInverse3x3() {
std::cout << "TEST INVERSE 3x3" << std::endl;
Matrix3d m;
fillMatrix(m);
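  // symmetrize: m + m^T is self-adjoint, and .eval() avoids aliasing
  // with the in-place +=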
m += m.transpose().eval();
Matrix3d m_inv = m.inverse();
Matrix3d *mGPU = nullptr;
Matrix3d *mGPUret = nullptr;
Matrix3d *mCPUret = new Matrix3d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
hipMalloc((void **)&mGPU, sizeof(Matrix3d));
hipMalloc((void **)&mGPUret, sizeof(Matrix3d));
hipMemcpy(mGPU, &m, sizeof(Matrix3d), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelInverse3x3), dim3(1), dim3(1), 0, 0, mGPU, mGPUret);
hipDeviceSynchronize();
hipMemcpy(mCPUret, mGPUret, sizeof(Matrix3d), hipMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse4x4() {
std::cout << "TEST INVERSE 4x4" << std::endl;
Matrix4d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix4d m_inv = m.inverse();
Matrix4d *mGPU = nullptr;
Matrix4d *mGPUret = nullptr;
Matrix4d *mCPUret = new Matrix4d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
hipMalloc((void **)&mGPU, sizeof(Matrix4d));
hipMalloc((void **)&mGPUret, sizeof(Matrix4d));
hipMemcpy(mGPU, &m, sizeof(Matrix4d), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelInverse4x4), dim3(1), dim3(1), 0, 0, mGPU, mGPUret);
hipDeviceSynchronize();
hipMemcpy(mCPUret, mGPUret, sizeof(Matrix4d), hipMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse5x5() {
std::cout << "TEST INVERSE 5x5" << std::endl;
Matrix5d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix5d m_inv = m.inverse();
Matrix5d *mGPU = nullptr;
Matrix5d *mGPUret = nullptr;
Matrix5d *mCPUret = new Matrix5d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
hipMalloc((void **)&mGPU, sizeof(Matrix5d));
hipMalloc((void **)&mGPUret, sizeof(Matrix5d));
hipMemcpy(mGPU, &m, sizeof(Matrix5d), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernelInverse5x5), dim3(1), dim3(1), 0, 0, mGPU, mGPUret);
hipDeviceSynchronize();
hipMemcpy(mCPUret, mGPUret, sizeof(Matrix5d), hipMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testEigenvalues() {
std::cout << "TEST EIGENVALUES" << std::endl;
Matrix3d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix3d *m_gpu = nullptr;
Matrix3d *mgpudebug = new Matrix3d();
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret1 =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret_gpu = nullptr;
eigenValues(&m, ret);
#if TEST_DEBUG
std::cout << "Generated Matrix M 3x3:\n" << m << std::endl;
std::cout << "The eigenvalues of M are:" << std::endl << (*ret) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
hipMalloc((void **)&m_gpu, sizeof(Matrix3d));
hipMalloc((void **)&ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType));
hipMemcpy(m_gpu, &m, sizeof(Matrix3d), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel), dim3(1), dim3(1), 0, 0, m_gpu, ret_gpu);
hipDeviceSynchronize();
hipMemcpy(mgpudebug, m_gpu, sizeof(Matrix3d), hipMemcpyDeviceToHost);
hipMemcpy(ret1, ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType), hipMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "GPU Generated Matrix M 3x3:\n" << (*mgpudebug) << std::endl;
std::cout << "GPU The eigenvalues of M are:" << std::endl << (*ret1) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
assert(isEqualFuzzy(*ret, *ret1));
}
int main(int argc, char *argv[]) {
exitSansCUDADevices();
testEigenvalues();
testInverse3x3();
testInverse4x4();
testInverse5x5();
testMultiply<1, 2, 2, 1>();
testMultiply<1, 2, 2, 2>();
testMultiply<1, 2, 2, 3>();
testMultiply<1, 2, 2, 4>();
testMultiply<1, 2, 2, 5>();
testMultiply<2, 1, 1, 2>();
testMultiply<2, 1, 1, 3>();
testMultiply<2, 1, 1, 4>();
testMultiply<2, 1, 1, 5>();
testMultiply<2, 2, 2, 2>();
testMultiply<2, 3, 3, 1>();
testMultiply<2, 3, 3, 2>();
testMultiply<2, 3, 3, 4>();
testMultiply<2, 3, 3, 5>();
testMultiply<3, 2, 2, 3>();
testMultiply<2, 3, 3, 3>(); // DOES NOT COMPILE W/O PATCHING EIGEN
testMultiply<3, 3, 3, 3>();
testMultiply<8, 8, 8, 8>();
testMultiply<3, 4, 4, 3>();
testMultiply<2, 4, 4, 2>();
testMultiply<3, 4, 4, 2>(); // DOES NOT COMPILE W/O PATCHING EIGEN
return 0;
}
| 0c5c66612aa60ae110e70a5a9c55700e5b3e1bea.cu | #include <iostream>
#include <Eigen/Core>
#include <Eigen/Eigenvalues>
#include "HeterogeneousCore/CUDAUtilities/interface/cudaCheck.h"
#include "HeterogeneousCore/CUDAUtilities/interface/exitSansCUDADevices.h"
#include "test_common.h"
using namespace Eigen;
using Matrix5d = Matrix<double, 5, 5>;
__host__ __device__ void eigenValues(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
#if TEST_DEBUG
printf("Matrix(0,0): %f\n", (*m)(0, 0));
printf("Matrix(1,1): %f\n", (*m)(1, 1));
printf("Matrix(2,2): %f\n", (*m)(2, 2));
#endif
SelfAdjointEigenSolver<Matrix3d> es;
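  // computeDirect uses Eigen's closed-form analytic solver for 3x3
  // self-adjoint matrices, so it is callable from device code as well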
es.computeDirect(*m);
(*ret) = es.eigenvalues();
return;
}
__global__ void kernel(Matrix3d *m, Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret) {
eigenValues(m, ret);
}
__global__ void kernelInverse3x3(Matrix3d *in, Matrix3d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse4x4(Matrix4d *in, Matrix4d *out) { (*out) = in->inverse(); }
__global__ void kernelInverse5x5(Matrix5d *in, Matrix5d *out) { (*out) = in->inverse(); }
template <typename M1, typename M2, typename M3>
__global__ void kernelMultiply(M1 *J, M2 *C, M3 *result) {
// Map<M3> res(result->data());
#if TEST_DEBUG
printf("*** GPU IN ***\n");
#endif
printIt(J);
printIt(C);
// res.noalias() = (*J) * (*C);
// printIt(&res);
(*result) = (*J) * (*C);
#if TEST_DEBUG
printf("*** GPU OUT ***\n");
#endif
return;
}
template <int row1, int col1, int row2, int col2>
void testMultiply() {
std::cout << "TEST MULTIPLY" << std::endl;
std::cout << "Product of type " << row1 << "x" << col1 << " * " << row2 << "x" << col2 << std::endl;
Eigen::Matrix<double, row1, col1> J;
fillMatrix(J);
Eigen::Matrix<double, row2, col2> C;
fillMatrix(C);
Eigen::Matrix<double, row1, col2> multiply_result = J * C;
#if TEST_DEBUG
std::cout << "Input J:" << std::endl;
printIt(&J);
std::cout << "Input C:" << std::endl;
printIt(&C);
std::cout << "Output:" << std::endl;
printIt(&multiply_result);
#endif
// GPU
Eigen::Matrix<double, row1, col1> *JGPU = nullptr;
Eigen::Matrix<double, row2, col2> *CGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPU = nullptr;
Eigen::Matrix<double, row1, col2> *multiply_resultGPUret = new Eigen::Matrix<double, row1, col2>();
cudaMalloc((void **)&JGPU, sizeof(Eigen::Matrix<double, row1, col1>));
cudaMalloc((void **)&CGPU, sizeof(Eigen::Matrix<double, row2, col2>));
cudaMalloc((void **)&multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>));
cudaMemcpy(JGPU, &J, sizeof(Eigen::Matrix<double, row1, col1>), cudaMemcpyHostToDevice);
cudaMemcpy(CGPU, &C, sizeof(Eigen::Matrix<double, row2, col2>), cudaMemcpyHostToDevice);
cudaMemcpy(multiply_resultGPU, &multiply_result, sizeof(Eigen::Matrix<double, row1, col2>), cudaMemcpyHostToDevice);
kernelMultiply<<<1, 1>>>(JGPU, CGPU, multiply_resultGPU);
cudaDeviceSynchronize();
cudaMemcpy(
multiply_resultGPUret, multiply_resultGPU, sizeof(Eigen::Matrix<double, row1, col2>), cudaMemcpyDeviceToHost);
printIt(multiply_resultGPUret);
assert(isEqualFuzzy(multiply_result, (*multiply_resultGPUret)));
}
void testInverse3x3() {
std::cout << "TEST INVERSE 3x3" << std::endl;
Matrix3d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix3d m_inv = m.inverse();
Matrix3d *mGPU = nullptr;
Matrix3d *mGPUret = nullptr;
Matrix3d *mCPUret = new Matrix3d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaMalloc((void **)&mGPU, sizeof(Matrix3d));
cudaMalloc((void **)&mGPUret, sizeof(Matrix3d));
cudaMemcpy(mGPU, &m, sizeof(Matrix3d), cudaMemcpyHostToDevice);
kernelInverse3x3<<<1, 1>>>(mGPU, mGPUret);
cudaDeviceSynchronize();
cudaMemcpy(mCPUret, mGPUret, sizeof(Matrix3d), cudaMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse4x4() {
std::cout << "TEST INVERSE 4x4" << std::endl;
Matrix4d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix4d m_inv = m.inverse();
Matrix4d *mGPU = nullptr;
Matrix4d *mGPUret = nullptr;
Matrix4d *mCPUret = new Matrix4d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaMalloc((void **)&mGPU, sizeof(Matrix4d));
cudaMalloc((void **)&mGPUret, sizeof(Matrix4d));
cudaMemcpy(mGPU, &m, sizeof(Matrix4d), cudaMemcpyHostToDevice);
kernelInverse4x4<<<1, 1>>>(mGPU, mGPUret);
cudaDeviceSynchronize();
cudaMemcpy(mCPUret, mGPUret, sizeof(Matrix4d), cudaMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testInverse5x5() {
std::cout << "TEST INVERSE 5x5" << std::endl;
Matrix5d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix5d m_inv = m.inverse();
Matrix5d *mGPU = nullptr;
Matrix5d *mGPUret = nullptr;
Matrix5d *mCPUret = new Matrix5d();
#if TEST_DEBUG
std::cout << "Here is the matrix m:" << std::endl << m << std::endl;
std::cout << "Its inverse is:" << std::endl << m.inverse() << std::endl;
#endif
cudaMalloc((void **)&mGPU, sizeof(Matrix5d));
cudaMalloc((void **)&mGPUret, sizeof(Matrix5d));
cudaMemcpy(mGPU, &m, sizeof(Matrix5d), cudaMemcpyHostToDevice);
kernelInverse5x5<<<1, 1>>>(mGPU, mGPUret);
cudaDeviceSynchronize();
cudaMemcpy(mCPUret, mGPUret, sizeof(Matrix5d), cudaMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "Its GPU inverse is:" << std::endl << (*mCPUret) << std::endl;
#endif
assert(isEqualFuzzy(m_inv, *mCPUret));
}
void testEigenvalues() {
std::cout << "TEST EIGENVALUES" << std::endl;
Matrix3d m;
fillMatrix(m);
m += m.transpose().eval();
Matrix3d *m_gpu = nullptr;
Matrix3d *mgpudebug = new Matrix3d();
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret1 =
new Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType;
Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType *ret_gpu = nullptr;
eigenValues(&m, ret);
#if TEST_DEBUG
std::cout << "Generated Matrix M 3x3:\n" << m << std::endl;
std::cout << "The eigenvalues of M are:" << std::endl << (*ret) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
cudaMalloc((void **)&m_gpu, sizeof(Matrix3d));
cudaMalloc((void **)&ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType));
cudaMemcpy(m_gpu, &m, sizeof(Matrix3d), cudaMemcpyHostToDevice);
kernel<<<1, 1>>>(m_gpu, ret_gpu);
cudaDeviceSynchronize();
cudaMemcpy(mgpudebug, m_gpu, sizeof(Matrix3d), cudaMemcpyDeviceToHost);
cudaMemcpy(ret1, ret_gpu, sizeof(Eigen::SelfAdjointEigenSolver<Matrix3d>::RealVectorType), cudaMemcpyDeviceToHost);
#if TEST_DEBUG
std::cout << "GPU Generated Matrix M 3x3:\n" << (*mgpudebug) << std::endl;
std::cout << "GPU The eigenvalues of M are:" << std::endl << (*ret1) << std::endl;
std::cout << "*************************\n\n" << std::endl;
#endif
assert(isEqualFuzzy(*ret, *ret1));
}
int main(int argc, char *argv[]) {
exitSansCUDADevices();
testEigenvalues();
testInverse3x3();
testInverse4x4();
testInverse5x5();
testMultiply<1, 2, 2, 1>();
testMultiply<1, 2, 2, 2>();
testMultiply<1, 2, 2, 3>();
testMultiply<1, 2, 2, 4>();
testMultiply<1, 2, 2, 5>();
testMultiply<2, 1, 1, 2>();
testMultiply<2, 1, 1, 3>();
testMultiply<2, 1, 1, 4>();
testMultiply<2, 1, 1, 5>();
testMultiply<2, 2, 2, 2>();
testMultiply<2, 3, 3, 1>();
testMultiply<2, 3, 3, 2>();
testMultiply<2, 3, 3, 4>();
testMultiply<2, 3, 3, 5>();
testMultiply<3, 2, 2, 3>();
testMultiply<2, 3, 3, 3>(); // DOES NOT COMPILE W/O PATCHING EIGEN
testMultiply<3, 3, 3, 3>();
testMultiply<8, 8, 8, 8>();
testMultiply<3, 4, 4, 3>();
testMultiply<2, 4, 4, 2>();
testMultiply<3, 4, 4, 2>(); // DOES NOT COMPILE W/O PATCHING EIGEN
return 0;
}
|
50c47380b717edec0ecc8e71a6b3d354879c9560.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "core_cuda.hpp"
#include "point.hpp"
#include "state_update_cuda.hpp"
//#include "state_update.hpp"
#include "flux_residual_cuda.hpp"
#include "utils.hpp"
#include <thrust/reduce.h>
#include <thrust/system/hip/execution_policy.h>
__device__ inline void q_var_derivatives_get_sum_delq_innerloop(Point* globaldata, int idx, int conn, double weights, double delta_x, double delta_y, double qi_tilde[4], double qk_tilde[4], double sig_del_x_del_q[4], double sig_del_y_del_q[4]);
template <class Type>
bool isNan(Type var)
{
if(var!=var) return true;
return false;
}
double calculateTheta(Config configData)
{
return (configData.core.aoa * (M_PI)/180.0);
}
void getInitialPrimitive(Config configData, double primal[4])
{
primal[0] = configData.core.rho_inf;
double mach = configData.core.mach;
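    // free-stream velocity components from the Mach number and angle of attack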
double machcos = mach * cos(calculateTheta(configData));
double machsin = mach * sin(calculateTheta(configData));
primal[1] = machcos;
primal[2] = machsin;
primal[3] = configData.core.pr_inf;
}
void placeNormals(Point* globaldata, int idx, Config configData, long long interior, long long wall, long long outer)
{
int flag = globaldata[idx].flag_1;
if (flag == wall || flag == outer)
{
xy_tuple currpt = getxy(globaldata[idx]);
int leftpt_tmp = globaldata[idx].left;
leftpt_tmp = leftpt_tmp - 1; // To account for indexing
xy_tuple leftpt = getxy(globaldata[leftpt_tmp]);
int rightpt_tmp = globaldata[idx].right;
rightpt_tmp = rightpt_tmp - 1; // To account for indexing
xy_tuple rightpt = getxy(globaldata[rightpt_tmp]);
xy_tuple normals = calculateNormals(leftpt, rightpt, std::get<0>(currpt), std::get<1>(currpt));
setNormals(globaldata, idx, normals);
}
else if (flag == interior)
setNormals(globaldata, idx, std::make_tuple(0.0, 1.0));
else
cout<<"Illegal Point Type"<<endl;
}
xy_tuple calculateNormals(xy_tuple left, xy_tuple right, double mx, double my)
{
double lx = std::get<0>(left);
double ly = std::get<1>(left);
double rx = std::get<0>(right);
double ry = std::get<1>(right);
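    // average the unnormalised normals of the two adjacent wall edges,
    // then normalise; the sign flip sets the orientation convention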
double nx1 = my - ly;
double nx2 = ry - my;
double ny1 = mx - lx;
double ny2 = rx - mx;
double nx = 0.5*(nx1 + nx2);
double ny = 0.5*(ny1 + ny2);
double det = hypot(nx, ny);
nx = -nx/det;
ny = ny/det;
return std::make_tuple(nx, ny);
}
void calculateConnectivity(Point* globaldata, int idx)
{
Point ptInterest = globaldata[idx];
double currx = ptInterest.x;
double curry = ptInterest.y;
double nx = ptInterest.nx;
double ny = ptInterest.ny;
int flag = ptInterest.flag_1;
double tx = ny;
double ty = -nx;
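    // (tx, ty) is the tangent direction; neighbours are split into four
    // one-sided stencils by the signs of their tangential (delta_s) and
    // normal (delta_n) offsets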
int xpos_nbhs = 0;
int xneg_nbhs = 0;
int ypos_nbhs = 0;
int yneg_nbhs = 0;
int xpos_conn[20] = {0};
int ypos_conn[20] = {0};
int xneg_conn[20] = {0};
int yneg_conn[20] = {0};
    /* Start Connectivity Generation */
for (int i=0; i<20; i++)
{
int itm = ptInterest.conn[i];
if (itm==0)
{
//cout<<"\n Breaking"<<endl;
break;
}
itm = itm -1; // to account for indexing
//cout<< "\n Unbroken \n";
double itmx = globaldata[itm].x;
double itmy = globaldata[itm].y;
double delta_x = itmx - currx;
double delta_y = itmy - curry;
double delta_s = delta_x*tx + delta_y*ty;
double delta_n = delta_x*nx + delta_y*ny;
        itm = itm + 1; // convert back to 1-based indexing before storing itm into the quadrant connectivity arrays below
if(delta_s <= 0.0)
{
xpos_conn[xpos_nbhs] = itm;
xpos_nbhs+=1;
}
if(delta_s >= 0.0)
{
xneg_conn[xneg_nbhs] = itm;
xneg_nbhs+=1;
}
if(flag==1)
{
if(delta_n<=0.0)
{
ypos_conn[ypos_nbhs] = itm;
ypos_nbhs+=1;
}
if(delta_n>=0.0)
{
yneg_conn[yneg_nbhs] = itm;
yneg_nbhs+=1;
}
}
else if (flag==0)
{
yneg_conn[yneg_nbhs] = itm;
yneg_nbhs+=1;
}
else if (flag==2)
{
ypos_conn[ypos_nbhs] = itm;
ypos_nbhs+=1;
}
}
/* End Connectivity Generation */
for(int i=0; i<20; i++)
{
globaldata[idx].xpos_conn[i] = xpos_conn[i];
globaldata[idx].xneg_conn[i] = xneg_conn[i];
globaldata[idx].ypos_conn[i] = ypos_conn[i];
globaldata[idx].yneg_conn[i] = yneg_conn[i];
}
globaldata[idx].xpos_nbhs = xpos_nbhs;
globaldata[idx].xneg_nbhs = xneg_nbhs;
globaldata[idx].ypos_nbhs = ypos_nbhs;
globaldata[idx].yneg_nbhs = yneg_nbhs;
}
void fpi_solver(int iter, Point* globaldata_d, Config configData, double* res_old_d, double* res_sqr_d, int numPoints, TempqDers* tempdq_d, hipStream_t& stream, hipGraph_t& graph, hipGraphExec_t& instance, bool& graphCreated, double res_old[1], double* res_sqr, unsigned int mem_size_C, unsigned int mem_size_D)
{
int block_size = configData.core.threadsperblock;
dim3 threads(block_size);
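    // one CUDA thread per mesh point; the +1 block covers the remainder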
dim3 grid((numPoints / threads.x +1));
if (iter == 0)
cout<<"\nStarting FuncDelta"<<endl;
int rks = configData.core.rks;
double cfl = configData.core.cfl;
double power = configData.core.power;
// if(!graphCreated)
// {
// hipStreamBeginCapture(stream, hipStreamCaptureModeGlobal);
hipLaunchKernelGGL(( call_func_delta_cuda), dim3(grid), dim3(threads), 0, stream, globaldata_d, numPoints, cfl, threads);
// hipStreamEndCapture(stream, &graph);
// hipGraphInstantiate(&instance, graph, NULL, NULL, 0);
// graphCreated = true;
// }
// hipGraphLaunch(instance, stream);
// hipStreamSynchronize(stream);
for(int rk=0; rk<rks; rk++)
{
call_rem_fpi_solver_cuda(globaldata_d, numPoints, power, tempdq_d, block_size, configData, res_old_d, res_sqr_d, iter, rk, rks, threads, grid, stream, graph, instance, graphCreated, res_old, res_sqr, mem_size_C, mem_size_D);
}
}
__global__ void q_variables_cuda(Point* globaldata, int numPoints, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
double q_result[4] = {0};
if(idx < numPoints)
{
double rho = globaldata[idx].prim[0];
double u1 = globaldata[idx].prim[1];
double u2 = globaldata[idx].prim[2];
double pr = globaldata[idx].prim[3];
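        // transform the primitive variables into the scheme's q-variables,
        // with beta = rho / (2 * pr)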
double beta = 0.5 * (rho/pr);
double two_times_beta = 2.0 * beta;
q_result[0] = log(rho) + log(beta) * 2.5 - (beta * ((u1 * u1) + (u2 * u2)));
q_result[1] = (two_times_beta * u1);
q_result[2] = (two_times_beta * u2);
q_result[3] = -two_times_beta;
for(int i=0; i<4; i++)
{
globaldata[idx].q[i] = q_result[i];
}
}
}
void call_rem_fpi_solver_cuda(Point* globaldata_d, int numPoints, double power, TempqDers* tempdq_d, int block_size, Config configData, double* res_old_d, double* res_sqr_d, int iter, int rk, int rks, dim3 threads, dim3 grid, hipStream_t& stream, hipGraph_t& graph, hipGraphExec_t& instance, bool& graphCreated, double res_old[1], double* res_sqr, unsigned int mem_size_C, unsigned int mem_size_D)
{
if(!graphCreated)
{
hipStreamBeginCapture(stream, hipStreamCaptureModeGlobal);
// Make the kernel calls
hipLaunchKernelGGL(( q_variables_cuda), dim3(grid), dim3(threads), 0, stream, globaldata_d, numPoints, threads);
hipLaunchKernelGGL(( q_var_derivatives_cuda), dim3(grid), dim3(threads), 0, stream, globaldata_d, numPoints, power, threads);
// hipStreamEndCapture(stream, &graph);
// hipGraphInstantiate(&instance, graph, NULL, NULL, 0);
// graphCreated = true;
// }
// hipGraphLaunch(instance, stream);
// hipStreamSynchronize(stream);
    for(int inner_iters=0; inner_iters<2; inner_iters++) // two passes here; with the initial q_var_derivatives_cuda call above, that makes three derivative evaluations in total
{
hipLaunchKernelGGL(( q_var_derivatives_innerloop_cuda), dim3(grid), dim3(threads), 0, stream, globaldata_d, numPoints, power, tempdq_d, threads);
hipLaunchKernelGGL(( q_var_derivatives_update_innerloop_cuda), dim3(grid), dim3(threads), 0, stream, globaldata_d, tempdq_d, threads);
}
hipLaunchKernelGGL(( cal_flux_residual_cuda), dim3(grid), dim3(threads), 0, stream, globaldata_d, numPoints, configData, threads);
hipStreamEndCapture(stream, &graph);
hipGraphInstantiate(&instance, graph, NULL, NULL, 0);
graphCreated = true;
}
hipGraphLaunch(instance, stream);
hipStreamSynchronize(stream);
    // Remember that stream capture cannot handle device synchronize commands
checkCudaErrors(hipMemcpyAsync(res_old_d, res_old, mem_size_C, hipMemcpyHostToDevice, stream));
checkCudaErrors(hipMemcpyAsync(res_sqr_d, res_sqr, mem_size_D, hipMemcpyHostToDevice, stream));
hipLaunchKernelGGL(( state_update_cuda), dim3(grid), dim3(threads), 0, stream, globaldata_d, numPoints, configData, iter, res_old_d, rk, rks, res_sqr_d, threads);
hipDeviceSynchronize();
checkCudaErrors(hipMemcpyAsync(res_old, res_old_d, mem_size_C, hipMemcpyDeviceToHost, stream));
//checkCudaErrors(hipMemcpyAsync(res_sqr, res_sqr_d, mem_size_D, hipMemcpyDeviceToHost, stream));
// double sig_res_sqr = 0.0;
// for(int i=0; i<numPoints; i++)
// sig_res_sqr+=res_sqr[i];
double sig_res_sqr = thrust::reduce(thrust::hip::par.on(stream), res_sqr_d, res_sqr_d + numPoints, (double) 0.0, thrust::plus<double>());
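    // residue: L2 norm of the per-point residuals, scaled by the point count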
double res_new = sqrt(sig_res_sqr)/numPoints;
double residue = 0.0;
if(iter<=1)
{
res_old[0] = res_new;
residue = 0.0;
}
else
residue = log10(res_new/res_old[0]);
if(rk == rks-1)
cout<<std::fixed<<std::setprecision(17)<<"\n Residue: "<<iter+1<<" "<<residue<<endl;
}
__global__ void q_var_derivatives_cuda(Point* globaldata, int numPoints, double power, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
double sig_del_x_del_q[4], sig_del_y_del_q[4], min_q[4], max_q[4];
if(idx < numPoints)
{
double x_i = globaldata[idx].x;
double y_i = globaldata[idx].y;
double sig_del_x_sqr = 0.0;
double sig_del_y_sqr = 0.0;
double sig_del_x_del_y = 0.0;
#pragma unroll
for(int i=0; i<4; i++)
{
sig_del_x_del_q[i] = 0.0;
sig_del_y_del_q[i] = 0.0;
}
#pragma unroll
for(int i=0; i<4; i++)
{
max_q[i] = globaldata[idx].q[i];
min_q[i] = globaldata[idx].q[i];
}
#pragma unroll
for(int i=0; i<20; i++)
{
int conn = globaldata[idx].conn[i];
if(conn == 0)
{
break;
}
conn = conn - 1; // To account for the indexing difference
double x_k = globaldata[conn].x;
double y_k = globaldata[conn].y;
double delta_x = x_k - x_i;
double delta_y = y_k - y_i;
double dist = hypot(delta_x, delta_y);
double weights = pow(dist, power);
sig_del_x_sqr += ((delta_x * delta_x) * weights);
sig_del_y_sqr += ((delta_y * delta_y) * weights);
sig_del_x_del_y += ((delta_x * delta_y) * weights);
#pragma unroll
for(int iter=0; iter<4; iter++)
{
double intermediate_var = weights * (globaldata[conn].q[iter] - globaldata[idx].q[iter]);
sig_del_x_del_q[iter] = sig_del_x_del_q[iter] + (delta_x * intermediate_var);
sig_del_y_del_q[iter] = sig_del_y_del_q[iter] + (delta_y * intermediate_var);
}
#pragma unroll
for(int j=0; j<4; j++)
{
if (max_q[j] < globaldata[conn].q[j])
{
max_q[j] = globaldata[conn].q[j];
}
if(min_q[j] > globaldata[conn].q[j])
{
min_q[j] = globaldata[conn].q[j];
}
}
}
#pragma unroll
for(int i=0; i<4; i++)
{
globaldata[idx].max_q[i] = max_q[i];
globaldata[idx].min_q[i] = min_q[i];
}
double det = (sig_del_x_sqr * sig_del_y_sqr) - (sig_del_x_del_y * sig_del_x_del_y);
double one_by_det = 1.0/det;
#pragma unroll
for(int iter=0; iter<4; iter++)
{
globaldata[idx].dq1[iter] = one_by_det * (sig_del_x_del_q[iter] * sig_del_y_sqr - sig_del_y_del_q[iter] * sig_del_x_del_y);
globaldata[idx].dq2[iter] = one_by_det * (sig_del_y_del_q[iter] * sig_del_x_sqr - sig_del_x_del_q[iter] * sig_del_x_del_y);
}
}
}
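// Editor's note: the dq1/dq2 expressions above are the Cramer's-rule solution of the
// 2x2 weighted least-squares normal equations for the gradient of each q-component,
//
//   [ Sxx  Sxy ] [ dq/dx ]   [ Sxq ]        Sxx = sum w*dx^2,  Sxy = sum w*dx*dy,
//   [ Sxy  Syy ] [ dq/dy ] = [ Syq ]  with  Syy = sum w*dy^2,  Sxq = sum w*dx*dq,
//                                           Syq = sum w*dy*dq, w   = dist^power,
// so dq/dx = (Sxq*Syy - Syq*Sxy)/det, dq/dy = (Syq*Sxx - Sxq*Sxy)/det,
// det = Sxx*Syy - Sxy^2. This mirrors the code and adds no new behaviour.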
__global__ void q_var_derivatives_innerloop_cuda(Point* globaldata, int numPoints, double power, TempqDers* tempdq, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
double sig_del_x_del_q[4], sig_del_y_del_q[4], qi_tilde[4] ={0}, qk_tilde[4] = {0};
if(idx <numPoints)
{
double x_i = globaldata[idx].x;
double y_i = globaldata[idx].y;
double sig_del_x_sqr = 0.0;
double sig_del_y_sqr = 0.0;
double sig_del_x_del_y = 0.0;
#pragma unroll
for(int i=0; i<4; i++)
{
sig_del_x_del_q[i] = 0.0;
sig_del_y_del_q[i] = 0.0;
}
#pragma unroll
for(int i=0; i<20; i++)
{
int conn = globaldata[idx].conn[i];
if(conn == 0) break;
conn = conn - 1;
double x_k = globaldata[conn].x;
double y_k = globaldata[conn].y;
double delta_x = x_k - x_i;
double delta_y = y_k - y_i;
double dist = hypot(delta_x, delta_y);
double weights = pow(dist, power);
sig_del_x_sqr += ((delta_x * delta_x) * weights);
sig_del_y_sqr += ((delta_y * delta_y) * weights);
sig_del_x_del_y += ((delta_x * delta_y) * weights);
q_var_derivatives_get_sum_delq_innerloop(globaldata, idx, conn, weights, delta_x, delta_y, qi_tilde, qk_tilde, sig_del_x_del_q, sig_del_y_del_q);
}
double det = (sig_del_x_sqr * sig_del_y_sqr) - (sig_del_x_del_y * sig_del_x_del_y);
double one_by_det = 1.0/det;
#pragma unroll
for(int iter =0; iter<4; iter++)
{
tempdq[idx].dq1[iter] = one_by_det * (sig_del_x_del_q[iter] * sig_del_y_sqr - sig_del_y_del_q[iter] * sig_del_x_del_y);
tempdq[idx].dq2[iter] = one_by_det * (sig_del_y_del_q[iter] * sig_del_x_sqr - sig_del_x_del_q[iter] * sig_del_x_del_y);
}
}
}
__device__ inline void q_var_derivatives_get_sum_delq_innerloop(Point* globaldata, int idx, int conn, double weights, double delta_x, double delta_y, double qi_tilde[4], double qk_tilde[4], double sig_del_x_del_q[4], double sig_del_y_del_q[4])
{
#pragma unroll
for(int iter=0; iter<4; iter++)
{
qi_tilde[iter] = globaldata[idx].q[iter] - 0.5 * (delta_x * globaldata[idx].dq1[iter] + delta_y * globaldata[idx].dq2[iter]);
qk_tilde[iter] = globaldata[conn].q[iter] - 0.5 * (delta_x * globaldata[conn].dq1[iter] + delta_y * globaldata[conn].dq2[iter]);
double intermediate_var = weights * (qk_tilde[iter] - qi_tilde[iter]);
sig_del_x_del_q[iter] += (delta_x * intermediate_var);
sig_del_y_del_q[iter] += (delta_y * intermediate_var);
}
}
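// Editor's note: qi_tilde/qk_tilde above reconstruct q at the midpoint of the (i,k)
// segment from each endpoint's current gradient, q_i - 0.5*(dx*dq1_i + dy*dq2_i), so
// the inner-loop least-squares solve refines dq1/dq2 against midpoint differences
// rather than raw nodal differences -- a defect-correction style iteration.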
__global__ void q_var_derivatives_update_innerloop_cuda(Point* globaldata, int numPoints, TempqDers* tempdq, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
// bounds check added: the grid is rounded up (numPoints/threads.x + 1 blocks), so the
// surplus threads would otherwise read/write past the end of globaldata and tempdq
if(idx < numPoints)
{
#pragma unroll
for(int iter=0; iter<4; iter++)
{
globaldata[idx].dq1[iter] = tempdq[idx].dq1[iter];
globaldata[idx].dq2[iter] = tempdq[idx].dq2[iter];
}
}
}
| 50c47380b717edec0ecc8e71a6b3d354879c9560.cu | #include "core_cuda.hpp"
#include "point.hpp"
#include "state_update_cuda.hpp"
//#include "state_update.hpp"
#include "flux_residual_cuda.hpp"
#include "utils.hpp"
#include<thrust/reduce.h>
#include <thrust/system/cuda/execution_policy.h>
__device__ inline void q_var_derivatives_get_sum_delq_innerloop(Point* globaldata, int idx, int conn, double weights, double delta_x, double delta_y, double qi_tilde[4], double qk_tilde[4], double sig_del_x_del_q[4], double sig_del_y_del_q[4]);
template <class Type>
bool isNan(Type var)
{
if(var!=var) return true;
return false;
}
double calculateTheta(Config configData)
{
return (configData.core.aoa * (M_PI)/180.0);
}
void getInitialPrimitive(Config configData, double primal[4])
{
primal[0] = configData.core.rho_inf;
double mach = configData.core.mach;
double machcos = mach * cos(calculateTheta(configData));
double machsin = mach * sin(calculateTheta(configData));
primal[1] = machcos;
primal[2] = machsin;
primal[3] = configData.core.pr_inf;
}
void placeNormals(Point* globaldata, int idx, Config configData, long long interior, long long wall, long long outer)
{
int flag = globaldata[idx].flag_1;
if (flag == wall || flag == outer)
{
xy_tuple currpt = getxy(globaldata[idx]);
int leftpt_tmp = globaldata[idx].left;
leftpt_tmp = leftpt_tmp - 1; // To account for indexing
xy_tuple leftpt = getxy(globaldata[leftpt_tmp]);
int rightpt_tmp = globaldata[idx].right;
rightpt_tmp = rightpt_tmp - 1; // To account for indexing
xy_tuple rightpt = getxy(globaldata[rightpt_tmp]);
xy_tuple normals = calculateNormals(leftpt, rightpt, std::get<0>(currpt), std::get<1>(currpt));
setNormals(globaldata, idx, normals);
}
else if (flag == interior)
setNormals(globaldata, idx, std::make_tuple(0.0, 1.0));
else
cout<<"Illegal Point Type"<<endl;
}
xy_tuple calculateNormals(xy_tuple left, xy_tuple right, double mx, double my)
{
double lx = std::get<0>(left);
double ly = std::get<1>(left);
double rx = std::get<0>(right);
double ry = std::get<1>(right);
double nx1 = my - ly;
double nx2 = ry - my;
double ny1 = mx - lx;
double ny2 = rx - mx;
double nx = 0.5*(nx1 + nx2);
double ny = 0.5*(ny1 + ny2);
double det = hypot(nx, ny);
nx = -nx/det;
ny = ny/det;
return std::make_tuple(nx, ny);
}
void calculateConnectivity(Point* globaldata, int idx)
{
Point ptInterest = globaldata[idx];
double currx = ptInterest.x;
double curry = ptInterest.y;
double nx = ptInterest.nx;
double ny = ptInterest.ny;
int flag = ptInterest.flag_1;
double tx = ny;
double ty = -nx;
int xpos_nbhs = 0;
int xneg_nbhs = 0;
int ypos_nbhs = 0;
int yneg_nbhs = 0;
int xpos_conn[20] = {0};
int ypos_conn[20] = {0};
int xneg_conn[20] = {0};
int yneg_conn[20] = {0};
// /* Start Connectivity Generation */
for (int i=0; i<20; i++)
{
int itm = ptInterest.conn[i];
if (itm==0)
{
//cout<<"\n Breaking"<<endl;
break;
}
itm = itm -1; // to account for indexing
//cout<< "\n Unbroken \n";
double itmx = globaldata[itm].x;
double itmy = globaldata[itm].y;
double delta_x = itmx - currx;
double delta_y = itmy - curry;
double delta_s = delta_x*tx + delta_y*ty;
double delta_n = delta_x*nx + delta_y*ny;
itm = itm + 1; // re-add 1 so the stored neighbour index keeps the original 1-based numbering used by the connectivity arrays below
if(delta_s <= 0.0)
{
xpos_conn[xpos_nbhs] = itm;
xpos_nbhs+=1;
}
if(delta_s >= 0.0)
{
xneg_conn[xneg_nbhs] = itm;
xneg_nbhs+=1;
}
if(flag==1)
{
if(delta_n<=0.0)
{
ypos_conn[ypos_nbhs] = itm;
ypos_nbhs+=1;
}
if(delta_n>=0.0)
{
yneg_conn[yneg_nbhs] = itm;
yneg_nbhs+=1;
}
}
else if (flag==0)
{
yneg_conn[yneg_nbhs] = itm;
yneg_nbhs+=1;
}
else if (flag==2)
{
ypos_conn[ypos_nbhs] = itm;
ypos_nbhs+=1;
}
}
/* End Connectivity Generation */
for(int i=0; i<20; i++)
{
globaldata[idx].xpos_conn[i] = xpos_conn[i];
globaldata[idx].xneg_conn[i] = xneg_conn[i];
globaldata[idx].ypos_conn[i] = ypos_conn[i];
globaldata[idx].yneg_conn[i] = yneg_conn[i];
}
globaldata[idx].xpos_nbhs = xpos_nbhs;
globaldata[idx].xneg_nbhs = xneg_nbhs;
globaldata[idx].ypos_nbhs = ypos_nbhs;
globaldata[idx].yneg_nbhs = yneg_nbhs;
}
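// Editor's worked example for the classification above: a wall/outer point with
// normal (nx, ny) = (0, 1) has tangent (tx, ty) = (ny, -nx) = (1, 0). A neighbour at
// delta = (+0.3, -0.1) gives delta_s = 0.3 and delta_n = -0.1, so it lands in the
// xneg stencil (delta_s >= 0) and, for an interior point (flag == 1), also in the
// ypos stencil (delta_n <= 0). Neighbours with delta_s == 0 are deliberately stored
// in both one-sided x stencils.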
void fpi_solver(int iter, Point* globaldata_d, Config configData, double* res_old_d, double* res_sqr_d, int numPoints, TempqDers* tempdq_d, cudaStream_t& stream, cudaGraph_t& graph, cudaGraphExec_t& instance, bool& graphCreated, double res_old[1], double* res_sqr, unsigned int mem_size_C, unsigned int mem_size_D)
{
int block_size = configData.core.threadsperblock;
dim3 threads(block_size);
dim3 grid((numPoints / threads.x +1));
if (iter == 0)
cout<<"\nStarting FuncDelta"<<endl;
int rks = configData.core.rks;
double cfl = configData.core.cfl;
double power = configData.core.power;
// if(!graphCreated)
// {
// cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
call_func_delta_cuda<<<grid, threads, 0, stream>>>(globaldata_d, numPoints, cfl, threads);
// cudaStreamEndCapture(stream, &graph);
// cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
// graphCreated = true;
// }
// cudaGraphLaunch(instance, stream);
// cudaStreamSynchronize(stream);
for(int rk=0; rk<rks; rk++)
{
call_rem_fpi_solver_cuda(globaldata_d, numPoints, power, tempdq_d, block_size, configData, res_old_d, res_sqr_d, iter, rk, rks, threads, grid, stream, graph, instance, graphCreated, res_old, res_sqr, mem_size_C, mem_size_D);
}
}
__global__ void q_variables_cuda(Point* globaldata, int numPoints, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
double q_result[4] = {0};
if(idx < numPoints)
{
double rho = globaldata[idx].prim[0];
double u1 = globaldata[idx].prim[1];
double u2 = globaldata[idx].prim[2];
double pr = globaldata[idx].prim[3];
double beta = 0.5 * (rho/pr);
double two_times_beta = 2.0 * beta;
q_result[0] = log(rho) + log(beta) * 2.5 - (beta * ((u1 * u1) + (u2 * u2)));
q_result[1] = (two_times_beta * u1);
q_result[2] = (two_times_beta * u2);
q_result[3] = -two_times_beta;
for(int i=0; i<4; i++)
{
globaldata[idx].q[i] = q_result[i];
}
}
}
void call_rem_fpi_solver_cuda(Point* globaldata_d, int numPoints, double power, TempqDers* tempdq_d, int block_size, Config configData, double* res_old_d, double* res_sqr_d, int iter, int rk, int rks, dim3 threads, dim3 grid, cudaStream_t& stream, cudaGraph_t& graph, cudaGraphExec_t& instance, bool& graphCreated, double res_old[1], double* res_sqr, unsigned int mem_size_C, unsigned int mem_size_D)
{
if(!graphCreated)
{
cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
// Make the kernel calls
q_variables_cuda<<<grid, threads, 0, stream>>>(globaldata_d, numPoints, threads);
q_var_derivatives_cuda<<<grid, threads, 0, stream>>>(globaldata_d, numPoints, power, threads);
// cudaStreamEndCapture(stream, &graph);
// cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
// graphCreated = true;
// }
// cudaGraphLaunch(instance, stream);
// cudaStreamSynchronize(stream);
for(int inner_iters=0; inner_iters<2; inner_iters++) // two inner iterations; together with the initial q_var_derivatives pass this gives three derivative evaluations in total
{
q_var_derivatives_innerloop_cuda<<<grid, threads, 0, stream>>>(globaldata_d, numPoints, power, tempdq_d, threads);
q_var_derivatives_update_innerloop_cuda<<<grid, threads, 0, stream>>>(globaldata_d, numPoints, tempdq_d, threads);
}
cal_flux_residual_cuda<<<grid, threads, 0, stream>>>(globaldata_d, numPoints, configData, threads);
cudaStreamEndCapture(stream, &graph);
cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
graphCreated = true;
}
cudaGraphLaunch(instance, stream);
cudaStreamSynchronize(stream);
// Remember that stream capture cannot handle device-synchronize commands
checkCudaErrors(cudaMemcpyAsync(res_old_d, res_old, mem_size_C, cudaMemcpyHostToDevice, stream));
checkCudaErrors(cudaMemcpyAsync(res_sqr_d, res_sqr, mem_size_D, cudaMemcpyHostToDevice, stream));
state_update_cuda<<<grid, threads, 0, stream>>>(globaldata_d, numPoints, configData, iter, res_old_d, rk, rks, res_sqr_d, threads);
cudaDeviceSynchronize();
checkCudaErrors(cudaMemcpyAsync(res_old, res_old_d, mem_size_C, cudaMemcpyDeviceToHost, stream));
//checkCudaErrors(cudaMemcpyAsync(res_sqr, res_sqr_d, mem_size_D, cudaMemcpyDeviceToHost, stream));
// double sig_res_sqr = 0.0;
// for(int i=0; i<numPoints; i++)
// sig_res_sqr+=res_sqr[i];
double sig_res_sqr = thrust::reduce(thrust::cuda::par.on(stream), res_sqr_d, res_sqr_d + numPoints, (double) 0.0, thrust::plus<double>());
double res_new = sqrt(sig_res_sqr)/numPoints;
double residue = 0.0;
if(iter<=1)
{
res_old[0] = res_new;
residue = 0.0;
}
else
residue = log10(res_new/res_old[0]);
if(rk == rks-1)
cout<<std::fixed<<std::setprecision(17)<<"\n Residue: "<<iter+1<<" "<<residue<<endl;
}
__global__ void q_var_derivatives_cuda(Point* globaldata, int numPoints, double power, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
double sig_del_x_del_q[4], sig_del_y_del_q[4], min_q[4], max_q[4];
if(idx < numPoints)
{
double x_i = globaldata[idx].x;
double y_i = globaldata[idx].y;
double sig_del_x_sqr = 0.0;
double sig_del_y_sqr = 0.0;
double sig_del_x_del_y = 0.0;
#pragma unroll
for(int i=0; i<4; i++)
{
sig_del_x_del_q[i] = 0.0;
sig_del_y_del_q[i] = 0.0;
}
#pragma unroll
for(int i=0; i<4; i++)
{
max_q[i] = globaldata[idx].q[i];
min_q[i] = globaldata[idx].q[i];
}
#pragma unroll
for(int i=0; i<20; i++)
{
int conn = globaldata[idx].conn[i];
if(conn == 0)
{
break;
}
conn = conn - 1; // To account for the indexing difference
double x_k = globaldata[conn].x;
double y_k = globaldata[conn].y;
double delta_x = x_k - x_i;
double delta_y = y_k - y_i;
double dist = hypot(delta_x, delta_y);
double weights = pow(dist, power);
sig_del_x_sqr += ((delta_x * delta_x) * weights);
sig_del_y_sqr += ((delta_y * delta_y) * weights);
sig_del_x_del_y += ((delta_x * delta_y) * weights);
#pragma unroll
for(int iter=0; iter<4; iter++)
{
double intermediate_var = weights * (globaldata[conn].q[iter] - globaldata[idx].q[iter]);
sig_del_x_del_q[iter] = sig_del_x_del_q[iter] + (delta_x * intermediate_var);
sig_del_y_del_q[iter] = sig_del_y_del_q[iter] + (delta_y * intermediate_var);
}
#pragma unroll
for(int j=0; j<4; j++)
{
if (max_q[j] < globaldata[conn].q[j])
{
max_q[j] = globaldata[conn].q[j];
}
if(min_q[j] > globaldata[conn].q[j])
{
min_q[j] = globaldata[conn].q[j];
}
}
}
#pragma unroll
for(int i=0; i<4; i++)
{
globaldata[idx].max_q[i] = max_q[i];
globaldata[idx].min_q[i] = min_q[i];
}
double det = (sig_del_x_sqr * sig_del_y_sqr) - (sig_del_x_del_y * sig_del_x_del_y);
double one_by_det = 1.0/det;
#pragma unroll
for(int iter=0; iter<4; iter++)
{
globaldata[idx].dq1[iter] = one_by_det * (sig_del_x_del_q[iter] * sig_del_y_sqr - sig_del_y_del_q[iter] * sig_del_x_del_y);
globaldata[idx].dq2[iter] = one_by_det * (sig_del_y_del_q[iter] * sig_del_x_sqr - sig_del_x_del_q[iter] * sig_del_x_del_y);
}
}
}
__global__ void q_var_derivatives_innerloop_cuda(Point* globaldata, int numPoints, double power, TempqDers* tempdq, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
double sig_del_x_del_q[4], sig_del_y_del_q[4], qi_tilde[4] ={0}, qk_tilde[4] = {0};
if(idx <numPoints)
{
double x_i = globaldata[idx].x;
double y_i = globaldata[idx].y;
double sig_del_x_sqr = 0.0;
double sig_del_y_sqr = 0.0;
double sig_del_x_del_y = 0.0;
#pragma unroll
for(int i=0; i<4; i++)
{
sig_del_x_del_q[i] = 0.0;
sig_del_y_del_q[i] = 0.0;
}
#pragma unroll
for(int i=0; i<20; i++)
{
int conn = globaldata[idx].conn[i];
if(conn == 0) break;
conn = conn - 1;
double x_k = globaldata[conn].x;
double y_k = globaldata[conn].y;
double delta_x = x_k - x_i;
double delta_y = y_k - y_i;
double dist = hypot(delta_x, delta_y);
double weights = pow(dist, power);
sig_del_x_sqr += ((delta_x * delta_x) * weights);
sig_del_y_sqr += ((delta_y * delta_y) * weights);
sig_del_x_del_y += ((delta_x * delta_y) * weights);
q_var_derivatives_get_sum_delq_innerloop(globaldata, idx, conn, weights, delta_x, delta_y, qi_tilde, qk_tilde, sig_del_x_del_q, sig_del_y_del_q);
}
double det = (sig_del_x_sqr * sig_del_y_sqr) - (sig_del_x_del_y * sig_del_x_del_y);
double one_by_det = 1.0/det;
#pragma unroll
for(int iter =0; iter<4; iter++)
{
tempdq[idx].dq1[iter] = one_by_det * (sig_del_x_del_q[iter] * sig_del_y_sqr - sig_del_y_del_q[iter] * sig_del_x_del_y);
tempdq[idx].dq2[iter] = one_by_det * (sig_del_y_del_q[iter] * sig_del_x_sqr - sig_del_x_del_q[iter] * sig_del_x_del_y);
}
}
}
__device__ inline void q_var_derivatives_get_sum_delq_innerloop(Point* globaldata, int idx, int conn, double weights, double delta_x, double delta_y, double qi_tilde[4], double qk_tilde[4], double sig_del_x_del_q[4], double sig_del_y_del_q[4])
{
#pragma unroll
for(int iter=0; iter<4; iter++)
{
qi_tilde[iter] = globaldata[idx].q[iter] - 0.5 * (delta_x * globaldata[idx].dq1[iter] + delta_y * globaldata[idx].dq2[iter]);
qk_tilde[iter] = globaldata[conn].q[iter] - 0.5 * (delta_x * globaldata[conn].dq1[iter] + delta_y * globaldata[conn].dq2[iter]);
double intermediate_var = weights * (qk_tilde[iter] - qi_tilde[iter]);
sig_del_x_del_q[iter] += (delta_x * intermediate_var);
sig_del_y_del_q[iter] += (delta_y * intermediate_var);
}
}
__global__ void q_var_derivatives_update_innerloop_cuda(Point* globaldata, int numPoints, TempqDers* tempdq, dim3 thread_dim)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx*thread_dim.x + tx;
// bounds check added: the grid is rounded up (numPoints/threads.x + 1 blocks), so the
// surplus threads would otherwise read/write past the end of globaldata and tempdq
if(idx < numPoints)
{
#pragma unroll
for(int iter=0; iter<4; iter++)
{
globaldata[idx].dq1[iter] = tempdq[idx].dq1[iter];
globaldata[idx].dq2[iter] = tempdq[idx].dq2[iter];
}
}
}
|
4f08207d434614e93eb46b40443e72184465d2b8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 30
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,hipReadModeElementType> texmem1;
texture<float,1,hipReadModeElementType> texmem2;
texture<float,1,hipReadModeElementType> texmem3;
texture<float,1,hipReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int); // prototype fixed to match the float* definition below
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
hipError_t err = hipGetLastError();
if (hipSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, hipGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x; // fixed: was blockIdx.x*blockIdx.x, which duplicates ids across blocks
float Value1=0;
float Value2=0;
__shared__ float I1[THREADS_PER_BLOCK];
__shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 += log2(sum);
Value2 += ConstArray1[(tid+i)%THREADS_PER_BLOCK] + log2(I1[(i+j)%THREADS_PER_BLOCK]);
sum+=tex1Dfetch(texmem2,tid*j);
sum+=tex1Dfetch(texmem3,tid*j);
Value1 += exp(sum);
Value2 += ConstArray2[(tid)%THREADS_PER_BLOCK] + log2(I2[(i+j)%THREADS_PER_BLOCK]);
sum+=tex1Dfetch(texmem4,tid*j);
}
A[tid*2] = sum;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
srand((unsigned)time(0)); // seed once; reseeding inside the loop reproduces the same value
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = rand() / (float)RAND_MAX; // float division; the original integer division is almost always 0
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = rand() / (float)RAND_MAX;
}
hipMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
hipMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
hipMalloc((void**) &device_texture1, size1);
hipMalloc((void**) &device_texture2, size1);
hipMalloc((void**) &device_texture3, size1);
hipMalloc((void**) &device_texture4, size1);
hipMemcpy(device_texture1, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture2, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture3, host_texture1, size1, hipMemcpyHostToDevice);
hipMemcpy(device_texture4, host_texture1, size1, hipMemcpyHostToDevice);
hipBindTexture(0, texmem1, device_texture1, size1);
hipBindTexture(0, texmem2, device_texture2, size1);
hipBindTexture(0, texmem3, device_texture3, size1);
hipBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( hipMalloc((void**)&d_A1, size1) );
checkCudaErrors( hipMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( hipMemcpy(d_A1, h_A1, size1, hipMemcpyHostToDevice) );
checkCudaErrors( hipMemcpy(d_A2, h_A2, size1, hipMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( hipDeviceSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
hipLaunchKernelGGL(( PowerKernal1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_A1, d_A2, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( hipDeviceSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( hipDeviceSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( hipDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
hipFree(d_A1);
if (d_A2)
hipFree(d_A2);
if (d_A3)
hipFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0)); // seed once outside the loop
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // float division
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
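// Editor's sketch (illustrative, not in the original benchmark): the cutil timers
// used above ship only with the long-obsolete CUDA SDK samples; on current
// toolchains the same measurement can be made with hipEvent_t, e.g.:
/*
hipEvent_t t0, t1;
hipEventCreate(&t0);  hipEventCreate(&t1);
hipEventRecord(t0, 0);
hipLaunchKernelGGL((PowerKernal1), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A1, d_A2, N);
hipEventRecord(t1, 0);
hipEventSynchronize(t1);
float ms = 0.0f;
hipEventElapsedTime(&ms, t0, t1);
printf("kernel time = %f ms\n", ms);
hipEventDestroy(t0);  hipEventDestroy(t1);
*/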
| 4f08207d434614e93eb46b40443e72184465d2b8.cu | #include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>
//NI DAQ
#include "../include/ContAcq-IntClk.h"
#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 60
#define ITERATIONS 30
// Variables
bool noprompt = false;
unsigned int my_timer;
texture<float,1,cudaReadModeElementType> texmem1;
texture<float,1,cudaReadModeElementType> texmem2;
texture<float,1,cudaReadModeElementType> texmem3;
texture<float,1,cudaReadModeElementType> texmem4;
__constant__ float ConstArray1[THREADS_PER_BLOCK];
__constant__ float ConstArray2[THREADS_PER_BLOCK];
// Functions
void CleanupResources(void);
void RandomInit_int(float*, int); // prototype fixed to match the float* definition below
void RandomInit_fp(float*, int);
void ParseArguments(int, char**);
////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err){
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err){
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// end of CUDA Helper Functions
// Device code
__global__ void PowerKernal1(float *A, float *B, int N)
{
int tid = blockIdx.x*blockDim.x + threadIdx.x; // fixed: was blockIdx.x*blockIdx.x, which duplicates ids across blocks
float Value1=0;
float Value2=0;
__shared__ float I1[THREADS_PER_BLOCK];
__shared__ float I2[THREADS_PER_BLOCK];
I1[tid%THREADS_PER_BLOCK] = A[tid];
I2[tid%THREADS_PER_BLOCK] = B[tid];
__syncthreads();
float sum = 0.0;
if(tid < N){
for(unsigned i=0; i<ITERATIONS; ++i){
sum = tex1Dfetch(texmem1,tid)+B[tid];
for(unsigned j=1; j<=2; ++j){
sum+=tex1Dfetch(texmem1,tid*j);
Value1 += log2(sum);
Value2 += ConstArray1[(tid+i)%THREADS_PER_BLOCK] + log2(I1[(i+j)%THREADS_PER_BLOCK]);
sum+=tex1Dfetch(texmem2,tid*j);
sum+=tex1Dfetch(texmem3,tid*j);
Value1 += exp(sum);
Value2 += ConstArray2[(tid)%THREADS_PER_BLOCK] + log2(I2[(i+j)%THREADS_PER_BLOCK]);
sum+=tex1Dfetch(texmem4,tid*j);
}
A[tid*2] = sum;
B[tid] = A[tid*2]+A[tid];
}
}
}
__global__ void PowerKernalEmpty(unsigned* C, int N)
{
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
//Do Some Computation
__syncthreads();
// Excessive Mod/Div Operations
for(unsigned long k=0; k<ITERATIONS*(blockDim.x + 299);k++) {
//Value1=(I1)+k;
//Value2=(I2)+k;
//Value3=(Value2)+k;
//Value2=(Value1)+k;
/*
__asm volatile (
"B0: bra.uni B1;\n\t"
"B1: bra.uni B2;\n\t"
"B2: bra.uni B3;\n\t"
"B3: bra.uni B4;\n\t"
"B4: bra.uni B5;\n\t"
"B5: bra.uni B6;\n\t"
"B6: bra.uni B7;\n\t"
"B7: bra.uni B8;\n\t"
"B8: bra.uni B9;\n\t"
"B9: bra.uni B10;\n\t"
"B10: bra.uni B11;\n\t"
"B11: bra.uni B12;\n\t"
"B12: bra.uni B13;\n\t"
"B13: bra.uni B14;\n\t"
"B14: bra.uni B15;\n\t"
"B15: bra.uni B16;\n\t"
"B16: bra.uni B17;\n\t"
"B17: bra.uni B18;\n\t"
"B18: bra.uni B19;\n\t"
"B19: bra.uni B20;\n\t"
"B20: bra.uni B21;\n\t"
"B21: bra.uni B22;\n\t"
"B22: bra.uni B23;\n\t"
"B23: bra.uni B24;\n\t"
"B24: bra.uni B25;\n\t"
"B25: bra.uni B26;\n\t"
"B26: bra.uni B27;\n\t"
"B27: bra.uni B28;\n\t"
"B28: bra.uni B29;\n\t"
"B29: bra.uni B30;\n\t"
"B30: bra.uni B31;\n\t"
"B31: bra.uni LOOP;\n\t"
"LOOP:"
);
*/
}
C[id]=id;
__syncthreads();
}
// Host code
float *h_A1, *h_A2, *h_A3;
float *d_A1, *d_A2, *d_A3;
int main()
{
printf("Power Microbenchmarks\n");
float array1[THREADS_PER_BLOCK];
srand((unsigned)time(0)); // seed once; reseeding inside the loop reproduces the same value
for(int i=0; i<THREADS_PER_BLOCK;i++){
array1[i] = rand() / (float)RAND_MAX; // float division; the original integer division is almost always 0
}
float array2[THREADS_PER_BLOCK];
for(int i=0; i<THREADS_PER_BLOCK;i++){
array2[i] = rand() / (float)RAND_MAX;
}
cudaMemcpyToSymbol("ConstArray1", array1, sizeof(float) * THREADS_PER_BLOCK );
cudaMemcpyToSymbol("ConstArray2", array2, sizeof(float) * THREADS_PER_BLOCK );
int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS*2;
// Allocate input vectors h_A and h_B in host memory
size_t size1 = N * sizeof(float);
h_A1 = (float*)malloc(size1);
if (h_A1 == 0) CleanupResources();
h_A2 = (float*)malloc(size1);
if (h_A2 == 0) CleanupResources();
float *host_texture1 = (float*) malloc(size1);
for (int i=0; i< N; i++) {
host_texture1[i] = i;
}
float *device_texture1;
float *device_texture2;
float *device_texture3;
float *device_texture4;
cudaMalloc((void**) &device_texture1, size1);
cudaMalloc((void**) &device_texture2, size1);
cudaMalloc((void**) &device_texture3, size1);
cudaMalloc((void**) &device_texture4, size1);
cudaMemcpy(device_texture1, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture2, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture3, host_texture1, size1, cudaMemcpyHostToDevice);
cudaMemcpy(device_texture4, host_texture1, size1, cudaMemcpyHostToDevice);
cudaBindTexture(0, texmem1, device_texture1, size1);
cudaBindTexture(0, texmem2, device_texture2, size1);
cudaBindTexture(0, texmem3, device_texture3, size1);
cudaBindTexture(0, texmem4, device_texture4, size1);
dim3 dimGrid2(1,1);
dim3 dimBlock2(1,1);
// Initialize input vectors
RandomInit_fp(h_A1, N);
RandomInit_fp(h_A2, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A1, size1) );
checkCudaErrors( cudaMalloc((void**)&d_A2, size1) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A1, h_A1, size1, cudaMemcpyHostToDevice) );
checkCudaErrors( cudaMemcpy(d_A2, h_A2, size1, cudaMemcpyHostToDevice) );
dim3 dimGrid(NUM_OF_BLOCKS,1);
dim3 dimBlock(THREADS_PER_BLOCK,1);
CUT_SAFE_CALL(cutCreateTimer(&my_timer));
TaskHandle taskhandle = LaunchDAQ();
CUT_SAFE_CALL(cutStartTimer(my_timer));
CUDA_SAFE_CALL( cudaThreadSynchronize() );
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
PowerKernal1<<<dimGrid,dimBlock>>>(d_A1, d_A2, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
//PowerKernalEmpty<<<dimGrid2,dimBlock2>>>(d_A3, N);
CUDA_SAFE_CALL( cudaThreadSynchronize() );
printf("execution time = %f\n", cutGetTimerValue(my_timer));
getLastCudaError("kernel launch failure");
CUDA_SAFE_CALL( cudaThreadSynchronize() );
CUT_SAFE_CALL(cutStopTimer(my_timer));
TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
printf("execution time = %f\n", cutGetTimerValue(my_timer));
CUT_SAFE_CALL(cutDeleteTimer(my_timer));
#ifdef _DEBUG
checkCudaErrors( cudaDeviceSynchronize() );
#endif
// Copy result from device memory to host memory
CleanupResources();
return 0;
}
void CleanupResources(void)
{
// Free device memory
if (d_A1)
cudaFree(d_A1);
if (d_A2)
cudaFree(d_A2);
if (d_A3)
cudaFree(d_A3);
// Free host memory
if (h_A1)
free(h_A1);
if (h_A2)
free(h_A2);
if (h_A3)
free(h_A3);
}
// Allocates an array with random float entries.
void RandomInit_int(float* data, int n)
{
srand((unsigned)time(0)); // seed once outside the loop
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX; // float division
}
}
void RandomInit_fp(float* data, int n)
{
for (int i = 0; i < n; ++i){
data[i] = rand() / (float)RAND_MAX;
}
}
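// Editor's sketch (not in the original benchmark): the texture *references*
// texmem1..texmem4 bound in main() were removed in CUDA 12. On a CUDA >= 5 toolchain
// the replacement is a cudaTextureObject_t created once and passed to the kernel as
// an argument; the helper name is illustrative and <cstring> is assumed for memset.
cudaTextureObject_t makeLinearFloatTexture(float* devPtr, size_t bytes)
{
	cudaResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = cudaResourceTypeLinear;
	resDesc.res.linear.devPtr = devPtr;
	resDesc.res.linear.desc = cudaCreateChannelDesc<float>();
	resDesc.res.linear.sizeInBytes = bytes;
	cudaTextureDesc texDesc;
	memset(&texDesc, 0, sizeof(texDesc));
	texDesc.readMode = cudaReadModeElementType;
	cudaTextureObject_t tex = 0;
	cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
	return tex; // fetch in the kernel with tex1Dfetch<float>(tex, i)
}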
|
4b9acff1ce3f154ad8c908f01e615e726f616262.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_cc.cu
*
* @brief Simple test driver program for connected component.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
#include "EvqueueManager.h"
// CC includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/filter/kernel.cuh>
// Boost includes for CPU CC reference algorithms
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId>
struct CcList
{
VertexId root;
unsigned int histogram;
CcList(VertexId root, unsigned int histogram) :
root(root), histogram(histogram) {}
};
template<typename CcList>
bool CCCompare(
CcList elem1,
CcList elem2)
{
return elem1.histogram > elem2.histogram;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"\ntest_cc <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--quick=<0|1>]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code. Default: 0.\n"
);
}
/**
* @brief Displays the CC result (i.e., number of components)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] comp_ids Host-side vector to store computed component id for each node
* @param[in] nodes Number of nodes in the graph
* @param[in] num_components Number of connected components in the graph
* @param[in] roots Host-side vector stores the root for each node in the graph
* @param[in] histogram Histogram of connected component ids
*/
template<typename VertexId, typename SizeT>
void DisplaySolution(
VertexId *comp_ids,
SizeT nodes,
unsigned int num_components,
VertexId *roots,
unsigned int *histogram)
{
typedef CcList<VertexId> CcListType;
printf("Number of Components: %d\n", num_components);
if (nodes <= 40)
{
PrintFormatArray (comp_ids, nodes, "%4d", 10);
/*
printf("[");
for (VertexId i = 0; i < nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(comp_ids[i]);
printf(",");
printf(" ");
}
printf("]\n");
*/
}
else
{
//sort the components by size
CcListType *cclist =
(CcListType*)malloc(sizeof(CcListType) * num_components);
for (int i = 0; i < num_components; ++i)
{
cclist[i].root = roots[i];
cclist[i].histogram = histogram[i];
}
std::stable_sort(
cclist, cclist + num_components, CCCompare<CcListType>);
// Print out at most top 10 largest components
int top = (num_components < 10) ? num_components : 10;
printf("Top %d largest components:\n", top);
for (int i = 0; i < top; ++i)
{
printf("CC ID: %d, CC Root: %d, CC Size: %d\n",
i, cclist[i].root, cclist[i].histogram);
}
free(cclist);
}
}
/**
* Performance/Evaluation statistics
*/
/******************************************************************************
* CC Testing Routines
*****************************************************************************/
/**
* @brief CPU-based reference CC algorithm using Boost Graph Library
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] row_offsets Host-side vector stores row offsets for each node in the graph
* @param[in] column_indices Host-side vector stores column indices for each edge in the graph
* @param[in] num_nodes
* @param[out] labels Host-side vector to store the component id for each node in the graph
*
* \return Number of connected components in the graph
*/
template<typename VertexId, typename SizeT>
unsigned int RefCPUCC(
SizeT *row_offsets, VertexId *column_indices, int num_nodes, int *labels)
{
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS> Graph;
Graph G;
for (int i = 0; i < num_nodes; ++i)
{
for (int j = row_offsets[i]; j < row_offsets[i+1]; ++j)
{
add_edge(i, column_indices[j], G);
}
}
CpuTimer cpu_timer;
cpu_timer.Start();
int num_components = connected_components(G, &labels[0]);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU CC finished in %lf msec.\n", elapsed);
return num_components;
}
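// Editor's sketch: a dependency-free CPU reference using union-find, for builds
// where Boost is unavailable. Same CSR inputs and labelling contract as RefCPUCC
// above; the function name is illustrative.
template<typename VertexId, typename SizeT>
unsigned int RefCPUCC_UnionFind(
    SizeT *row_offsets, VertexId *column_indices, int num_nodes, int *labels)
{
    std::vector<int> parent(num_nodes);
    for (int i = 0; i < num_nodes; ++i) parent[i] = i;
    for (int i = 0; i < num_nodes; ++i)
    {
        for (SizeT j = row_offsets[i]; j < row_offsets[i + 1]; ++j)
        {
            int a = i;
            while (parent[a] != a) { parent[a] = parent[parent[a]]; a = parent[a]; } // find with path halving
            int b = column_indices[j];
            while (parent[b] != b) { parent[b] = parent[parent[b]]; b = parent[b]; }
            if (a != b) parent[a] = b; // union
        }
    }
    // compact the roots into consecutive component ids
    std::vector<int> comp_id(num_nodes, -1);
    unsigned int num_components = 0;
    for (int i = 0; i < num_nodes; ++i)
    {
        int r = i;
        while (parent[r] != r) r = parent[r];
        if (comp_id[r] < 0) comp_id[r] = (int)num_components++;
        labels[i] = comp_id[r];
    }
    return num_components;
}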
/**
* @brief Run tests for connected component algorithm
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] max_grid_size Maximum CTA occupancy for CC kernels
* @param[in] iterations Number of iterations for running the test
* @param[in] num_gpus Number of GPUs
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
int max_grid_size,
int iterations,
int num_gpus)
{
typedef CCProblem<
VertexId,
SizeT,
Value,
true> Problem; //use double buffer for edgemap and vertexmap.
// Allocate host-side label array (for both reference and gpu-computed results)
VertexId *reference_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *h_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_check = (g_quick) ? NULL : reference_component_ids;
unsigned int ref_num_components = 0;
// Allocate CC enactor map
CCEnactor<INSTRUMENT> cc_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"CC Problem Initialization Failed", __FILE__, __LINE__);
//
// Compute reference CPU CC
//
if (reference_check != NULL && !g_quick)
{
printf("Computing reference value ...\n");
ref_num_components = RefCPUCC(
graph.row_offsets,
graph.column_indices,
graph.nodes,
reference_check);
printf("\n");
}
long long total_queued = 0;
VertexId num_iter = 0;
double avg_duty = 0.0;
// Perform CC
GpuTimer gpu_timer;
float elapsed = 0.0f;
iterations = 5000;
struct timeval start, end;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(cc_enactor.GetFrontierType()),
"CC Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
if(iter%/*50*/7==0)
{
gettimeofday(&start, NULL);
}
util::GRError(
cc_enactor.template Enact<Problem>(csr_problem, max_grid_size),
"CC Problem Enact Failed", __FILE__, __LINE__);
if(iter%/*50*/7==/*49*/6)
{
gettimeofday(&end, NULL);
std::cerr << "[CC] ---- " << (end.tv_sec - start.tv_sec)*1000000+(end.tv_usec - start.tv_usec) << std::endl;
}
EvqueueSynch();
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
//printf("iteration %d, time: %.5f\n", iter+1, gpu_timer.ElapsedMillis());
}
elapsed /= iterations;
cc_enactor.GetStatistics(total_queued, num_iter, avg_duty);
// Copy out results
util::GRError(
csr_problem->Extract(h_component_ids),
"CC Problem Data Extraction Failed", __FILE__, __LINE__);
// Validity
if (!g_quick)
{
if (ref_num_components == csr_problem->num_components)
printf("CORRECT.\n");
else
printf("INCORRECT. Ref Component Count: %d,"
"GPU Computed Component Count: %d\n",
ref_num_components, csr_problem->num_components);
}
// Compute size and root of each component
VertexId *h_roots = new VertexId[csr_problem->num_components];
unsigned int *h_histograms = new unsigned int[csr_problem->num_components];
csr_problem->ComputeCCHistogram(h_component_ids, h_roots, h_histograms);
// Display Solution
DisplaySolution(h_component_ids, graph.nodes,
csr_problem->num_components,
h_roots, h_histograms);
if (h_roots) delete[] h_roots;
if (h_histograms) delete[] h_histograms;
printf("[GPU Connected Component] finished.\n");
printf(" elapsed: %.4f ms\n", elapsed);
printf(" num_iterations: %d\n", num_iter);
// Cleanup
if (csr_problem) delete csr_problem;
if (reference_component_ids) free(reference_component_ids);
if (h_component_ids) free(h_component_ids);
hipDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args)
{
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // Maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
int iterations = 1; // Default run test times
g_quick = 1; // Whether or not to skip ref validation
instrumented = args.CheckCmdLineFlag("instrumented");
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("quick", g_quick);
args.GetCmdLineArgument("iteration-num", iterations);
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
max_grid_size,
iterations,
num_gpus);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
max_grid_size,
iterations,
num_gpus);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
EvqueueCreate(4);
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
DeviceInit(args);
hipSetDeviceFlags(hipDeviceMapHost);
// Parse graph-construction params
g_undirected = false; //Does not make undirected graph
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef int Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
fflush(stdout);
// Run tests
RunTests(csr, args);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
EvqueueDestroy();
return 0;
}
| 4b9acff1ce3f154ad8c908f01e615e726f616262.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_cc.cu
*
* @brief Simple test driver program for connected component.
*/
#include <stdio.h>
#include <string>
#include <deque>
#include <vector>
#include <iostream>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph construction utils
#include <gunrock/graphio/market.cuh>
#include "EvqueueManager.h"
// CC includes
#include <gunrock/app/cc/cc_enactor.cuh>
#include <gunrock/app/cc/cc_problem.cuh>
#include <gunrock/app/cc/cc_functor.cuh>
// Operator includes
#include <gunrock/oprtr/filter/kernel.cuh>
// Boost includes for CPU CC reference algorithms
#include <boost/config.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/connected_components.hpp>
using namespace gunrock;
using namespace gunrock::util;
using namespace gunrock::oprtr;
using namespace gunrock::app::cc;
/******************************************************************************
* Defines, constants, globals
******************************************************************************/
bool g_verbose;
bool g_undirected;
bool g_quick;
bool g_stream_from_host;
template <typename VertexId>
struct CcList
{
VertexId root;
unsigned int histogram;
CcList(VertexId root, unsigned int histogram) :
root(root), histogram(histogram) {}
};
template<typename CcList>
bool CCCompare(
CcList elem1,
CcList elem2)
{
return elem1.histogram > elem2.histogram;
}
/******************************************************************************
* Housekeeping Routines
******************************************************************************/
void Usage()
{
printf(
"\ntest_cc <graph type> <graph type args> [--device=<device_index>] "
"[--instrumented] [--quick=<0|1>]\n"
"\n"
"Graph types and args:\n"
" market [<file>]\n"
" Reads a Matrix-Market coordinate-formatted graph of directed/undirected\n"
" edges from stdin (or from the optionally-specified file).\n"
" --device=<device_index> Set GPU device for running the graph primitive.\n"
" --instrumented If set then kernels keep track of queue-search_depth\n"
" and barrier duty (a relative indicator of load imbalance.)\n"
" --quick If set will skip the CPU validation code. Default: 0.\n"
);
}
/**
* @brief Displays the CC result (i.e., number of components)
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] comp_ids Host-side vector to store computed component id for each node
* @param[in] nodes Number of nodes in the graph
* @param[in] num_components Number of connected components in the graph
* @param[in] roots Host-side vector stores the root for each node in the graph
* @param[in] histogram Histogram of connected component ids
*/
template<typename VertexId, typename SizeT>
void DisplaySolution(
VertexId *comp_ids,
SizeT nodes,
unsigned int num_components,
VertexId *roots,
unsigned int *histogram)
{
typedef CcList<VertexId> CcListType;
printf("Number of Components: %d\n", num_components);
if (nodes <= 40)
{
PrintFormatArray (comp_ids, nodes, "%4d", 10);
/*
printf("[");
for (VertexId i = 0; i < nodes; ++i)
{
PrintValue(i);
printf(":");
PrintValue(comp_ids[i]);
printf(",");
printf(" ");
}
printf("]\n");
*/
}
else
{
//sort the components by size
CcListType *cclist =
(CcListType*)malloc(sizeof(CcListType) * num_components);
for (int i = 0; i < num_components; ++i)
{
cclist[i].root = roots[i];
cclist[i].histogram = histogram[i];
}
std::stable_sort(
cclist, cclist + num_components, CCCompare<CcListType>);
// Print out at most top 10 largest components
int top = (num_components < 10) ? num_components : 10;
printf("Top %d largest components:\n", top);
for (int i = 0; i < top; ++i)
{
printf("CC ID: %d, CC Root: %d, CC Size: %d\n",
i, cclist[i].root, cclist[i].histogram);
}
free(cclist);
}
}
/**
* Performance/Evaluation statistics
*/
/******************************************************************************
* CC Testing Routines
*****************************************************************************/
/**
* @brief CPU-based reference CC algorithm using Boost Graph Library
*
* @tparam VertexId
* @tparam SizeT
*
* @param[in] row_offsets Host-side vector stores row offsets for each node in the graph
* @param[in] column_indices Host-side vector stores column indices for each edge in the graph
* @param[in] num_nodes
* @param[out] labels Host-side vector to store the component id for each node in the graph
*
* \return Number of connected components in the graph
*/
template<typename VertexId, typename SizeT>
unsigned int RefCPUCC(
SizeT *row_offsets, VertexId *column_indices, int num_nodes, int *labels)
{
using namespace boost;
typedef adjacency_list <vecS, vecS, undirectedS> Graph;
Graph G;
for (int i = 0; i < num_nodes; ++i)
{
for (int j = row_offsets[i]; j < row_offsets[i+1]; ++j)
{
add_edge(i, column_indices[j], G);
}
}
CpuTimer cpu_timer;
cpu_timer.Start();
int num_components = connected_components(G, &labels[0]);
cpu_timer.Stop();
float elapsed = cpu_timer.ElapsedMillis();
printf("CPU CC finished in %lf msec.\n", elapsed);
return num_components;
}
/**
* @brief Run tests for connected component algorithm
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
* @tparam INSTRUMENT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] max_grid_size Maximum CTA occupancy for CC kernels
* @param[in] iterations Number of iterations for running the test
* @param[in] num_gpus Number of GPUs
*/
template <
typename VertexId,
typename Value,
typename SizeT,
bool INSTRUMENT>
void RunTests(
const Csr<VertexId, Value, SizeT> &graph,
int max_grid_size,
int iterations,
int num_gpus)
{
typedef CCProblem<
VertexId,
SizeT,
Value,
true> Problem; //use double buffer for edgemap and vertexmap.
// Allocate host-side label array (for both reference and gpu-computed results)
VertexId *reference_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *h_component_ids = (VertexId*)malloc(sizeof(VertexId) * graph.nodes);
VertexId *reference_check = (g_quick) ? NULL : reference_component_ids;
unsigned int ref_num_components = 0;
// Allocate CC enactor map
CCEnactor<INSTRUMENT> cc_enactor(g_verbose);
// Allocate problem on GPU
Problem *csr_problem = new Problem;
util::GRError(csr_problem->Init(
g_stream_from_host,
graph,
num_gpus),
"CC Problem Initialization Failed", __FILE__, __LINE__);
//
// Compute reference CPU CC
//
if (reference_check != NULL && !g_quick)
{
printf("Computing reference value ...\n");
ref_num_components = RefCPUCC(
graph.row_offsets,
graph.column_indices,
graph.nodes,
reference_check);
printf("\n");
}
long long total_queued = 0;
VertexId num_iter = 0;
double avg_duty = 0.0;
// Perform CC
GpuTimer gpu_timer;
float elapsed = 0.0f;
iterations = 5000;
struct timeval start, end;
for (int iter = 0; iter < iterations; ++iter)
{
util::GRError(
csr_problem->Reset(cc_enactor.GetFrontierType()),
"CC Problem Data Reset Failed", __FILE__, __LINE__);
gpu_timer.Start();
if(iter%/*50*/7==0)
{
gettimeofday(&start, NULL);
}
util::GRError(
cc_enactor.template Enact<Problem>(csr_problem, max_grid_size),
"CC Problem Enact Failed", __FILE__, __LINE__);
if(iter%/*50*/7==/*49*/6)
{
gettimeofday(&end, NULL);
std::cerr << "[CC] ---- " << (end.tv_sec - start.tv_sec)*1000000+(end.tv_usec - start.tv_usec) << std::endl;
}
EvqueueSynch();
gpu_timer.Stop();
elapsed += gpu_timer.ElapsedMillis();
//printf("iteration %d, time: %.5f\n", iter+1, gpu_timer.ElapsedMillis());
}
elapsed /= iterations;
cc_enactor.GetStatistics(total_queued, num_iter, avg_duty);
// Copy out results
util::GRError(
csr_problem->Extract(h_component_ids),
"CC Problem Data Extraction Failed", __FILE__, __LINE__);
// Validity
if (!g_quick)
{
if (ref_num_components == csr_problem->num_components)
printf("CORRECT.\n");
else
printf("INCORRECT. Ref Component Count: %d,"
"GPU Computed Component Count: %d\n",
ref_num_components, csr_problem->num_components);
}
// Compute size and root of each component
VertexId *h_roots = new VertexId[csr_problem->num_components];
unsigned int *h_histograms = new unsigned int[csr_problem->num_components];
csr_problem->ComputeCCHistogram(h_component_ids, h_roots, h_histograms);
// Display Solution
DisplaySolution(h_component_ids, graph.nodes,
csr_problem->num_components,
h_roots, h_histograms);
if (h_roots) delete[] h_roots;
if (h_histograms) delete[] h_histograms;
printf("[GPU Connected Component] finished.\n");
printf(" elapsed: %.4f ms\n", elapsed);
printf(" num_iterations: %d\n", num_iter);
// Cleanup
if (csr_problem) delete csr_problem;
if (reference_component_ids) free(reference_component_ids);
if (h_component_ids) free(h_component_ids);
cudaDeviceSynchronize();
}
/**
* @brief RunTests entry
*
* @tparam VertexId
* @tparam Value
* @tparam SizeT
*
* @param[in] graph Reference to the CSR graph we process on
* @param[in] args Reference to the command line arguments
*/
template <
typename VertexId,
typename Value,
typename SizeT>
void RunTests(
Csr<VertexId, Value, SizeT> &graph,
CommandLineArgs &args)
{
bool instrumented = false; // Whether or not to collect instrumentation from kernels
int max_grid_size = 0; // Maximum grid size (0: leave it up to the enactor)
int num_gpus = 1; // Number of GPUs for multi-gpu enactor to use
int iterations = 1; // Default run test times
g_quick = 1; // Whether or not to skip ref validation
instrumented = args.CheckCmdLineFlag("instrumented");
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("quick", g_quick);
args.GetCmdLineArgument("iteration-num", iterations);
if (instrumented)
{
RunTests<VertexId, Value, SizeT, true>(
graph,
max_grid_size,
iterations,
num_gpus);
}
else
{
RunTests<VertexId, Value, SizeT, false>(
graph,
max_grid_size,
iterations,
num_gpus);
}
}
/******************************************************************************
* Main
******************************************************************************/
int main( int argc, char** argv)
{
EvqueueCreate(4);
CommandLineArgs args(argc, argv);
if ((argc < 2) || (args.CheckCmdLineFlag("help")))
{
Usage();
return 1;
}
DeviceInit(args);
cudaSetDeviceFlags(cudaDeviceMapHost);
// Parse graph-construction params
g_undirected = false; //Does not make undirected graph
std::string graph_type = argv[1];
int flags = args.ParsedArgc();
int graph_args = argc - flags - 1;
if (graph_args < 1)
{
Usage();
return 1;
}
//
// Construct graph and perform search(es)
//
if (graph_type == "market")
{
// Matrix-market coordinate-formatted graph file
typedef int VertexId; // Use as the node identifier
typedef int Value; // Use as the value type
typedef int SizeT; // Use as the graph size type
Csr<VertexId, Value, SizeT> csr(false); // default for stream_from_host
if (graph_args < 1) { Usage(); return 1; }
char *market_filename = (graph_args == 2) ? argv[2] : NULL;
if (graphio::BuildMarketGraph<false>(
market_filename,
csr,
g_undirected,
false) != 0) // no inverse graph
{
return 1;
}
csr.PrintHistogram();
fflush(stdout);
// Run tests
RunTests(csr, args);
}
else
{
fprintf(stderr, "Unspecified graph type\n");
return 1;
}
EvqueueDestroy();
return 0;
}
|
c86be429cc09a812d7709d4cf60fe159af1cd36c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hiprand/hiprand_kernel.h>
#include <stdio.h>
#include <map>
#include <set>
#include <vector>
#include <string>
#include <iostream>
#include <windows.h>
#include <fstream>
#include <ctime>
#include <random>
#include <omp.h>
#include <fstream>
/*
#####################################################################################
READ FEATURES
#####################################################################################
*/
std::string getBaseDir() {
char buffer[MAX_PATH];
GetModuleFileName(NULL, buffer, MAX_PATH);
std::string::size_type pos = std::string(buffer).find_last_of("\\/");
return std::string(buffer).substr(0, pos);
}
std::string getDirPath(const std::string baseDir, const std::string dirName) {
std::string path = baseDir;
path.append("\\");
path.append(dirName);
return path;
}
std::vector<std::string> getFiles(const std::string& dir)
{
std::vector<std::string> v;
std::string pattern(dir);
pattern.append("\\*.dat");
WIN32_FIND_DATA data;
HANDLE hFind;
if ((hFind = FindFirstFile(pattern.c_str(), &data)) != INVALID_HANDLE_VALUE) {
do {
std::string path = dir;
path.append("\\");
path.append(data.cFileName);
v.push_back(path);
} while (FindNextFile(hFind, &data) != 0);
FindClose(hFind);
}
return v;
}
void readMapFloat(const std::string filePath, std::map<int, float>& voxel_value, float coefficient) {
std::ifstream infile(filePath);
int voxelId;
float value;
while (infile >> voxelId >> value)
{
voxel_value[voxelId] = coefficient * value;
}
}
int getIndexFromPath(const std::string path) {
std::size_t pos1 = path.find_last_of("\\");
std::string fileName = path.substr(pos1 + 1);
return std::stoi(fileName);
}
void readFields(const std::vector<std::string>& filePaths, std::map<int, std::map<int, float> >& neuron_voxel_value, float coefficient) {
for (auto it = filePaths.begin(); it != filePaths.end(); ++it) {
int neuronId = getIndexFromPath(*it);
std::map<int, float> voxel_value;
readMapFloat(*it, voxel_value, coefficient);
neuron_voxel_value[neuronId] = voxel_value;
}
}
/*
#####################################################################################
PREPARE DATA
#####################################################################################
*/
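// Flattens the sparse per-neuron voxel maps into dense arrays with one
// contiguous row of nVoxel values per neuron: pre[j * nVoxel + i] and
// post[j * nVoxel + i] hold neuron j's value in voxel i (0 if absent);
// postAll[i] holds the shared postsynaptic field for voxel i.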
void flattenFeatures(std::map<int, std::map<int, float> >& neuron_voxel_pre,
std::map<int, std::map<int, float> >& neuron_voxel_postExc,
std::map<int, float>& voxel_postAllExc,
float* pre,
float* post,
float* postAll) {
int i = 0;
int nVoxel = voxel_postAllExc.size();
for (auto it = voxel_postAllExc.begin(); it != voxel_postAllExc.end(); ++it) {
int voxelId = it->first;
int j = 0;
for (auto it2 = neuron_voxel_pre.begin(); it2 != neuron_voxel_pre.end(); ++it2) {
auto x = it2->second.find(voxelId);
if (x != it2->second.end()) {
pre[j * nVoxel + i] = x->second;
}
else {
pre[j * nVoxel + i] = 0;
}
j++;
}
j = 0;
for (auto it2 = neuron_voxel_postExc.begin(); it2 != neuron_voxel_postExc.end(); ++it2) {
auto x = it2->second.find(voxelId);
if (x != it2->second.end()) {
post[j * nVoxel + i] = x->second;
}
else {
post[j * nVoxel + i] = 0;
}
j++;
}
postAll[i] = it->second;
i++;
}
}
/*
#####################################################################################
GPU COMPUTATION
#####################################################################################
*/
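// calcKernel: one thread per (presynaptic i, postsynaptic j) neuron pair.
// It accumulates the expected synapse count mu = exp(b0 + pre + post + postAll)
// over all voxels shared by the pair, saturating the pair at 1000 once the
// exponent exceeds 7 (to avoid overflow).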
__global__ void calcKernel(float *contacts,
float *pre,
float *post,
float* postAll,
float b0,
unsigned int nVoxel,
unsigned int nPre,
unsigned int nPost
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < nPre && j < nPost) {
for (unsigned int k = 0; k < nVoxel; k++)
{
float preVal = pre[i * nVoxel + k];
float postVal = post[j * nVoxel + k];
if (preVal != 0 && postVal != 0 && contacts[i * nPost + j] < 1000) {
float arg = b0 + preVal + postVal + postAll[k];
if (arg >= -7 && arg <= 7)
{
float mu = exp(arg);
contacts[i * nPost + j] += mu;
}
else if (arg > 7)
{
contacts[i * nPost + j] = 1000;
}
}
}
}
}
// Helper function that runs the contact-count accumulation on the GPU.
hipError_t calcWithCuda(float *pre,
float *post,
float *postAll,
float b0,
unsigned int nPre,
unsigned int nPost,
unsigned int nVoxel,
float* contacts,
std::clock_t start,
bool verbose)
{
float *dev_pre = 0;
float *dev_post = 0;
float *dev_postAll = 0;
float *dev_contacts = 0;
hipError_t cudaStatus;
double copyToDeviceStartTime = std::clock();
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_pre, nPre * nVoxel * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_post, nPost * nVoxel * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_postAll, nVoxel * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_contacts, nPre * nPost * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_pre, pre, nPre * nVoxel * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_post, post, nPost * nVoxel * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_postAll, postAll, nVoxel * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
	cudaStatus = hipMemcpy(dev_contacts, contacts, nPre * nPost * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
double duration;
if (verbose) {
duration = (std::clock() - copyToDeviceStartTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Copy to device " << duration << std::endl;
}
double computeKernelStartTime = std::clock();
// Launch a kernel on the GPU
dim3 threads(16, 16);
dim3 blocks(nPre / threads.x + 1, nPost / threads.y + 1);
calcKernel << <blocks, threads >> > (dev_contacts,
dev_pre,
dev_post,
dev_postAll,
b0,
nVoxel,
nPre,
nPost
);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "calcKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
if (verbose) {
duration = (std::clock() - computeKernelStartTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Compute GPU " << duration << std::endl;
}
double copyToHostTime = std::clock();
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(contacts, dev_contacts, nPre * nPost * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
if (verbose) {
duration = (std::clock() - copyToHostTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Copy to host " << duration << std::endl;
}
Error:
hipFree(dev_pre);
hipFree(dev_post);
hipFree(dev_postAll);
hipFree(dev_contacts);
return cudaStatus;
}
/*
#####################################################################################
MAIN
#####################################################################################
*/
int main(int argc, char *argv[]) {
if (argc != 6) {
std::cout << "Usage:" << std::endl;
std::cout << "Simulator.exe CPU|GPU theta1 theta2 theta3 theta4" << std::endl;
return -1;
}
std::string mode = argv[1];
bool gpu = mode.compare("GPU") == 0;
float b0, b1, b2, b3;
b0 = std::stof(argv[2]);
b1 = std::stof(argv[3]);
b2 = std::stof(argv[4]);
b3 = std::stof(argv[5]);
bool verbose = false;
std::cout << "[*] Start simulation " << b0 << " " << b1 << " " << b2 << " " << b3 << std::endl;
std::clock_t start;
double duration;
start = std::clock();
std::map<int, std::map<int, float> > neuron_voxel_pre;
std::map<int, std::map<int, float> > neuron_voxel_postExc;
std::map<int, float> voxel_postAllExc;
std::string baseDir = getBaseDir();
double readFeatureTime = std::clock();
std::vector<std::string> preFiles = getFiles(getDirPath(baseDir, "features_pre"));
readFields(preFiles, neuron_voxel_pre, b1);
readFields(getFiles(getDirPath(baseDir, "features_postExc")), neuron_voxel_postExc, b2);
readMapFloat(getDirPath(baseDir, "features_postAll").append("\\voxel_postAllExc.dat"), voxel_postAllExc, b3);
if (verbose) {
duration = (std::clock() - readFeatureTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Read features " << duration << std::endl;
}
std::size_t nPre = neuron_voxel_pre.size();
std::size_t nPost = neuron_voxel_postExc.size();
std::size_t nVoxel = voxel_postAllExc.size();
if (verbose) {
std::cout << "[*] Presynaptic: " << nPre << " Postsynaptic: " << nPost << " Voxels: " << nVoxel << std::endl;
}
std::random_device rd;
std::mt19937 randomGenerator(rd());
std::uniform_real_distribution<float> dis(0.0, 1.0);
float* connections = (float*)malloc(nPre * nPost * sizeof(float));
for (int i = 0; i < nPre; i++) {
for (int j = 0; j < nPost; j++) {
connections[i * nPost + j] = 0;
}
}
if (!gpu) {
std::vector<int> preIndices;
std::vector<int> postIndices;
for (auto it = neuron_voxel_pre.begin(); it != neuron_voxel_pre.end(); ++it)
{
preIndices.push_back(it->first);
}
for (auto it = neuron_voxel_postExc.begin(); it != neuron_voxel_postExc.end(); ++it)
{
postIndices.push_back(it->first);
}
double computeCPUStartTime = std::clock();
#pragma omp parallel for schedule(dynamic)
for (unsigned int i = 0; i < preIndices.size(); i++)
{
int preId = preIndices[i];
for (unsigned int j = 0; j < postIndices.size(); j++)
{
int postId = postIndices[j];
//qDebug() << i << j << preId << postId;
if (preId != postId)
{
for (auto pre = neuron_voxel_pre[preId].begin(); pre != neuron_voxel_pre[preId].end(); ++pre)
{
if (neuron_voxel_postExc[postId].find(pre->first) != neuron_voxel_postExc[postId].end())
{
float preVal = pre->second;
float postVal = neuron_voxel_postExc[postId][pre->first];
float postAllVal = voxel_postAllExc[pre->first];
float arg = b0 + preVal + postVal + postAllVal;
//int synapses = 0;
if (arg >= -7 && arg <= 7)
{
float mu = exp(arg);
connections[i * nPost + j] += mu;
}
else if (arg > 7)
{
connections[i * nPost + j] = 1000;
break;
}
}
}
}
}
}
if (verbose) {
duration = (std::clock() - computeCPUStartTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Compute CPU " << duration << std::endl;
}
}
else
{
float* pre = (float*)malloc(nPre * nVoxel * sizeof(float));
float* post = (float*)malloc(nPost * nVoxel * sizeof(float));
float* postAll = (float*)malloc(nVoxel * sizeof(float));
double flattenFeaturesTime = std::clock();
flattenFeatures(neuron_voxel_pre, neuron_voxel_postExc, voxel_postAllExc, pre, post, postAll);
if (verbose) {
duration = (std::clock() - flattenFeaturesTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Flatten features " << duration << std::endl;
}
hipError_t cudaStatus = calcWithCuda(pre, post, postAll, b0, nPre, nPost, nVoxel, connections, start, verbose);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "calcWithCuda failed!");
return 1;
}
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
free(pre);
free(post);
free(postAll);
}
double computeProbabilityStartTime = std::clock();
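	// One Bernoulli draw per neuron pair with p = 1 - exp(-mu), i.e. the
	// probability of at least one synapse under a Poisson count with rate mu;
	// the realized connection fraction is then averaged over all pairs.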
float connectionProbability = 0;
for (unsigned int i = 0; i < nPre; i++) {
int realizedConnections = 0;
for (unsigned int j = 0; j < nPost; j++) {
float mu = connections[i * nPost + j];
if (mu <= 1000) {
float prob = 1 - exp(-1 * mu);
float rand = dis(randomGenerator);
realizedConnections += rand <= prob ? 1 : 0;
}
else {
realizedConnections++;
}
}
connectionProbability += (float)realizedConnections / (float)nPost;
}
connectionProbability /= (float)nPre;
free(connections);
duration = (std::clock() - computeProbabilityStartTime) / (double)CLOCKS_PER_SEC;
//std::cout << "[*] Compute connection probability " << duration << std::endl;
std::ofstream outfile;
outfile.open("output.json");
outfile << "{\"CONNECTION_PROBABILITY\":" << connectionProbability << "}";
outfile.close();
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Finish simulation " << duration << "s" << std::endl;
std::cout << "[*] Connection prob. " << connectionProbability << std::endl;
return 0;
}
| c86be429cc09a812d7709d4cf60fe159af1cd36c.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
#include <stdio.h>
#include <map>
#include <set>
#include <vector>
#include <string>
#include <iostream>
#include <windows.h>
#include <fstream>
#include <ctime>
#include <random>
#include <omp.h>
/*
#####################################################################################
READ FEATURES
#####################################################################################
*/
std::string getBaseDir() {
char buffer[MAX_PATH];
GetModuleFileName(NULL, buffer, MAX_PATH);
std::string::size_type pos = std::string(buffer).find_last_of("\\/");
return std::string(buffer).substr(0, pos);
}
std::string getDirPath(const std::string baseDir, const std::string dirName) {
std::string path = baseDir;
path.append("\\");
path.append(dirName);
return path;
}
std::vector<std::string> getFiles(const std::string& dir)
{
std::vector<std::string> v;
std::string pattern(dir);
pattern.append("\\*.dat");
WIN32_FIND_DATA data;
HANDLE hFind;
if ((hFind = FindFirstFile(pattern.c_str(), &data)) != INVALID_HANDLE_VALUE) {
do {
std::string path = dir;
path.append("\\");
path.append(data.cFileName);
v.push_back(path);
} while (FindNextFile(hFind, &data) != 0);
FindClose(hFind);
}
return v;
}
void readMapFloat(const std::string filePath, std::map<int, float>& voxel_value, float coefficient) {
std::ifstream infile(filePath);
int voxelId;
float value;
while (infile >> voxelId >> value)
{
voxel_value[voxelId] = coefficient * value;
}
}
int getIndexFromPath(const std::string path) {
std::size_t pos1 = path.find_last_of("\\");
std::string fileName = path.substr(pos1 + 1);
return std::stoi(fileName);
}
void readFields(const std::vector<std::string>& filePaths, std::map<int, std::map<int, float> >& neuron_voxel_value, float coefficient) {
for (auto it = filePaths.begin(); it != filePaths.end(); ++it) {
int neuronId = getIndexFromPath(*it);
std::map<int, float> voxel_value;
readMapFloat(*it, voxel_value, coefficient);
neuron_voxel_value[neuronId] = voxel_value;
}
}
/*
#####################################################################################
PREPARE DATA
#####################################################################################
*/
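// Flattens the sparse per-neuron voxel maps into dense arrays with one
// contiguous row of nVoxel values per neuron: pre[j * nVoxel + i] and
// post[j * nVoxel + i] hold neuron j's value in voxel i (0 if absent);
// postAll[i] holds the shared postsynaptic field for voxel i.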
void flattenFeatures(std::map<int, std::map<int, float> >& neuron_voxel_pre,
std::map<int, std::map<int, float> >& neuron_voxel_postExc,
std::map<int, float>& voxel_postAllExc,
float* pre,
float* post,
float* postAll) {
int i = 0;
int nVoxel = voxel_postAllExc.size();
for (auto it = voxel_postAllExc.begin(); it != voxel_postAllExc.end(); ++it) {
int voxelId = it->first;
int j = 0;
for (auto it2 = neuron_voxel_pre.begin(); it2 != neuron_voxel_pre.end(); ++it2) {
auto x = it2->second.find(voxelId);
if (x != it2->second.end()) {
pre[j * nVoxel + i] = x->second;
}
else {
pre[j * nVoxel + i] = 0;
}
j++;
}
j = 0;
for (auto it2 = neuron_voxel_postExc.begin(); it2 != neuron_voxel_postExc.end(); ++it2) {
auto x = it2->second.find(voxelId);
if (x != it2->second.end()) {
post[j * nVoxel + i] = x->second;
}
else {
post[j * nVoxel + i] = 0;
}
j++;
}
postAll[i] = it->second;
i++;
}
}
/*
#####################################################################################
GPU COMPUTATION
#####################################################################################
*/
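// calcKernel: one thread per (presynaptic i, postsynaptic j) neuron pair.
// It accumulates the expected synapse count mu = exp(b0 + pre + post + postAll)
// over all voxels shared by the pair, saturating the pair at 1000 once the
// exponent exceeds 7 (to avoid overflow).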
__global__ void calcKernel(float *contacts,
float *pre,
float *post,
float* postAll,
float b0,
unsigned int nVoxel,
unsigned int nPre,
unsigned int nPost
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < nPre && j < nPost) {
for (unsigned int k = 0; k < nVoxel; k++)
{
float preVal = pre[i * nVoxel + k];
float postVal = post[j * nVoxel + k];
if (preVal != 0 && postVal != 0 && contacts[i * nPost + j] < 1000) {
float arg = b0 + preVal + postVal + postAll[k];
if (arg >= -7 && arg <= 7)
{
float mu = exp(arg);
contacts[i * nPost + j] += mu;
}
else if (arg > 7)
{
contacts[i * nPost + j] = 1000;
}
}
}
}
}
// Helper function that runs the contact-count accumulation on the GPU.
cudaError_t calcWithCuda(float *pre,
float *post,
float *postAll,
float b0,
unsigned int nPre,
unsigned int nPost,
unsigned int nVoxel,
float* contacts,
std::clock_t start,
bool verbose)
{
float *dev_pre = 0;
float *dev_post = 0;
float *dev_postAll = 0;
float *dev_contacts = 0;
cudaError_t cudaStatus;
double copyToDeviceStartTime = std::clock();
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_pre, nPre * nVoxel * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_post, nPost * nVoxel * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_postAll, nVoxel * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_contacts, nPre * nPost * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_pre, pre, nPre * nVoxel * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_post, post, nPost * nVoxel * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_postAll, postAll, nVoxel * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
	cudaStatus = cudaMemcpy(dev_contacts, contacts, nPre * nPost * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
double duration;
if (verbose) {
duration = (std::clock() - copyToDeviceStartTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Copy to device " << duration << std::endl;
}
double computeKernelStartTime = std::clock();
// Launch a kernel on the GPU
dim3 threads(16, 16);
dim3 blocks(nPre / threads.x + 1, nPost / threads.y + 1);
calcKernel << <blocks, threads >> > (dev_contacts,
dev_pre,
dev_post,
dev_postAll,
b0,
nVoxel,
nPre,
nPost
);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "calcKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
if (verbose) {
duration = (std::clock() - computeKernelStartTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Compute GPU " << duration << std::endl;
}
double copyToHostTime = std::clock();
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(contacts, dev_contacts, nPre * nPost * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
if (verbose) {
duration = (std::clock() - copyToHostTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Copy to host " << duration << std::endl;
}
Error:
cudaFree(dev_pre);
cudaFree(dev_post);
cudaFree(dev_postAll);
cudaFree(dev_contacts);
return cudaStatus;
}
/*
#####################################################################################
MAIN
#####################################################################################
*/
int main(int argc, char *argv[]) {
if (argc != 6) {
std::cout << "Usage:" << std::endl;
std::cout << "Simulator.exe CPU|GPU theta1 theta2 theta3 theta4" << std::endl;
return -1;
}
std::string mode = argv[1];
bool gpu = mode.compare("GPU") == 0;
float b0, b1, b2, b3;
b0 = std::stof(argv[2]);
b1 = std::stof(argv[3]);
b2 = std::stof(argv[4]);
b3 = std::stof(argv[5]);
bool verbose = false;
std::cout << "[*] Start simulation " << b0 << " " << b1 << " " << b2 << " " << b3 << std::endl;
std::clock_t start;
double duration;
start = std::clock();
std::map<int, std::map<int, float> > neuron_voxel_pre;
std::map<int, std::map<int, float> > neuron_voxel_postExc;
std::map<int, float> voxel_postAllExc;
std::string baseDir = getBaseDir();
double readFeatureTime = std::clock();
std::vector<std::string> preFiles = getFiles(getDirPath(baseDir, "features_pre"));
readFields(preFiles, neuron_voxel_pre, b1);
readFields(getFiles(getDirPath(baseDir, "features_postExc")), neuron_voxel_postExc, b2);
readMapFloat(getDirPath(baseDir, "features_postAll").append("\\voxel_postAllExc.dat"), voxel_postAllExc, b3);
if (verbose) {
duration = (std::clock() - readFeatureTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Read features " << duration << std::endl;
}
std::size_t nPre = neuron_voxel_pre.size();
std::size_t nPost = neuron_voxel_postExc.size();
std::size_t nVoxel = voxel_postAllExc.size();
if (verbose) {
std::cout << "[*] Presynaptic: " << nPre << " Postsynaptic: " << nPost << " Voxels: " << nVoxel << std::endl;
}
std::random_device rd;
std::mt19937 randomGenerator(rd());
std::uniform_real_distribution<float> dis(0.0, 1.0);
float* connections = (float*)malloc(nPre * nPost * sizeof(float));
for (int i = 0; i < nPre; i++) {
for (int j = 0; j < nPost; j++) {
connections[i * nPost + j] = 0;
}
}
if (!gpu) {
std::vector<int> preIndices;
std::vector<int> postIndices;
for (auto it = neuron_voxel_pre.begin(); it != neuron_voxel_pre.end(); ++it)
{
preIndices.push_back(it->first);
}
for (auto it = neuron_voxel_postExc.begin(); it != neuron_voxel_postExc.end(); ++it)
{
postIndices.push_back(it->first);
}
double computeCPUStartTime = std::clock();
#pragma omp parallel for schedule(dynamic)
for (unsigned int i = 0; i < preIndices.size(); i++)
{
int preId = preIndices[i];
for (unsigned int j = 0; j < postIndices.size(); j++)
{
int postId = postIndices[j];
//qDebug() << i << j << preId << postId;
if (preId != postId)
{
for (auto pre = neuron_voxel_pre[preId].begin(); pre != neuron_voxel_pre[preId].end(); ++pre)
{
if (neuron_voxel_postExc[postId].find(pre->first) != neuron_voxel_postExc[postId].end())
{
float preVal = pre->second;
float postVal = neuron_voxel_postExc[postId][pre->first];
float postAllVal = voxel_postAllExc[pre->first];
float arg = b0 + preVal + postVal + postAllVal;
//int synapses = 0;
if (arg >= -7 && arg <= 7)
{
float mu = exp(arg);
connections[i * nPost + j] += mu;
}
else if (arg > 7)
{
connections[i * nPost + j] = 1000;
break;
}
}
}
}
}
}
if (verbose) {
duration = (std::clock() - computeCPUStartTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Compute CPU " << duration << std::endl;
}
}
else
{
float* pre = (float*)malloc(nPre * nVoxel * sizeof(float));
float* post = (float*)malloc(nPost * nVoxel * sizeof(float));
float* postAll = (float*)malloc(nVoxel * sizeof(float));
double flattenFeaturesTime = std::clock();
flattenFeatures(neuron_voxel_pre, neuron_voxel_postExc, voxel_postAllExc, pre, post, postAll);
if (verbose) {
duration = (std::clock() - flattenFeaturesTime) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Flatten features " << duration << std::endl;
}
cudaError_t cudaStatus = calcWithCuda(pre, post, postAll, b0, nPre, nPost, nVoxel, connections, start, verbose);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "calcWithCuda failed!");
return 1;
}
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
free(pre);
free(post);
free(postAll);
}
double computeProbabilityStartTime = std::clock();
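	// One Bernoulli draw per neuron pair with p = 1 - exp(-mu), i.e. the
	// probability of at least one synapse under a Poisson count with rate mu;
	// the realized connection fraction is then averaged over all pairs.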
float connectionProbability = 0;
for (unsigned int i = 0; i < nPre; i++) {
int realizedConnections = 0;
for (unsigned int j = 0; j < nPost; j++) {
float mu = connections[i * nPost + j];
if (mu <= 1000) {
float prob = 1 - exp(-1 * mu);
float rand = dis(randomGenerator);
realizedConnections += rand <= prob ? 1 : 0;
}
else {
realizedConnections++;
}
}
connectionProbability += (float)realizedConnections / (float)nPost;
}
connectionProbability /= (float)nPre;
free(connections);
duration = (std::clock() - computeProbabilityStartTime) / (double)CLOCKS_PER_SEC;
//std::cout << "[*] Compute connection probability " << duration << std::endl;
std::ofstream outfile;
outfile.open("output.json");
outfile << "{\"CONNECTION_PROBABILITY\":" << connectionProbability << "}";
outfile.close();
duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
std::cout << "[*] Finish simulation " << duration << "s" << std::endl;
std::cout << "[*] Connection prob. " << connectionProbability << std::endl;
return 0;
}
|
146f85f52b508b260154407dfc050d3da4b3a73b.hip | // !!! This is a file automatically generated by hipify!!!
#include "mincu.hpp"
#include <hip/hip_runtime_api.h>
#include <cctype>
#include <cstdint>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
static const uint32_t GRID_SIZE = 64;
static const uint32_t BLOCK_SIZE = 32;
// static_assert(GRID_SIZE % BLOCK_SIZE == 0, "block size must divide grid size");
using namespace mincu;
extern "C"
__global__ void test_insts(
uint32_t *OUT,
const uint32_t *IN,
uint32_t arg1)
{
const int id = blockDim.x*blockIdx.x + threadIdx.x;
int sum = 0;
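  // (id + i) % 32 cycles through every residue 0..31, so each thread
  // accumulates the same total: sum(IN[0..31]) + 32 * arg1.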
for (int i = 0; i < 32; i++) {
sum += IN[(id + i)%32] + arg1;
}
OUT[id] = sum;
}
int main(int argc, const char* argv[])
{
const int32_t DFT_SRC1 = 1;
int32_t src1 = DFT_SRC1;
int verbosity = 0;
for (int ai = 1; ai < argc; ai++) {
std::string arg(argv[ai]);
auto eq_ix = arg.find('=');
std::string key = eq_ix != std::string::npos ?
arg.substr(0,eq_ix+1) : arg;
std::string val = eq_ix != std::string::npos ?
arg.substr(eq_ix+1) : "";
auto badArg = [&](const char *msg) {
fatal(arg, ": ", msg);
};
auto parseInt = [&](){
int radix = 10;
bool negate = false;
const char *cval = val.c_str();
if (cval[0] == '-') {
cval++;
negate = true;
}
if (cval[0] == '0' &&
(cval[1] == 'x' || cval[1] == 'X') &&
isxdigit(cval[2]))
{
radix = 16;
cval += 2;
}
long ival = 0;
try {
char *end = nullptr;
ival = std::strtol(cval, &end, radix);
if (*end) {
badArg("malformed integer");
}
} catch (...) {
badArg("malformed integer");
}
if (negate)
ival = -ival;
return (int)ival;
};
if (arg == "-h" || arg == "--help") {
std::cout <<
"usage: itester OPTS\n"
"where OPTS are:\n"
" -v verbose output\n"
" -v=INT sets verbosity\n"
" -src1=INT sets src1 (default: " << DFT_SRC1 << ")\n"
"";
return EXIT_SUCCESS;
} else if (key == "-src1=") {
src1 = parseInt();
} else if (key == "-v" || key == "--verbose") {
verbosity = 1;
} else if (key == "-v=") {
verbosity = parseInt();
} else {
badArg("unrecognized option");
}
}
umem<uint32_t> IN(GRID_SIZE);
for (uint32_t i = 0; i < GRID_SIZE; i++) {
IN[i] = i;
}
umem<uint32_t> OUT(GRID_SIZE, init_const<>(0xFFFFFFFF));
if (verbosity > 0) {
std::cout <<
"-------------------------------------------------------\n"
"running micro\n";
}
hipLaunchKernelGGL(( test_insts), dim3(GRID_SIZE/BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, OUT, IN, src1);
auto e = hipDeviceSynchronize();
if (verbosity > 0) {
std::cout << "hipDeviceSynchronize: returned " <<
hipGetErrorString(e) << "\n";
}
if (e != hipSuccess) {
std::cerr << " unexpected error code " <<
hipGetErrorName(e) << " (" << hipGetErrorString(e) << ")\n";
exit(EXIT_FAILURE);
} else if (verbosity >= 0) {
OUT.str(std::cout);
}
return EXIT_SUCCESS;
}
| 146f85f52b508b260154407dfc050d3da4b3a73b.cu | #include "mincu.hpp"
#include <cuda_runtime_api.h>
#include <cctype>
#include <cstdint>
#include <iomanip>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>
static const uint32_t GRID_SIZE = 64;
static const uint32_t BLOCK_SIZE = 32;
// static_assert(GRID_SIZE % BLOCK_SIZE == 0, "block size must divide grid size");
using namespace mincu;
extern "C"
__global__ void test_insts(
uint32_t *OUT,
const uint32_t *IN,
uint32_t arg1)
{
const int id = blockDim.x*blockIdx.x + threadIdx.x;
int sum = 0;
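  // (id + i) % 32 cycles through every residue 0..31, so each thread
  // accumulates the same total: sum(IN[0..31]) + 32 * arg1.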
for (int i = 0; i < 32; i++) {
sum += IN[(id + i)%32] + arg1;
}
OUT[id] = sum;
}
int main(int argc, const char* argv[])
{
const int32_t DFT_SRC1 = 1;
int32_t src1 = DFT_SRC1;
int verbosity = 0;
for (int ai = 1; ai < argc; ai++) {
std::string arg(argv[ai]);
auto eq_ix = arg.find('=');
std::string key = eq_ix != std::string::npos ?
arg.substr(0,eq_ix+1) : arg;
std::string val = eq_ix != std::string::npos ?
arg.substr(eq_ix+1) : "";
auto badArg = [&](const char *msg) {
fatal(arg, ": ", msg);
};
auto parseInt = [&](){
int radix = 10;
bool negate = false;
const char *cval = val.c_str();
if (cval[0] == '-') {
cval++;
negate = true;
}
if (cval[0] == '0' &&
(cval[1] == 'x' || cval[1] == 'X') &&
isxdigit(cval[2]))
{
radix = 16;
cval += 2;
}
long ival = 0;
try {
char *end = nullptr;
ival = std::strtol(cval, &end, radix);
if (*end) {
badArg("malformed integer");
}
} catch (...) {
badArg("malformed integer");
}
if (negate)
ival = -ival;
return (int)ival;
};
if (arg == "-h" || arg == "--help") {
std::cout <<
"usage: itester OPTS\n"
"where OPTS are:\n"
" -v verbose output\n"
" -v=INT sets verbosity\n"
" -src1=INT sets src1 (default: " << DFT_SRC1 << ")\n"
"";
return EXIT_SUCCESS;
} else if (key == "-src1=") {
src1 = parseInt();
} else if (key == "-v" || key == "--verbose") {
verbosity = 1;
} else if (key == "-v=") {
verbosity = parseInt();
} else {
badArg("unrecognized option");
}
}
umem<uint32_t> IN(GRID_SIZE);
for (uint32_t i = 0; i < GRID_SIZE; i++) {
IN[i] = i;
}
umem<uint32_t> OUT(GRID_SIZE, init_const<>(0xFFFFFFFF));
if (verbosity > 0) {
std::cout <<
"-------------------------------------------------------\n"
"running micro\n";
}
test_insts<<<GRID_SIZE/BLOCK_SIZE,BLOCK_SIZE>>>(OUT, IN, src1);
auto e = cudaDeviceSynchronize();
if (verbosity > 0) {
std::cout << "cudaDeviceSynchronize: returned " <<
cudaGetErrorString(e) << "\n";
}
if (e != cudaSuccess) {
std::cerr << " unexpected error code " <<
cudaGetErrorName(e) << " (" << cudaGetErrorString(e) << ")\n";
exit(EXIT_FAILURE);
} else if (verbosity >= 0) {
OUT.str(std::cout);
}
return EXIT_SUCCESS;
}
|
b242f5ee1193084542f0d6c950c101305a84df49.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 4>
template hipError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 4> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 4> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 4> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 4> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| b242f5ee1193084542f0d6c950c101305a84df49.cu | // Copyright (c) 2009-2017 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSphere, 4>
template cudaError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSphere, 4> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeUnion<ShapeSphere, 4> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
template void gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSphere, 4> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSphere, 4> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSphere, 4> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
8db58135a94edf0b01560e96232f71fdee33e97f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) "2019, by Stanford University
// Developer: Mario Di Renzo
// Affiliation: Center for Turbulence Research, Stanford University
// URL: https://ctr.stanford.edu
// Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020).
// HTR solver: An open-source exascale-oriented task-based
// multi-GPU high-order code for hypersonic aerothermodynamics.
// Computer Physics Communications 255, 107262"
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "prometeo_sensor.hpp"
#include "prometeo_sensor.inl"
#include "cuda_utils.hpp"
//-----------------------------------------------------------------------------
// KERNEL FOR UpdateShockSensorTask
//-----------------------------------------------------------------------------
template<direction dir>
__global__
void ComputeDucrosSensor_kernel(const DeferredBuffer<double, 3> DucrosS,
const AccessorRO< Vec3, 3> vGradX,
const AccessorRO< Vec3, 3> vGradY,
const AccessorRO< Vec3, 3> vGradZ,
const Rect<3> my_bounds,
const Rect<3> Fluid_bounds,
const double eps,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
DucrosS[p] = DucrosSensor(vGradX[p].v, vGradY[p].v, vGradZ[p].v, eps);
}
}
template<direction dir>
__global__
void UpdateShockSensor_kernel(const DeferredBuffer<double, 3> DucrosS,
const AccessorRO<VecNEq, 3> Conserved,
const AccessorRO< int, 3> nType,
const AccessorWO< bool, 3> shockSensor,
const Rect<3> my_bounds,
const Rect<3> Fluid_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z,
const coord_t size)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
const Point<3> pM2 = warpPeriodic<dir, Minus>(Fluid_bounds, p, size, offM2(nType[p]));
const Point<3> pM1 = warpPeriodic<dir, Minus>(Fluid_bounds, p, size, offM1(nType[p]));
const Point<3> pP1 = warpPeriodic<dir, Plus >(Fluid_bounds, p, size, offP1(nType[p]));
const Point<3> pP2 = warpPeriodic<dir, Plus >(Fluid_bounds, p, size, offP2(nType[p]));
const Point<3> pP3 = warpPeriodic<dir, Plus >(Fluid_bounds, p, size, offP3(nType[p]));
const double Phi = max(max(max(max(max(
DucrosS[pM2],
DucrosS[pM1]),
DucrosS[p ]),
DucrosS[pP1]),
DucrosS[pP2]),
DucrosS[pP3]);
bool sensor = true;
#pragma unroll
for (int i=0; i<nSpec; i++)
sensor = sensor && TENOsensor(Conserved[pM2][i], Conserved[pM1][i], Conserved[p ][i],
Conserved[pP1][i], Conserved[pP2][i], Conserved[pP3][i],
nType[p], Phi);
shockSensor[p] = sensor;
}
}
template<direction dir>
__host__
void UpdateShockSensorTask<dir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(futures.size() == 0);
// Accessors for variables in the Ghost regions
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
const AccessorRO< Vec3, 3> acc_vGradX (regions[0], FID_velocityGradientX);
const AccessorRO< Vec3, 3> acc_vGradY (regions[0], FID_velocityGradientY);
const AccessorRO< Vec3, 3> acc_vGradZ (regions[0], FID_velocityGradientZ);
// Accessors for node type
const AccessorRO< int, 3> acc_nType (regions[1], FID_nType);
// Accessors for shock sensor
const AccessorWO< bool, 3> acc_shockSensor (regions[2], FID_shockSensor);
// Extract execution domains
Rect<3> r_MyFluid = runtime->get_index_space_domain(ctx, args.ModCells.get_index_space());
Rect<3> Fluid_bounds = args.Fluid_bounds;
const coord_t size = getSize<dir>(Fluid_bounds);
// Compute vorticity scale
const double eps = max(args.vorticityScale*args.vorticityScale, 1e-6);
// Store Ducros sensor in a DeferredBuffer
Domain GhostDomain = runtime->get_index_space_domain(ctx, args.Ghost.get_index_space());
DeferredBuffer<double, 3> DucrosSensor(Memory::GPU_FB_MEM, GhostDomain);
{
const int threads_per_block = 256;
for (RectInDomainIterator<3> Rit(GhostDomain); Rit(); Rit++) {
const dim3 TPB_3d = splitThreadsPerBlock<dir>(threads_per_block, *Rit);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(*Rit) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(*Rit) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(*Rit) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( ComputeDucrosSensor_kernel<dir>), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
DucrosSensor, acc_vGradX, acc_vGradY, acc_vGradZ,
(*Rit), Fluid_bounds, eps,
getSize<Xdir>(*Rit), getSize<Ydir>(*Rit), getSize<Zdir>(*Rit));
}
}
// Launch the kernel to update the shock sensor
{
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<dir>(threads_per_block, r_MyFluid);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_MyFluid) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_MyFluid) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_MyFluid) + (TPB_3d.z - 1)) / TPB_3d.z);
hipLaunchKernelGGL(( UpdateShockSensor_kernel<dir>), dim3(num_blocks_3d), dim3(TPB_3d), 0, 0,
DucrosSensor, acc_Conserved,
acc_nType, acc_shockSensor,
r_MyFluid, Fluid_bounds,
getSize<Xdir>(r_MyFluid), getSize<Ydir>(r_MyFluid), getSize<Zdir>(r_MyFluid), size);
}
}
template void UpdateShockSensorTask<Xdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void UpdateShockSensorTask<Ydir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void UpdateShockSensorTask<Zdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
| 8db58135a94edf0b01560e96232f71fdee33e97f.cu | // Copyright (c) "2019, by Stanford University
// Developer: Mario Di Renzo
// Affiliation: Center for Turbulence Research, Stanford University
// URL: https://ctr.stanford.edu
// Citation: Di Renzo, M., Lin, F., and Urzay, J. (2020).
// HTR solver: An open-source exascale-oriented task-based
// multi-GPU high-order code for hypersonic aerothermodynamics.
// Computer Physics Communications 255, 107262"
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "prometeo_sensor.hpp"
#include "prometeo_sensor.inl"
#include "cuda_utils.hpp"
//-----------------------------------------------------------------------------
// KERNEL FOR UpdateShockSensorTask
//-----------------------------------------------------------------------------
template<direction dir>
__global__
void ComputeDucrosSensor_kernel(const DeferredBuffer<double, 3> DucrosS,
const AccessorRO< Vec3, 3> vGradX,
const AccessorRO< Vec3, 3> vGradY,
const AccessorRO< Vec3, 3> vGradZ,
const Rect<3> my_bounds,
const Rect<3> Fluid_bounds,
const double eps,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
DucrosS[p] = DucrosSensor(vGradX[p].v, vGradY[p].v, vGradZ[p].v, eps);
}
}
template<direction dir>
__global__
void UpdateShockSensor_kernel(const DeferredBuffer<double, 3> DucrosS,
const AccessorRO<VecNEq, 3> Conserved,
const AccessorRO< int, 3> nType,
const AccessorWO< bool, 3> shockSensor,
const Rect<3> my_bounds,
const Rect<3> Fluid_bounds,
const coord_t size_x,
const coord_t size_y,
const coord_t size_z,
const coord_t size)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
if ((x < size_x) && (y < size_y) && (z < size_z)) {
const Point<3> p = Point<3>(x + my_bounds.lo.x,
y + my_bounds.lo.y,
z + my_bounds.lo.z);
const Point<3> pM2 = warpPeriodic<dir, Minus>(Fluid_bounds, p, size, offM2(nType[p]));
const Point<3> pM1 = warpPeriodic<dir, Minus>(Fluid_bounds, p, size, offM1(nType[p]));
const Point<3> pP1 = warpPeriodic<dir, Plus >(Fluid_bounds, p, size, offP1(nType[p]));
const Point<3> pP2 = warpPeriodic<dir, Plus >(Fluid_bounds, p, size, offP2(nType[p]));
const Point<3> pP3 = warpPeriodic<dir, Plus >(Fluid_bounds, p, size, offP3(nType[p]));
const double Phi = max(max(max(max(max(
DucrosS[pM2],
DucrosS[pM1]),
DucrosS[p ]),
DucrosS[pP1]),
DucrosS[pP2]),
DucrosS[pP3]);
bool sensor = true;
#pragma unroll
for (int i=0; i<nSpec; i++)
sensor = sensor && TENOsensor(Conserved[pM2][i], Conserved[pM1][i], Conserved[p ][i],
Conserved[pP1][i], Conserved[pP2][i], Conserved[pP3][i],
nType[p], Phi);
shockSensor[p] = sensor;
}
}
template<direction dir>
__host__
void UpdateShockSensorTask<dir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime)
{
assert(regions.size() == 4);
assert(futures.size() == 0);
// Accessors for variables in the Ghost regions
const AccessorRO<VecNEq, 3> acc_Conserved (regions[0], FID_Conserved);
const AccessorRO< Vec3, 3> acc_vGradX (regions[0], FID_velocityGradientX);
const AccessorRO< Vec3, 3> acc_vGradY (regions[0], FID_velocityGradientY);
const AccessorRO< Vec3, 3> acc_vGradZ (regions[0], FID_velocityGradientZ);
// Accessors for node type
const AccessorRO< int, 3> acc_nType (regions[1], FID_nType);
// Accessors for shock sensor
const AccessorWO< bool, 3> acc_shockSensor (regions[2], FID_shockSensor);
// Extract execution domains
Rect<3> r_MyFluid = runtime->get_index_space_domain(ctx, args.ModCells.get_index_space());
Rect<3> Fluid_bounds = args.Fluid_bounds;
const coord_t size = getSize<dir>(Fluid_bounds);
// Compute vorticity scale
const double eps = max(args.vorticityScale*args.vorticityScale, 1e-6);
// Store Ducros sensor in a DeferredBuffer
Domain GhostDomain = runtime->get_index_space_domain(ctx, args.Ghost.get_index_space());
DeferredBuffer<double, 3> DucrosSensor(Memory::GPU_FB_MEM, GhostDomain);
{
const int threads_per_block = 256;
for (RectInDomainIterator<3> Rit(GhostDomain); Rit(); Rit++) {
const dim3 TPB_3d = splitThreadsPerBlock<dir>(threads_per_block, *Rit);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(*Rit) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(*Rit) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(*Rit) + (TPB_3d.z - 1)) / TPB_3d.z);
ComputeDucrosSensor_kernel<dir><<<num_blocks_3d, TPB_3d>>>(
DucrosSensor, acc_vGradX, acc_vGradY, acc_vGradZ,
(*Rit), Fluid_bounds, eps,
getSize<Xdir>(*Rit), getSize<Ydir>(*Rit), getSize<Zdir>(*Rit));
}
}
// Launch the kernel to update the shock sensor
{
const int threads_per_block = 256;
const dim3 TPB_3d = splitThreadsPerBlock<dir>(threads_per_block, r_MyFluid);
const dim3 num_blocks_3d = dim3((getSize<Xdir>(r_MyFluid) + (TPB_3d.x - 1)) / TPB_3d.x,
(getSize<Ydir>(r_MyFluid) + (TPB_3d.y - 1)) / TPB_3d.y,
(getSize<Zdir>(r_MyFluid) + (TPB_3d.z - 1)) / TPB_3d.z);
UpdateShockSensor_kernel<dir><<<num_blocks_3d, TPB_3d>>>(
DucrosSensor, acc_Conserved,
acc_nType, acc_shockSensor,
r_MyFluid, Fluid_bounds,
getSize<Xdir>(r_MyFluid), getSize<Ydir>(r_MyFluid), getSize<Zdir>(r_MyFluid), size);
}
}
template void UpdateShockSensorTask<Xdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void UpdateShockSensorTask<Ydir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
template void UpdateShockSensorTask<Zdir>::gpu_base_impl(
const Args &args,
const std::vector<PhysicalRegion> ®ions,
const std::vector<Future> &futures,
Context ctx, Runtime *runtime);
|
395a6a7e706d793deb49e5b25edafab1a87b2379.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlaset.cu, normal z -> s, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
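/*
    Example: with BLK_X = BLK_Y = 64, a 1000 x 300 matrix is covered by a
    ceil(1000/64) x ceil(300/64) = 16 x 5 grid of blocks; thread tx of block
    (bx, by) updates row bx*64 + tx across columns by*64 .. min(by*64 + 63, n-1).
*/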
static __device__
void slaset_full_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_S_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to slaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_lower_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to slaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_upper_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void slaset_full_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_lower_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_upper_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slaset_full_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_lower_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_upper_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the vbatched routine.
*/
__global__
void slaset_full_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
float **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
slaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void slaset_lower_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
float **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
slaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void slaset_upper_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
float **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
slaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
/***************************************************************************//**
Purpose
-------
SLASET initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag REAL
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag REAL
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
and A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
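    Example
    -------
    Illustrative sketch (dA is assumed to be an allocated device buffer
    with ldda >= max(1,m), and queue a valid magma_queue_t):
    @code
        // diagonal set to one, everything else set to zero (identity-like)
        magmablas_slaset( MagmaFull, m, n,
                          MAGMA_S_ZERO, MAGMA_S_ONE, dA, ldda, queue );
    @endcode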
@ingroup magma_laset
*******************************************************************************/
extern "C"
void magmablas_slaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slaset_lower_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slaset_upper_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
// if contiguous in memory & set to zero, hipMemset is faster.
// TODO: use hipMemset2D ?
if ( m == ldda &&
MAGMA_S_EQUAL( offdiag, MAGMA_S_ZERO ) &&
MAGMA_S_EQUAL( diag, MAGMA_S_ZERO ) )
{
size_t size = m*n;
hipError_t err = hipMemsetAsync( dA, 0, size*sizeof(float), queue->cuda_stream() );
assert( err == hipSuccess );
MAGMA_UNUSED( err );
}
else {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
hipLaunchKernelGGL(( slaset_full_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
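/*
    Illustrative usage sketch (not part of the original file): initializing an
    m-by-n single-precision device matrix to the identity. m, n and ldda are
    assumed to be defined by the caller, and error checking is omitted; the
    helper calls follow MAGMA's standard allocation/queue API.

        magma_queue_t queue;
        magma_queue_create( 0, &queue );
        magmaFloat_ptr dA;
        magma_smalloc( &dA, ldda*n );
        magmablas_slaset( MagmaFull, m, n,
                          MAGMA_S_ZERO, MAGMA_S_ONE,   // 0 off-diagonal, 1 on the diagonal
                          dA, ldda, queue );
        magma_free( dA );
        magma_queue_destroy( queue );
*/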
/******************************************************************************/
extern "C"
void magmablas_slaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( slaset_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( slaset_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else {
hipLaunchKernelGGL(( slaset_full_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
}
/******************************************************************************/
extern "C"
void magmablas_slaset_vbatched(
magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n,
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
magmaFloat_ptr dAarray[], magma_int_t* ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( max_m < 0 )
info = -2;
else if ( max_n < 0 )
info = -3;
//else if ( ldda < max(1,m) )
// info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( max_m == 0 || max_n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( slaset_lower_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( slaset_upper_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
else {
hipLaunchKernelGGL(( slaset_full_kernel_vbatched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, n, offdiag, diag, dAarray, ldda);
}
}
| 395a6a7e706d793deb49e5b25edafab1a87b2379.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@author Mark Gates
@author Azzam Haidar
@generated from magmablas/zlaset.cu, normal z -> s, Sun Nov 20 20:20:30 2016
*/
#include "magma_internal.h"
#include "batched_kernel_param.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for slaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/******************************************************************************/
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to slaset, slacpy, slag2d, clag2z, sgeadd.
*/
static __device__
void slaset_full_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag || above diag || offdiag == diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y || ind + BLK_X <= iby || MAGMA_S_EQUAL( offdiag, diag )));
/* do only rows inside matrix */
if ( ind < m ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block or offdiag == diag
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to slaset_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_lower_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind > iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
Similar to slaset_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to slaset, slacpy, zlat2c, clat2z.
*/
static __device__
void slaset_upper_device(
int m, int n,
float offdiag, float diag,
float *A, int lda )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
A += ind + iby*lda;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
A[j*lda] = offdiag;
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( iby+j == ind )
A[j*lda] = diag;
else if ( ind < iby+j )
A[j*lda] = offdiag;
}
}
}
}
/******************************************************************************/
/*
kernel wrappers to call the device functions.
*/
__global__
void slaset_full_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_full_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_lower_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_lower_device(m, n, offdiag, diag, dA, ldda);
}
__global__
void slaset_upper_kernel(
int m, int n,
float offdiag, float diag,
float *dA, int ldda )
{
slaset_upper_device(m, n, offdiag, diag, dA, ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void slaset_full_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_full_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_lower_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_lower_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
__global__
void slaset_upper_kernel_batched(
int m, int n,
float offdiag, float diag,
float **dAarray, int ldda )
{
int batchid = blockIdx.z;
slaset_upper_device(m, n, offdiag, diag, dAarray[batchid], ldda);
}
/******************************************************************************/
/*
kernel wrappers to call the device functions for the vbatched routine.
*/
__global__
void slaset_full_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
float **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
slaset_full_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void slaset_lower_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
float **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
slaset_lower_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
__global__
void slaset_upper_kernel_vbatched(
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
float **dAarray, magma_int_t* ldda )
{
const int batchid = blockIdx.z;
const int my_m = (int)m[batchid];
const int my_n = (int)n[batchid];
if( blockIdx.x >= (my_m+BLK_X-1)/BLK_X ) return;
if( blockIdx.y >= (my_n+BLK_Y-1)/BLK_Y ) return;
slaset_upper_device(my_m, my_n, offdiag, diag, dAarray[batchid], (int)ldda[batchid]);
}
/***************************************************************************//**
Purpose
-------
SLASET initializes a 2-D array A to DIAG on the diagonal and
OFFDIAG on the off-diagonals.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
offdiag REAL
The scalar OFFDIAG. (In LAPACK this is called ALPHA.)
@param[in]
diag REAL
The scalar DIAG. (In LAPACK this is called BETA.)
@param[in]
dA REAL array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n, i != j;
and A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_laset
*******************************************************************************/
extern "C"
void magmablas_slaset(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if (uplo == MagmaLower) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slaset_lower_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else if (uplo == MagmaUpper) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slaset_upper_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
else {
// if contiguous in memory & set to zero, cudaMemset is faster.
// TODO: use cudaMemset2D ?
if ( m == ldda &&
MAGMA_S_EQUAL( offdiag, MAGMA_S_ZERO ) &&
MAGMA_S_EQUAL( diag, MAGMA_S_ZERO ) )
{
size_t size = m*n;
cudaError_t err = cudaMemsetAsync( dA, 0, size*sizeof(float), queue->cuda_stream() );
assert( err == cudaSuccess );
MAGMA_UNUSED( err );
}
else {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, diag, dA(i*super_NB, j*super_NB), ldda );
}
else { // off diagonal super block
slaset_full_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
( mm, nn, offdiag, offdiag, dA(i*super_NB, j*super_NB), ldda );
}
}
}
}
}
}
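/*
    Illustrative sketch (not part of the original file): clearing a full
    m-by-n workspace. With offdiag == diag == 0 and m == ldda, the MagmaFull
    branch above collapses to a single cudaMemsetAsync, so this is also the
    cheapest way to zero a contiguous device matrix. m, n, ldda and queue are
    assumed to be set up by the caller; error checking is omitted.

        magmaFloat_ptr dwork;
        magma_smalloc( &dwork, ldda*n );
        magmablas_slaset( MagmaFull, m, n,
                          MAGMA_S_ZERO, MAGMA_S_ZERO,
                          dwork, ldda, queue );
        magma_free( dwork );
*/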
/******************************************************************************/
extern "C"
void magmablas_slaset_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
float offdiag, float diag,
magmaFloat_ptr dAarray[], magma_int_t ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m) )
info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
slaset_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
slaset_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else {
slaset_full_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
}
/******************************************************************************/
extern "C"
void magmablas_slaset_vbatched(
magma_uplo_t uplo, magma_int_t max_m, magma_int_t max_n,
magma_int_t* m, magma_int_t* n,
float offdiag, float diag,
magmaFloat_ptr dAarray[], magma_int_t* ldda,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( max_m < 0 )
info = -2;
else if ( max_n < 0 )
info = -3;
//else if ( ldda < max(1,m) )
// info = -7;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( max_m == 0 || max_n == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( max_m, BLK_X ), magma_ceildiv( max_n, BLK_Y ), batchCount );
if (uplo == MagmaLower) {
slaset_lower_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else if (uplo == MagmaUpper) {
slaset_upper_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
else {
slaset_full_kernel_vbatched<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dAarray, ldda);
}
}
|
a762e8e4555e600d8e3aebbdc5f5b94f65301f13.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
unsigned char *image_s = NULL; // source image array
unsigned char *image_t = NULL; // target image array
FILE *fp_s = NULL; // source file handler
FILE *fp_t = NULL; // target file handler
unsigned int width, height; // image width, image height
unsigned int rgb_raw_data_offset;// RGB raw data offset
unsigned short bit_per_pixel; // bits per pixel (stored as a 2-byte field in the BMP header)
unsigned short byte_per_pixel; // byte per pixel
// bitmap header
unsigned char header[54] = {
0x42, // identity : B
0x4d, // identity : M
0, 0, 0, 0, // file size
0, 0, // reserved1
0, 0, // reserved2
54, 0, 0, 0, // RGB data offset
40, 0, 0, 0, // struct BITMAPINFOHEADER size
0, 0, 0, 0, // bmp width
0, 0, 0, 0, // bmp height
1, 0, // planes
24, 0, // bit per pixel
0, 0, 0, 0, // compression
0, 0, 0, 0, // data size
0, 0, 0, 0, // h resolution
0, 0, 0, 0, // v resolution
0, 0, 0, 0, // used colors
0, 0, 0, 0 // important colors
};
// sobel mask (5x5 version)
// Task 2: Put mask[][][] into Shared Memory
int
mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}}
,
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int
read_bmp (const char *fname_s) {
fp_s = fopen(fname_s, "rb");
if (fp_s == NULL) {
printf("fopen fp_s error\n");
return -1;
}
// move offset to 10 to find rgb raw data offset
fseek(fp_s, 10, SEEK_SET);
fread(&rgb_raw_data_offset, sizeof(unsigned int), 1, fp_s);
// move offset to 18 to get width & height;
fseek(fp_s, 18, SEEK_SET);
fread(&width, sizeof(unsigned int), 1, fp_s);
fread(&height, sizeof(unsigned int), 1, fp_s);
// get bit per pixel
fseek(fp_s, 28, SEEK_SET);
fread(&bit_per_pixel, sizeof(unsigned short), 1, fp_s);
byte_per_pixel = bit_per_pixel / 8;
// move offset to rgb_raw_data_offset to get RGB raw data
fseek(fp_s, rgb_raw_data_offset, SEEK_SET);
// Task 3: Assign image_s to Pinned Memory //Done!
// Hint : err = hipHostMalloc ( ... )
// if (err != hipSuccess)
hipError_t err;
err = hipHostMalloc(&image_s,(size_t)width * height * byte_per_pixel);
if (err != hipSuccess) {
printf("malloc images_s error\n");
return -1;
}
// Task 3: Assign image_t to Pinned Memory //Done!
// Hint : err = hipHostMalloc ( ... )
// if (err != hipSuccess)
err = hipHostMalloc(&image_t, (size_t) width * height * byte_per_pixel);
if (err != hipSuccess) {
printf("malloc image_t error\n");
return -1;
}
fread(image_s, sizeof(unsigned char), (size_t)(long) width * height * byte_per_pixel, fp_s);
return 0;
}
__global__ void sobel(unsigned char *image_s, unsigned char *image_t,
unsigned int width, unsigned int height,
unsigned short byte_per_pixel,
int* d_mask ) {
int x, y, i, v, u; // for loop counter
int R, G, B; // color of R, G, B
double val[MASK_N*3] = {0.0};
int adjustX, adjustY, xBound, yBound;
// Task 2: Put mask[][][] into Shared Memory
// Hint : Please declare it in kernel function
// Then use some threads to move data from global memory to shared memory
// Remember to __syncthreads() after it's done <WHY?>
__shared__ int mask[MASK_N][MASK_X][MASK_Y];
v = threadIdx.x;
if(v < MASK_X){
for(int i = 0; i < MASK_N; i++){
for(int j = 0; j < MASK_Y; j++){
mask[i][v][j] = d_mask[i * MASK_X * MASK_Y + v * MASK_Y + j];
}
}
}
__syncthreads();
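// (Answer to <WHY?>: only threads 0..MASK_X-1 copy mask rows into shared
// memory, but every thread in the block reads mask[][][] in the convolution
// loop below; without the barrier a thread could read a shared-memory entry
// before the owning thread has finished writing it.)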
// Task 1: Relabel x, y into combination of blockIdx, threadIdx ... etc
// Hint A: We do not have enough threads for every pixel in the image, so what should we do?
// Hint B: Maybe you can map each y to different threads in different blocks
for (y = blockIdx.x; y < blockIdx.x + 1; ++y) {
for (x = threadIdx.x; x < width; x+=256) {
for (i = 0; i < MASK_N; ++i) {
adjustX = (MASK_X % 2) ? 1 : 0;
adjustY = (MASK_Y % 2) ? 1 : 0;
xBound = MASK_X /2;
yBound = MASK_Y /2;
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
for (u = -xBound; u < xBound + adjustX; ++u) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
R = image_s[byte_per_pixel * (width * (y+v) + (x+u)) + 2];
G = image_s[byte_per_pixel * (width * (y+v) + (x+u)) + 1];
B = image_s[byte_per_pixel * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i*3+2] * val[i*3+2];
totalG += val[i*3+1] * val[i*3+1];
totalB += val[i*3+0] * val[i*3+0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
image_t[ byte_per_pixel * (width * y + x) + 2 ] = cR;
image_t[ byte_per_pixel * (width * y + x) + 1 ] = cG;
image_t[ byte_per_pixel * (width * y + x) + 0 ] = cB;
}
}
return;
}
int
write_bmp (const char *fname_t) {
unsigned int file_size; // file size
fp_t = fopen(fname_t, "wb");
if (fp_t == NULL) {
printf("fopen fname_t error\n");
return -1;
}
// file size
file_size = width * height * byte_per_pixel + rgb_raw_data_offset;
header[2] = (unsigned char)(file_size & 0x000000ff);
header[3] = (file_size >> 8) & 0x000000ff;
header[4] = (file_size >> 16) & 0x000000ff;
header[5] = (file_size >> 24) & 0x000000ff;
// width
header[18] = width & 0x000000ff;
header[19] = (width >> 8) & 0x000000ff;
header[20] = (width >> 16) & 0x000000ff;
header[21] = (width >> 24) & 0x000000ff;
// height
header[22] = height &0x000000ff;
header[23] = (height >> 8) & 0x000000ff;
header[24] = (height >> 16) & 0x000000ff;
header[25] = (height >> 24) & 0x000000ff;
// bit per pixel
header[28] = bit_per_pixel;
// write header
fwrite(header, sizeof(unsigned char), rgb_raw_data_offset, fp_t);
// write image
fwrite(image_t, sizeof(unsigned char), (size_t)(long)width * height * byte_per_pixel, fp_t);
fclose(fp_s);
fclose(fp_t);
return 0;
}
int
init_device ()
{ // Task 1: Device (GPU) Initialization //Done!
// Hint : hipSetDevice()
hipSetDevice(0);
return 0;
}
int
main(int argc, char **argv) {
init_device();
const char *input = "candy.bmp";
if (argc > 1) input = argv[1];
read_bmp(input); // 24 bit gray level image
// Task 1: Allocate memory on GPU //Done!!
// Hint : hipMalloc ()
// What do we need to store on GPU? (input image, output image, ...)
unsigned char *d_image_s = NULL; // source image array
unsigned char *d_image_t = NULL; // target image array
int *d_mask = NULL; // mask array
hipMalloc((void**)&d_image_t, (size_t)width * height * byte_per_pixel);
hipMalloc((void**)&d_image_s, (size_t)width * height * byte_per_pixel);
hipMalloc((void**)&d_mask, (size_t)sizeof(int) * MASK_N * MASK_Y * MASK_X);
// Task 1: Memory copy from Host to Device (GPU) //Done!!
// Hint : hipMemcpy ( ... , hipMemcpyHostToDevice )
hipMemcpy(d_image_s, image_s, width * height * byte_per_pixel, hipMemcpyHostToDevice);
hipMemcpy(d_mask, mask, sizeof(int) * MASK_N * MASK_Y * MASK_X, hipMemcpyHostToDevice);
// Task 1: Modify sobel() to CUDA kernel function //Done!
// Hint : sobel_Kernel <<< ??? , ??? >>> ( ??? );
hipLaunchKernelGGL(( sobel), dim3(height), dim3(256), 0, 0, d_image_s, d_image_t, width, height, byte_per_pixel, d_mask);
// Task 1: Memory Copy from Device (GPU) to Host //Done!
// Hint : hipMemcpy ( ... , hipMemcpyDeviceToHost )
hipMemcpy(image_t, d_image_t, (size_t)width * height * byte_per_pixel, hipMemcpyDeviceToHost);
// Task 1: Free memory on device //Done!
// Hint : hipFree ( ... )
hipFree(d_image_t);
hipFree(d_image_s);
hipFree(d_mask);
write_bmp("out.bmp");
// Task 3: Free Pinned memory //Done!
// Hint : replace free ( ... ) by hipHostFree ( ... )
hipHostFree (image_s);
hipHostFree (image_t);
}
| a762e8e4555e600d8e3aebbdc5f5b94f65301f13.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
unsigned char *image_s = NULL; // source image array
unsigned char *image_t = NULL; // target image array
FILE *fp_s = NULL; // source file handler
FILE *fp_t = NULL; // target file handler
unsigned int width, height; // image width, image height
unsigned int rgb_raw_data_offset;// RGB raw data offset
unsigned short bit_per_pixel; // bits per pixel (stored as a 2-byte field in the BMP header)
unsigned short byte_per_pixel; // byte per pixel
// bitmap header
unsigned char header[54] = {
0x42, // identity : B
0x4d, // identity : M
0, 0, 0, 0, // file size
0, 0, // reserved1
0, 0, // reserved2
54, 0, 0, 0, // RGB data offset
40, 0, 0, 0, // struct BITMAPINFOHEADER size
0, 0, 0, 0, // bmp width
0, 0, 0, 0, // bmp height
1, 0, // planes
24, 0, // bit per pixel
0, 0, 0, 0, // compression
0, 0, 0, 0, // data size
0, 0, 0, 0, // h resolution
0, 0, 0, 0, // v resolution
0, 0, 0, 0, // used colors
0, 0, 0, 0 // important colors
};
// sobel mask (5x5 version)
// Task 2: Put mask[][][] into Shared Memory
int
mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}}
,
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
int
read_bmp (const char *fname_s) {
fp_s = fopen(fname_s, "rb");
if (fp_s == NULL) {
printf("fopen fp_s error\n");
return -1;
}
// move offset to 10 to find rgb raw data offset
fseek(fp_s, 10, SEEK_SET);
fread(&rgb_raw_data_offset, sizeof(unsigned int), 1, fp_s);
// move offset to 18 to get width & height;
fseek(fp_s, 18, SEEK_SET);
fread(&width, sizeof(unsigned int), 1, fp_s);
fread(&height, sizeof(unsigned int), 1, fp_s);
// get bit per pixel
fseek(fp_s, 28, SEEK_SET);
fread(&bit_per_pixel, sizeof(unsigned short), 1, fp_s);
byte_per_pixel = bit_per_pixel / 8;
// move offset to rgb_raw_data_offset to get RGB raw data
fseek(fp_s, rgb_raw_data_offset, SEEK_SET);
// Task 3: Assign image_s to Pinned Memory //Done!
// Hint : err = cudaMallocHost ( ... )
// if (err != cudaSuccess)
cudaError_t err;
err = cudaMallocHost(&image_s,(size_t)width * height * byte_per_pixel);
if (err != cudaSuccess) {
printf("malloc images_s error\n");
return -1;
}
// Task 3: Assign image_t to Pinned Memory //Done!
// Hint : err = cudaMallocHost ( ... )
// if (err != cudaSuccess)
err = cudaMallocHost(&image_t, (size_t) width * height * byte_per_pixel);
if (err != cudaSuccess) {
printf("malloc image_t error\n");
return -1;
}
fread(image_s, sizeof(unsigned char), (size_t)(long) width * height * byte_per_pixel, fp_s);
return 0;
}
__global__ void sobel(unsigned char *image_s, unsigned char *image_t,
unsigned int width, unsigned int height,
unsigned short byte_per_pixel,
int* d_mask ) {
int x, y, i, v, u; // for loop counter
int R, G, B; // color of R, G, B
double val[MASK_N*3] = {0.0};
int adjustX, adjustY, xBound, yBound;
// Task 2: Put mask[][][] into Shared Memory
// Hint : Please declare it in kernel function
// Then use some threads to move data from global memory to shared memory
// Remember to __syncthreads() after it's done <WHY?>
__shared__ int mask[MASK_N][MASK_X][MASK_Y];
v = threadIdx.x;
if(v < MASK_X){
for(int i = 0; i < MASK_N; i++){
for(int j = 0; j < MASK_Y; j++){
mask[i][v][j] = d_mask[i * MASK_X * MASK_Y + v * MASK_Y + j];
}
}
}
__syncthreads();
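// (Answer to <WHY?>: only threads 0..MASK_X-1 copy mask rows into shared
// memory, but every thread in the block reads mask[][][] in the convolution
// loop below; without the barrier a thread could read a shared-memory entry
// before the owning thread has finished writing it.)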
// Task 1: Relabel x, y into combination of blockIdx, threadIdx ... etc
// Hint A: We do not have enough threads for every pixel in the image, so what should we do?
// Hint B: Maybe you can map each y to different threads in different blocks
for (y = blockIdx.x; y < blockIdx.x + 1; ++y) {
for (x = threadIdx.x; x < width; x+=256) {
for (i = 0; i < MASK_N; ++i) {
adjustX = (MASK_X % 2) ? 1 : 0;
adjustY = (MASK_Y % 2) ? 1 : 0;
xBound = MASK_X /2;
yBound = MASK_Y /2;
val[i*3+2] = 0.0;
val[i*3+1] = 0.0;
val[i*3] = 0.0;
for (v = -yBound; v < yBound + adjustY; ++v) {
for (u = -xBound; u < xBound + adjustX; ++u) {
if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
R = image_s[byte_per_pixel * (width * (y+v) + (x+u)) + 2];
G = image_s[byte_per_pixel * (width * (y+v) + (x+u)) + 1];
B = image_s[byte_per_pixel * (width * (y+v) + (x+u)) + 0];
val[i*3+2] += R * mask[i][u + xBound][v + yBound];
val[i*3+1] += G * mask[i][u + xBound][v + yBound];
val[i*3+0] += B * mask[i][u + xBound][v + yBound];
}
}
}
}
double totalR = 0.0;
double totalG = 0.0;
double totalB = 0.0;
for (i = 0; i < MASK_N; ++i) {
totalR += val[i*3+2] * val[i*3+2];
totalG += val[i*3+1] * val[i*3+1];
totalB += val[i*3+0] * val[i*3+0];
}
totalR = sqrt(totalR) / SCALE;
totalG = sqrt(totalG) / SCALE;
totalB = sqrt(totalB) / SCALE;
const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
image_t[ byte_per_pixel * (width * y + x) + 2 ] = cR;
image_t[ byte_per_pixel * (width * y + x) + 1 ] = cG;
image_t[ byte_per_pixel * (width * y + x) + 0 ] = cB;
}
}
return;
}
int
write_bmp (const char *fname_t) {
unsigned int file_size; // file size
fp_t = fopen(fname_t, "wb");
if (fp_t == NULL) {
printf("fopen fname_t error\n");
return -1;
}
// file size
file_size = width * height * byte_per_pixel + rgb_raw_data_offset;
header[2] = (unsigned char)(file_size & 0x000000ff);
header[3] = (file_size >> 8) & 0x000000ff;
header[4] = (file_size >> 16) & 0x000000ff;
header[5] = (file_size >> 24) & 0x000000ff;
// width
header[18] = width & 0x000000ff;
header[19] = (width >> 8) & 0x000000ff;
header[20] = (width >> 16) & 0x000000ff;
header[21] = (width >> 24) & 0x000000ff;
// height
header[22] = height &0x000000ff;
header[23] = (height >> 8) & 0x000000ff;
header[24] = (height >> 16) & 0x000000ff;
header[25] = (height >> 24) & 0x000000ff;
// bit per pixel
header[28] = bit_per_pixel;
// write header
fwrite(header, sizeof(unsigned char), rgb_raw_data_offset, fp_t);
// write image
fwrite(image_t, sizeof(unsigned char), (size_t)(long)width * height * byte_per_pixel, fp_t);
fclose(fp_s);
fclose(fp_t);
return 0;
}
int
init_device ()
{ // Task 1: Device (GPU) Initialization //Done!
// Hint : cudaSetDevice()
cudaSetDevice(0);
return 0;
}
int
main(int argc, char **argv) {
init_device();
const char *input = "candy.bmp";
if (argc > 1) input = argv[1];
read_bmp(input); // 24 bit gray level image
// Task 1: Allocate memory on GPU //Done!!
// Hint : cudaMalloc ()
// What do we need to store on GPU? (input image, output image, ...)
unsigned char *d_image_s = NULL; // source image array
unsigned char *d_image_t = NULL; // target image array
int *d_mask = NULL; // mask array
cudaMalloc((void**)&d_image_t, (size_t)width * height * byte_per_pixel);
cudaMalloc((void**)&d_image_s, (size_t)width * height * byte_per_pixel);
cudaMalloc((void**)&d_mask, (size_t)sizeof(int) * MASK_N * MASK_Y * MASK_X);
// Task 1: Memory copy from Host to Device (GPU) //Done!!
// Hint : cudaMemcpy ( ... , cudaMemcpyHostToDevice )
cudaMemcpy(d_image_s, image_s, width * height * byte_per_pixel, cudaMemcpyHostToDevice);
cudaMemcpy(d_mask, mask, sizeof(int) * MASK_N * MASK_Y * MASK_X, cudaMemcpyHostToDevice);
// Task 1: Modify sobel() to CUDA kernel function //Done!
// Hint : sobel_Kernel <<< ??? , ??? >>> ( ??? );
sobel<<<height, 256>>>( d_image_s, d_image_t, width, height, byte_per_pixel, d_mask);
// Task 1: Memory Copy from Device (GPU) to Host //Done!
// Hint : cudaMemcpy ( ... , cudaMemcpyDeviceToHost )
cudaMemcpy(image_t, d_image_t, (size_t)width * height * byte_per_pixel, cudaMemcpyDeviceToHost);
// Task 1: Free memory on device //Done!
// Hint : cudaFree ( ... )
cudaFree(d_image_t);
cudaFree(d_image_s);
cudaFree(d_mask);
write_bmp("out.bmp");
// Task 3: Free Pinned memory //Done!
// Hint : replace free ( ... ) by cudaFreeHost ( ... )
cudaFreeHost (image_s);
cudaFreeHost (image_t);
}
|
34d1b7879ea551a7b626f896f29f31f5f2aaea1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
#include <unistd.h>
#include <memory>
#include <array>
#include <algorithm>
#include <vector>
const std::size_t NUMBER_STREAMS = 80;
const std::size_t N = 1 << 20;
struct test_struct
{
hipStream_t stream;
std::unique_ptr<std::array<float, N>> h_a;
std::unique_ptr<std::array<float, N>> h_b;
std::unique_ptr<std::array<float, N>> h_c;
float* d_a;
float* d_b;
float* d_c;
std::size_t error_count;
};
typedef struct test_struct test_struct_t;
int make_test_struct(test_struct_t& t)
{
t.h_a.reset(new std::array<float, N>);
t.h_b.reset(new std::array<float, N>);
t.h_c.reset(new std::array<float, N>);
if (nullptr == t.h_a)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (nullptr == t.h_b)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (nullptr == t.h_c)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipStreamCreate(&t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipHostMalloc(&t.d_a, N * sizeof(t.d_a[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipHostMalloc(&t.d_b, N * sizeof(t.d_b[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipHostMalloc(&t.d_c, N * sizeof(t.d_c[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
std::fill(t.h_a->begin(), t.h_a->end(), 1.0);
std::fill(t.h_b->begin(), t.h_b->end(), 2.0);
t.error_count = 0;
return 0;
}
int destroy_test_struct(test_struct_t& t)
{
if (hipSuccess != hipHostFree(t.d_a))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipHostFree(t.d_b))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipHostFree(t.d_c))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipStreamDestroy(t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
return 0;
}
__global__
void vec_add(float* const c, const float* const a, const float* const b, const std::size_t n)
{
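// Grid-stride loop: the host launches a single block below, so each thread
// strides across the array; the kernel stays correct for any grid/block
// configuration, even one with far fewer threads than n elements.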
for (std::size_t i = threadIdx.x + (blockIdx.x * blockDim.x); i < n; i += (blockDim.x * gridDim.x))
{
c[i] = a[i] + b[i] + 1;
}
}
int main(void)
{
const dim3 grid_size (1, 1, 1);
const dim3 block_size (1024, 1, 1);
std::array<test_struct_t, NUMBER_STREAMS> streams;
std::for_each(streams.begin(), streams.end(), make_test_struct);
for (test_struct_t& t: streams)
{
if (hipSuccess != hipMemcpyAsync(t.d_a, t.h_a->data(), N * sizeof(t.d_a[0]), hipMemcpyHostToDevice, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (hipSuccess != hipMemcpyAsync(t.d_b, t.h_b->data(), N * sizeof(t.d_b[0]), hipMemcpyHostToDevice, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
hipLaunchKernelGGL(( vec_add), dim3(grid_size), dim3(block_size), 0, t.stream, t.d_c, t.d_a, t.d_b, N);
if (hipSuccess != hipMemcpyAsync(t.h_c->data(), t.d_c, N * sizeof(t.d_c[0]), hipMemcpyDeviceToHost, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
}
for (test_struct_t& t: streams)
{
if (hipSuccess != hipStreamSynchronize(t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
}
for (test_struct_t& t: streams)
{
for (std::size_t i = 0; i < N; ++ i)
{
if (4 != t.h_c->at(i))
{
++ t.error_count;
}
}
if (0 != t.error_count)
{
std::cout << t.error_count << std::endl << std::flush;
}
}
std::for_each(streams.begin(), streams.end(), destroy_test_struct);
return 0;
}
| 34d1b7879ea551a7b626f896f29f31f5f2aaea1b.cu | #include <iostream>
#include <math.h>
#include <unistd.h>
#include <memory>
#include <array>
#include <algorithm>
#include <vector>
const std::size_t NUMBER_STREAMS = 80;
const std::size_t N = 1 << 20;
struct test_struct
{
cudaStream_t stream;
std::unique_ptr<std::array<float, N>> h_a;
std::unique_ptr<std::array<float, N>> h_b;
std::unique_ptr<std::array<float, N>> h_c;
float* d_a;
float* d_b;
float* d_c;
std::size_t error_count;
};
typedef struct test_struct test_struct_t;
int make_test_struct(test_struct_t& t)
{
t.h_a.reset(new std::array<float, N>);
t.h_b.reset(new std::array<float, N>);
t.h_c.reset(new std::array<float, N>);
if (nullptr == t.h_a)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (nullptr == t.h_b)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (nullptr == t.h_c)
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaStreamCreate(&t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&t.d_a, N * sizeof(t.d_a[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&t.d_b, N * sizeof(t.d_b[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&t.d_c, N * sizeof(t.d_c[0])))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
std::fill(t.h_a->begin(), t.h_a->end(), 1.0);
std::fill(t.h_b->begin(), t.h_b->end(), 2.0);
t.error_count = 0;
return 0;
}
int destroy_test_struct(test_struct_t& t)
{
if (cudaSuccess != cudaFreeHost(t.d_a))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaFreeHost(t.d_b))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaFreeHost(t.d_c))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaStreamDestroy(t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
return 0;
}
__global__
void vec_add(float* const c, const float* const a, const float* const b, const std::size_t n)
{
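// Grid-stride loop: the host launches a single block below, so each thread
// strides across the array; the kernel stays correct for any grid/block
// configuration, even one with far fewer threads than n elements.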
for (std::size_t i = threadIdx.x + (blockIdx.x * blockDim.x); i < n; i += (blockDim.x * gridDim.x))
{
c[i] = a[i] + b[i] + 1;
}
}
int main(void)
{
const dim3 grid_size (1, 1, 1);
const dim3 block_size (1024, 1, 1);
std::array<test_struct_t, NUMBER_STREAMS> streams;
std::for_each(streams.begin(), streams.end(), make_test_struct);
for (test_struct_t& t: streams)
{
if (cudaSuccess != cudaMemcpyAsync(t.d_a, t.h_a->data(), N * sizeof(t.d_a[0]), cudaMemcpyHostToDevice, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMemcpyAsync(t.d_b, t.h_b->data(), N * sizeof(t.d_b[0]), cudaMemcpyHostToDevice, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
vec_add<<<grid_size, block_size, 0, t.stream>>>(t.d_c, t.d_a, t.d_b, N);
if (cudaSuccess != cudaMemcpyAsync(t.h_c->data(), t.d_c, N * sizeof(t.d_c[0]), cudaMemcpyDeviceToHost, t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
}
for (test_struct_t& t: streams)
{
if (cudaSuccess != cudaStreamSynchronize(t.stream))
{
std::cout << __FUNCTION__ << " " << __LINE__ << std::endl;
return -__LINE__;
}
}
for (test_struct_t& t: streams)
{
for (std::size_t i = 0; i < N; ++ i)
{
if (4 != t.h_c->at(i))
{
++ t.error_count;
}
}
if (0 != t.error_count)
{
std::cout << t.error_count << std::endl << std::flush;
}
}
std::for_each(streams.begin(), streams.end(), destroy_test_struct);
return 0;
}
|
a283dfae9e123ca007bc75b7401382bd579094aa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "differenceImg.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Octave0 = NULL;
hipMalloc(&d_Octave0, XSIZE*YSIZE*sizeof(float)); // size in floats, not raw bytes
float *d_Octave1 = NULL;
hipMalloc(&d_Octave1, XSIZE*YSIZE*sizeof(float));
float *d_diffOctave = NULL;
hipMalloc(&d_diffOctave, XSIZE*YSIZE*sizeof(float));
int pitch = 2;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(differenceImg, dim3(gridBlock), dim3(threadBlock), 0, 0, d_Octave0, d_Octave1, d_diffOctave, pitch, height);
hipDeviceSynchronize();
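// Warm-up launches: absorb one-time costs (context/module initialization,
// instruction-cache warm-up) so the timed loop below measures steady-state
// kernel time only.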
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(differenceImg, dim3(gridBlock), dim3(threadBlock), 0, 0, d_Octave0, d_Octave1, d_diffOctave, pitch, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(differenceImg, dim3(gridBlock), dim3(threadBlock), 0, 0, d_Octave0, d_Octave1, d_diffOctave, pitch, height);
}
hipDeviceSynchronize(); // drain the queued launches so the interval covers execution, not just enqueue time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | a283dfae9e123ca007bc75b7401382bd579094aa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "differenceImg.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *d_Octave0 = NULL;
cudaMalloc(&d_Octave0, XSIZE*YSIZE*sizeof(float)); // size in floats, not raw bytes
float *d_Octave1 = NULL;
cudaMalloc(&d_Octave1, XSIZE*YSIZE*sizeof(float));
float *d_diffOctave = NULL;
cudaMalloc(&d_diffOctave, XSIZE*YSIZE*sizeof(float));
int pitch = 2;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
differenceImg<<<gridBlock,threadBlock>>>(d_Octave0,d_Octave1,d_diffOctave,pitch,height);
cudaDeviceSynchronize();
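// Warm-up launches: absorb one-time costs (context/module initialization,
// instruction-cache warm-up) so the timed loop below measures steady-state
// kernel time only.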
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
differenceImg<<<gridBlock,threadBlock>>>(d_Octave0,d_Octave1,d_diffOctave,pitch,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
differenceImg<<<gridBlock,threadBlock>>>(d_Octave0,d_Octave1,d_diffOctave,pitch,height);
}
cudaDeviceSynchronize(); // drain the queued launches so the interval covers execution, not just enqueue time
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
60522db9827dcbbf0301e6ab005fdb97a1fb91f7.hip | // !!! This is a file automatically generated by hipify!!!
#include "PrefixSumHandler.cuh"
#include "SequenceVisitor.cuh"
template<>
void SequenceVisitor::set_arguments_size<prefix_sum_scifi_hits_t>(
prefix_sum_scifi_hits_t::arguments_t arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
const HostBuffers& host_buffers)
{
arguments.set_size<dev_prefix_sum_auxiliary_array_4>(prefix_sum_scifi_hits_t::aux_array_size(
host_buffers.host_number_of_selected_events[0] * SciFi::Constants::n_mat_groups_and_mats));
}
template<>
void SequenceVisitor::visit<prefix_sum_scifi_hits_t>(
prefix_sum_scifi_hits_t& state,
const prefix_sum_scifi_hits_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
hipStream_t& cuda_stream,
hipEvent_t& cuda_generic_event)
{
// Set size of the main array to be prefix summed
state.set_size(host_buffers.host_number_of_selected_events[0] * SciFi::Constants::n_mat_groups_and_mats);
// Set the cuda_stream
state.set_opts(cuda_stream);
// Set arguments: Array to prefix sum and auxiliary array
state.set_arguments(arguments.offset<dev_scifi_hit_count>(), arguments.offset<dev_prefix_sum_auxiliary_array_4>());
// Invoke all steps of prefix sum
state.invoke();
// Fetch total number of hits
cudaCheck(hipMemcpyAsync(
host_buffers.host_accumulated_number_of_scifi_hits,
arguments.offset<dev_scifi_hit_count>() +
host_buffers.host_number_of_selected_events[0] * SciFi::Constants::n_mat_groups_and_mats,
sizeof(uint),
hipMemcpyDeviceToHost,
cuda_stream));
hipEventRecord(cuda_generic_event, cuda_stream);
hipEventSynchronize(cuda_generic_event);
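// The record/synchronize pair above blocks the host until the asynchronous
// device-to-host copy has completed, so host_accumulated_number_of_scifi_hits
// is safe to dereference from here on.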
// info_cout << "Total SciFi cluster count: " << *host_buffers.host_accumulated_number_of_scifi_hits << std::endl;
}
| 60522db9827dcbbf0301e6ab005fdb97a1fb91f7.cu | #include "PrefixSumHandler.cuh"
#include "SequenceVisitor.cuh"
template<>
void SequenceVisitor::set_arguments_size<prefix_sum_scifi_hits_t>(
prefix_sum_scifi_hits_t::arguments_t arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
const HostBuffers& host_buffers)
{
arguments.set_size<dev_prefix_sum_auxiliary_array_4>(prefix_sum_scifi_hits_t::aux_array_size(
host_buffers.host_number_of_selected_events[0] * SciFi::Constants::n_mat_groups_and_mats));
}
template<>
void SequenceVisitor::visit<prefix_sum_scifi_hits_t>(
prefix_sum_scifi_hits_t& state,
const prefix_sum_scifi_hits_t::arguments_t& arguments,
const RuntimeOptions& runtime_options,
const Constants& constants,
HostBuffers& host_buffers,
cudaStream_t& cuda_stream,
cudaEvent_t& cuda_generic_event)
{
// Set size of the main array to be prefix summed
state.set_size(host_buffers.host_number_of_selected_events[0] * SciFi::Constants::n_mat_groups_and_mats);
// Set the cuda_stream
state.set_opts(cuda_stream);
// Set arguments: Array to prefix sum and auxiliary array
state.set_arguments(arguments.offset<dev_scifi_hit_count>(), arguments.offset<dev_prefix_sum_auxiliary_array_4>());
// Invoke all steps of prefix sum
state.invoke();
// Fetch total number of hits
cudaCheck(cudaMemcpyAsync(
host_buffers.host_accumulated_number_of_scifi_hits,
arguments.offset<dev_scifi_hit_count>() +
host_buffers.host_number_of_selected_events[0] * SciFi::Constants::n_mat_groups_and_mats,
sizeof(uint),
cudaMemcpyDeviceToHost,
cuda_stream));
cudaEventRecord(cuda_generic_event, cuda_stream);
cudaEventSynchronize(cuda_generic_event);
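// The record/synchronize pair above blocks the host until the asynchronous
// device-to-host copy has completed, so host_accumulated_number_of_scifi_hits
// is safe to dereference from here on.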
// info_cout << "Total SciFi cluster count: " << *host_buffers.host_accumulated_number_of_scifi_hits << std::endl;
}
|
05e73bcde81233db0f676f7971d465b0c514d81c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/TensorUtils.h>
#include <ATen/TensorOperators.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/squeeze.h>
#endif
constexpr float EPSILON = 1e-12;
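// For binary cross entropy, L(x, t) = -(t*log(x) + (1-t)*log(1-x)), so
// dL/dx = (x - t) / (x * (1 - x)). The backward kernel below clamps that
// denominator from below by EPSILON so the gradient stays finite when the
// input saturates at exactly 0 or 1.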
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) {
auto grad_input = at::empty_like(input);
if (!log_target) {
TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(target)
.add_input(grad)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
}
else {
grad_input = -at::exp(target) * grad;
if (reduction == at::Reduction::Mean) {
grad_input /= input.numel();
}
}
return grad_input;
}
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = ::log(input_val);
scalar_t log_1_minus_input_val = ::log(one - input_val);
log_input_val = ::max(log_input_val, neg_100);
log_1_minus_input_val = ::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
constexpr int NLL_LOSS_THREADS = 32;
#define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \
[&] { \
at::ScalarType _it = TYPE; \
RECORD_KERNEL_FUNCTION_DTYPE(NAME, _it) \
switch (_it) { \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Byte, uint8_t, index_t, __VA_ARGS__) \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Long, int64_t, index_t, __VA_ARGS__)\
default: \
AT_ERROR(#NAME, " not implemented for '", toString(_it), "'"); \
} \
}()
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
index_t* target,
scalar_t* output,
scalar_t* weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int n_classes,
int64_t ignore_index) {
CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1};
*total_weight = cur_weight;
if (size_average) {
// If we try to normalize a zero then we return a NaN
if (cur_weight == 0) {
*output = std::numeric_limits<scalar_t>::quiet_NaN();
} else {
*output = -input[t];
}
} else {
*output = -cur_weight * input[t];
}
} else {
    // If the only element was omitted, we get 0. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
*output = scalar_t{0};
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
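  // Each thread accumulates partial sums of the negative weighted inputs and of the
  // weights; thread 0 combines them after the barrier.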
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
const Tensor& output,
const Tensor& total_weight,
const Tensor& input_,
const Tensor& target_,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto input = *input_.expect_contiguous();
auto target = *target_.expect_contiguous();
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == Reduction::None && n_dims == 2) {
at::native::resize_output(output, {batch_size});
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with
// 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel_index",
[&] {
hipLaunchKernelGGL(( nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<index_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return;
}
// produce scalar outputs for the reduction case
at::native::resize_output(output, {});
total_weight.resize_({});
if (target.numel() == 0) {
// Here target (and input) have zero elements
// Mean reduction on empty tensors produces NaN. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
if (reduction == Reduction::Mean) {
output.fill_(std::numeric_limits<double>::quiet_NaN());
} else {
output.zero_();
}
total_weight.zero_();
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d_index",
[&] {
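                // The 1-d case reduces a single sample, so a single thread is sufficient.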
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d_index",
[&] {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>;
hipLaunchKernelGGL(( nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>)
, dim3(1),
dim3(NLL_LOSS_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
index_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
index_t *target,
scalar_t *total_weight,
bool size_average,
int n_classes,
int64_t ignore_index
) {
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
grad_input[t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
index_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
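  // grad is the same scalar for every thread; each thread scatters it into the entries
  // it strides over.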
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[i * ndim + t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
}
void nll_loss_backward_out_cuda_template(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& input_,
const Tensor& target_,
const Tensor& total_weight,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto target = *target_.expect_contiguous();
auto input = *input_.expect_contiguous();
auto grad_input = *grad_input_.expect_contiguous();
auto grad_output = *grad_output_.expect_contiguous();
int64_t n_dims = input.dim();
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>)
, dim3(at::cuda::detail::GET_BLOCKS(batch_size)),
dim3(at::cuda::detail::CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
batch_size,
target.data_ptr<index_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>)
, dim3(1), dim3(1), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
target.data_ptr<index_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d_index",
[&] {
hipLaunchKernelGGL(( nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>)
, dim3(1), dim3(NLL_LOSS_THREADS), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
}
}
#undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES
} // namespace
TORCH_IMPL_FUNC(nll_loss_forward_out_cuda)
(const Tensor& self,
const Tensor& target,
const OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& output,
const Tensor& total_weight) {
const Tensor& weight = weight_opt.getTensorRef();
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight, reduction, ignore_index);
}
TORCH_IMPL_FUNC(nll_loss_backward_out_cuda)
(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
const Tensor& grad_input) {
const Tensor& weight = weight_opt.getTensorRef();
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight,
reduction,
ignore_index);
}
}} // namespace at::native
| 05e73bcde81233db0f676f7971d465b0c514d81c.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/TensorUtils.h>
#include <ATen/TensorOperators.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/Resize.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/binary_cross_entropy_backward_native.h>
#include <ATen/ops/binary_cross_entropy_native.h>
#include <ATen/ops/empty_like.h>
#include <ATen/ops/exp.h>
#include <ATen/ops/nll_loss_backward_native.h>
#include <ATen/ops/nll_loss_forward_native.h>
#include <ATen/ops/squeeze.h>
#endif
constexpr float EPSILON = 1e-12;
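// Lower bound for the denominator in the BCE backward pass, guarding against division by zero.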
namespace {
using namespace at;
void binary_cross_entropy_backward_out_kernel(Tensor& grad_input, const Tensor& grad, const Tensor& input, const Tensor& target) {
at::TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(grad)
.add_input(input)
.add_input(target)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_backward_out_cuda", [&]() {
at::native::gpu_kernel(iter, [] GPU_LAMBDA (
scalar_t grad_val,
scalar_t input_val,
scalar_t target_val
) -> scalar_t {
const scalar_t one = 1;
const scalar_t epsilon = EPSILON;
scalar_t grad_input_denominator = max(
(one - input_val) * input_val,
epsilon
);
return grad_val * (input_val - target_val) / grad_input_denominator;
}
);
});
}
} // namespace
namespace at { namespace native {
Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction, bool log_target) {
auto grad_input = at::empty_like(input);
if (!log_target) {
TensorIterator iter = TensorIteratorConfig()
.add_output(grad_input)
.add_input(target)
.add_input(grad)
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "kl_div_backward_cuda", [&]() {
scalar_t inv = (reduction == at::Reduction::Mean) ? scalar_t(1.0 / input.numel()) : scalar_t(1.0);
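      // Fold the 1/N factor of the mean reduction into the per-element gradient.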
gpu_kernel(iter,
[inv] GPU_LAMBDA (scalar_t target_val, scalar_t grad_val) {
return (target_val > 0) ? scalar_t(-target_val * grad_val * inv) : scalar_t(0.0);
});
});
}
else {
grad_input = -at::exp(target) * grad;
if (reduction == at::Reduction::Mean) {
grad_input /= input.numel();
}
}
return grad_input;
}
Tensor binary_cross_entropy_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss = at::empty_like(input);
return at::native::binary_cross_entropy_out_cuda(
input, target, weight, reduction, loss);
}
Tensor& binary_cross_entropy_out_cuda(const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& loss) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor loss_squeezed = at::squeeze(loss);
TensorIterator iter = TensorIteratorConfig()
.add_output(loss_squeezed)
.add_owned_input(at::squeeze(input))
.add_owned_input(at::squeeze(target))
.build();
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "binary_cross_entropy_out_cuda", [&]() {
gpu_kernel(iter,
[] GPU_LAMBDA (scalar_t input_val, scalar_t target_val) -> scalar_t {
const scalar_t zero = 0;
const scalar_t one = 1;
const scalar_t neg_100 = -100;
CUDA_KERNEL_ASSERT(input_val >= zero && input_val <= one);
scalar_t log_input_val = std::log(input_val);
scalar_t log_1_minus_input_val = std::log(one - input_val);
log_input_val = std::max(log_input_val, neg_100);
log_1_minus_input_val = std::max(log_1_minus_input_val, neg_100);
return ((target_val - one) * log_1_minus_input_val) - (target_val * log_input_val);
}
);
});
if (weight.defined()) {
loss.mul_(weight);
}
if (reduction != at::Reduction::None) {
Tensor loss_reduced;
if (reduction == at::Reduction::Mean) {
loss_reduced = loss.mean();
} else if (reduction == at::Reduction::Sum) {
loss_reduced = loss.sum();
}
loss.resize_as_(loss_reduced).copy_(loss_reduced);
}
return loss;
}
Tensor binary_cross_entropy_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_input = at::empty_like(input);
return at::native::binary_cross_entropy_backward_out_cuda(
grad, input, target, weight, reduction, grad_input);
}
Tensor& binary_cross_entropy_backward_out_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, const c10::optional<Tensor>& weight_opt, int64_t reduction, Tensor& grad_input) {
// See [Note: hacky wrapper removal for optional tensor]
c10::MaybeOwned<Tensor> weight_maybe_owned = at::borrow_from_optional_tensor(weight_opt);
const Tensor& weight = *weight_maybe_owned;
Tensor grad_expand = grad.expand_as(input);
binary_cross_entropy_backward_out_kernel(grad_input, grad_expand, input, target);
if (weight.defined()) {
grad_input.mul_(weight);
}
if (reduction == at::Reduction::Mean) {
grad_input.div_(input.numel());
}
return grad_input;
}
// -----------------------------------
// nll_loss
// -----------------------------------
namespace {
constexpr int NLL_LOSS_THREADS = 32;
#define AT_DISPATCH_NLL_LOSS_INDEX_TYPES(TYPE, NAME, ...) \
[&] { \
at::ScalarType _it = TYPE; \
RECORD_KERNEL_FUNCTION_DTYPE(NAME, _it) \
switch (_it) { \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Byte, uint8_t, index_t, __VA_ARGS__) \
AT_PRIVATE_CASE_TYPE_USING_HINT(NAME, at::ScalarType::Long, int64_t, index_t, __VA_ARGS__)\
default: \
AT_ERROR(#NAME, " not implemented for '", toString(_it), "'"); \
} \
}()
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_no_reduce_cuda_kernel(
int64_t batch_size,
PackedTensorAccessor64<scalar_t, 2> input,
index_t* target,
scalar_t* output,
scalar_t* weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
output[index] = static_cast<scalar_t>(0);
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
auto cur_weight =
weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
output[index] = -cur_weight * input[index][cur_target];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_1d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int n_classes,
int64_t ignore_index) {
CUDA_KERNEL_ASSERT(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0);
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto cur_weight = weights != nullptr ? weights[t] : scalar_t{1};
*total_weight = cur_weight;
if (size_average) {
// If we try to normalize a zero then we return a NaN
if (cur_weight == 0) {
*output = std::numeric_limits<scalar_t>::quiet_NaN();
} else {
*output = -input[t];
}
} else {
*output = -cur_weight * input[t];
}
} else {
    // If the only element was omitted, we get 0. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
*output = scalar_t{0};
}
}
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void nll_loss_forward_reduce_cuda_kernel_2d(
scalar_t* output,
scalar_t* total_weight,
scalar_t* input,
index_t* target,
scalar_t* weights,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
// NOLINTNEXTLINE(cppcoreguidelines-init-variables)
__shared__ accscalar_t sh_inputs[NLL_LOSS_THREADS],
acc_weight[NLL_LOSS_THREADS];
sh_inputs[threadIdx.x] = static_cast<accscalar_t>(0);
acc_weight[threadIdx.x] = static_cast<accscalar_t>(0);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
scalar_t cur_weight =
weights != nullptr ? weights[t] : static_cast<scalar_t>(1);
sh_inputs[threadIdx.x] -= input[i * ndim + t] * cur_weight;
acc_weight[threadIdx.x] += cur_weight;
}
}
__syncthreads();
if (threadIdx.x == 0) {
accscalar_t output_acc = 0;
accscalar_t total_weight_acc = 0;
for (int i = 0; i < NLL_LOSS_THREADS; ++i) {
output_acc += sh_inputs[i];
total_weight_acc += acc_weight[i];
}
*total_weight = static_cast<scalar_t>(total_weight_acc);
if (size_average) {
*output = static_cast<scalar_t>(output_acc / total_weight_acc);
} else {
*output = static_cast<scalar_t>(output_acc);
}
}
}
void nll_loss_forward_out_cuda_template(
const Tensor& output,
const Tensor& total_weight,
const Tensor& input_,
const Tensor& target_,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto input = *input_.expect_contiguous();
auto target = *target_.expect_contiguous();
int64_t n_classes = input.size(-1);
int64_t n_dims = input.dim();
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == Reduction::None && n_dims == 2) {
at::native::resize_output(output, {batch_size});
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with
// 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_no_reduce_cuda_kernel_index",
[&] {
nll_loss_forward_no_reduce_cuda_kernel<scalar_t, index_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
input.packed_accessor64<scalar_t, 2>(),
target.data_ptr<index_t>(),
output.data_ptr<scalar_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return;
}
// produce scalar outputs for the reduction case
at::native::resize_output(output, {});
total_weight.resize_({});
if (target.numel() == 0) {
// Here target (and input) have zero elements
// Mean reduction on empty tensors produces NaN. See the discussion in
// https://github.com/pytorch/pytorch/pull/64572#issuecomment-926504162
if (reduction == Reduction::Mean) {
output.fill_(std::numeric_limits<double>::quiet_NaN());
} else {
output.zero_();
}
total_weight.zero_();
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_1d_index",
[&] {
nll_loss_forward_reduce_cuda_kernel_1d<scalar_t, index_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
} else if (n_dims == 2) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_forward_reduce_cuda_kernel_2d_index",
[&] {
using accscalar_t = at::acc_type<scalar_t, /*is_cuda*/true>;
nll_loss_forward_reduce_cuda_kernel_2d<scalar_t, accscalar_t, index_t>
<<<1,
NLL_LOSS_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
output.data_ptr<scalar_t>(),
total_weight.data_ptr<scalar_t>(),
input.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight_.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_no_reduce_cuda_kernel(
int batch_size,
index_t *target,
PackedTensorAccessor64<scalar_t, 1> grad_output,
PackedTensorAccessor64<scalar_t, 2> grad_input,
scalar_t *weights,
int n_classes,
int ignore_index) {
CUDA_KERNEL_LOOP(index, batch_size) {
int cur_target = target[index];
if (cur_target == ignore_index) {
continue;
}
CUDA_KERNEL_ASSERT(cur_target >= 0 && cur_target < n_classes);
scalar_t weight = weights != nullptr ? weights[cur_target] : static_cast<scalar_t>(1);
grad_input[index][cur_target] = -weight * grad_output[index];
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_1d(
scalar_t *grad_input,
scalar_t *grad_output,
scalar_t *weights,
index_t *target,
scalar_t *total_weight,
bool size_average,
int n_classes,
int64_t ignore_index
) {
int t = static_cast<int>(*target);
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
grad_input[t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
template <typename scalar_t, typename index_t>
__global__ void nll_loss_backward_reduce_cuda_kernel_2d(
scalar_t* grad_input,
scalar_t* grad_output,
index_t* target,
scalar_t* weights,
scalar_t* total_weight,
bool size_average,
int nframe,
int ndim,
int n_classes,
int64_t ignore_index) {
const auto grad = -(size_average ? *grad_output / *total_weight
: *grad_output);
for (int i = threadIdx.x; i < nframe; i += NLL_LOSS_THREADS) {
int t = target[i];
if (t != static_cast<int>(ignore_index)) {
CUDA_KERNEL_ASSERT(t >= 0 && t < n_classes);
grad_input[i * ndim + t] = weights != nullptr ? weights[t] * grad
: grad;
}
}
}
void nll_loss_backward_out_cuda_template(
const Tensor& grad_input_,
const Tensor& grad_output_,
const Tensor& input_,
const Tensor& target_,
const Tensor& total_weight,
const Tensor& weight,
int64_t reduction,
int64_t ignore_index) {
auto target = *target_.expect_contiguous();
auto input = *input_.expect_contiguous();
auto grad_input = *grad_input_.expect_contiguous();
auto grad_output = *grad_output_.expect_contiguous();
int64_t n_dims = input.dim();
int64_t n_classes = input.size(-1);
int64_t batch_size = n_dims == 1 ? 1 : input.size(0);
auto weight_ = weight.defined() ? weight.contiguous() : weight;
if (reduction == at::Reduction::None && n_dims == 2) {
if (batch_size == 0) {
// This guards from unnecessary operations and launching CUDA kernel with 0 blocks.
return;
}
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_no_reduce_cuda_kernel_index",
[&] {
nll_loss_backward_no_reduce_cuda_kernel<scalar_t, index_t>
<<<at::cuda::detail::GET_BLOCKS(batch_size),
at::cuda::detail::CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
batch_size,
target.data_ptr<index_t>(),
grad_output.packed_accessor64<scalar_t, 1>(),
grad_input.packed_accessor64<scalar_t, 2>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return;
}
if (n_dims == 1) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_1d_index",
[&] {
nll_loss_backward_reduce_cuda_kernel_1d<scalar_t, index_t>
<<<1, 1, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>()
: nullptr,
target.data_ptr<index_t>(),
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
input.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d",
[&] {
AT_DISPATCH_NLL_LOSS_INDEX_TYPES(
target.scalar_type(),
"nll_loss_backward_reduce_cuda_kernel_2d_index",
[&] {
nll_loss_backward_reduce_cuda_kernel_2d<scalar_t, index_t>
<<<1, NLL_LOSS_THREADS, 0, at::cuda::getCurrentCUDAStream()>>>(
grad_input.data_ptr<scalar_t>(),
grad_output.data_ptr<scalar_t>(),
target.data_ptr<index_t>(),
weight.defined() ? weight_.data_ptr<scalar_t>() : nullptr,
total_weight.data_ptr<scalar_t>(),
reduction == at::Reduction::Mean,
input.size(0),
input.size(1),
n_classes,
ignore_index);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
}
}
#undef AT_DISPATCH_NLL_LOSS_INDEX_TYPES
} // namespace
TORCH_IMPL_FUNC(nll_loss_forward_out_cuda)
(const Tensor& self,
const Tensor& target,
const OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& output,
const Tensor& total_weight) {
const Tensor& weight = weight_opt.getTensorRef();
nll_loss_forward_out_cuda_template(
output, total_weight, self, target, weight, reduction, ignore_index);
}
TORCH_IMPL_FUNC(nll_loss_backward_out_cuda)
(const Tensor& grad_output,
const Tensor& self,
const Tensor& target,
OptionalTensorRef weight_opt,
int64_t reduction,
int64_t ignore_index,
const Tensor& total_weight,
const Tensor& grad_input) {
const Tensor& weight = weight_opt.getTensorRef();
grad_input.zero_();
nll_loss_backward_out_cuda_template(
grad_input,
grad_output,
self,
target,
total_weight,
weight,
reduction,
ignore_index);
}
}} // namespace at::native
|
58b9a692e3c2b9597a21bbadaf771454d181ed58.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "computeCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
hipMalloc(&Params, XSIZE*YSIZE);
const float *uproj = NULL;
hipMalloc(&uproj, XSIZE*YSIZE);
const float *mu = NULL;
hipMalloc(&mu, XSIZE*YSIZE);
const float *W = NULL;
hipMalloc(&W, XSIZE*YSIZE);
const int *ioff = NULL;
hipMalloc(&ioff, XSIZE*YSIZE);
const bool *iW = NULL;
hipMalloc(&iW, XSIZE*YSIZE);
float *cmax = NULL;
hipMalloc(&cmax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((computeCost), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,uproj,mu,W,ioff,iW,cmax);
hipDeviceSynchronize();
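// warm-up: launch the kernel 10 times before timing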
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((computeCost), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,uproj,mu,W,ioff,iW,cmax);
}
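// timed region: 1000 launches measured with steady_clock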
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((computeCost), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,uproj,mu,W,ioff,iW,cmax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 58b9a692e3c2b9597a21bbadaf771454d181ed58.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "computeCost.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const double *Params = NULL;
cudaMalloc(&Params, XSIZE*YSIZE);
const float *uproj = NULL;
cudaMalloc(&uproj, XSIZE*YSIZE);
const float *mu = NULL;
cudaMalloc(&mu, XSIZE*YSIZE);
const float *W = NULL;
cudaMalloc(&W, XSIZE*YSIZE);
const int *ioff = NULL;
cudaMalloc(&ioff, XSIZE*YSIZE);
const bool *iW = NULL;
cudaMalloc(&iW, XSIZE*YSIZE);
float *cmax = NULL;
cudaMalloc(&cmax, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
computeCost<<<gridBlock,threadBlock>>>(Params,uproj,mu,W,ioff,iW,cmax);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
computeCost<<<gridBlock,threadBlock>>>(Params,uproj,mu,W,ioff,iW,cmax);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
computeCost<<<gridBlock,threadBlock>>>(Params,uproj,mu,W,ioff,iW,cmax);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
df8408fb3741ec5292cc06a63126d826cd3b5e82.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void SpaceDiscretization_gpu( float *left,
float *right,
const float *cellLeft, const float *cellRight,
const float *edgeFluxes,
const float *bathySource,
const float *edgeNormals, const int *isRightBoundary,
const float *cellVolumes0,
const float *cellVolumes1) {
if ((cellLeft[0] > EPS) || (cellRight[0] > EPS)){
left[0] -= (edgeFluxes[0])/cellVolumes0[0];
left[1] -= (edgeFluxes[1] + bathySource[0] * edgeNormals[0])/cellVolumes0[0];
left[2] -= (edgeFluxes[2] + bathySource[0] * edgeNormals[1])/cellVolumes0[0];
left[1] += (bathySource[2] *edgeNormals[0])/cellVolumes0[0];
left[2] += (bathySource[2] *edgeNormals[1])/cellVolumes0[0];
}else{
left[0] -= 0.0f;
left[1] -= 0.0f;
left[2] -= 0.0f;
}
if (!*isRightBoundary) {
if ((cellLeft[0] > EPS) || (cellRight[0] > EPS)){
right[0] += edgeFluxes[0]/cellVolumes1[0];
right[1] += (edgeFluxes[1] + bathySource[1] * edgeNormals[0])/cellVolumes1[0];
right[2] += (edgeFluxes[2] + bathySource[1] * edgeNormals[1])/cellVolumes1[0];
right[1] -= (bathySource[3] *edgeNormals[0])/cellVolumes1[0];
right[2] -= (bathySource[3] *edgeNormals[1])/cellVolumes1[0];
}else{
right[0] += 0.0f;
right[1] += 0.0f;
right[2] += 0.0f;
}
}
}
// CUDA kernel function
__global__ void op_cuda_SpaceDiscretization(
float *__restrict ind_arg0,
const float *__restrict ind_arg1,
const float *__restrict ind_arg2,
const int *__restrict opDat0Map,
const float *__restrict arg4,
const float *__restrict arg5,
const float *__restrict arg6,
const int *__restrict arg7,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg0_l[4];
float arg1_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg0_l[d] = ZERO_float;
}
for ( int d=0; d<4; d++ ){
arg1_l[d] = ZERO_float;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
//user-supplied kernel call
SpaceDiscretization_gpu(arg0_l,
arg1_l,
ind_arg1+map0idx*4,
ind_arg1+map1idx*4,
arg4+(n+offset_b)*3,
arg5+(n+offset_b)*4,
arg6+(n+offset_b)*2,
arg7+(n+offset_b)*1,
ind_arg2+map0idx*1,
ind_arg2+map1idx*1);
col2 = colors[n+offset_b];
}
//store local variables
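    // Colored update: edges of the same color never share a cell, so the
    // read-modify-write below is race-free within each color.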
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg0_l[0] += ind_arg0[0+map0idx*4];
arg0_l[1] += ind_arg0[1+map0idx*4];
arg0_l[2] += ind_arg0[2+map0idx*4];
arg0_l[3] += ind_arg0[3+map0idx*4];
ind_arg0[0+map0idx*4] = arg0_l[0];
ind_arg0[1+map0idx*4] = arg0_l[1];
ind_arg0[2+map0idx*4] = arg0_l[2];
ind_arg0[3+map0idx*4] = arg0_l[3];
arg1_l[0] += ind_arg0[0+map1idx*4];
arg1_l[1] += ind_arg0[1+map1idx*4];
arg1_l[2] += ind_arg0[2+map1idx*4];
arg1_l[3] += ind_arg0[3+map1idx*4];
ind_arg0[0+map1idx*4] = arg1_l[0];
ind_arg0[1+map1idx*4] = arg1_l[1];
ind_arg0[2+map1idx*4] = arg1_l[2];
ind_arg0[3+map1idx*4] = arg1_l[3];
}
__syncthreads();
}
}
}
//host stub function
void op_par_loop_SpaceDiscretization(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8,
op_arg arg9){
int nargs = 10;
op_arg args[10];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
args[9] = arg9;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(25);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[25].name = name;
OP_kernels[25].count += 1;
int ninds = 3;
int inds[10] = {0,0,1,1,-1,-1,-1,-1,2,2};
if (OP_diags>2) {
printf(" kernel routine with indirection: SpaceDiscretization\n");
}
//get plan
#ifdef OP_PART_SIZE_25
int part_size = OP_PART_SIZE_25;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_25
int nthread = OP_BLOCK_SIZE_25;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
hipLaunchKernelGGL(( op_cuda_SpaceDiscretization), dim3(nblocks),dim3(nthread), 0, 0,
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg8.data_d,
arg0.map_data_d,
(float*)arg4.data_d,
(float*)arg5.data_d,
(float*)arg6.data_d,
(int*)arg7.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[25].transfer += Plan->transfer;
OP_kernels[25].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[25].time += wall_t2 - wall_t1;
}
| df8408fb3741ec5292cc06a63126d826cd3b5e82.cu | //
// auto-generated by op2.py
//
//user function
__device__ void SpaceDiscretization_gpu( float *left,
float *right,
const float *cellLeft, const float *cellRight,
const float *edgeFluxes,
const float *bathySource,
const float *edgeNormals, const int *isRightBoundary,
const float *cellVolumes0,
const float *cellVolumes1) {
if ((cellLeft[0] > EPS) || (cellRight[0] > EPS)){
left[0] -= (edgeFluxes[0])/cellVolumes0[0];
left[1] -= (edgeFluxes[1] + bathySource[0] * edgeNormals[0])/cellVolumes0[0];
left[2] -= (edgeFluxes[2] + bathySource[0] * edgeNormals[1])/cellVolumes0[0];
left[1] += (bathySource[2] *edgeNormals[0])/cellVolumes0[0];
left[2] += (bathySource[2] *edgeNormals[1])/cellVolumes0[0];
}else{
left[0] -= 0.0f;
left[1] -= 0.0f;
left[2] -= 0.0f;
}
if (!*isRightBoundary) {
if ((cellLeft[0] > EPS) || (cellRight[0] > EPS)){
right[0] += edgeFluxes[0]/cellVolumes1[0];
right[1] += (edgeFluxes[1] + bathySource[1] * edgeNormals[0])/cellVolumes1[0];
right[2] += (edgeFluxes[2] + bathySource[1] * edgeNormals[1])/cellVolumes1[0];
right[1] -= (bathySource[3] *edgeNormals[0])/cellVolumes1[0];
right[2] -= (bathySource[3] *edgeNormals[1])/cellVolumes1[0];
}else{
right[0] += 0.0f;
right[1] += 0.0f;
right[2] += 0.0f;
}
}
}
// CUDA kernel function
__global__ void op_cuda_SpaceDiscretization(
float *__restrict ind_arg0,
const float *__restrict ind_arg1,
const float *__restrict ind_arg2,
const int *__restrict opDat0Map,
const float *__restrict arg4,
const float *__restrict arg5,
const float *__restrict arg6,
const int *__restrict arg7,
int block_offset,
int *blkmap,
int *offset,
int *nelems,
int *ncolors,
int *colors,
int nblocks,
int set_size) {
float arg0_l[4];
float arg1_l[4];
__shared__ int nelems2, ncolor;
__shared__ int nelem, offset_b;
extern __shared__ char shared[];
if (blockIdx.x+blockIdx.y*gridDim.x >= nblocks) {
return;
}
if (threadIdx.x==0) {
//get sizes and shift pointers and direct-mapped data
int blockId = blkmap[blockIdx.x + blockIdx.y*gridDim.x + block_offset];
nelem = nelems[blockId];
offset_b = offset[blockId];
nelems2 = blockDim.x*(1+(nelem-1)/blockDim.x);
ncolor = ncolors[blockId];
}
__syncthreads(); // make sure all of above completed
for ( int n=threadIdx.x; n<nelems2; n+=blockDim.x ){
int col2 = -1;
int map0idx;
int map1idx;
if (n<nelem) {
//initialise local variables
for ( int d=0; d<4; d++ ){
arg0_l[d] = ZERO_float;
}
for ( int d=0; d<4; d++ ){
arg1_l[d] = ZERO_float;
}
map0idx = opDat0Map[n + offset_b + set_size * 0];
map1idx = opDat0Map[n + offset_b + set_size * 1];
//user-supplied kernel call
SpaceDiscretization_gpu(arg0_l,
arg1_l,
ind_arg1+map0idx*4,
ind_arg1+map1idx*4,
arg4+(n+offset_b)*3,
arg5+(n+offset_b)*4,
arg6+(n+offset_b)*2,
arg7+(n+offset_b)*1,
ind_arg2+map0idx*1,
ind_arg2+map1idx*1);
col2 = colors[n+offset_b];
}
//store local variables
for ( int col=0; col<ncolor; col++ ){
if (col2==col) {
arg0_l[0] += ind_arg0[0+map0idx*4];
arg0_l[1] += ind_arg0[1+map0idx*4];
arg0_l[2] += ind_arg0[2+map0idx*4];
arg0_l[3] += ind_arg0[3+map0idx*4];
ind_arg0[0+map0idx*4] = arg0_l[0];
ind_arg0[1+map0idx*4] = arg0_l[1];
ind_arg0[2+map0idx*4] = arg0_l[2];
ind_arg0[3+map0idx*4] = arg0_l[3];
arg1_l[0] += ind_arg0[0+map1idx*4];
arg1_l[1] += ind_arg0[1+map1idx*4];
arg1_l[2] += ind_arg0[2+map1idx*4];
arg1_l[3] += ind_arg0[3+map1idx*4];
ind_arg0[0+map1idx*4] = arg1_l[0];
ind_arg0[1+map1idx*4] = arg1_l[1];
ind_arg0[2+map1idx*4] = arg1_l[2];
ind_arg0[3+map1idx*4] = arg1_l[3];
}
__syncthreads();
}
}
}
//host stub function
void op_par_loop_SpaceDiscretization(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4,
op_arg arg5,
op_arg arg6,
op_arg arg7,
op_arg arg8,
op_arg arg9){
int nargs = 10;
op_arg args[10];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
args[5] = arg5;
args[6] = arg6;
args[7] = arg7;
args[8] = arg8;
args[9] = arg9;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(25);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[25].name = name;
OP_kernels[25].count += 1;
int ninds = 3;
int inds[10] = {0,0,1,1,-1,-1,-1,-1,2,2};
if (OP_diags>2) {
printf(" kernel routine with indirection: SpaceDiscretization\n");
}
//get plan
#ifdef OP_PART_SIZE_25
int part_size = OP_PART_SIZE_25;
#else
int part_size = OP_part_size;
#endif
int set_size = op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
op_plan *Plan = op_plan_get(name,set,part_size,nargs,args,ninds,inds);
//execute plan
int block_offset = 0;
for ( int col=0; col<Plan->ncolors; col++ ){
if (col==Plan->ncolors_core) {
op_mpi_wait_all_cuda(nargs, args);
}
#ifdef OP_BLOCK_SIZE_25
int nthread = OP_BLOCK_SIZE_25;
#else
int nthread = OP_block_size;
#endif
dim3 nblocks = dim3(Plan->ncolblk[col] >= (1<<16) ? 65535 : Plan->ncolblk[col],
Plan->ncolblk[col] >= (1<<16) ? (Plan->ncolblk[col]-1)/65535+1: 1, 1);
if (Plan->ncolblk[col] > 0) {
op_cuda_SpaceDiscretization<<<nblocks,nthread>>>(
(float *)arg0.data_d,
(float *)arg2.data_d,
(float *)arg8.data_d,
arg0.map_data_d,
(float*)arg4.data_d,
(float*)arg5.data_d,
(float*)arg6.data_d,
(int*)arg7.data_d,
block_offset,
Plan->blkmap,
Plan->offset,
Plan->nelems,
Plan->nthrcol,
Plan->thrcol,
Plan->ncolblk[col],
set->size+set->exec_size);
}
block_offset += Plan->ncolblk[col];
}
OP_kernels[25].transfer += Plan->transfer;
OP_kernels[25].transfer2 += Plan->transfer2;
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[25].time += wall_t2 - wall_t1;
}
|
2ec30268cc37a11eb205964c5eacd4a2371acf18.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/copying.hpp>
#include <utilities/legacy/error_utils.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <memory>
namespace cudf {
namespace experimental {
namespace detail {
struct dispatch_map_type {
template <typename map_type, std::enable_if_t<std::is_integral<map_type>::value
and not std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
std::unique_ptr<table> destination_table;
if (check_bounds) {
cudf::size_type begin = (allow_negative_indices) ? -source_table.num_rows() : 0;
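      // Every map entry must fall in [begin, num_rows); note that this count_if runs on
      // the default stream rather than the stream argument.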
CUDF_EXPECTS(
num_destination_rows == thrust::count_if(
rmm::exec_policy()->on(0),
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
bounds_checker<map_type>{begin, source_table.num_rows()}),
"Index out of bounds.");
}
if (allow_negative_indices) {
destination_table =
gather(source_table,
thrust::make_transform_iterator(
gather_map.begin<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
thrust::make_transform_iterator(
gather_map.end<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
ignore_out_of_bounds,
mr,
stream
);
}
else {
destination_table =
gather(source_table,
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
ignore_out_of_bounds,
mr,
stream
);
}
return destination_table;
}
template <typename map_type, std::enable_if_t<not std::is_integral<map_type>::value
or std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds, bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0) {
CUDF_FAIL("Gather map must be an integral type.");
}
};
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
bool check_bounds, bool ignore_out_of_bounds,
bool allow_negative_indices,
rmm::mr::device_memory_resource* mr,
hipStream_t stream) {
CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls");
std::unique_ptr<table> destination_table =
cudf::experimental::type_dispatcher(gather_map.type(), dispatch_map_type{},
source_table, gather_map,
gather_map.size(),
check_bounds, ignore_out_of_bounds,
allow_negative_indices,
mr,
stream);
return destination_table;
}
} // namespace detail
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
bool check_bounds, rmm::mr::device_memory_resource* mr) {
CUDF_FUNC_RANGE();
return detail::gather(source_table, gather_map, check_bounds, false, true, mr);
}
} // namespace experimental
} // namespace cudf
| 2ec30268cc37a11eb205964c5eacd4a2371acf18.cu | #include <cudf/detail/gather.cuh>
#include <cudf/detail/gather.hpp>
#include <cudf/types.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cudf/table/table_view.hpp>
#include <cudf/table/table.hpp>
#include <cudf/copying.hpp>
#include <utilities/legacy/error_utils.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <thrust/count.h>
#include <memory>
namespace cudf {
namespace experimental {
namespace detail {
struct dispatch_map_type {
template <typename map_type, std::enable_if_t<std::is_integral<map_type>::value
and not std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table,
column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds,
bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
std::unique_ptr<table> destination_table;
if (check_bounds) {
cudf::size_type begin = (allow_negative_indices) ? -source_table.num_rows() : 0;
CUDF_EXPECTS(
num_destination_rows == thrust::count_if(
rmm::exec_policy()->on(0),
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
bounds_checker<map_type>{begin, source_table.num_rows()}),
"Index out of bounds.");
}
if (allow_negative_indices) {
destination_table =
gather(source_table,
thrust::make_transform_iterator(
gather_map.begin<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
thrust::make_transform_iterator(
gather_map.end<map_type>(),
index_converter<map_type>{source_table.num_rows()}),
ignore_out_of_bounds,
mr,
stream
);
}
else {
destination_table =
gather(source_table,
gather_map.begin<map_type>(),
gather_map.end<map_type>(),
ignore_out_of_bounds,
mr,
stream
);
}
return destination_table;
}
template <typename map_type, std::enable_if_t<not std::is_integral<map_type>::value
or std::is_same<map_type, bool>::value>* = nullptr>
std::unique_ptr<table> operator()(table_view const& source_table, column_view const& gather_map,
size_type num_destination_rows, bool check_bounds,
bool ignore_out_of_bounds, bool allow_negative_indices = false,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0) {
CUDF_FAIL("Gather map must be an integral type.");
}
};
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
bool check_bounds, bool ignore_out_of_bounds,
bool allow_negative_indices,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream) {
CUDF_EXPECTS(gather_map.has_nulls() == false, "gather_map contains nulls");
std::unique_ptr<table> destination_table =
cudf::experimental::type_dispatcher(gather_map.type(), dispatch_map_type{},
source_table, gather_map,
gather_map.size(),
check_bounds, ignore_out_of_bounds,
allow_negative_indices,
mr,
stream);
return destination_table;
}
} // namespace detail
std::unique_ptr<table> gather(table_view const& source_table, column_view const& gather_map,
bool check_bounds, rmm::mr::device_memory_resource* mr) {
CUDF_FUNC_RANGE();
return detail::gather(source_table, gather_map, check_bounds, false, true, mr);
}
} // namespace experimental
} // namespace cudf
|
0ba9789ffbd891dbc941e61464c6d3caf6ddadbb.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "__embedmat2d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
long long *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
int nrows = 1;
int ncols = 1;
int sortdown = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((__embedmat2d), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,nrows,ncols,sortdown);
hipDeviceSynchronize();
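// warm-up launches before the timed loop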
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((__embedmat2d), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,nrows,ncols,sortdown);
}
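// time 1000 kernel launches with steady_clock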
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((__embedmat2d), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,nrows,ncols,sortdown);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0ba9789ffbd891dbc941e61464c6d3caf6ddadbb.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "__embedmat2d.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;
int matrix_len = strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
long long *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
int nrows = 1;
int ncols = 1;
int sortdown = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
__embedmat2d<<<gridBlock,threadBlock>>>(a,b,nrows,ncols,sortdown);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
__embedmat2d<<<gridBlock,threadBlock>>>(a,b,nrows,ncols,sortdown);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
__embedmat2d<<<gridBlock,threadBlock>>>(a,b,nrows,ncols,sortdown);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
ee5509a5e1cd2db53dec0c742e5606760577344a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
float h_A[]= {
0.9216671137735739, 0.9259668865552235, 0.529123634423551, 0.8139788790699016, 0.7667188580252835, 0.6234550692398635, 0.955397987834083, 0.820606072273355, 0.9747061244685017, 0.5449702555130261, 0.6805020971641711, 0.9497025422008357, 0.916780885019636, 0.8645539840766937, 0.8372153538679408, 0.9210798097887585, 0.6595966859568572, 0.7438568195279243, 0.6568512551894685, 0.8097578195479649, 0.9271535983770643, 0.6225497080406767, 0.6873301194093778, 0.6165056765248539, 0.8847068122982977, 0.7582141469711399, 0.7395767647465418, 0.6611136441137175, 0.9321300700185404, 0.6419739391654381, 0.5436808070646744, 0.5257441567517374, 0.6706684093062416, 0.7943614601884345, 0.9445448811114667, 0.5312288371931326, 0.8973316812303838, 0.8285475251020219, 0.7693197108835361, 0.7692744370710647, 0.8397756880782328, 0.9703134985681496, 0.8687405941754177, 0.9363545857880495, 0.9971404633836063, 0.9207732759728204, 0.8356686979855378, 0.8314220262500929, 0.547744432464938, 0.7759442960879432, 0.5263370782925765, 0.6803881976025952, 0.627475904047562, 0.7064388760240781, 0.6000810643243775, 0.9265503671339475, 0.5531206395588608, 0.887921048121384, 0.9010937444937734, 0.5222886419208206, 0.5408002622709964, 0.7157656389351329, 0.5534566516358301, 0.9874200627165446, 0.902134078950197, 0.9810895696041988, 0.5049801008537993, 0.8630281218547906, 0.5553828576975883, 0.7370295178258517, 0.9717126354169827, 0.6784222509046566, 0.6606141613167389, 0.7945288835244577, 0.5932746738017876, 0.579526427543219, 0.545635777066586, 0.821650045836283, 0.5878948697864732, 0.878825744943236, 0.7025796047455404, 0.661734297988421, 0.7242959957669841, 0.9445216605562303, 0.5862038606389584, 0.989491515003887, 0.5468072240497905, 0.9108474255881974, 0.9183192993991219, 0.8776473473897614, 0.8945740763574814, 0.8603233887156332, 0.5112918003847812, 0.7260404448534639, 0.9591112748672374, 0.5204596294799271, 0.7100385315722304, 0.5875454200982264, 0.520802987034169, 0.8215246387542896, 0.8684118875071221, 0.6509443952099467, 0.8134680830432439, 0.8291182949363824, 0.5415536691807257, 0.9293060891204017, 0.7357379484063507, 0.5134029010619998, 0.9065767834173741, 0.8889754524883648, 0.885554902534046, 0.8135773114746045, 0.7507035933733358, 0.5284169643697278, 0.7187493248530583, 0.7424576920280601, 0.5450304533254877, 0.5216399995240176, 0.7265275079522786, 0.9692646488347687, 0.5140448906837614, 0.6930188605704032, 0.5555224592587304, 0.6196737179149066, 0.7195427098268359, 0.7265524586466348, 0.875509488610236, 0.8716799654144672, 0.6937948496109221, 0.6092611535767769, 0.9575060269956277, 0.7930928096830722, 0.7430820800080244, 0.5648397020331211, 0.8163789560504975, 0.6536467088410594, 0.5667497711557377, 0.7536364909472333, 0.5092390071313797, 0.9782514793147645, 0.87301224200337, 0.6829247934029222, 0.9875506720579211, 0.7800669791854336, 0.5798425246214773, 0.6191230385752904, 0.787236650392571, 0.7164397555074665, 0.6677423724617377, 0.6634329991787249, 0.8789784075550298, 0.9692840871528745, 0.8109734658789058, 0.7401964749116179, 0.7134218759037075, 0.9764613662058619, 0.5297657139330443, 0.843031744454393, 0.7699392321560388, 0.9583443082091971, 0.7210666788283511, 0.5945747308328897, 0.7655611313797848, 0.8449268833421848, 0.7998900801072616, 0.9830499597490183, 0.6036895091322468, 0.738989585187496, 0.5895632014670855, 0.8613121714427324, 0.9919103539094855, 0.593438313175546, 0.7431973473569677, 0.5103158472741681, 0.8427014740106553, 0.7620666948473269, 0.5799523202442651, 0.6042796044936183, 
0.9946302252046653, 0.501587392583402, 0.8471604574733634, 0.782111312383675, 0.7952005764441605, 0.6795735350900232, 0.8648981997895826, 0.7865921304660962, 0.5583183347803565, 0.7905854430937097, 0.5882550932626784, 0.8994313977567083, 0.5609909688264463, 0.7922913314087107, 0.7859382327481339, 0.6717845610807968, 0.5424367397651949, 0.7164827557302551, 0.8335159492427666, 0.9585170430237094, 0.9293006772505201, 0.9817072734800205, 0.5074702869792, 0.6025053310444526, 0.8918772904771506, 0.6392959616848508, 0.940220688563334, 0.8082450127848257, 0.8637567413045056, 0.6442898991173415, 0.5807528581745736, 0.653985274972398, 0.6006226251904051, 0.8458155592492118, 0.7024506344281192, 0.9381683848787739, 0.873424329865334, 0.8195045589889434, 0.833095348963804, 0.5628720439457162, 0.8532455069025757, 0.6070424549734865, 0.5339705154926186, 0.9468679280506351, 0.8865864462562572, 0.6490017190789034, 0.631924873676879, 0.6719102923644252, 0.6284355127575263, 0.874600323772994, 0.6754474434272286, 0.5886829184066007, 0.761505754682917, 0.8520438493382958, 0.7197303134007649, 0.5687539821828387, 0.8793917387225841, 0.6421222698287155, 0.6250531685567113, 0.970021670886096, 0.6341092319101063, 0.9511977851739615, 0.8173009861591181, 0.6733563389993713, 0.6591781046065724, 0.6364719993813477, 0.9020728451790798, 0.8630492836609356, 0.9767749619170898, 0.6448269404825091, 0.7853827619175091, 0.6532143736983875, 0.7880381764095202, 0.6757606205873756, 0.968222745990454, 0.9298255995504066, 0.5273021514559249, 0.8183816904590533, 0.7984771683680065, 0.8009571879603901, 0.8485324762541582, 0.5855194317801073, 0.7171189032651861, 0.7356352647548356, 0.5992976882353682, 0.6625907087474872, 0.9727704892550105, 0.5283728992484613, 0.9143658588696284, 0.6555492483666501, 0.9904883064507276, 0.5491229642506774, 0.7747194226975798, 0.8703680914870984, 0.555637432631982, 0.9242173691155551, 0.6725011587098031, 0.9432622373696058, 0.6530692820057429, 0.5610627291899675, 0.596617893571048, 0.68200111939416, 0.5199490339357886, 0.767796534610827, 0.7737318199710886, 0.7621488815348187, 0.762866296656554, 0.7397292035088329, 0.8824325191147508, 0.5220060883929296, 0.8612015256044017, 0.5675781505262227, 0.7837009509750368, 0.6321620745718581, 0.5772997978831373, 0.5419292708913468, 0.7235321943354327, 0.9477915035522984, 0.8021179009179382, 0.909730843780943, 0.5833259669181641, 0.9425017147454485, 0.894595081508261, 0.8909960203026366, 0.8401854519342586, 0.7976813415999713, 0.6875244905234732, 0.9945196388575134, 0.6785950597371104, 0.8017849880204823, 0.5473296936612452, 0.8011075458627961, 0.5157089543675681, 0.9173454113074128, 0.539016664199054, 0.9153521831226112, 0.7324681880351431, 0.649776626062358, 0.6290724974530969, 0.7544058818768641, 0.6891335869470445, 0.6002679162914446, 0.7837463066729005, 0.586224505493124, 0.6120664131484358, 0.5008220794298333, 0.9747794210880714, 0.6458858104855871, 0.906322031520653, 0.8737247593180527, 0.7714982736741113, 0.7085568638355604, 0.9990333211658603, 0.7640119328616719, 0.6887165766382841, 0.8663972341564308, 0.5845991124178218, 0.5637029040943806, 0.7346593530485449, 0.7891755139252982, 0.7709428836960096, 0.6406110713246898, 0.9441740739069884, 0.6081207187482933, 0.8944817121232926, 0.8080339967627799, 0.8930083440400667, 0.7045218910466198, 0.7902919287335399, 0.9594777990540604, 0.8059361521697647, 0.9557765750809311, 0.8176036545107347, 0.500529621179729, 0.6991048812058782, 0.9276082707765305, 0.641107504036851, 0.9777775532517992, 0.9626129252086482, 
0.9827579013481113, 0.7767985336583361, 0.6801057155465532, 0.9456954383489884, 0.681708523586133, 0.5478289650100251, 0.5210212258761937, 0.9232931819531824, 0.9750198747304095, 0.9887271980472914, 0.7775726456785058, 0.5559002140547222, 0.9030007752860083, 0.5254342784494093, 0.8069566704985469, 0.8482212219518261, 0.9450056053087255, 0.6922516509573722, 0.5366578854188919, 0.7859603969546526, 0.658756951715851, 0.6881638025132244, 0.7868757403865683, 0.5276991495376908, 0.7768594125195742, 0.7427509832434228, 0.5204001900306612, 0.6985499827822527, 0.6857784575835362, 0.8773720422084488, 0.8124085229980192, 0.8018952976818463, 0.6765048792585264, 0.7069779721987237, 0.6962624408127829, 0.6465318177278605, 0.6774667602833642, 0.6635379577432889, 0.6132462223745183, 0.5411691225315017, 0.833643556061747, 0.9991720993329812, 0.7725213506317767, 0.85907790946419, 0.8252719998338963, 0.6277951927768464, 0.8983378701888862, 0.9601948910970051, 0.8961300698794863, 0.9108312726534498, 0.6315999696626221, 0.9423590564795141, 0.5325946576517946, 0.849040346638893, 0.537148364005497, 0.7065723767965002, 0.8960343140720672, 0.6604650409526562, 0.9819507080954655, 0.5771271491953495, 0.9016900255814859, 0.7979078340497731, 0.6537775157313379, 0.7011071077150637, 0.9548623894281789, 0.8830138235912647, 0.9292150300356683, 0.5849341803762471, 0.7662603352806626, 0.8092041040491773, 0.5552713103276209, 0.9696961000728477, 0.7559463183851474, 0.99141408105196, 0.9363097025280913, 0.6436420695261068, 0.8252355973284545, 0.6787933654125349, 0.5334194975657831, 0.6829777700587618, 0.795145763407717, 0.5078471846363453, 0.9529816592066132, 0.9162126284449145, 0.7460541027992926, 0.960575964918089, 0.8629794063767344, 0.64890871485968, 0.7978403271210119, 0.7149411021312915, 0.800162738131813, 0.6145938813068041, 0.8973278134397308, 0.5305249880819985, 0.9693017167901166, 0.5965297151541311, 0.8948829235829735, 0.5747327758677809, 0.5482917489204502, 0.5811440962718639, 0.9640363242184689, 0.8758298245041101, 0.5776014297493163, 0.9888630721062226, 0.8526997795027349, 0.8363150569837465, 0.9450214356300706, 0.5481696306964134, 0.5901488162540396, 0.7850052098646734, 0.7909501124513845, 0.6336144491154045, 0.7626166729425833, 0.9003679640146613, 0.8698017432354006, 0.8375228698476831, 0.7362060656261855, 0.9327292206971012, 0.9865637851430111, 0.8589088716118296, 0.878639926837649, 0.8805491249095778, 0.7555255588595426, 0.5060910247106656, 0.7953013014674081, 0.533584602384032, 0.6076550544013842, 0.5386915517011587, 0.5255790731672769, 0.8978278600164893, 0.874102689119535, 0.9946029637623885, 0.6404247047043987, 0.7456119246635018, 0.5420222832508359, 0.5945748807479805, 0.6985173971019021, 0.628038333452225, 0.7180584909114431, 0.7832539302403989, 0.6358244004847529, 0.6822436564811202, 0.995775057430544, 0.6653434114675126, 0.5901907587565823, 0.6591988430900728, 0.9238206803506717, 0.5500180833871403, 0.7814947630638838, 0.6480839685324531, 0.5210074873723858, 0.810195192012664, 0.6661956240830971, 0.8941922598608865, 0.6124301660464329, 0.9924445288248418, 0.8729939057219813, 0.8013471592216475, 0.9449524963406729, 0.6748622109338589, 0.6471096458994134, 0.6038686077572357, 0.8148691916819335, 0.7249506525285494, 0.8198573107894667, 0.9689822119172304, 0.8088066066727744, 0.5314860905320825, 0.8459312941432855, 0.9259494861762931, 0.9813526212320538, 0.5108795770368002, 0.7296607999471256, 0.7026107666002752, 0.9232789414134731, 0.8033725745680294, 0.5908702428140199, 0.9361460801398024, 
0.74408436454596, 0.5894009881511981, 0.8665282500139995, 0.8362198036945517, 0.7813236795368261, 0.9561706558304732, 0.5432566222203219, 0.9895690452397705, 0.7968365965484724, 0.7452791417032931, 0.8281341979719787, 0.741180240135333, 0.6742534884708479, 0.7516041484214463, 0.7549631982368905, 0.5728064538341096, 0.6479400964895048, 0.9417429920690994, 0.7601396456767089, 0.6019577696564704, 0.8036791779050894, 0.9766538809998122, 0.8881973742027274, 0.9621635407323594, 0.7894556532235659, 0.899813817598998, 0.6979301484616577, 0.868343764613014, 0.7226658720677055, 0.8671543651906082, 0.9315829964026322, 0.9239443290873267, 0.7951213099549391, 0.9564218208074375, 0.5234178117816819, 0.6145821325806528, 0.6984054114075906, 0.6938317520203263, 0.5417998576723205, 0.9857771503128859, 0.9748240095967549, 0.9599600318457008, 0.9442725976223674, 0.7663957767719287, 0.8390376223782419, 0.6426285469064341, 0.8052665578898907, 0.8352712672517705, 0.9053455822976855, 0.7037565794225322, 0.9088258644660407, 0.5504093273498039, 0.8676760755498834, 0.6310533370087177, 0.5732195535080751, 0.6872822108757679, 0.7775485038250733, 0.9125699648696226, 0.960436363206784, 0.8988413329636344, 0.630282530154656, 0.9971667933259553, 0.6295110137134554, 0.726762252338643, 0.8663836403431763, 0.9163900500215992, 0.9234055643549428, 0.6480305534625808, 0.5496200094480316, 0.6379102508432486, 0.5105749550161138, 0.56832845402508, 0.8557972695862612, 0.8222521154912541, 0.7628575347814696, 0.6482748235352652, 0.7659260305993768, 0.7066273305339461, 0.9056450877183424, 0.9726087264481125, 0.5643989692027088, 0.8883352783105487, 0.621521268379231, 0.7811704837231688, 0.7235530890409703, 0.694897462091832, 0.5562282436776539, 0.5089829538380776, 0.5903699516309031, 0.6343146066607598, 0.8295241388710166, 0.6131798520928614, 0.8703904950966039, 0.8955380556638892, 0.7448742408448301, 0.8120571130259104, 0.5274140188603744, 0.6190941609608469, 0.8631061483216109, 0.9089289226812072, 0.7944174652477043, 0.5309113818506521, 0.8112294946280179, 0.5108559117688328, 0.8086458087207212, 0.7514928505011462, 0.9827453081133848, 0.9822163691498444, 0.8640624433042137, 0.6161505052370182, 0.8253614804851583, 0.8977122545550367, 0.7161619750604278, 0.8018502976667362, 0.5870588185437267, 0.820969004523819, 0.8409869639835703, 0.666795854649106, 0.9395030350229625, 0.6824938116071633, 0.9868827323452644, 0.6794221191901662, 0.729630126402192, 0.549366404230716, 0.6686336758858258, 0.7684044893923059, 0.9958793039239806, 0.8859762137139124, 0.6820876032248455, 0.8889634837189839, 0.7618696343333502, 0.7203059505078497, 0.5091125992371971, 0.5272768113981139, 0.8413652466668688, 0.6206955053685164, 0.686755831042797, 0.7745626307447179, 0.7831206389991652, 0.8275174522611409, 0.9792083885838145, 0.974905588431755, 0.9830206923053897, 0.9257713227026451, 0.878470293887782, 0.5739577418151882, 0.9229939055320724, 0.7947202750083115, 0.660523243284155, 0.676341101372957, 0.5788217788093197, 0.547254637391827, 0.7378906801138472, 0.5401205712414019, 0.9487351412024605, 0.9303657987330848, 0.6228935638348114, 0.9310965627926562, 0.740974648004825, 0.9656387925690042, 0.644245744131611, 0.5806758930420364, 0.7412969181322414, 0.9701938743992404, 0.8596678802192648, 0.9314912832974012, 0.5984016829852306, 0.8768192304455749, 0.546268425295013, 0.5118740025596062, 0.5292038870988811, 0.6130259284679909, 0.8326442940639478, 0.9899071518869458, 0.5408998564598531, 0.7783280325641129, 0.8351573277069222, 0.5042045347251798, 0.9795335322064445, 
0.8484564627556755, 0.9829354095928053, 0.9437571154489093, 0.5502528966250156, 0.7825765938341879, 0.7794124833264172, 0.7646336469137793, 0.8533342813312019, 0.8605595431710716, 0.702874029717541, 0.9042576622623957, 0.8642045106530776, 0.7931129163389936, 0.6358497708897893, 0.9052734891501634, 0.9386891094443137, 0.8695174695335605, 0.9119693254714878, 0.975204416405018, 0.5661694133390474, 0.6171068823784494, 0.88041026043042, 0.6906375673158645, 0.533618933915891, 0.954880441631982, 0.5222346563262485, 0.8426573513653492, 0.5464200177608265, 0.8642337086677473, 0.6436384313580408, 0.6913296673597247, 0.5066954581177479, 0.945660593361934, 0.5109180579107948, 0.5251830696447451, 0.9292990624602904, 0.7218149367294755, 0.6291839883994259, 0.882392443705077, 0.8685291183514765, 0.6971893137367153, 0.9795108541263691, 0.6794005379110508, 0.8968882477578769, 0.5385940249817389, 0.6698423831983245, 0.9907231821353459, 0.533335673324423, 0.9224016984335824, 0.9497854515481365, 0.5346639122504808, 0.5007762621759675, 0.537887498274092, 0.5185242539483925, 0.5587143070589908, 0.7673851288354463, 0.8206473968540803, 0.8147642572325544, 0.8548711463436075, 0.5382016692864937, 0.6851101132364461, 0.65194330950609, 0.5223271043064521, 0.603453533384509, 0.8190155985810478, 0.6934251320246801, 0.6184013478707955, 0.7582775646762517, 0.8471397520740511, 0.8473882424150514, 0.5188059369415656, 0.9749836136604184, 0.9545219256881399, 0.9715242891874024, 0.8104754985878846, 0.5029088920427389, 0.9851400643846813, 0.9289848672406951, 0.8468457279891457, 0.5174803805786039, 0.8790110414528569, 0.7606983889226775, 0.5384250074763902, 0.6756849705427539, 0.6191942580153251, 0.9647992525340439, 0.6015455307857078, 0.6843490560989096, 0.7321570585242833, 0.9531529169137323, 0.5908737003817123, 0.8074315248346582, 0.966526471095507, 0.8257513037492404, 0.7994036151801811, 0.8733447611366283, 0.5050614334859463, 0.5038429747091849, 0.5542977537646783, 0.9770140167260556, 0.8008256592081022, 0.9660876507953511, 0.5502938570579414, 0.9084347804450148, 0.9267648026056765, 0.825007474657154, 0.9518749409775471, 0.5306549690432831, 0.7891377669509345, 0.5581345004013425, 0.5651586687373928, 0.6470634654728478, 0.6244257144418144, 0.9380832982253642, 0.7786511316487337, 0.9225617861691511, 0.6536220941988623, 0.9512222355874635, 0.9177719847403267, 0.6430382156396826, 0.6320785579152588, 0.8670830915803722, 0.9161318618699865, 0.7077059927763325, 0.7012298601567291, 0.9037223067951062, 0.9558588577222036, 0.8249815957914077, 0.5000469528249509, 0.7191409224327417, 0.8818097062966732, 0.853534387453789, 0.9544091433704234, 0.688084638177503, 0.5731592091659972, 0.8766047416567597, 0.6222707839996628, 0.6209749449551556, 0.8200248700723859, 0.873429049653955, 0.7045833122130492, 0.5660229349993555, 0.7646959646116624, 0.7148810725577941, 0.8105834337970546, 0.8060720801406966, 0.5085858607781255, 0.5099329721340045, 0.5553284019543476, 0.7509586720006531, 0.7465057647272786, 0.5056654123091001, 0.9794532765316755, 0.5721059817264368, 0.6731997091408666, 0.5566621634346356, 0.835987694235389, 0.6892489203004126, 0.5983134235669962, 0.9019379558674235, 0.628891805416975, 0.8225510960500011, 0.6018118388380014, 0.7459145116974089, 0.7335382623954554, 0.5594763531643043, 0.8061429384462783, 0.8586505106471622, 0.6840731229131954, 0.7264516497766653, 0.7802173972037252, 0.9262043298891021, 0.5029120856790592, 0.8816737961250458, 0.5396912791196441, 0.8716927606245692, 0.5216223009499844, 0.8193335786457265, 
0.6965126542766815, 0.7857571833683645, 0.640701498493339, 0.6872471056551462, 0.8992170561823896, 0.948896232021339, 0.8181019673852571, 0.9288557544946763, 0.6194987388466021, 0.5499633253044904, 0.5985503359140414, 0.6944570143091652, 0.7277885116340552, 0.9316180922859238, 0.5895499714184234, 0.5517735224504497, 0.8815447255998352, 0.595504276749134, 0.9626862052108424, 0.8574837968436988, 0.6027632943070532, 0.5391643269047095, 0.7843694175083837, 0.7817072409659447, 0.7987365536575867, 0.9575769386195299, 0.837039755945976, 0.5912668960967795, 0.9595544604050215, 0.7651488597109803, 0.5102266173421838, 0.602453774707782, 0.5931335918947596, 0.6474215405287799, 0.9415459709398168, 0.5747896954089827, 0.8919229557666805, 0.7632923610551576, 0.678961137478115, 0.9888885036470201, 0.995575205012369, 0.9697335972260528, 0.5654598340889327, 0.6195395144642922, 0.5446093770023626, 0.6595011489244844, 0.7719678564260256, 0.5187010328409289, 0.672581104800426, 0.8347733862907214, 0.7102790758409926, 0.5318722366440634, 0.5176415593488158, 0.7168519118414326, 0.9103802301887599, 0.7748645485421517, 0.6557325481735871, 0.9488038778663602, 0.7832915049647408, 0.6565923199473312, 0.8333545207522052, 0.930835031204955, 0.5875871093109455, 0.7377030631955565, 0.5546759032272931, 0.9585304621990633, 0.7185398691508715, 0.6201637421665822, 0.9395025797669436, 0.7708952618723155, 0.9227900533550503, 0.6335835978590527, 0.5332313259341875, 0.57934820513474, 0.7373713596841834, 0.995044874346324, 0.9859078657766813, 0.7679507205962619, 0.6556474294677952, 0.9774276562049402, 0.5766423688830888, 0.8594824341409192, 0.8165963087399671, 0.7830655672196515, 0.7703237041573574, 0.9898213247891225, 0.868248106066982, 0.9914859626533479, 0.8506458964408128, 0.7051513094910113, 0.6440081183259749, 0.7390796513173647, 0.6316871168824272, 0.6380469165629579, 0.9676067549121243, 0.8473940876998567, 0.857481548548724, 0.9165726895338917, 0.6766997439462077, 0.8073860138839496, 0.883105263907328, 0.6318838965980128, 0.58933109972014, 0.6394829769196315, 0.5982245489836427, 0.8740871399670815, 0.9729931410451973, 0.8614022786734691, 0.9586729458690313, 0.8402993180167142, 0.84392688302327, 0.8455924455761777, 0.8340209229083142, 0.9889443457403624, 0.6869252000992945, 0.6756311369032765, 0.6909029240264284, 0.8024917780893785, 0.9861020083088645, 0.7391122847852283, 0.6245542816511129, 0.7048968841131351, 0.7267146842097345, 0.6948911026933527, 0.9656872150376348, 0.506583704876449, 0.8617454698287362, 0.6480245826611977, 0.782469315968259, 0.7961073029686467, 0.6873693278870787, 0.5460907942019348, 0.640731217745252, 0.6042275328810488, 0.9785317305439865, 0.6377238277506051, 0.7463989333608662, 0.9382189544466375, 0.6305470235915822, 0.9838993811049291, 0.876689826660352, 0.8970890185216724, 0.5967346010874301, 0.6939211072694553, 0.7635526500150227, 0.7919330994515923, 0.6921956716910898, 0.9325282265830159, 0.9668705977288559, 0.7944263249934285, 0.6603393909033087, 0.8760300225076659, 0.9758697842887976, 0.9833047954643165, 0.6614349100137609, 0.9042485486719412, 0.8250554687591679, 0.7496634456956157, 0.8601764267871554, 0.779516428932857, 0.7671342178079599, 0.9029361009479866, 0.7881706389254973, 0.6056854517614012, 0.8780734961617007, 0.5166106740563123, 0.5614031679632778, 0.962596040240443, 0.8589207264795227, 0.9479565835878379, 0.8596562775226775, 0.7544331633024149, 0.9612785571592898, 0.7741860603194581, 0.549175336699811, 0.7936852731156976, 0.5853513791378279, 0.998134315791664, 0.711483846535307, 
0.8673559282292914, 0.8894598442227005, 0.8773589263209093, 0.881494663413888, 0.7452420685822241, 0.5512879556910861, 0.6214452148327507, 0.5696569488592175, 0.9635768717746669, 0.7046256965837097, 0.8157666249205264, 0.6983666962065178, 0.7164080337373797, 0.6702456836432855, 0.9042459676381059, 0.5714586350381845, 0.7815952725144056, 0.9933507666806158, 0.9942893580798527, 0.668129771504675, 0.6963103861152862, 0.5211497188486723, 0.539778083054081, 0.582310844471948, 0.5280232017494186, 0.693891739477448, 0.9968940183494145, 0.9924484664817492, 0.5640056920098295, 0.5605519506270331, 0.616960658406681, 0.999694703254854, 0.5977582949070652, 0.5057688188506425, 0.6460174239578158, 0.9326932622601605, 0.9449011554292818, 0.9322562831303829, 0.9068408997770132, 0.7513069895362594, 0.8336057564032469, 0.7787897757051964, 0.5295226802818913, 0.9781323384521723, 0.9782738102557322, 0.8976582857709368, 0.5167486925069503, 0.8617552897210083, 0.8891166191830291, 0.5657298076417347, 0.7354210285119069, 0.7818965201627845, 0.7340462918447159, 0.7212394687479131, 0.809678601149967, 0.7237386473694959, 0.9433474349187121, 0.7699083509802724, 0.7031392330615165, 0.7152561340857625, 0.8949907980694933, 0.8917291202850698, 0.6144227047818664, 0.6301557441769972, 0.6382523577564776, 0.6448361445500618, 0.9523748853329643, 0.7857866305261341, 0.7621073911976689, 0.6815296155089878, 0.7060218158313946, 0.9318302465107239, 0.9201074573882175, 0.7770810032027037, 0.7824549612008919, 0.763206730849868, 0.9890220072315994, 0.945012657382885, 0.7127142864168685, 0.5803220582608013, 0.9127847442690014, 0.7255541855395691, 0.7179803446527518, 0.6181358062769367, 0.5690378138174337, 0.565847677508386, 0.5290919801062635, 0.5710930759624069, 0.511434734576071, 0.818842547572367, 0.7858129905385549, 0.9182710332208974, 0.6075024256757066, 0.7869372820948817, 0.5458093282964245, 0.8520944364109528, 0.5525418035225851, 0.9812542707851029, 0.884040582076986, 0.5745859897104377, 0.8504840920139489, 0.9206215105578315, 0.6291951772907747, 0.5243462342579568, 0.9055715504243531, 0.6503291486188121, 0.5901473399081442, 0.6866528933001382, 0.7663290043525336, 0.7620055640547345, 0.5168994304409589, 0.7517334877396036, 0.7949890872005867, 0.5349388057529925, 0.5277616015933263, 0.9284322734979356, 0.9811232191923034, 0.5442885411419439, 0.5595017344792519, 0.993815954837556, 0.7810080548808955, 0.7856324501105842, 0.5012366501761663, 0.9803587886521816, 0.8240725685662416, 0.8839051583036196, 0.8941503946504055, 0.5078471888124179, 0.7040107679954666, 0.8159866204594579, 0.8086634552343939, 0.5845548316101646, 0.5833594970761069, 0.6438083702895601, 0.9621142473399726, 0.5887027953050363, 0.8367053911240048, 0.9384007648485128, 0.5807622551576057, 0.5335251858730323, 0.8267063232498886, 0.8820131241617852, 0.6398507212209206, 0.7623700623931121, 0.6274467824547376, 0.7577903481228104, 0.7925391895925162, 0.6519646196311729, 0.5891308440120491, 0.7480206575704471, 0.7902085967334395, 0.8410974321779182, 0.7049684772071141, 0.789523225360353, 0.7157207947401047, 0.7658980110992637, 0.8516079454062001, 0.6255426072169553, 0.8043563762066168, 0.5400456411616363, 0.6867381562575054, 0.5034928218124997, 0.8627662552271966, 0.9816889539440665, 0.7224488953850285, 0.8028129099387518, 0.926211920471341, 0.6533133976381724, 0.6349295570521011, 0.7062523237090994, 0.8194789877545345, 0.9258251660465973, 0.7816123547860668, 0.6647091214875547, 0.5054349835130504, 0.6877772465152158, 0.8003420788471775, 0.570652294352374, 
0.8063778530152308, 0.7583271608009006, 0.5605529483246536, 0.7116497016906277, 0.5250817382996807, 0.9887335353867786, 0.8063895187721736, 0.9460790213818583, 0.9888244044170562, 0.6795760830128597, 0.7249964436695808, 0.517682564876955, 0.9935376750299947, 0.878730520174906, 0.8497997458804403, 0.8053957366752131, 0.5003997631094433, 0.7328467351592665, 0.7005335874576561, 0.6827573069567194, 0.8020961239126243, 0.734409734601474, 0.7007012949682707, 0.6227776423320106, 0.6414436612564258, 0.6298672100655409, 0.9787780844313614, 0.5101422585063303, 0.9438203981814932, 0.655130402810983, 0.7422560583474436, 0.7177621220472759, 0.9702414229485843, 0.8770775492870528, 0.5845088488331953, 0.9670097333516035, 0.7818517995734362, 0.5066541898217352, 0.7064959923419996, 0.7360859982588992, 0.8614799714434951, 0.6200478205584103, 0.8277998893089273, 0.6471747374819139, 0.5730222327666661, 0.6409383676870992, 0.9412412475420646, 0.8640779815808062, 0.9783604198017595, 0.8859664365440569, 0.7119407833502625, 0.7515597477249905, 0.5814193027485708, 0.7352542627187919, 0.6415218220120338, 0.5308149447742508, 0.700259908900839, 0.9623183835457934, 0.7714868814631721, 0.6331948504153944, 0.6302566311778752, 0.7091194208830387, 0.8334075704561972, 0.6290894531771447, 0.5097550781364463, 0.7948968670968533, 0.8051403743496856, 0.7668946394280755, 0.786475414184878, 0.8169047556243374, 0.5840506812552487, 0.8276685219935866, 0.6985162038202466, 0.7275029241100196, 0.6467098486684593, 0.7819833043112013, 0.7604952434776018, 0.9933768523900466, 0.8447041349515533, 0.5257066660473596, 0.7237902519880188, 0.570686018163802, 0.7893302513178245, 0.8033373057568056, 0.5505890603434999, 0.551380811706228, 0.611967739343661, 0.8248628700999945, 0.789202696737912, 0.8579453726858131, 0.5508403649190379, 0.5531953113602495, 0.6847139997961984, 0.9456311656620024, 0.5600180545404805, 0.6405261452836511, 0.6608320249870092, 0.6562723847634783, 0.8046366064712689, 0.9468514809791011, 0.5397890736886071, 0.6235742791055154, 0.8385732668737202, 0.5582805371261237, 0.7449589560164497, 0.8214747457273245, 0.6317841217302552, 0.5117410720111055, 0.8144050418268279, 0.8337382858617299, 0.7115122178537665, 0.8366687709708095, 0.9806400967361413, 0.8458037391362809, 0.6648409205906362, 0.9736155610418002, 0.8026780036911342, 0.7868520015613119, 0.9047925047718597, 0.7627141778132296, 0.7417107271895707, 0.9883599040220807, 0.7719154092065674, 0.838003122620097, 0.63127607774271, 0.7889407835454753, 0.5569777690385331, 0.9306060671447074, 0.9270907811139246, 0.959877121850353, 0.8694490536550761, 0.7527296001755716, 0.585339742432902, 0.5988141657271393, 0.9305511213237412, 0.7808102946270414, 0.719532052527251, 0.7457333979178264, 0.6492340645514302, 0.6288566934236766, 0.963549642326252, 0.7670054293487237, 0.5270397030277134, 0.7358888589585801, 0.6693393023864271, 0.9374624265574645, 0.7829386998521632, 0.7118286973083596, 0.6413636182582083, 0.8592802550053222, 0.7495177251303903, 0.575109366346383, 0.5382619854710495, 0.9146727321495629, 0.8072497878643471, 0.9261192387222685, 0.6557712500467408, 0.5626508223837102, 0.7694851719219118, 0.5327667025545326, 0.5574875857024476, 0.6551172676213356, 0.8954688318377988, 0.7419290343467295, 0.8270849167463623, 0.9686120814731446, 0.9014755464084104, 0.8622260641885435, 0.5943410459991977, 0.6544167469934972, 0.7405285407975928, 0.9858163950875409, 0.6417956173053092, 0.7009196215673653, 0.6942555671913129, 0.8623840165059408, 0.7419834721848009, 0.9171544865753265, 
0.9931705381797595, 0.7848053972320059, 0.7212536899577293, 0.5230993151430317, 0.612843348213165, 0.6563822224984408, 0.6478036035425043, 0.6022634679577985, 0.5506466136062316, 0.8195194881169214, 0.7928249592178214, 0.5825476206670253, 0.7589467887723953, 0.9341235463817583, 0.5963372666500124, 0.8376664250257624, 0.5020560574888158, 0.7460504424640362, 0.9038514224052916, 0.6708753390299005, 0.9508945759465965, 0.8691871715687083, 0.6156280825843977, 0.7442182447841424, 0.5620870130803717, 0.8014573096968337, 0.7572914656987377, 0.8905185928729915, 0.7411824542800673, 0.7278459706167695, 0.8616986102495499, 0.597572014029275, 0.8651861723454074, 0.5453022369105054, 0.7518083321984251, 0.853305900313726, 0.8000166594467777, 0.6780044686328024, 0.7411978454807818, 0.5290271366924479, 0.7321971833001963, 0.9328146010786914, 0.5059672278817323, 0.9363178181119695, 0.7612513393659137, 0.5169717343596048, 0.6710931300876426, 0.8237506040640201, 0.6705765542682307, 0.6467778845737362, 0.6221771667588778, 0.822860824079428, 0.5435053908200917, 0.5716925195309062, 0.7645177998717916, 0.6449767295070419, 0.7579266187107521, 0.6629883235187158, 0.9184980228703506, 0.6574156291761165, 0.5660335561644976, 0.7419334648549664, 0.5252361123604492, 0.7022133883263915, 0.6069530204484435, 0.840624832119031, 0.7554371424887718, 0.7031087273497425, 0.5675741256472842, 0.8940676464458657, 0.5481251188102441, 0.5186581229541638, 0.9736170451128141, 0.5804958334816349, 0.7667453136039686, 0.9296648667507496, 0.6458255009220575, 0.879678370689515, 0.9861491219501701, 0.7738940045904027, 0.7744105135443132, 0.7062041696045598, 0.9064574397944303, 0.5518477568334876, 0.9056635792781211, 0.5319174933211595, 0.7096837405740473, 0.6341911676433946, 0.8267545898309308, 0.5226783712536641, 0.7352479243151311, 0.7004099202178383, 0.74560488159848, 0.6323187388048438, 0.6840540504525183, 0.8292724078193561, 0.9067576699178141, 0.5005511832257777, 0.6379702186213629, 0.7847885270550696, 0.5161455469585536, 0.9629587706413951, 0.7991563015717698, 0.681595782423281, 0.7470784788414035, 0.8737848681573055, 0.5251658723308554, 0.7620679009362251, 0.7067204054200809, 0.5511302082916567, 0.7934760188962106, 0.610023102551059, 0.8735471227313313, 0.8429515599659176, 0.8680832524814843, 0.8226505656133603, 0.6540461757995218, 0.5187501961573847, 0.6665947552978801, 0.9842064223872922, 0.9213538024990637, 0.6262383774723317, 0.6104757781247561, 0.9376298558710241, 0.7684369311756902, 0.6319620588611553, 0.7047816270924204, 0.7384916935177619, 0.8794297902885422, 0.9070104131197243, 0.9094709287170251, 0.8834257124375424, 0.8616222950029991, 0.7026086056355892, 0.752608374756182, 0.8223035871898103, 0.881229670293258, 0.8913339227594382, 0.7141692933758009, 0.6624934972432198, 0.5275303174351731, 0.5913473780996347, 0.8174407243924495, 0.6817949398940973, 0.6017574659614358, 0.679858664056066, 0.8824096018198546, 0.8154708737059717, 0.5286129345973527, 0.7051693043391822, 0.7457709772260301, 0.7284002144937857, 0.5642195080137955, 0.5987216310442116, 0.5307456909031247, 0.5179929909604788, 0.9660951781118005, 0.9358890161534696, 0.9943510377708138, 0.819959017457845, 0.6309585760167764, 0.6452281576419192, 0.6141138627256467, 0.9944764290698378, 0.8836506708865322, 0.6038312272402626, 0.6348926975926352, 0.9090091310770609, 0.6455243840022198, 0.5132390719562758, 0.7375031816524507, 0.930376597550834, 0.5711800106484257, 0.6109934952353684, 0.6319075657011446, 0.7512594695216468, 0.5732172933494201, 0.9817874641525488, 
0.9522344858790579, 0.9084822064317449, 0.9051712577129669, 0.6570335912702843, 0.6714882309606236, 0.7848517219826309, 0.7190137029448019, 0.7487076369736234, 0.5899697101389321, 0.5787201571843514, 0.867434648453336, 0.5925222420012082, 0.8411693894689111, 0.6003908049070508, 0.8870111330991746, 0.7053009987236214, 0.8461769162208173, 0.77778972518772, 0.6169665185827642, 0.9760993709261493, 0.9821782767799458, 0.5457557296561075, 0.5216980550926974, 0.7649002916318361, 0.6557254619628229, 0.6198782012401618, 0.9296387965265422, 0.6836636232451007, 0.8437764031345535, 0.719970512462723, 0.905030168451894, 0.8372986477362061, 0.6646166808588734, 0.9105877217991786, 0.5055158713581248, 0.9799252750525893, 0.7074351577095138, 0.7112763133210374, 0.5201616968341588, 0.9487538807704881, 0.798831327592181, 0.6526318643354257, 0.5521598988398242, 0.8246729200080916, 0.91434754402491, 0.6385959683066096, 0.938727414343203, 0.8282599145832673, 0.8270558917655529, 0.929902546974928, 0.6668983811331604, 0.5676680719863137, 0.8841810101721649, 0.8904485303191043, 0.9868883453713758, 0.7521159849997042, 0.8846341551034866, 0.6996369735837882, 0.5459845661619562, 0.6892594283523037, 0.759444049968131, 0.9496034937561737, 0.8865621761960463, 0.8990653960674029, 0.5119083241512825, 0.8154480586507606, 0.9634348253908026, 0.9569694798018062, 0.5008136730232805, 0.500891934934077, 0.9476909302302678, 0.909960745950898, 0.8721105275141994, 0.80732460082626, 0.9222846024729547, 0.7342817978452927, 0.5176670150611016, 0.8113034160884381, 0.6627421844186328, 0.9861028751705712, 0.5931934745517473, 0.9357622211095176, 0.9061344122939461, 0.8607640969911841, 0.6188952208248051, 0.9820261485340711, 0.9980115190286941, 0.8679860254853973, 0.5135596238257629, 0.8936052895602973, 0.8841207911253555, 0.6666044810551673, 0.6140777593482087, 0.692351145771175, 0.5967588164618752, 0.9051812533887376, 0.6760946849940804, 0.8531308333803611, 0.9192934925836713, 0.6267362578541635, 0.6111613385931397, 0.8635439298514969, 0.695308576521262, 0.6655579147325794, 0.6736350556942764, 0.9075537195073717, 0.826428245240014, 0.6634690326474955, 0.6443596242189039, 0.7154278026364844, 0.8612698391931082, 0.5855702379721556, 0.5181153702598698, 0.7905900257642282, 0.5268147480668288, 0.7269525913013763, 0.8137547677380472, 0.6227016676418715, 0.6395947580681127, 0.9448696482415905, 0.5556403158034176, 0.5160276828716296, 0.9783293807302829, 0.6370681322262641, 0.977862639404877, 0.6781477625021648, 0.5614145965671615, 0.6448412459964972, 0.6422385587949027, 0.6560127556290263, 0.8196590233612948, 0.664433887647206, 0.979576888821041, 0.8719073371337125, 0.9041625666630493, 0.8914823275921122, 0.9183116333994672, 0.6114834100717577, 0.9757449158288911, 0.8086284147108984, 0.9396780849926079, 0.5891233367095016, 0.8119729521906072, 0.7767581245684333, 0.7951189330475095, 0.6645122458894936, 0.6914682543753763, 0.7298753046114064, 0.9997662828351666, 0.7681675636424525, 0.9848867257954905, 0.7837199432827113, 0.5008124686322651, 0.5530650043025814, 0.7996583778012137, 0.5952508177767724, 0.5395517024726416, 0.5209573587721816, 0.5249380791134702, 0.6996265086868436, 0.7500590089502168, 0.6356538616599412, 0.5955780019583825, 0.9959775345561508, 0.5729569568037796, 0.606684772457051, 0.6991657296912013, 0.9859576675700996, 0.9459977698336584, 0.8918924031484018, 0.9498980704079647, 0.8866842077037829, 0.830447182366132, 0.8045076505999222, 0.8063336127820586, 0.8049190191438017, 0.7247812350819139, 0.7367008316847972, 
0.8776752659408713, 0.6852939506488774, 0.655767550467349, 0.8434790828304859, 0.5212518058273122, 0.972140433688844, 0.9213108596946251, 0.8037813386462072, 0.7321653485052018, 0.8458775662320196, 0.5371920061765371, 0.736332189756966, 0.7533139590639941, 0.7982215688648778, 0.5738039928298189, 0.6896783704953389, 0.938815177008558, 0.7759709671034214, 0.9771657386433807, 0.5667899213152976, 0.6806834416404638, 0.675000634078732, 0.9115955548694313, 0.8624831780004305, 0.5173070907790026, 0.833934020332165, 0.81356438974143, 0.6023068196408612, 0.7085748474371925, 0.6854472243138341, 0.8550523303064395, 0.7430826719768953, 0.9615508147583659, 0.864779382861024, 0.8834072183878618, 0.8893122314996325, 0.7627238082749295, 0.5581528186530738, 0.5262904953166789, 0.7774659708148595, 0.9998137345004395, 0.5309876071946549, 0.8914371073924378, 0.7492095429710193, 0.6219763259325135, 0.7294652490132449, 0.7097430564567107, 0.7542281139748754, 0.6603791443701118, 0.6460802083801935, 0.9493168205384496, 0.6821496589590872, 0.5275850303095146, 0.8019777978059113, 0.5479373150426716, 0.9950454260360384, 0.929080541488293, 0.6057978718502397, 0.7882955839204489, 0.7799859261337179, 0.6833792839019688, 0.6839021751586496, 0.6187298359805373, 0.9886432655295452, 0.5418412518362523, 0.6335489618718533, 0.5216825564206147, 0.8829044112395026, 0.6316234248434097, 0.5767122021912718, 0.9042304332460286, 0.9352376541295586, 0.7314514890778353, 0.5923722909254066, 0.5410660795956526, 0.9661081061560532, 0.6972083253229387, 0.9914522248197629, 0.5879723450762414, 0.7535538426810933, 0.6087760546015412, 0.7214324750881919, 0.8667594789024313, 0.9659275275081762, 0.8660593483161793, 0.5044414379486236, 0.9850809531466471, 0.9543288239576782, 0.8539714657374136, 0.5445818407971541, 0.8871130772073916, 0.6638357639208705, 0.9271531752278197, 0.8890279766325324, 0.6500513149521344, 0.5819646970586044, 0.8573889279970115, 0.5181955738074646, 0.5172910636175574, 0.5950854815726876, 0.7162480858979885, 0.8491324294779745, 0.5209397690622037, 0.7305861427457396, 0.775022506630562, 0.5637944148905267, 0.8910221045652591, 0.7172986933048117, 0.7037987442194873, 0.8614415171919263, 0.7646696013539243, 0.565064055424439, 0.7892434560222991, 0.6363553178948065, 0.5550835248094188, 0.9822793336866487, 0.9242492544623759, 0.7371436306982793, 0.5326017983034801, 0.510344757039304, 0.5891503672302391, 0.6380551613782393, 0.6377548020296753, 0.6385547344901867, 0.5050028049726104, 0.6211268156932697, 0.6933481318353545, 0.890756003317956, 0.7556414625442993, 0.943785138684367, 0.7538051851326814, 0.5301126679940031, 0.5638041147484133, 0.7078447299454526, 0.7062749145463163, 0.7201820480665235, 0.6563707778868852, 0.817987653947188, 0.661785473147359, 0.7361945088470101, 0.850208718210759, 0.8334400370694939, 0.8680663933896592, 0.6396706310039934, 0.8772472472039117, 0.9453408207563374, 0.5971229467337729, 0.6995680038970282, 0.9974688756455805, 0.8498525995593812, 0.6554537650172639, 0.5934935948420612, 0.8046528637455062, 0.5043584058831712, 0.843393378683677, 0.9770852916906162, 0.9258237335023769, 0.9418090982045895, 0.7232557646836759, 0.9415396907857592, 0.8589258723799877, 0.7387609075699979, 0.6802281285214127, 0.5492271017948716, 0.7503989863782983, 0.7226762938707219, 0.9730398093116197, 0.8443572575977245, 0.8876968229859434, 0.9236617153042712, 0.5266279236938434, 0.939506664550944, 0.774598652596193, 0.7718946892874505, 0.7224853460432685, 0.6681710693003422, 0.9458466098029894, 0.561904744023082, 
0.5000254274361318, 0.7401225448033275, 0.7363826947811298, 0.7128052076760445, 0.977333808051756, 0.9147481784048166, 0.9544953786032451, 0.9977612154538347, 0.5270610323936222, 0.8195428787733308, 0.7636999774011413, 0.9219336556022592, 0.750015254096302, 0.9745234575535204, 0.9170288153429474, 0.5149510462186235, 0.8769914438647263, 0.8744969685531001, 0.7951048399969892, 0.7724128709400182, 0.9738938212880166, 0.6267843801673204, 0.6901447455575923, 0.9639156204537488, 0.7241518583993587, 0.5741193947199115, 0.8406498749631632, 0.5853989656152097, 0.6258022504914131, 0.9531262159153916, 0.9346163282154896, 0.9381644218949154, 0.8683631475107916, 0.9119718941198189, 0.8745685705646724, 0.9655997324998655, 0.6106696851322003, 0.9275738935355664, 0.7053878779522316, 0.5894113015599114, 0.9011080881978446, 0.5357165904104386, 0.7427953555570181, 0.8633613262005048, 0.6079607109280012, 0.7893379897284258, 0.694403701797583, 0.5747866459400556, 0.7776758277852445, 0.7710777492959329, 0.780027622133494, 0.6191014619035511, 0.7689517412222513, 0.7478126057471011, 0.8724761226004878, 0.6995888720985874, 0.8204434146721773, 0.8263246531764548, 0.6921658873786969, 0.5711149032967113, 0.5947721533085231, 0.8996061572749458, 0.836195485306293, 0.8469626470052265, 0.5628806995828515, 0.9037081092540709, 0.8074922167272036, 0.8668864284550146, 0.6587344445952257, 0.5173848458371322, 0.6053392861131255, 0.756503317379369, 0.6493656977521975, 0.8299960191876762, 0.9002011673448891, 0.9241709856304257, 0.6971502749978216, 0.5707245540472572, 0.7320577012047178, 0.5333826425358694, 0.9498272529455414, 0.7556141861268241, 0.838538960824177, 0.5206895840607018, 0.84050958497649, 0.7238049414529346, 0.9472717433172894, 0.6913276863370048, 0.8756048862416508, 0.9896517016465669, 0.8233316205569392, 0.8043632209417028, 0.8154463146056603, 0.6723943642247665, 0.8544661265156519, 0.9191260101916038, 0.5876692566268746, 0.8566595824210747, 0.7782243044255192, 0.615411202087323, 0.8341614063612921, 0.7364778138781638, 0.8309710071847838, 0.9293752562144686, 0.9393710571751837, 0.8693808195005546, 0.9570378343731991, 0.878342608042524, 0.7273527446790536, 0.8622010743943685, 0.7639256298692352, 0.6899452692711436, 0.5208744197426183, 0.8194536747943877, 0.6813180134087871, 0.9184588426430118, 0.6538058431915783, 0.7481166126353576, 0.9483775020828586, 0.8677247516156861, 0.8718267299747298, 0.984064463938188, 0.5465413431851942, 0.7278198892156332, 0.5272674423961954, 0.607081412844473, 0.9117143325486674, 0.8051611524232607, 0.6245806068276458, 0.5794890035832927, 0.5958854826486918, 0.6843285490929049, 0.7517021844868392, 0.7553122562092678, 0.5849326921207143, 0.6955801190393369, 0.9964140009713187, 0.9758098805349426, 0.809183197021641, 0.8969584410716571, 0.7511211681776977, 0.7455855469850854, 0.5515970064835132, 0.7474454956134564, 0.7324302619194603, 0.548163614468874, 0.649600770695111, 0.8620009719889867, 0.6742680243251216, 0.9266011151044349, 0.7068667199647505, 0.5607469361639303, 0.5566812716716744, 0.6440157323468853, 0.580165318716734, 0.6634180513148218, 0.512496047176882, 0.9670918930216199, 0.7962650121608199, 0.6660355678732796, 0.7171495569569071, 0.5785482247314544, 0.5008459699488154, 0.8932131433174106, 0.6892122728599488, 0.5657648190944582, 0.5375439211787556, 0.8092115002943252, 0.6403985642340273, 0.5167019280969183, 0.744884777795993, 0.7984725072251524, 0.7236275431170557, 0.8370426620251687, 0.8116432723157918, 0.699576926595131, 0.8408088889835106, 0.7264616150114027, 
0.8273113298528554, 0.6038165638409823, 0.5450282708460006, 0.8918701345447992, 0.8657985668972333, 0.5408175763671523, 0.7600614881790015, 0.9145515606461536, 0.5480514271858998, 0.8960774182011206, 0.8863881683780687, 0.5334640071338519, 0.5484522820311637, 0.7017485661681451, 0.5317760661558291, 0.7865715999245827, 0.818834423501025, 0.6106847824797025, 0.5238298034809007, 0.6182732452650475, 0.8596236609801673, 0.895674364250482, 0.5242299169789866, 0.7046635028423154, 0.7222659480293994, 0.6240593849495728, 0.9706479582535341, 0.8291346815905388, 0.6098661059024504, 0.7903450781545551, 0.9397814086177511, 0.614392801651706, 0.5858715060097055, 0.6369744854754043, 0.7332906607372982, 0.5196249614580837, 0.8466966040923827, 0.931883509921039, 0.5600740075435553, 0.6685248790352829, 0.7538853791903136, 0.5266490924643138, 0.6639092448516721, 0.7614712050726256, 0.5976102658920773, 0.9229761894009949, 0.5485264168811499, 0.8204246510571301, 0.7920778407449542, 0.7986127301359651, 0.5197357113761125, 0.6964198471554277, 0.7240986461015781, 0.6766114295867378, 0.8394645246347903, 0.6795600464632704, 0.9063247471940573, 0.5599783639782336, 0.8189245565741594, 0.678178819752929, 0.9519109241141486, 0.5744012451450682, 0.5015178683428515, 0.6477462205101983, 0.8203080891529182, 0.6732376503755453, 0.6631139320351856, 0.8985968821706292, 0.534467391412328, 0.9585495332982978, 0.7478510859129774, 0.8517040442360259, 0.8626413241431303, 0.5508121847622662, 0.5983977881105242, 0.5256800298146505, 0.666446117457814, 0.5415425985141005, 0.6576383790104291, 0.7218358110561718, 0.7358866722226498, 0.9589864589947997, 0.9974456805194367, 0.7177555056029419, 0.7450861712732544, 0.6766298877570418, 0.6751064249283597, 0.7637123299719584, 0.996263759137641, 0.7870106633974234, 0.9116490117689205, 0.9219049273740918, 0.7603840632508736, 0.6808176610351038, 0.7123660121480082, 0.9100791191587887, 0.8963598815057874, 0.6815710056250776, 0.6522263284822875, 0.6110971527820436, 0.7924473713369649, 0.9064534793382832, 0.5998535870931572, 0.5531795276546378, 0.7583476545811292, 0.6626130200464817, 0.82144638452223, 0.9548753392781786, 0.522325174586451, 0.9609873071125818, 0.6879982590294426, 0.7562395657053043, 0.916090173249865, 0.7853970241481645, 0.8723215816132952, 0.7272132993184843, 0.9858097522164576, 0.8846725813215883, 0.6665521797941456, 0.800244814324659, 0.7028890918699306, 0.9272076621224977, 0.5091511310941095, 0.9016209720265302, 0.7680343489413552, 0.5160694414831838, 0.7382051056022627, 0.6391199773116663, 0.9863123644698222, 0.6449446029324186, 0.6876376766522452, 0.9321008631042913, 0.5483486308106629, 0.5927230816959939, 0.7683749158486973, 0.543608239600696, 0.8984181246992834, 0.5758194563096474, 0.995309199940579, 0.8666919721586628, 0.9120060405670498, 0.9601394779117387, 0.842560544005106, 0.7462227456610766, 0.8798227722906202, 0.9740114427219154, 0.7187432427491971, 0.55953689495599, 0.5062885992866598, 0.9644973443228567, 0.8246203859779844, 0.9797904209012898, 0.9478725241045431, 0.7390302645631974, 0.6288306019096355, 0.9746290811314917, 0.5718084374465005, 0.9106681803773329, 0.9882322617960813, 0.6664797609808313, 0.8443178238748257, 0.6370912344962041, 0.8438424870588062, 0.7391840727059162, 0.7191859223740228, 0.8424865462872456, 0.9450440156314714, 0.5038311854360225, 0.8802336960084556, 0.6968017548084691, 0.995851623843993, 0.5297976497073484, 0.6700849078372233, 0.5008739072953022, 0.7933326607775384, 0.6422086330474224, 0.5539650161513019, 0.5169201071017562, 
0.9500493471907951, 0.8591145039924564, 0.9455175410672488, 0.6677835620369059, 0.9244915523864796, 0.8059206699894786, 0.8172131583330743, 0.7783463220548712, 0.9032515130615626, 0.7405214046981496, 0.7457693523382725, 0.6527620554490086, 0.8042035931681192, 0.9601080110086135, 0.9261730867762222, 0.9714396231917928, 0.7156494589466947, 0.8291143822699782, 0.6166236629006266, 0.6486024681768643, 0.95248438644456, 0.9330789802675555, 0.7351383657494535, 0.6758259003587624, 0.9163615483561494, 0.8726496219761017, 0.5171231661009701, 0.7107138122451102, 0.5630714799090925, 0.9730234486696643, 0.7041789090849541, 0.8592602620102574, 0.9402533110835662, 0.7134901144077617, 0.6783649649462968, 0.5882087047965749, 0.604846211449191, 0.8431334230270895, 0.5264905580176578, 0.770659494617265, 0.6783397112182434, 0.561760707622118, 0.6501211540651648, 0.7704897879076128, 0.7948163857098847, 0.5141015406629316, 0.722018309678393, 0.9414231923929354, 0.5987212762526237, 0.6570671970012331, 0.5942542303055613, 0.5810638795909882, 0.7512514659742325, 0.8295958472359901, 0.8289995518545317, 0.7152500177444701, 0.7074493886609099, 0.9489193017586339, 0.5534580518593148, 0.5757697490820046, 0.6973524285539843, 0.952907863542449, 0.7997036829648836, 0.5009932659438392, 0.9516431598424724, 0.9147584692813532, 0.5455209816624105, 0.9926169677081438, 0.7168574142871716, 0.9133274350914906, 0.7678644452752019, 0.6114136414734443, 0.9935248956668492, 0.7860268795590357, 0.5657921103859921, 0.8705474983805882, 0.5473553767495565, 0.6307355764676275, 0.5407599105432583, 0.5227288345463961, 0.9072387549110901, 0.9991163897075708, 0.9158470998177526, 0.6205472023880897, 0.982882839157422, 0.6629495967456318, 0.6682375867786177, 0.6670623524630204, 0.6403583269921148, 0.5114618277097999, 0.5142885526174652, 0.9627717350437712, 0.6585001754861917, 0.5816369721669221, 0.6489403926026054, 0.5958148393618129, 0.8151892358439243, 0.7892280040349682, 0.5161061420953379, 0.8787538394474602, 0.9693198657930219, 0.8089248766620887, 0.8115419722044646, 0.8283262737917221, 0.5173007022038625, 0.6405231967048743, 0.9065229877246581, 0.8515555993203523, 0.6344667869876397, 0.5647223811405556, 0.8643447031312907, 0.915305566489595, 0.5761789181667456, 0.6942139877761839, 0.6261641733781513, 0.7866109130029919, 0.6944991405531085, 0.9139809596532751, 0.7813811345898016, 0.958131581822836, 0.6178169172873619, 0.6072635123832437, 0.6006762004700781, 0.9913478563144043, 0.7139496410391665, 0.5813374367881725, 0.6429788542480084, 0.9086105633786247, 0.6307945075615828, 0.7903746459641727, 0.8871741424823981, 0.8700612044766536, 0.6081196192362803, 0.5219904243709447, 0.9845430973769125, 0.725595597497359, 0.5808041551856565, 0.8961437104310912, 0.5972936429987712, 0.6660091515734143, 0.8805359470826463, 0.8926217419546579, 0.6773856265021672, 0.8954449724758038, 0.5590473860103944, 0.9346056797557112, 0.6128429198293597, 0.5713069593651716, 0.7385500086157397, 0.8808403485812116, 0.8347354844422884, 0.8470547264646218, 0.6171958649048304, 0.9134135563150849, 0.5804764995144827, 0.9968508213653636, 0.8299498294884899, 0.5424781993830292, 0.6151452666764194, 0.8789689097544671, 0.8286483678312297, 0.6264463832522078, 0.97959205806016, 0.9564118714450914, 0.6232902986168195, 0.5655743311883095, 0.8538490576047382, 0.8207231950169875, 0.7353304935472038, 0.8293944589532669, 0.8537297145379604, 0.953410668011491, 0.555061100432882, 0.8765967984594267, 0.6476883653564282, 0.746363763074968, 0.5754453439278662, 0.6093935021183574, 
0.5268344631641896, 0.5825873711536004, 0.8736711737406146, 0.7538634322390283, 0.5662025548463988, 0.7737031620426296, 0.9687377826101504, 0.7100079768998684, 0.5711801645122865, 0.5779115734301702, 0.5872719087523406, 0.8656746668000932, 0.6620523478696088, 0.6327161098212486, 0.722554220657954, 0.9401576337323104, 0.7927674845158732, 0.7743281527460237, 0.8740562230736812, 0.9451444140433779, 0.9908018303923034, 0.571448871822182, 0.7344009260829856, 0.9267189334104584, 0.7701346151747348, 0.8149000852150965, 0.7403406837345254, 0.6817805468247987, 0.8578694404725296, 0.8072589398323616, 0.7451362445805279, 0.5749478073751535, 0.8512754759893068, 0.7432880645611157, 0.7684410967927551, 0.7623309065587089, 0.6264386496321328, 0.7791835292689946, 0.5889891970529668, 0.9185291946889405, 0.8183421517831962, 0.8106012954402826, 0.8194915596299497, 0.5883764392494619, 0.7675039092224392, 0.8507485595767853, 0.9765747108287348, 0.5361015135372025, 0.6241779854695968, 0.5996388179360326, 0.8794350765825356, 0.7903846908706378, 0.6898676264987217, 0.6969975784219045, 0.9219081051081447, 0.6614897491996066, 0.5816971898775614, 0.8844386450862658, 0.7524452651167064, 0.6598923885844306, 0.8914015265527055, 0.6170222048998922, 0.6120750314680887, 0.9552618458121597, 0.8814662800557718, 0.6871496498015863, 0.9466536337704443, 0.7336804652353215, 0.6888770274760445, 0.7914782291896192, 0.5269140873524412, 0.6062872863042144, 0.8489134842963831, 0.9584518268580691, 0.7095208956238865, 0.5241990271250478, 0.7625730044461534, 0.7313896501628946, 0.5752732794367506, 0.8893460100706294, 0.8619076346496057, 0.7717107348942223, 0.5836468889854184, 0.6370555399730218, 0.586023901650879, 0.5414855055023964, 0.5584029283598384, 0.9033546803670113, 0.9407163084564012, 0.8451096578713595, 0.9069535535038491, 0.762657464166566, 0.9011710531695479, 0.8838138032715205, 0.8143528277663148, 0.9568685675329014, 0.8059140504257594, 0.6690303722427509, 0.5004485195856947, 0.6462210456350267, 0.5975668340832492, 0.8499671350568214, 0.929823889912581, 0.9849875318777168, 0.7152455189217097, 0.7557122727080332, 0.5417800468841938, 0.687514639912614, 0.7975393949962701, 0.9386451449147588, 0.8814467869447274, 0.5431486747334489, 0.8544678946294395, 0.9687581689888833, 0.6320836860543512, 0.7939096399815228, 0.792711559997678, 0.6291345324706898, 0.8772640147328536, 0.9764658240265771, 0.9167886281493398, 0.7222289092763061, 0.8433416575182235, 0.7335975357464466, 0.5613672488305446, 0.7771199894702139, 0.8835875923256653, 0.5570816491938695, 0.8686344153215597, 0.9529652258078783, 0.6177240954567544, 0.6814456458049272, 0.980624254398271, 0.5536451915282534, 0.9293295874332383, 0.991717889037926, 0.7436590390121083, 0.9474775817423802, 0.8788875014507112, 0.8786588706595608, 0.7419659575801654, 0.9616148516956865, 0.6995128752496608, 0.9482983947153762, 0.7749173962741085, 0.7867846380803369, 0.681542363633993, 0.9658395653877965, 0.9654220331534193, 0.6973987313043307, 0.5809047515521133, 0.5748169467489102, 0.7575660542532718, 0.6681296808915259, 0.8451211845451565, 0.6506310523908239, 0.6297268838676231, 0.7129738386857994, 0.6813213996093047, 0.9494833041308472, 0.9333375258111951, 0.780887192412042, 0.7510259105894446, 0.5444223557943852, 0.8439811348476157, 0.9087060796209493, 0.6072843619434538, 0.6561319923672162, 0.7416167864019135, 0.5569880283148245, 0.8632670034335959, 0.6270881572050617, 0.5698502273904741, 0.7757773127494239, 0.5136907636423829, 0.9406512924271724, 0.8107880951205583, 0.5433572702473952, 
0.6485363251225513, 0.7332449480122797, 0.6124944356166648, 0.9839993728629857, 0.81775051347553, 0.9421091968096209, 0.9751516866339122, 0.8718521725371495, 0.9045811365859182, 0.8309256808215815, 0.9382515916419809, 0.5046476144838992, 0.8392510307550388, 0.8027814623000871, 0.5118227480195101, 0.870387054734302, 0.5645352858954812, 0.5989641406439755, 0.5991847206917045, 0.5947915456744497, 0.5785095340651988, 0.9279896275144226, 0.8910029776840207, 0.6039506126500283, 0.9035674582554344, 0.7131294095148977, 0.6421719409942792, 0.7006466331914014, 0.9827291832533168, 0.6591010525803978, 0.552837540156766, 0.685483268223853, 0.822848492861449, 0.8942250957429615, 0.5383400052987974, 0.5514188326494728, 0.5806938909793542, 0.8816291753045151, 0.8026104215632716, 0.8960173257121651, 0.6776591072437597, 0.5673964663056502, 0.5980630811507732, 0.6838276876912747, 0.8004117242379176, 0.7434529608493728, 0.5407977889740246, 0.9139520512874688, 0.9487882995488135, 0.9104486014034687, 0.6176979853067907, 0.9235297974631616, 0.6793843918054661, 0.7674983295109492, 0.9749227836612235, 0.7525647578768645, 0.6140218163785687, 0.8275286762079491, 0.9726362460784626, 0.940373460197685, 0.5267930619391865, 0.9349988794256849, 0.6511544723571583, 0.8126226745056053, 0.5719443541020935, 0.6298079940138772, 0.920990741962288, 0.5155551804820478, 0.7582743769292755, 0.8965964797859519, 0.5092545647752258, 0.8670968216274367, 0.7568335501525225, 0.6073101388565901, 0.687738320877251, 0.6976271074984104, 0.552043232616812, 0.5014532258867168, 0.6742062255399086, 0.9198287088964983, 0.7230436526408749, 0.7987713349400303, 0.6675911006865661, 0.5685117914367295, 0.9601250641777064, 0.7691198231583392, 0.8638539753904315, 0.6432291795900615, 0.5651237062900314, 0.7438607000362611, 0.6240164973452791, 0.7183716304453734, 0.7239599322687591, 0.8273192638495226, 0.7953491390680196, 0.6690492584141314, 0.6778296331700115, 0.9800754711976176, 0.7059720438449026, 0.6561737293042589, 0.5363878025111016, 0.674186507464603, 0.9722164384626109, 0.8895510048503524, 0.6074385101949717, 0.606685196736555, 0.5587836175297294, 0.6702096646787044, 0.5577728395759654, 0.5969665476704351, 0.6544030006745836, 0.8171365066295144, 0.6724784447969203, 0.5949408188432157, 0.836462219852199, 0.8136001746461938, 0.8701821408263306, 0.9702453878622757, 0.5963983166830017, 0.554110797884894, 0.9503827790580617, 0.9644498144671124, 0.7440279914981902, 0.8489723247774501, 0.7570589203545905, 0.6943208496534414, 0.8738968164664827, 0.7799711051778684, 0.8151395110682196, 0.5736932639528851, 0.6609226907758939, 0.9441684765509875, 0.5628267613057175, 0.9984424297719551, 0.9832907481630968, 0.8841708958779819, 0.8283036167893705, 0.5610020056093679, 0.9825669830926015, 0.8146427776555525, 0.8243130211353669, 0.7372033012909179, 0.7345024616630519, 0.730991008330822, 0.8030471631259428, 0.8199893097501061, 0.5759811354291611, 0.5251148978159614, 0.8082891311412381, 0.8991451635106373, 0.891658843909261, 0.8752632835139953, 0.8227869480799929, 0.8556278518271838, 0.8858479475698553, 0.6926406674701582, 0.9131710102757638, 0.5013226871556226, 0.8831675947259869, 0.9511578378875993, 0.6707430039535975, 0.5360047630929234, 0.882398838978089, 0.8296223288025535, 0.8168941510762678, 0.989327140258687, 0.5736521318446457, 0.773606085087504, 0.7164859679897169, 0.9426158023283177, 0.9348262514515744, 0.8462975208337078, 0.9606119363415349, 0.7598518072786569, 0.6826465151255033, 0.6365873322522196, 0.8410467461056443, 0.7173202964703158, 
0.7110423324450474, 0.5671177083985344, 0.727878018141914, 0.8914480664773619, 0.6167911966443238, 0.9127874379006454, 0.5630544817641895, 0.5321394514727613, 0.7396439599912522, 0.6446339525658222, 0.8665326162793483, 0.8643839312510044, 0.5696851450039568, 0.7202945390090463, 0.5155482609584428, 0.6748301887872173, 0.7830985923325158, 0.9327932372045782, 0.5039589350137526, 0.6726040838206643, 0.5114507756770525, 0.9181387517658355, 0.5321559484474032, 0.8162409554431903, 0.7662836341961987, 0.8893481801035326, 0.7263104571276324, 0.8807681184294094, 0.786168857605418, 0.9435921151909993, 0.6721069238375659, 0.7402776941141498, 0.6653357388481412, 0.8207862658810701, 0.7644222852016392, 0.6271274227675144, 0.9455124811528045, 0.7727093359955668, 0.7755545603384424, 0.6351249036490738, 0.6158882041583026, 0.5284674923598409, 0.9858109521630491, 0.7838711628832729, 0.8674154659667108, 0.8688361417769831, 0.7354566714218418, 0.9250078357044795, 0.8340156326561563, 0.5426661734518841, 0.8489981494765831, 0.8186246506489838, 0.9125264421985273, 0.8453335280889195, 0.9493254150239725, 0.9506902263891219, 0.7670876709387999, 0.730407132604942, 0.8844805667479888, 0.9541807739459605, 0.8200663988918473, 0.8836803502840291, 0.6477621897808299, 0.9698293101615928, 0.6970686071032199, 0.6836130535189044, 0.5587364513929024, 0.5058953367968233, 0.8463494220311992, 0.5676171839097062, 0.7426861456623269, 0.7269063142547658, 0.592361810990852, 0.6546121799942486, 0.5212552096604248, 0.822657397149354, 0.8186668710315186, 0.8265556968821101, 0.6052724657887545, 0.5048670823976928, 0.8366537020858298, 0.6902580289515878, 0.6459061886330209, 0.8116790315586114, 0.5984419602333355, 0.8125543120706907, 0.8981562169846034, 0.6920334700988581, 0.8731676413001581, 0.7296288334651466, 0.7474225403710775, 0.8658182108489143, 0.6363010604786765, 0.9296159801463377, 0.776244309296453, 0.8861029741741773, 0.5584576558162251, 0.7112105173087626, 0.5023048091828969, 0.603199183427834, 0.8116785553497544, 0.9235123069066798, 0.5002165521212099, 0.8470544017871128, 0.545757277563675, 0.7684201493495113, 0.5235131460881723, 0.7031002792551487, 0.5427297003279323, 0.6484313228271297, 0.9367805842422139, 0.8246277777680251, 0.7355793336517344, 0.5562345087453929, 0.8641445858317434, 0.7694217811396502, 0.7499457324854767, 0.9959521567230796, 0.9114467713573493, 0.9343206157738644, 0.7711882753450874, 0.6629834058650261, 0.5239677942419438, 0.5626123001055539, 0.5490217791159427, 0.5087047411185999, 0.5634409682689796, 0.6008919823069818, 0.6306925216317467, 0.9981868204079383, 0.5877533954674311, 0.5551935143819265, 0.6400685431151479, 0.8426155711404304, 0.6511992210124624, 0.531009566103037, 0.8265263720791223, 0.7796041092352683, 0.7310742589185497, 0.9210203390911876, 0.5297206066790543, 0.5583039715177951, 0.5309310249648862, 0.764484451237845, 0.9564844671104927, 0.8921051453852875, 0.7179884007331963, 0.6605785484424316, 0.9204020238383401, 0.823636834665612, 0.9434286075471616, 0.7356266042034474, 0.8824246507326796, 0.713776520605774, 0.6397810540306872, 0.9120727893227702, 0.5309199257423955, 0.7042590119246288, 0.7185915438485845, 0.7280561803791392, 0.785887973642114, 0.8978141328089366, 0.5619248777610866, 0.9076571265174007, 0.5092736014180451, 0.6543573962397131, 0.8488680061120388, 0.780006587584401, 0.5270275821378965, 0.5262363935268559, 0.7062009847608354, 0.637102436047473, 0.7718440804289202, 0.7291657601649492, 0.9121544562967727, 0.6758421575501203, 0.9713722033987666, 0.9174848955972378, 
0.8692764823767132, 0.685132152223991, 0.7069975047957622, 0.7473237037762936, 0.8965086295568194, 0.977650927214354, 0.6193371488155119, 0.5258354717701192, 0.8721496593776146, 0.6189379616488555, 0.7144649101559211, 0.9042105546292121, 0.6411078391899923, 0.515593136786936, 0.918406220401448, 0.686173312598797, 0.9750488464633291, 0.5412477911179696, 0.7043844523932244, 0.9579197751808968, 0.6671337677838602, 0.6690820274775544, 0.6721806691219613, 0.9747934341589481, 0.7613873576506273, 0.9662790428947363, 0.7850921740070298, 0.8232182439823429, 0.951829107017769, 0.5138079944610605, 0.6692081431290201, 0.8250031717648754, 0.5141347963740474, 0.5083427305553911, 0.5448558789028874, 0.7398922545810716, 0.6916743210082879, 0.8846613868795434, 0.5677127452975965, 0.5051317585841877, 0.6642815852254187, 0.6983868575818674, 0.9105844356297033, 0.707021274466119, 0.8064342179549404, 0.5309647885518964, 0.7866400931841762, 0.7245891883134639, 0.9505179568573249, 0.5629343809126935, 0.6471077575719015, 0.9313976787118161, 0.644501412533785, 0.9854001714114657, 0.8664750318162362, 0.9185103766469811, 0.7209558158696543, 0.543121331318809, 0.8942554129059062, 0.5779107852087313, 0.8051006350120722, 0.7717028036727749, 0.834298339110104, 0.8070285945836502, 0.8689019833381815, 0.6781151975231137, 0.8060417098918757, 0.8695271655784573, 0.962719779696348, 0.7781275616362786, 0.7094012730920389, 0.9281372618696491, 0.808728713135998, 0.6695428917523818, 0.9372281172929032, 0.8259394376032987, 0.6259972197002488, 0.8258915991240723, 0.6263461685731155, 0.5342097583067902, 0.7923305813816093, 0.7637000758218915, 0.6494324892192158, 0.8184012917953769, 0.86926579754843, 0.5523324686307655, 0.735619984383646, 0.645035663727604, 0.9887574859556982, 0.5579932220996028, 0.9986108611477695, 0.6626534077632318, 0.926208520493804, 0.6784848650049617, 0.8178686750718624, 0.956047381428324, 0.6861231067032398, 0.8822232225669648, 0.7500628229959201, 0.7635381471429424, 0.8498533815033646, 0.6316008190964739, 0.952656813815955, 0.9238433752876782, 0.5281374921771211, 0.6099247238324044, 0.8831911035448566, 0.6282611405241336, 0.7371195282472097, 0.8323888813922373, 0.661265480559343, 0.6603986016715373, 0.975420152414312, 0.824585364300529, 0.9724822048426212, 0.5132722222722432, 0.8001872904805885, 0.6279860411311726, 0.9616109018370627, 0.7530471628125248, 0.8525780878940638, 0.7546138984014494, 0.9534885049347459, 0.8718874961005038, 0.690890881873713, 0.935352932442304, 0.5507582497865618, 0.8246148997414503, 0.8682003443493073, 0.9289257656163088, 0.6467745449982281, 0.8787013150852137, 0.5946443871889959, 0.9023948545707113, 0.6515762545848449, 0.5866881051575117, 0.8044343054808836, 0.9398111612394889, 0.8621379997978955, 0.6134018461799253, 0.6905739646351703, 0.5491285634435807, 0.8340282145025864, 0.6379176998452243, 0.8077007891235459, 0.9239501495601992, 0.622913840393809, 0.9308326813232127, 0.9080952694985185, 0.7823847426446415, 0.7126266362792011, 0.8951757332633813, 0.9330209642148461, 0.8105182079102384, 0.6458246124491469, 0.8498843653507917, 0.7137253388494257, 0.9026700978471847, 0.7848839093294422, 0.6359310797322881, 0.7411336706759242, 0.5439122679306694, 0.859842082746241, 0.7390374447972763, 0.599925216349594, 0.8428789387318829, 0.9562516264118018, 0.7133027874225739, 0.6162125789546928, 0.6594423350722588, 0.8648677007942274, 0.7204264815076062, 0.8497094893170116, 0.7107279100161454, 0.6014161528377286, 0.9926971653298364, 0.5161190249030319, 0.6037780242515192, 0.5956889991915146, 
0.6126570317277158, 0.5576048323669197, 0.5139523286804082, 0.6477505548217909, 0.9044605275491019, 0.5035048922273457, 0.521443057373846, 0.6631101793838245, 0.9170626222004616, 0.8769872096479172, 0.7606616155605965, 0.5288924315578927, 0.9635197741611838, 0.6872982604203566, 0.8184958865291864, 0.5483707815159965, 0.5820958562286016, 0.6973966265959773, 0.831741972156314, 0.7953687070063196, 0.8758295616452784, 0.719608738241817, 0.5390668102698974, 0.6882143982401849, 0.6975762570445236, 0.5338150031024935, 0.7853608472128872, 0.5801313494469591, 0.7459349307792235, 0.5186287276533443, 0.7195219566820134, 0.8627333746876672, 0.9832891736845644, 0.9088906156192551, 0.868792300890675, 0.604573911324918, 0.6314131104112364, 0.8249237811263161, 0.7322144873304749, 0.9279000188327395, 0.9299801201961228, 0.7611936106282134, 0.9024363077098239, 0.8799406384004573, 0.6114927681487199, 0.7515405323938269, 0.6672112445072889, 0.8739242544558256, 0.9365588752786322, 0.6291752544672066, 0.5602369865233567, 0.7363394513202572, 0.89686037246018, 0.9283870266398906, 0.7253428623862012, 0.5685412989180011, 0.62524199024412, 0.5775093686023742, 0.8805962420877957, 0.7633075236653208, 0.9583726746290752, 0.5000577791942538, 0.9839859097801374, 0.8478566354675321, 0.625375803836255, 0.9158830819446666, 0.7033440463039768, 0.9391831477169177, 0.9377955841732608, 0.8220867520653625, 0.5189188140130653, 0.6518018662160456, 0.8647300687947892, 0.5292938411826922, 0.7157953056904329, 0.7691433232830407, 0.5029923875585651, 0.8980111306366005, 0.899581282318821, 0.5005886140274424, 0.8693118500146004, 0.749817768183314, 0.8081116231834096, 0.5243210523631947, 0.5314505979755901, 0.7432309827234714, 0.7427654794888032, 0.6801714491748037, 0.5548353197194504, 0.642399211274417, 0.8417019435615438, 0.9430241927337402, 0.5199041304288148, 0.9355008376894365, 0.8133400258918573, 0.51055699657285, 0.5151589410529308, 0.8524269567265967, 0.6653524219906262, 0.7625793339462129, 0.5427050179441465, 0.6197435642741382, 0.6082401113100515, 0.9171184587259967, 0.6954018212961611, 0.9060071160293799, 0.9482790534665326, 0.5868851051663212, 0.9421095924804728, 0.9937672404852453, 0.7970030299042608, 0.6334347936262634, 0.5426167790682626, 0.9964357546790694, 0.6178754806772097, 0.610985258814065, 0.9759772737297119, 0.8537968242607776, 0.7823493446011867, 0.9848366237291069, 0.8096679430329425, 0.5715835962118969, 0.6657919693325226, 0.5902150990252015, 0.8919097999226239, 0.5977566935637117, 0.6642895417090637, 0.6850491801612228, 0.6555599837939109, 0.9107870934375218, 0.694281627736526, 0.8425122736356317, 0.849390334154022, 0.73203003395747, 0.8152047956905886, 0.7639237237582128, 0.5014649858355451, 0.6410760378300207, 0.9518097426253751, 0.5666834572861182, 0.6137960603799518, 0.6937898798384126, 0.6817737038609202, 0.7899883007043327, 0.8352334355316635, 0.7933968411437118, 0.6244430059296312, 0.6510899653910809, 0.6568204177364352, 0.9262659484601363, 0.9898352030041284, 0.5686171220082015, 0.8374690318562625, 0.990252990463848, 0.9138666102300594, 0.6471293202625439, 0.7807470708986379, 0.6592224219965032, 0.5797638982470112, 0.8229299971379401, 0.5679533052603534, 0.7087496748260693, 0.7236117501202894, 0.6522488624885658, 0.604469804784922, 0.6192754261848669, 0.7989454145783709, 0.9482969779416663, 0.8533425542420714, 0.54392583750337, 0.5361138210426564, 0.5350488079863229, 0.9998169124656113, 0.6120443022127269, 0.5050970277633939, 0.6819628952271102, 0.7986698648973365, 0.6304841128473444, 
0.9193748318519392, 0.8317756506352325, 0.7595534185669646, 0.8876991244332386, 0.773109236369753, 0.8286024468886313, 0.8700845117920686, 0.8734780242400348, 0.5519832249819789, 0.597653514893006, 0.6204952080583143, 0.6780146615371225, 0.7077214489302213, 0.5380712735885391, 0.6018939148629825, 0.9716763638110371, 0.7353310168928955, 0.5675008423676775, 0.7239319763811465, 0.9051807356490416, 0.5071452808187754, 0.7734156016775335, 0.5180771388832801, 0.8384925311533069, 0.5879824252995702, 0.7715886335033841, 0.6896794477648678, 0.6816931814462492, 0.5565719107128644, 0.5395986817552783, 0.9621578629890699, 0.8292166223397806, 0.7466576628373809, 0.5541381209273302, 0.5144165751956713, 0.5033172374331185, 0.9316284948039614, 0.7831161062944936, 0.8780732618800372, 0.6886502688358875, 0.6312335341000522, 0.8852229552761948, 0.8098616510136243, 0.7063531117441764, 0.8270271216554453, 0.5366713478927885, 0.992061399568087, 0.7386382275562304, 0.6510270148588049, 0.7473420482905186, 0.5363275292932727, 0.9405362653811689, 0.796243088171958, 0.7027221266321015, 0.8044790615622768, 0.7679157855890035, 0.9693352428803728, 0.6731181175478624, 0.7491222638243311, 0.994136454715711, 0.554444544886805, 0.5681696778845032, 0.8029207853867069, 0.6989827989087196, 0.5973005182720861, 0.6100628004933664, 0.8300456585975724, 0.8153710557482154, 0.6752054651583943, 0.8588517159430198, 0.9982496202494672, 0.6435906936015021, 0.6600158796449536, 0.8001254256325656, 0.781659276306993, 0.8430711076255264, 0.5937130366156099, 0.8401813028278653, 0.6965307996964054, 0.5499183520663791, 0.5765514286394222, 0.7302831677491594, 0.7443177104209422, 0.9191491693599627, 0.954930283814498, 0.7844998284999989, 0.956059346676702, 0.9789185062498594, 0.5571305092177539, 0.67646718556722, 0.705515586450257, 0.8891360930566139, 0.7366368949823658, 0.9962335671104361, 0.5719886435822993, 0.5019193062441968, 0.5474176965972062, 0.8780871418505896, 0.89188865253533, 0.5500871221133379, 0.8988104219663047, 0.5413888091459624, 0.6725241742154879, 0.9569820754504905, 0.5042904725527979, 0.8769055650290091, 0.5677274616176587, 0.6059418026118518, 0.5552588615213832, 0.6214387242780913, 0.660958809800936, 0.8096487330496678, 0.5733694078680085, 0.617683090477003, 0.95068211257062, 0.7660298087150648, 0.9324717169620234, 0.8318199431082534, 0.957993176993367, 0.668750856303751, 0.6796099976139836, 0.7860712605246531, 0.505896510098759, 0.5802345532902302, 0.8632125542946796, 0.9318344075291092, 0.5891932399985773, 0.86839427577612, 0.5346169449528835, 0.806071619085903, 0.6975657149906165, 0.8723523428395017, 0.5838354687952514, 0.6867436051086168, 0.7891339108651831, 0.9902747299024339, 0.7318489708403285, 0.5487891064947032, 0.9470812393302092, 0.8435625809009686, 0.7306120359635915, 0.7705597758162295, 0.686148874820971, 0.5104757232534871, 0.7925852088875154, 0.7466318362875519, 0.5225716846605646, 0.984980696052126, 0.7738087862248992, 0.6666628598354025, 0.6093580584688351, 0.987350805931072, 0.9148102001426299, 0.5139661204111012, 0.5075471089033605, 0.7726952530562045, 0.8090889310246678, 0.8017470554523652, 0.816504092598958, 0.5908317105312143, 0.6776835014856731, 0.9302058858828263, 0.8893427292139602, 0.9263044154046836, 0.815121999018413, 0.5369789015191556, 0.9843375474510958, 0.862148044959157, 0.5030881093676809, 0.6837159724193983, 0.7654775442320314, 0.5284454707893518, 0.5266651789092671, 0.8635168909470163, 0.5929561279107423, 0.5837103872636057, 0.819323120342299, 0.7190569966413938, 0.726098028719343, 
0.9417056047400691, 0.9291271744028274, 0.8673303241626622, 0.5915361144959737, 0.9122164499383449, 0.8196587120519341, 0.7137444019037469, 0.9337248945379601, 0.8368829360922516, 0.8632658692401454, 0.9709123494026608, 0.5021320741132014, 0.9196645352503487, 0.7201625352324423, 0.5748623750920008, 0.7899600486762379, 0.7971525599509026, 0.7014791724153668, 0.673775414478569, 0.602549513373169, 0.6675408345523928, 0.6625568140179224, 0.9491887850227618, 0.6965675561056668, 0.9812314600727468, 0.537104656872978, 0.6657424398355059, 0.7169744526345376, 0.5224099239401654, 0.7543055808136876, 0.8673524322027428, 0.76526604384146, 0.7272988715905602, 0.7089201888918701, 0.82992386622858, 0.6748766924713965, 0.9330125792168433, 0.7785391664726462, 0.5983611549904522, 0.6975228073616458, 0.5490957842062099, 0.5218408091314498, 0.5807397237706922, 0.8203892163410624, 0.5024401321850673, 0.563089018586474, 0.6519652005143522, 0.8287159596575362, 0.7503692359306482, 0.6869501742718438, 0.7465192829035193, 0.8434937100295481, 0.8930515741922826, 0.8970287093548893, 0.734784019738367, 0.7558116992377513, 0.6176130942663063, 0.7403401127852169, 0.9494180493970918, 0.7534259867613342, 0.9814281726621628, 0.8448689900676196, 0.6443571948909876, 0.5896600794296798, 0.5632366511781279, 0.9465820994970866, 0.5741966858203453, 0.6909024978107532, 0.8214671930777402, 0.6579627446075389, 0.5244741855649464, 0.8333153310132175, 0.6964310291045633, 0.9503277971257762, 0.538000676453273, 0.610504727360466, 0.9537587691496733, 0.5688811555162946, 0.5258252144361527, 0.7763217381714098, 0.8536631217974211, 0.9205017864298222, 0.9811849880952082, 0.8757059948784334, 0.9710411648445549, 0.7505486840180691, 0.866551665333946, 0.5592304511248376, 0.7538417913916001, 0.8493747810864221, 0.5379107936545338, 0.7206538787065615, 0.9839814730072989, 0.5634052587242933, 0.5746556533443803, 0.6737314137412048, 0.723042712710908, 0.9217411003561168, 0.7214047875638809, 0.9996470526894871, 0.9180706842303228, 0.6392340350075378, 0.6194844556102006, 0.7730744819700488, 0.8261786586382031, 0.7249601558752136, 0.813745716987981, 0.788671998784937, 0.5586038984839505, 0.9256849143474335, 0.8995341700726758, 0.6244977188719486, 0.5098254170964984, 0.9682559802517348, 0.6805415239235658, 0.6774802362193539, 0.5696350952365454, 0.5588914051987492, 0.6261772863029069, 0.6632182152864498, 0.6531986298174521, 0.8692215532748822, 0.9429605469259603, 0.6499022471998924, 0.9687848531914778, 0.7687451102641741, 0.7020552278413245, 0.6976937304716404, 0.9626459591193646, 0.899324652320924, 0.7626928562712203, 0.7933660619540637, 0.6758213677306288, 0.6752134375571114, 0.9140879164715667, 0.9278972069586214, 0.598467045785223, 0.6568056946059463, 0.963079437726591, 0.5671378062761058, 0.8802188998292182, 0.6840935618949646, 0.8483062963285806, 0.9157809442650571, 0.667866481675819, 0.9521206907813737, 0.8356806597864489, 0.9884007301616815, 0.8232524868302993, 0.7503414460190757, 0.9426846564451785, 0.6438433622890158, 0.5414764928833953, 0.5549423156539428, 0.9578258155211894, 0.7529381549920954, 0.5629109212570895, 0.850373999832803, 0.902018760013165, 0.9888818771045639, 0.812681372390517, 0.9530681801203658, 0.6410751949507808, 0.5459961620037641, 0.8342147792004835, 0.535550833716812, 0.8130202121114553, 0.7605513431306602, 0.7675214472652813, 0.6974340922448211, 0.9509251471987582, 0.54290667803535, 0.8334897781170811, 0.5648639434486378, 0.6465122400713653, 0.97229720331211, 0.9621935795533001, 0.6729288172477268, 0.7549985772724193, 
0.7894774745325739, 0.8006193137834079, 0.7298653234454616, 0.9740857174628067, 0.9936768445834434, 0.762423693954322, 0.5609811338144908, 0.9131985340176627, 0.7677329471255903, 0.8321411498927178, 0.7985704694144342, 0.947917793855903, 0.5343495330869947, 0.9802202908957489, 0.7276627097658713, 0.598139321628373, 0.5223764670196325, 0.5009283613664826, 0.9498881806890891, 0.8647699218954146, 0.9170286321005576, 0.6362016657223848, 0.5065577896085021, 0.6477935663125494, 0.6849518141113276, 0.9828642169711065, 0.6388790475977224, 0.905902980703784, 0.5432540952222453, 0.9262263993262512, 0.683261006532454, 0.6365498569292907, 0.6134806938976519, 0.620459912056069, 0.5003133016602277, 0.6998105043765243, 0.7949659178406169, 0.5896420001820742, 0.9468397225996401, 0.9829441887635324, 0.8236943162524016, 0.5653241307059595, 0.8218321542625129, 0.869898317709512, 0.5226934342242441, 0.6089840093931727, 0.6273604937983819, 0.9643960492059269, 0.7247708778219095, 0.5447631563952018, 0.6900653014528751, 0.8326602340580782, 0.6549269218216451, 0.5720827173238281, 0.8395194875648527, 0.9557455659864991, 0.5519649079311079, 0.744542848323019, 0.8858214863190208, 0.9917704289366249, 0.8907004318041425, 0.9671464312866488, 0.9525320087811608, 0.7610832108510217, 0.6146437533796734, 0.6203016872527087, 0.8235617894473442, 0.7111218898822971, 0.7041746889016567, 0.6960297486834592, 0.6348800121593381, 0.8598993689040956, 0.5217408677197135, 0.9967151646695395, 0.8370606258021032, 0.9895552548522227, 0.8581179875333542, 0.5395002778947329, 0.7245274718651709, 0.6227606199381441, 0.5729219558702328, 0.8773925898053457, 0.8356227556886306, 0.5389234410175339, 0.59145368744027, 0.6836269806672401, 0.758121600906925, 0.9542220148056574, 0.6647207162147066, 0.8199763000146758, 0.66675198349945, 0.5472465202677816, 0.7108002893769438, 0.8199150400238899, 0.7780859839293288, 0.7933575789245431, 0.8575682599753696, 0.8151971750340504, 0.5568533192009404, 0.8006372701455708, 0.7619772864758318, 0.8548446999162798, 0.8879921752395592, 0.9247327464086033, 0.9711740844339702, 0.8100545282171335, 0.742701289035117, 0.8262799652480649, 0.6481324931563608, 0.7009197558269551, 0.801667887583188, 0.8028651947212226, 0.5612644508835924, 0.6663567440163423, 0.9451641791954737, 0.8729980770848149, 0.6292940973862557, 0.9536923655244157, 0.8976199787670138, 0.7233930562096185, 0.8143877125189747, 0.7654400195432582, 0.8405555331146073, 0.7564605127760247, 0.6884762337508459, 0.6964572485367925, 0.886520312043674, 0.6995006370138571, 0.9943869722770642, 0.8203934391508851, 0.8534876338657695, 0.6481915580028381, 0.8100502806427312, 0.5563331376561425, 0.9154039101499499, 0.6774773008390982, 0.9857052934820876, 0.9783010244966088, 0.5978225987032018, 0.5997644529082331, 0.8867800908066936, 0.6347881238158926, 0.6053175673209674, 0.5580767436065999, 0.6358180025442135, 0.5865877425821086, 0.9007401966107615, 0.6891833527588704, 0.9608456104326573, 0.510642243214017, 0.8820985713944914, 0.8160111274474944, 0.819678217972656, 0.8872062905215561, 0.8265489570464071, 0.6779965035387925, 0.8440063429057509, 0.9975546130356586, 0.5668785719635784, 0.9265490614084575, 0.8419367103428017, 0.8637886910397377, 0.9184172866190286, 0.9815432505049343, 0.8438321024646801, 0.669853124973354, 0.7417681151822157, 0.5632691780994121, 0.6896765070698891, 0.8970317351185302, 0.7420858402368051, 0.638202316698968, 0.7705889842956324, 0.5680022543162297, 0.8399161551068282, 0.9787462581763378, 0.6875450108822153, 0.567506891807962, 
0.8620372666604812, 0.7879695729288905, 0.769561917742366, 0.843314642119354, 0.8176300892246411, 0.7859592107457547, 0.5035631970757929, 0.9121607126562545, 0.5340475013388022, 0.8841610700099609, 0.7602502713182524, 0.714272229108668, 0.8699838986225781, 0.8815900952845603, 0.6371582397348443, 0.5534103758506121, 0.7513429111877867, 0.9352700262469047, 0.8620842711357508, 0.9815154578166976, 0.5274679054278502, 0.5946610979795909, 0.6270125094061709, 0.8782806826743412, 0.5393092425386068, 0.5173393715584027, 0.7450310857196292, 0.6826043078190993, 0.6748176828943, 0.9496885165717635, 0.664114895250508, 0.6972584331164307, 0.9826245684606423, 0.922332134222148, 0.5417354575865632, 0.6961694456526608, 0.6508233599597419, 0.6205156597231859, 0.7110717776750299, 0.9138872374318562, 0.7556920395806475, 0.786216133499837, 0.565557123828536, 0.5794766908273565, 0.8106257989061291, 0.5600759126077421, 0.7625805743128926, 0.8481457811176831, 0.6269387858900277, 0.5098536632204409, 0.6169067208149214, 0.7509733571011472, 0.686091549941874, 0.6931455098917298, 0.5697581358510742, 0.6526051239044441, 0.9195095330961984, 0.5090582196796174, 0.9382041750310233, 0.7979358431256751, 0.5575939464509814, 0.6330711423570188, 0.6584689622250143, 0.9489443996198307, 0.6263675479891027, 0.9498006081962078, 0.8575531951160686, 0.8198910793297804, 0.5025558223278572, 0.5291506327553475, 0.9947534682103085, 0.8484907226629219, 0.6126531023353766, 0.823853533700668, 0.8402947211949117, 0.513079853590318, 0.5363590904541584, 0.5111670989567352, 0.8142517191122458, 0.5667543004652906, 0.7443649123655183, 0.6715671077336407, 0.6260777265541623, 0.6828216486124413, 0.8257723500524998, 0.9717320586494979, 0.6327164552670443, 0.5735497384388073, 0.8231924047653978, 0.602916626349524, 0.6335020649355599, 0.6549946465772312, 0.7480204089146247, 0.5262339610512139, 0.9988030139546034, 0.8960357560943086, 0.8450919208829379, 0.8959266129425225, 0.6594870745808087, 0.7685269530076804, 0.9178440912584496, 0.7313984671601129, 0.8005699358636621, 0.8967830934015127, 0.6386319366399367, 0.6259705264796198, 0.8813646510149092, 0.7081774090730928, 0.6906585574361498, 0.6312568563310496, 0.7469581112850916, 0.7438135329675212, 0.5541338810319202, 0.6581876825300045, 0.7234351910256018, 0.7630560055139658, 0.5723050206664599, 0.9832369364180524, 0.8376439223918346, 0.7979162183704729, 0.7862568869492068, 0.8495850368911004, 0.8877373546377314, 0.8065316024370293, 0.8977379879143036, 0.8261112868880016, 0.6815665038031119, 0.9351767052537221, 0.5495289547645605, 0.6635178132661115, 0.5511662134113707, 0.5525804175658349, 0.6413126584730846, 0.7262217158257849, 0.6132329769557705, 0.5920096550854164, 0.7254263774542871, 0.6305425805772074, 0.8765607355567007, 0.585819909395307, 0.9523909797736125, 0.7779090351269023, 0.7470514345074004, 0.949012727098375, 0.8096244338824778, 0.6544989488073659, 0.7158427912451508, 0.9623535652085724, 0.5464536346670268, 0.8820963588583696, 0.5176609351985862, 0.8313330972797395, 0.7216029798628936, 0.6292373361719178, 0.6027592061566918, 0.7776547296063132, 0.9846474222791428, 0.9682656681393065, 0.5944753749815213, 0.6171104735105781, 0.7888315280990634, 0.8792654649620906, 0.7872853128770132, 0.9736270985754585, 0.7326105205586322, 0.6105879550811275, 0.7144495055201419, 0.8410947632263073, 0.7495612217168315, 0.8378726962031999, 0.7249338957791372, 0.9627164136567637, 0.8242260521255416, 0.7500872960304581, 0.562009139947627, 0.9955704643632719, 0.6097437762552133, 0.8393141569084934, 
0.9013018201597598, 0.7566662930162193, 0.6275657508028659, 0.7771385537876218, 0.8677993606642519, 0.9429020872980662, 0.8632319525780825, 0.6354084558332898, 0.8346963680385617, 0.5188463734404185, 0.605935228242096, 0.723986812344729, 0.6484396012972797, 0.8719890383773565, 0.8816048992549905, 0.6812392364615307, 0.6468122206578286, 0.9292161445968067, 0.7382602322437704, 0.9070388519104522, 0.6238244234574846, 0.5944605462795571, 0.7871026542042949, 0.6137815412243306, 0.9208939702504568, 0.5232314827588108, 0.9773648889229092, 0.8753705000229164, 0.6768888249819291, 0.5552616946016324, 0.8214878538148168, 0.8922189475279962, 0.6473715252461343, 0.5010575874477694, 0.7261401193004414, 0.5053661266414875, 0.7861582996992149, 0.9659023261298825, 0.7089483593037691, 0.6369879781065595, 0.7692089980893784, 0.6738672782756603, 0.9003253837730443, 0.87258299503376, 0.582494568750497, 0.6333291938053854, 0.9144309061256621, 0.9164708197209165, 0.7027022324300142, 0.5189030111963369, 0.9917322320690363, 0.5535493475292388, 0.861894059707146, 0.9366803682756882, 0.8176048776442745, 0.9594920861033736, 0.9506468215398346, 0.7302917544668059, 0.8607023921679804, 0.5139796448775245, 0.7343014850082304, 0.8261509126922342, 0.6212128612462586, 0.9430099679034633, 0.9008493156180504, 0.5724682910307877, 0.9289036272476647, 0.6304286231161866, 0.8419034191775419, 0.8624472298408585, 0.5397786086769298, 0.6006270029869929, 0.5275058894126794, 0.5530850429369006, 0.9634072494082186, 0.5784777539328666, 0.5579117802839166, 0.6753850168526687, 0.5653148529448984, 0.6159301930331291, 0.6472745470939852, 0.514891641526345, 0.5712311207722495, 0.5112378602286369, 0.8181679695649431, 0.679221268278043, 0.9720637831583179, 0.7048454834170113, 0.5327108046400962, 0.7325153477754263, 0.9361211553869453, 0.9227770631242613, 0.6185960784325476, 0.7348284484309849, 0.6587672574530243, 0.9156984352136961, 0.852035942712472, 0.8257848736675468, 0.7602513511188869, 0.9674326846956262, 0.7353887596043504, 0.5506697597158241, 0.5574847646239598, 0.8844954621783534, 0.9239288653462556, 0.8052570597567704, 0.5499954907744088, 0.877216836721963, 0.731433923003443, 0.841931996051793, 0.9900703721270798, 0.6574222696618801, 0.9237169242670993, 0.5293443133522993, 0.7691805931682868, 0.7491898836151948, 0.6735660908546869, 0.8208924875043004, 0.8700123160238031, 0.6764799995464666, 0.5225499094042994, 0.6837701709115769, 0.7414528241301479, 0.5361968913711108, 0.6504287866121936, 0.914005126127613, 0.8792235958927161, 0.6168117611931403, 0.9150733673999156, 0.8011623107441164, 0.561995314778267, 0.8606221456992051, 0.5716373951806171, 0.6719254887078532, 0.7229776387412603, 0.9153869555939124, 0.6968410300950708, 0.8109037853038864, 0.649567078255038, 0.6088979736018822, 0.820583796594342, 0.5110137902800198, 0.7534526290882841, 0.7653651320928447, 0.90597731921986, 0.5396744779723806, 0.7369258452971881, 0.6722457948182561, 0.990493705084418, 0.7205922573954, 0.748785935010519, 0.6855451189783339, 0.8882047645522989, 0.7869539477802572, 0.615865432154809, 0.8525067609184833, 0.9649262658904434, 0.51907464204399, 0.8748190915562188, 0.98882026685235, 0.7821573165308313, 0.7993892969483727, 0.6063388683222191, 0.7495519561803916, 0.9033682301360413, 0.7031775335645012, 0.6065204384944094, 0.700243868069037, 0.9013264018893167, 0.5038986014060347, 0.8137321326807733, 0.5487164077368853, 0.7943134175790953, 0.8856767998509485, 0.5447086694815917, 0.8382790206813407, 0.7413327297333465, 0.7592638810180795, 0.5118643689068822, 
0.7304673760063718, 0.6685206474967422, 0.8473337552765117, 0.7485068735031175, 0.7973506000117021, 0.5966571498832132, 0.8581670649525934, 0.9499847076427659, 0.930030282199322, 0.5772196628031078, 0.8635896201020599, 0.6911919862908293, 0.6870011877958819, 0.7655678511422448, 0.9472689912428409, 0.5506145787746555, 0.5505525463821661, 0.936211427811467, 0.8686457728023731, 0.8772228748086839, 0.5449257831974454, 0.6717825495774536, 0.8976007161273589, 0.9325636425138342, 0.5173358155642809, 0.5109918616502291, 0.9285060735330433, 0.5763756045433561, 0.6078670025665242, 0.5919207943727225, 0.8146248676312148, 0.8200526030645239, 0.6988680267400142, 0.7916079946143312, 0.8519493175757175, 0.8619097562684734, 0.7204914408953362, 0.7274147356347185, 0.914248454088844, 0.7751339119457573, 0.8474671309277927, 0.6683409236504583, 0.5117580163915878, 0.9834642947491044, 0.751049771681691, 0.7960506950239372, 0.7101115212885796, 0.5375484292850223, 0.6849187320471852, 0.740233920462613, 0.8670219293877097, 0.6759315481123946, 0.7711232409040614, 0.7244341124055647, 0.9482176314100208, 0.9394795310819621, 0.8251169485808874, 0.9308379843132351, 0.7768794750272867, 0.9515412757111991, 0.6160083266830307, 0.7042729034693638, 0.957925378437712, 0.6084567422304702, 0.5632258292668968, 0.8210782430742456, 0.9452288202511758, 0.5066685927984618, 0.5377127409850126, 0.8560579750066777, 0.9545323581936089, 0.5412431581448278, 0.8234544492953193, 0.9455570335995138, 0.6048468191993973, 0.5346696802271569, 0.9566801192073742, 0.520368550867651, 0.9448701482281618, 0.7338294742856515, 0.7399688105693817, 0.5915877879841107, 0.5618064413478951, 0.8842686724925468, 0.613288954301067, 0.5957944077425856, 0.5250803093415194, 0.6360596526971407, 0.8230232957700672, 0.7685709398845139, 0.7003879180203121, 0.7925883235379777, 0.5765473534064796, 0.8535881464958508, 0.6406616970551404, 0.7960705969926134, 0.667545936963887, 0.5898337728010129, 0.9718350078103184, 0.7658225768229521, 0.6969002609951208, 0.5698220015758422, 0.5948350348571644, 0.8271988629100391, 0.8953020299765728, 0.6827695599653352, 0.9943391972185065, 0.6168079413233408, 0.5613324878571494, 0.9140251425097408, 0.6287909686768656, 0.7247465485884879, 0.5543840359254533, 0.749745999165528, 0.8936963764188102, 0.9919152524806039, 0.816289065522855, 0.5875574072590574, 0.5542742982773547, 0.7662787677490386, 0.5213380505859606, 0.5507017673792358, 0.6335106661685728, 0.8998373498791792, 0.6431423373503686, 0.657559630807917, 0.5651906791381527, 0.9093765377013225, 0.7864642699885906, 0.6884857414685577, 0.8416811100508435, 0.7794005947100422, 0.9032300929124337, 0.7212432745595792, 0.8299390470464829, 0.6323931314184619, 0.9831566294336953, 0.9006323070876951, 0.888244752958592, 0.7460944128380982, 0.8657144790905793, 0.8428007824871999, 0.731436740530289, 0.5693633281289037, 0.7508967673482881, 0.7498081313007685, 0.5135959012557095, 0.8972420905548613, 0.7201699010540452, 0.6861231925770315, 0.7423109886605521, 0.5678044591350295, 0.7370444140239594, 0.6193735393085958, 0.5006499174363966, 0.8259108597858293, 0.540103780458975, 0.6988686125756502, 0.8471080173452654, 0.7825488569006074, 0.7619649745503321, 0.5989183087498622, 0.8873331837309928, 0.814400355738212, 0.7531135524575839, 0.9331004326454664, 0.8420699402344713, 0.6848397756152054, 0.7083866213831937, 0.751064401521079, 0.5511020150726594, 0.5904789182873579, 0.7658322519375201, 0.9637328463907303, 0.7613424582130816, 0.7982584935135164, 0.5090907115094327, 0.5660528935093676, 
0.824326114744694, 0.6411971660452318, 0.5791521605644928, 0.9296349116898779, 0.7222746128897204, 0.5788079780450297, 0.9571104063660261, 0.9717627337116496, 0.9903664346713927, 0.7532352284684806, 0.8622293516381233, 0.7250124808315008, 0.8188818114887281, 0.9573962933489404, 0.5879975461014971, 0.6757976515319732, 0.5745667811937021, 0.8633782497020501, 0.9504024074885765, 0.625282968234216, 0.7243157078936511, 0.9697600235407751, 0.7190582422140136, 0.6377792196641994, 0.5234528108116235, 0.9465572424827515, 0.550129845665297, 0.5151702598405513, 0.7778395857863358, 0.7985329790044373, 0.5248915752066232, 0.737616241754933, 0.5035213767560469, 0.8059295083073574, 0.5600327426337088, 0.7620534788245364, 0.6294609899268465, 0.8118217258867659, 0.8250749475881529, 0.9096735745716311, 0.6600745272098678, 0.5299262007856276, 0.8758570585534342, 0.6102001100013886, 0.9180795215807709, 0.8996173152157186, 0.6260290955934245, 0.9049766842053386, 0.5989906444681823, 0.6019065971559077, 0.9902282141527949, 0.9115089950709434, 0.6573494963080175, 0.6321505051434555, 0.8558865168341661, 0.582784871026137, 0.8380531992965288, 0.6704576638826097, 0.8997598350935687, 0.657908779973781, 0.675952108029501, 0.8318514895217466, 0.8286085891020736, 0.6672973362775286, 0.5500486109317955, 0.8783639703828469, 0.5818845861218802, 0.5356185119122717, 0.765989289113606, 0.627750003177433, 0.5551023902480505, 0.7650348740063769, 0.8058910534595795, 0.891784112602062, 0.906697884196118, 0.6681702710943966, 0.6585402133110658, 0.8511471290589099, 0.5510438452260253, 0.9239345549913942, 0.9005686592344647, 0.9713171113552186, 0.5582671921681899, 0.6973123012463238, 0.5191996826025855, 0.6080823014966176, 0.8175252410078931, 0.7059968902817324, 0.5968952313413398, 0.7665562934594614, 0.7747339795761414, 0.613550570741267, 0.898501671199977, 0.6077758458395346, 0.9941822516170065, 0.6559035592549407, 0.987615878695826, 0.5276190249165856, 0.8897570260704477, 0.8530280798881091, 0.9356378388010999, 0.9259132837020175, 0.8022082004762856, 0.8076298512566639, 0.8663344796382106, 0.6719871215590179, 0.9396497774534588, 0.6769964142781693, 0.6820050279841984, 0.7033436147326058, 0.6164252836658448, 0.9921244036918004, 0.963104757453525, 0.6317850870019371, 0.6395263971902858, 0.5803888997745585, 0.5206734135207015, 0.5884262306478718, 0.941740956573067, 0.9113101481781289, 0.8385563145593948, 0.7659296096193375, 0.7380366848687308, 0.8585284721588077, 0.6759857526933563, 0.5806504627490463, 0.9690394014306821, 0.5367254992130097, 0.525410665072933, 0.6466583200279806, 0.8717538860607642, 0.6088989480823765, 0.8224472662810164, 0.7124146320123471, 0.7050438290305696, 0.6201993370066445, 0.9881896188530422, 0.8712970769853917, 0.7649412560987339, 0.9947333293603701, 0.8300646592692115, 0.9301604472656366, 0.907860576722283, 0.7030511170536878, 0.705647018379691, 0.8722756997468808, 0.6790271263271409, 0.8657444322536907, 0.823356949892082, 0.8051842625578691, 0.8055056957937127, 0.799921195660403, 0.6535140808500328, 0.5716511122953435, 0.9704208445082035, 0.6162670202431607, 0.8576432525570918, 0.9336314765017143, 0.7300903624758298, 0.7942489241675008, 0.7813294235023105, 0.9203710285510914, 0.7226521351592334, 0.7762493100043286, 0.6757453566292384, 0.9031855187068207, 0.8539188508414891, 0.6740469900048862, 0.6190052129106343, 0.5712236825852508, 0.5859096428330488, 0.6686451031934264, 0.7085048900688125, 0.8648958248943523, 0.7987444153433043, 0.9098587804468852, 0.5720477116639935, 0.9608115661421195, 
0.9182441438533953, 0.6166074188697499, 0.8838129415010282, 0.8698466772931654, 0.953034488275111, 0.6812225854250429, 0.5545305532391946, 0.5558268852938302, 0.7874219998161728, 0.9487465843293579, 0.8606361765031318, 0.8836918815961555, 0.8756167809846236, 0.6952053976413081, 0.985315958618731, 0.645056983324513, 0.520588783640038, 0.9793086607880441, 0.6060657649914263, 0.7584605490749321, 0.9448674850411432, 0.9292241704947568, 0.9866268365086971, 0.5717773447633844, 0.5875167880568788, 0.6288177437778986, 0.9850147064600626, 0.6846646625403404, 0.5910140389198368, 0.7867727493582136, 0.6338948645563487, 0.7567761848827812, 0.5871550928949574, 0.6115941411445491, 0.789526386309104, 0.5632814431450048, 0.601144457537686, 0.642373098644154, 0.8728971149645948, 0.502460610624194, 0.6737244323093441, 0.7449332753584716, 0.7219871547306973, 0.8994475856506726, 0.9550442277846831, 0.5751775245747792, 0.5613306381275425, 0.6931615614247966, 0.8120917358508175, 0.7706272421285101, 0.6023295569215569, 0.8694478299825259, 0.9655871496882871, 0.7612053388089376, 0.7620097284927044, 0.5773911862947334, 0.6639569597980737, 0.6295607522539917, 0.813522016869495, 0.6640709933649265, 0.5252437355528481, 0.9262936067407029, 0.7670165052232678, 0.5023597839319631, 0.9541007537958119, 0.5711231200102524, 0.881883755652858, 0.6134217474037961, 0.6775862795531706, 0.534734195139827, 0.714557410370556, 0.7380215532686084, 0.7428949813900866, 0.72465025549974, 0.532164715448096, 0.5935038094579259, 0.9363810444628665, 0.7954662714679772, 0.5355866116646468, 0.8820043338845257, 0.8587325909510577, 0.580902721335165, 0.7755556224099927, 0.5277981140498147, 0.6554040809889321, 0.5323573212141062, 0.5189437600436185, 0.683079866195905, 0.5846062521143292, 0.7653048103393465, 0.9691090476955062, 0.9496939798595256, 0.8755425600331175, 0.5547957058166093, 0.8526766938607282, 0.8375328712197896, 0.8401365234028744, 0.5302386441630519, 0.7353653670453577, 0.6866361408400885, 0.5567200554428446, 0.8162585961735183, 0.5602748207627459, 0.5200404021176728, 0.9188601882390706, 0.7943084912819973, 0.5740976937983508, 0.5715038067718914, 0.9460635395064275, 0.8088422676953992, 0.7485986604423719, 0.6758363260197797, 0.9742038778979673, 0.855058482660801, 0.8803786393360761, 0.8301285583223847, 0.6268584947511391, 0.9459684613428287, 0.7702963900626292, 0.8140790693065083, 0.5830563180802633, 0.8429290179455283, 0.6881458950780083, 0.6233807512949392, 0.9411036776342137, 0.9772449657385469, 0.9331135193973528, 0.6051894034156275, 0.8857005932606112, 0.6205685333134852, 0.9349095594396509, 0.6870994424215626, 0.7702963200211639, 0.6330905581106776, 0.7335703507703141, 0.9806125047611549, 0.809935771392029, 0.9680318445356297, 0.9907955010576935, 0.7018576774311336, 0.8737619819731841, 0.7430281544282362, 0.5346162891122459, 0.5313448151861109, 0.9498325975184183, 0.9197729717831442, 0.9419502807872836, 0.5224001067261326, 0.6265886757355831, 0.9427293942169503, 0.9026766144827201, 0.5864619673227689, 0.8926426749964536, 0.7198942903832104, 0.6411891935610923, 0.8344929170139922, 0.588405286635808, 0.8707474022862429, 0.7285309486122931, 0.9617662585193423, 0.889979542242541, 0.6491607370917211, 0.8132998863080803, 0.5785662326739717, 0.5988388979940804, 0.9193026527612438, 0.9804952641261735, 0.829352118388035, 0.5355540262635212, 0.941242155705027, 0.878959651644897, 0.8041875528696139, 0.5962700465902669, 0.7222650193598619, 0.7973453839984679, 0.7497821231081976, 0.7467536348852084, 0.523875900967779, 0.6050936127763273, 
0.8619714795941609, 0.7928492616410567, 0.5184431153082757, 0.8461802185585174, 0.5650504109985135, 0.8199115062242581, 0.6649595562431209, 0.8878006054055887, 0.8327746815521813, 0.819120642793767, 0.8147525176427104, 0.6260215616247357, 0.670799528440656, 0.6477954081586846, 0.6706465863293012, 0.8976475569839268, 0.7228434424748698, 0.5040094477593858, 0.7635646127721865, 0.6545380677150301, 0.6177041268358816, 0.7008703449576072, 0.9807913727760703, 0.615117474430808, 0.872712830734838, 0.6164566770718011, 0.6518306439591928, 0.5536003978548509, 0.9057425444466782, 0.651327498564467, 0.8750545348961674, 0.5485690194510258, 0.7046673340076635, 0.71843081886911, 0.5999630510665281, 0.6756407397864563, 0.6887861333168779, 0.694913787159859, 0.6888864786445565, 0.5087171350062574, 0.6849145265656034, 0.7728288340701824, 0.7870175413027949, 0.8417980786625336, 0.7055402244015437, 0.7863869559775242, 0.6490659033993795, 0.7388765089193521, 0.8682408552364653, 0.8263757844753712, 0.758871706900969, 0.7602705935933283, 0.6472411803433875, 0.7259650168063405, 0.5389532105367616, 0.8409169409196784, 0.8464650843324759, 0.9350882087776262, 0.8605232408211891, 0.7939712398364309, 0.8418020487372474, 0.8876181435360168, 0.9707864811510643, 0.7515688042119097, 0.5996741509371599, 0.8413762402160891, 0.6953202514334009, 0.5790983737766018, 0.5237383223225864, 0.9976036599191442, 0.6974060324537301, 0.750890705558378, 0.7121031540502588, 0.9332030828502587, 0.9388803390145135, 0.8252357354665766, 0.5956661024468823, 0.6352404631787756, 0.5657937301729925, 0.9322737775009885, 0.5103707410643037, 0.5222172405715066, 0.7543507866898926, 0.5532166253651667, 0.6341690297962665, 0.9000279863314429, 0.8597248566966185, 0.8742078543006462, 0.7572359263914215, 0.7833427208335813, 0.8141618537390469, 0.5645738606790078, 0.7252005023610126, 0.8824147351787281, 0.8986464328417412, 0.9400837422386391, 0.9665573717123819, 0.9003557953852034, 0.8190898206767318, 0.5015205447179772, 0.5348005376116955, 0.7143814770594434, 0.9245885507420415, 0.7669613706189886, 0.7919605643836028, 0.6036380126007359, 0.8336020336584449, 0.9795507623982922, 0.8352137264496835, 0.5512713680653604, 0.5521341336007183, 0.5242499542182981, 0.9316364341604466, 0.6412947407753605, 0.5432375238664342, 0.5489393820851522, 0.9753924634925674, 0.5724560469532951, 0.5736908081528617, 0.7376027923984716, 0.7882604474636408, 0.9197181006419016, 0.6483751687889916, 0.5439128712687834, 0.9662975739208575, 0.8689190978497114, 0.7059015331205292, 0.9033684570927076, 0.6119243493915545, 0.599088565082854, 0.8996019604917032, 0.5818037601224803, 0.6542580759644486, 0.9452984953677468, 0.5949326166097579, 0.6728003070848034, 0.6015790506179525, 0.9538325669461212, 0.5334336782584617, 0.5011813477014594, 0.6909660367168466, 0.5604647681884702, 0.549473336737705, 0.5847587247658192, 0.5174832502156003, 0.5998657790716546, 0.9696818039299651, 0.9562950045906856, 0.9786327390512979, 0.6103669384467313, 0.6903592935279297, 0.8828671923233538, 0.9869734336452283, 0.618901331060258, 0.5396665887779374, 0.6083749195509901, 0.5487636325907506, 0.5354532821554758, 0.825246479260421, 0.6659189667420435, 0.9641847891092528, 0.8925926863996794, 0.5653289865145408, 0.9290371903423068, 0.9783708332670252, 0.8280476938241061, 0.7329226455775693, 0.695200490429299, 0.8378249586484021, 0.7175864941424048, 0.7299079414878753, 0.5178369997856186, 0.5307383235381322, 0.8664212242643519, 0.9757082406368283, 0.8476019742736709, 0.8750368552155969, 0.6840740315040172, 
0.9271388469543946, 0.5483408704977355, 0.5306987073746443, 0.9866461671216615, 0.9401627307352052, 0.6851293660516289, 0.989374291904175, 0.9173407903444842, 0.9131396687204718, 0.5928497694983489, 0.8921253247785039, 0.6404921406509265, 0.6590034407245933, 0.6368873996556527, 0.6154957412619169, 0.5626138069783115, 0.5559227522488596, 0.780325893402523, 0.7185348870457486, 0.748429057843059, 0.7318489123429515, 0.512974173239293, 0.5375232751260086, 0.5444508050851451, 0.5135545088738855, 0.6663138433250435, 0.7137778541813065, 0.5176790833839024, 0.6993095963511424, 0.5559854745501589, 0.927393025795457, 0.6574929060417009, 0.66050952250461, 0.9018020336401099, 0.5628730954958898, 0.5625100861292662, 0.6923612441188374, 0.96993766665067, 0.6491808132740051, 0.658345050718145, 0.5697324787857312, 0.6554197659149847, 0.681595387799711, 0.5923065737465381, 0.8358004860922268, 0.5620553345269275, 0.7262017805883108, 0.5105689284392927, 0.9460960841543795, 0.6256476585762547, 0.550005091863761, 0.8823108462105582, 0.6660697910219056, 0.5971310825452371, 0.7163047628547002, 0.6299944572446625, 0.9202825484050157, 0.5283342821472989, 0.8470460048110457, 0.9827102484558055, 0.8417970932978674, 0.6169094057126969, 0.8701026674137329, 0.5929527960066063, 0.6324026699514409, 0.593413894023409, 0.6255380537540426, 0.5027640299688362, 0.9723198524079664, 0.7764055455878347, 0.9306896495620276, 0.7493069476193279, 0.562484949706866, 0.7996250069772091, 0.6189210420271674, 0.6418247095045443, 0.9600603911350425, 0.981982703109771, 0.7705830654066324, 0.5332880817866366, 0.5694493862503373, 0.8605846730300757, 0.6594330046218453, 0.532151151953562, 0.8202478687477963, 0.5277557846501773, 0.5290185243333995, 0.9542667014222752, 0.8245237592114034, 0.6129066039564643, 0.6937407554159649, 0.5043392251368963, 0.5602130503767873, 0.8357452980500577, 0.8716287530008084, 0.9498828962664604, 0.8659184760275654, 0.9823140291765984, 0.7008021742620822, 0.8834722874562448, 0.9824001830612819, 0.6791840515819725, 0.6483856950777767, 0.649968557250207, 0.6001849757083981, 0.7223302289696527, 0.8024251841426631, 0.8634342311600989, 0.5169009331377228, 0.9662773993893077, 0.7765493528649623, 0.7254095259321445, 0.727129301782401, 0.6727081800273026, 0.7666467340918106, 0.8540759581629306, 0.687559070702519, 0.8601807748699746, 0.619445354491728, 0.7593909383075799, 0.9703445872432859, 0.8998495397565269, 0.8396752873302114, 0.7161121031292113, 0.749498855719513, 0.704654070555183, 0.9153502275815859, 0.6821065802138799, 0.8411995685342668, 0.9955173637121844, 0.6451170613880599, 0.5740634450108866, 0.66442700382197, 0.9454696988476781, 0.8354436383484761, 0.9678099486521383, 0.9002160200993903, 0.5577686342724, 0.7684614918484495, 0.6181945317796389, 0.8336289296354471, 0.9096686003034127, 0.8084634352261246, 0.9954437016808999, 0.9109337807708693, 0.542692059468711, 0.7583156349076676, 0.8674652471456393, 0.8514226155580098, 0.506900177038757, 0.5442004810826535, 0.8514775330833759, 0.626462119388369, 0.511862131336785, 0.8227734604824068, 0.620895187804642, 0.8210969266747374, 0.5721707063734223, 0.6787945436338478, 0.7236644768741474, 0.5397434424799972, 0.7619962291166397, 0.8474891231540558, 0.7499879450220742, 0.8711158050387391, 0.6261916122417712, 0.8721147723322011, 0.5879894138688491, 0.5089613325914653, 0.9245416091368196, 0.5180033582070525, 0.6373984585536747, 0.9341692760181095, 0.6868403459597207, 0.5824600376158422, 0.9087907847415326, 0.9678963281142069, 0.8662511466593132, 0.8281853875270615, 
0.8601310985767885, 0.8712938533038019, 0.7091419526169358, 0.5730302609545167, 0.6989937194832307, 0.585275667745835, 0.9633542672692492, 0.7597287151648342, 0.6009474894969723, 0.6049700753175997, 0.5578481977924106, 0.5183247420137536, 0.9016050642241582, 0.9782253389842697, 0.9293306907337724, 0.8588791680730208, 0.7689541654214487, 0.7432667225401792, 0.7147351780063295, 0.9107399741378257, 0.8831875994947476, 0.8293082291824923, 0.9772197985075703, 0.5641543169483442, 0.9398843505970345, 0.8452086650229345, 0.8286848659831503, 0.7246141628294916, 0.7038272750998714, 0.6866513656287525, 0.6712757202088746, 0.8043978394167357, 0.7048689208663501, 0.9375770460995199, 0.7758147236048574, 0.8190061858467073, 0.8487249145494267, 0.9453716181870384, 0.517459066699724, 0.9129495962530119, 0.5412540839653273, 0.6395706795527489, 0.7142923468067505, 0.9731663872685078, 0.8536143860268721, 0.7618534059118254, 0.9958952265171641, 0.9085421606060577, 0.9055496798097873, 0.5549015383811016, 0.9897323306136083, 0.8108941861216805, 0.9500408844632662, 0.5993028126285679, 0.9114100446023349, 0.768199163499814, 0.5114433675588209, 0.8719680686402629, 0.9509353004234862, 0.7413872173560676, 0.5040880337125957, 0.7247521972724581, 0.8376648514879727, 0.8636635521412357, 0.9333603275806632, 0.5197406566212965, 0.9918308410835529, 0.6833347214027592, 0.9614466200916423, 0.7542778011626943, 0.6002190790773654, 0.631492606880381, 0.8109914326424259, 0.5161812830143655, 0.6252151264092796, 0.5563295105531301, 0.6722476291825616, 0.9814941780907311, 0.5415566663569251, 0.8141559753477177, 0.7225184916194556, 0.7342069780499053, 0.809701599173068, 0.653955532012059, 0.5378903518651146, 0.5906632508094507, 0.7919183337599558, 0.9007377798522733, 0.7901304849493768, 0.7389298436463119, 0.9574143041517182, 0.5039903338943303, 0.8954577104347112, 0.5745928799246139, 0.9087785886363235, 0.7934889862562722, 0.6710060447533774, 0.550212005836778, 0.835008281162029, 0.8510286052442837, 0.7804049977096068, 0.6036689122652494, 0.7727626014601158, 0.8315500693412192, 0.8983751203419754, 0.5868933569767257, 0.8875288440556156, 0.7356579295941361, 0.5300579664684208, 0.787141961850735, 0.5731032258421873, 0.7771625629714027, 0.7706856675586471, 0.8432312145445374, 0.8350891540543319, 0.8430168417911172, 0.9707365543526527, 0.6769370194019667, 0.6972949161890372, 0.9919861764494173, 0.6605388422680165, 0.7937865386973404, 0.5347256437044872, 0.9978082836318315, 0.9212164951890227, 0.5939148266323726, 0.7539552020948936, 0.6254125044579439, 0.738138979438131, 0.6889566544749289, 0.9562179302013395, 0.7780706515777125, 0.7945490752682269, 0.5866935736709497, 0.5117052085665055, 0.5960383271076996, 0.8373891797367077, 0.9616561649098803, 0.704562289394896, 0.5921527883761083, 0.5565223995964991, 0.8680148031076484, 0.9891890031508208, 0.9991977806031309, 0.5020739651876478, 0.9033441281936851, 0.9085178608407365, 0.9460405196994227, 0.8968718038461636, 0.6480858695390368, 0.9356590496329726, 0.6561949230565862, 0.8383891003945961, 0.7621981204086448, 0.8392817709409162, 0.875384403157071, 0.8208388980329449, 0.7950198911602151, 0.821174436665876, 0.9422948167138974, 0.871667302541906, 0.8957949072385882, 0.5643544288798994, 0.5265437361518454, 0.7838464086710242, 0.7002119090602839, 0.9128247112277962, 0.6656861323509509, 0.5574090315707385, 0.5117149606644859, 0.7049319164807151, 0.655712709926491, 0.552250002418659, 0.8403682646704249, 0.7603953613149395, 0.6656980810965404, 0.5571750944513156, 0.7478608907435305, 
0.7859537277016483, 0.8868613983239813, 0.6587943618875944, 0.7282173572695367, 0.8536416356550576, 0.5508967131394626, 0.9676547541240563, 0.7729936463718674, 0.8050784661109416, 0.7107792610773778, 0.6782692683361127, 0.686688778855292, 0.523475193874605, 0.9120515697347464, 0.7635565951594016, 0.5414979214434901, 0.9662511292749141, 0.9563552695332933, 0.9713461503863521, 0.8693382048714071, 0.5455291876155046, 0.9437475837323048, 0.8404310369367367, 0.6711412371898577, 0.5806684521639653, 0.76185107230379, 0.6863842692031963, 0.7799265581697847, 0.8132095569475222, 0.8645071373159684, 0.7257135762681602, 0.6594308030988855, 0.7415823050238428, 0.9018363219340275, 0.8989081152312284, 0.5100819025457073, 0.8204543526569082, 0.6612421794471938, 0.6278729041295942, 0.6008067677625952, 0.7453462793914194, 0.5177574625042592, 0.634116943236831, 0.9385787960274532, 0.7377472102867573, 0.786072709668902, 0.6823833836872213, 0.7236629573919562, 0.518798613967759, 0.734058665508672, 0.800608834813055, 0.8215045921202445, 0.8061461396196147, 0.9918149627758686, 0.7682512755896524, 0.816603621163309, 0.6906261024725174, 0.7641341010072509, 0.8288477401313115, 0.8731977584594863, 0.8696896046339879, 0.6932755295599362, 0.7962713996411181, 0.5637197663897254, 0.8378282116054132, 0.6385708201813594, 0.6513482345493173, 0.9982930702377553, 0.6349865583573022, 0.87973311063735, 0.9967636970561813, 0.9011873825395244, 0.5486470153024088, 0.7225052775139813, 0.8328362625183899, 0.709546650905453, 0.8727825736817809, 0.6248504185411998, 0.6966875497532397, 0.7679289191465868, 0.6876044001029729, 0.6907512542725858, 0.9716130831838297, 0.5465233966987704, 0.5079160084451937, 0.7798734394056803, 0.5245168375275437, 0.7288014080811629, 0.7109099641915859, 0.5974897204490134, 0.861213328321542, 0.5926732059280315, 0.6861911946077153, 0.8216087912701582, 0.9956681309861797, 0.7308050842518341, 0.5726382720974044, 0.7572630885155367, 0.6761216492210128, 0.7395922583340873, 0.5873800772193867, 0.9042536714899516, 0.6243434020895847, 0.5593910760203269, 0.660504988568668, 0.5777780134231019, 0.6360489698801602, 0.5882089162234283, 0.7997124786634061, 0.7545236858736835, 0.8948540450708029, 0.7920617127847283, 0.5324935798154609, 0.6177964558602171, 0.5191109927551804, 0.7801813110423599, 0.5174654865458037, 0.5252849350697887, 0.8002040479606283, 0.7255384668593685, 0.7207847829158686, 0.7528565012837907, 0.6720739983937307, 0.8810990894397748, 0.8094635533772907, 0.5834873392821667, 0.6435269369817114, 0.9976347099468805, 0.8205345989147516, 0.8876194577687934, 0.9820978956573037, 0.8213378259076773, 0.5501929795512703, 0.6153757370909215, 0.5819127577177274, 0.8274687020761464, 0.5628522311250969, 0.6244698912825695, 0.5102769715237401, 0.9244868046053919, 0.6428282398739493, 0.6679077463851406, 0.569902041614619, 0.5483353386191343, 0.5471812825182429, 0.6228096036106108, 0.9995139525017558, 0.7664963668787226, 0.7125306354120882, 0.7695007874725337, 0.8681501372794491, 0.5847211285373533, 0.5032180240889214, 0.514369376208951, 0.8749158951636755, 0.5897675029919548, 0.538661933576478, 0.9553961073609187, 0.8533740150939677, 0.944158877980643, 0.7471764564721031, 0.865738330054729, 0.8436635058195981, 0.5403139624162261, 0.864552185841324, 0.9204111526295999, 0.9682253921493723, 0.9584307749308025, 0.6079934139216234, 0.5885791445680008, 0.5638256635934655, 0.9551568188892479, 0.6768067246682317, 0.5363390318904455, 0.7936301673754824, 0.5597112345369075, 0.6467751745435537, 0.6845232534759939, 
0.9995549414814826, 0.6597165314784545, 0.9883719978448388, 0.9063676017960303, 0.8709980223948988, 0.6013645401833374, 0.5755227088786643, 0.7177985790408538, 0.7362076121694701, 0.9783651176665795, 0.5259617099586735, 0.9276381091109698, 0.8796794837364181, 0.5109806046848253, 0.6710685512321988, 0.7734803917584137, 0.8786212076322106, 0.6038412500319168, 0.5989793368936327, 0.7632612025277121, 0.5472310011511524, 0.6551400523821337, 0.7149185689709059, 0.7499743847572446, 0.7577846992057407, 0.5806655036882333, 0.9714750234471559, 0.7454340985767132, 0.9830194649966267, 0.7790028838759937, 0.5016892642915557, 0.8166443536329637, 0.6476579752374458, 0.5833138031482902, 0.8942782016467313, 0.9695709128909993, 0.8412091481593496, 0.8172544078480183, 0.8015403888371355, 0.7508187699150942, 0.7373149030342488, 0.9069932468809174, 0.6595519185065096, 0.8146617107804667, 0.5929650259615724, 0.8056849426069934, 0.5263081581129534, 0.8045248762395376, 0.7979861708929247, 0.7984660650792001, 0.6021124958959202, 0.6521342500909952, 0.5771664400778067, 0.6762920272655364, 0.6353887512609796, 0.6260037926216111, 0.8740929136387398, 0.5018285085731558, 0.8240583787897326, 0.5342401161670398, 0.8725115480630575, 0.7390891371605438, 0.7896765514835025, 0.8895650651109446, 0.9349441625717856, 0.5966941148916289, 0.5219869322753499, 0.5836495809845825, 0.7993804871038517, 0.6836784519384127, 0.9963138607622459, 0.9496154832894257, 0.5597897776223891, 0.8814716323580363, 0.955535451686141, 0.7634198523657836, 0.8437172930526917, 0.7283684214564021, 0.5374041179908119, 0.7224996026916783, 0.9806685830720767, 0.9091699005339076, 0.5415597892153154, 0.7613535687508881, 0.7136139637204437, 0.8698290879454517, 0.8620129281628524, 0.8490485541927337, 0.6528201389460334, 0.7005521888085702, 0.9650403836049155, 0.9611203197210232, 0.6127502737440849, 0.9498261052791144, 0.6372848657502415, 0.9628626942358385, 0.607463036432202, 0.9562602587903004, 0.8009887060337448, 0.9121399812024468, 0.6763252134727143, 0.9657060114706498, 0.7862657931266726, 0.9177865243861368, 0.823771471986032, 0.5637864332752306, 0.5941740211248552, 0.5379137309674176, 0.9161503002155971, 0.6956195926804702, 0.8941726520798509, 0.696230618792935, 0.9178126386374205, 0.8387215204779213, 0.750203393865648, 0.7804911858817962, 0.9057478744742908, 0.7797753765884784, 0.9526615549337787, 0.7051619925951889, 0.7438589400808755, 0.569172315622775, 0.9434286301021557, 0.5062007423373796, 0.9872105289228383, 0.5100965874105416, 0.5608942291930203, 0.966468295008235, 0.9206103132974156, 0.9434108091168484, 0.9777306818419872, 0.9399314709071163, 0.9040075104077301, 0.5998064140790018, 0.903500890703905, 0.5343086824375558, 0.627277360898244, 0.8090297504096741, 0.7303719174039627, 0.6677611291714706, 0.7202136199365592, 0.8178668023743709, 0.8664232542083196, 0.8131800587410267, 0.942644033696461, 0.6489749771973732, 0.6607030300129861, 0.5556753305799906, 0.5467172898642518, 0.761513941934358, 0.7617192694467498, 0.9715931474210282, 0.6943451023893396, 0.5138427210210446, 0.8777983259677875, 0.5050938950931333, 0.5186706687603886, 0.5316041949725829, 0.8237926384291601, 0.5476973813865056, 0.7163753012563157, 0.6845108225507595, 0.7766070341230242, 0.7883077794455602, 0.7407924088125332, 0.7570392371795018, 0.8503570415734574, 0.7190612298502584, 0.9022092358672618, 0.7554558226310812, 0.7180431991654309, 0.9705770833799818, 0.8642031607978518, 0.7984411848582298, 0.9834381313547491, 0.7704947946775054, 0.5914614129287259, 0.7361347507628071, 
0.8526747817759072, 0.6921828740640213, 0.8045622400653267, 0.8289924779531153, 0.8636265882318837, 0.8661320709762448, 0.9729472442171625, 0.6305384308615505, 0.8719931199324702, 0.6466183414429763, 0.5350884644197395, 0.8027194311936477, 0.6317380478915222, 0.9400569039968141, 0.7586522813622297, 0.8408747448726566, 0.7203515425172643, 0.7630560673230458, 0.5783459995963741, 0.777814524498915, 0.5424307089620881, 0.5310720024546842, 0.6378356942937053, 0.6565937765118874, 0.9851674411564668, 0.9136669006929409, 0.9645253446626939, 0.6890649709374835, 0.9409422499803337, 0.7479636814985337, 0.9903503544659735, 0.5927755986838261, 0.899672906452762, 0.5914037576696334, 0.9821928808790297, 0.65108651481081, 0.7968445649218723, 0.6363740617299674, 0.6732072404786531, 0.9761352766570479, 0.6686909690667922, 0.8084246652231619, 0.6147058850396075, 0.6586473381339263, 0.6874095747960682, 0.9355545065747353, 0.6548617312204594, 0.9098129832965676, 0.7826180569760881, 0.5129253885366412, 0.8268949335772147, 0.5400740221070177, 0.7895500924975816, 0.9655398648305034, 0.6646032842738232, 0.7119240636269042, 0.5776792020981381, 0.9726123785746221, 0.7964609020224402, 0.9691024040373649, 0.9378813696173313, 0.7227410688442742, 0.7465770975561838, 0.8638463572235302, 0.790339572251427, 0.8523870892122026, 0.9695437488342918, 0.8544977589885372, 0.6410760360892473, 0.9483695007210202, 0.9766892604721211, 0.9041437789469893, 0.6576697820376012, 0.6179163105790378, 0.790849540630882, 0.5658020217077739, 0.5726909921881618, 0.6152615733854174, 0.5219502091525692, 0.8077924438188018, 0.9831554101917974, 0.8862911160765423, 0.7739090345221962, 0.5314536872746016, 0.9916106578325709, 0.5852265760431694, 0.654458832638862, 0.7999348086674061, 0.6628840413475516, 0.9262576133605082, 0.7601672399370694, 0.7416353410771388, 0.6207668618337987, 0.9403352239586148, 0.8615699140703286, 0.9348439356499569, 0.7144607032362407, 0.9570440778714445, 0.6487828050361331, 0.989192098747559, 0.9762521467485317, 0.8417081902157003, 0.8110179189321116, 0.789549609756276, 0.7203395611654507, 0.8517153558770418, 0.6625384184569401, 0.856652189026136, 0.5151793597042622, 0.8799305871522366, 0.6458980481720256, 0.6311556889707571, 0.8811202190514058, 0.9117658277856879, 0.7557627032867491, 0.539139036024693, 0.512435739980718, 0.7062902061025393, 0.6235539804832755, 0.6372926653166835, 0.5586010438024055, 0.8559367933304853, 0.7303433196856852, 0.5192699132669873, 0.7947815978104387, 0.510661067374014, 0.9041096021703686, 0.8770313570937261, 0.7268250006969299, 0.9796502731574195, 0.6548556403888672, 0.5084762513188588, 0.5585135781498733, 0.8200062809560259, 0.9202698907293998, 0.9748151929919912, 0.745140573706847, 0.8014436729376119, 0.9677078427508046, 0.7073261379770268, 0.739640991400933, 0.7598985826037528, 0.7371714397588682, 0.7722016185305758, 0.6059809957983857, 0.6449684588718088, 0.7914931930418876, 0.897961067170529, 0.7508590725358286, 0.9880698793610241, 0.6414529448393895, 0.8550088159941225, 0.5191452143378337, 0.5573287782894297, 0.8116567456750672, 0.8950016349561414, 0.9092775956896271, 0.6639136581754055, 0.6050731729086327, 0.5328686944379568, 0.9976161586823712, 0.8784816000231497, 0.6466020163331188, 0.5806568950558442, 0.5327811232411733, 0.627028757952206, 0.867765215220156, 0.9778874529187038, 0.8333856077959961, 0.5906710198300975, 0.5416540765791633, 0.8135515445633956, 0.8282572493818172, 0.5987480427191667, 0.9449597084374622, 0.7371426189228444, 0.8356001195700531, 0.8037766128496334, 
0.6482718421317616, 0.5216574127490037, 0.8182884831759679, 0.8287797488807751, 0.6399486349708507, 0.870776889785081, 0.7538607767887806, 0.5922045928716269, 0.6776968545176028, 0.5834351905586155, 0.5249241523042817, 0.609453253136648, 0.83980719393308, 0.6471335437672803, 0.8083062830932849, 0.6845334216410978, 0.7692111652238531, 0.5795032340068536, 0.7299840584721793, 0.8491370832162819, 0.5700786881177323, 0.9107909083559388, 0.8746297855328784, 0.8001767161334985, 0.5475462908581064, 0.86651903541155, 0.6825944047059339, 0.5177210640691665, 0.9567626213299281, 0.5303166257935457, 0.9100248241288362, 0.6056721695294562, 0.7896083184420181, 0.5223615725018624, 0.7368112747783151, 0.7875708598971636, 0.7281276586196301, 0.7042967187786757, 0.5248267901579124, 0.5846304345702154, 0.5811199697098608, 0.9291989963478394, 0.6704313677888942, 0.8479008392537299, 0.7764989158487352, 0.8848031625649313, 0.7149377817591864, 0.8793944525855528, 0.68084854486983, 0.712050483474927, 0.8008444623083933, 0.7129583260791823, 0.7562592421181258, 0.5139257712532712, 0.8689183084070378, 0.6672684808700344, 0.6605734341938123, 0.5505104801563829, 0.7134576388457617, 0.823442822691848, 0.9913801880420365, 0.6165047347258314, 0.7996886757345278, 0.8481388880308587, 0.8522080136790522, 0.5897932836606995, 0.771156876152552, 0.5510356612540113, 0.6578324440964098, 0.6457138202492807, 0.9911367913707538, 0.5248811047957239, 0.6141484301425899, 0.9087399914079772, 0.790581953505647, 0.5888556253131594, 0.6195057719503702, 0.6595147340843535, 0.6524919172456565, 0.9400642849312304, 0.6125769444587961, 0.750352918874551, 0.8033214913996014, 0.8211196056581588, 0.9914752680275767, 0.7550504511934852, 0.5235483772123678, 0.9361564286699136, 0.5799278905029444, 0.9590849081330709, 0.9224334821202063, 0.7081943836957807, 0.9094613031482347, 0.904380703328397, 0.84359745671216, 0.9339611355589148, 0.8756442058219829, 0.9188485381353444, 0.9763378455794811, 0.9161671052713245, 0.9473115908848838, 0.5729879195610434, 0.9834166263118729, 0.5619527069236225, 0.7921176205804213, 0.8322783047982937, 0.6299154413689124, 0.802573466582972, 0.6898784515230232, 0.7466445705593787, 0.7843538009738735, 0.9696185378554787, 0.6982025888495218, 0.9196579888636527, 0.9636214400265193, 0.8627298631295017, 0.5580785644376118, 0.5489317866743106, 0.871509612990486, 0.6539803616303441, 0.8747565393180291, 0.5396286177074465, 0.782779613067146, 0.7833069703721437, 0.6726498402206711, 0.7327802731735245, 0.8517853148340808, 0.6612182153214113, 0.8187890561935812, 0.5704385012399777, 0.7123175762933001, 0.7729127808290588, 0.6830285973022691, 0.7476596999875179, 0.7441981493343945, 0.9007783674611813, 0.5581802164023413, 0.7531744912124689, 0.8495750579806974, 0.5257322072732205, 0.8379961164905294, 0.8855152651167507, 0.9631141864569008, 0.8782456045347471, 0.8744236851152649, 0.8833684824291834, 0.8067273989525323, 0.8569416113659729, 0.9636328797574103, 0.6993166409398601, 0.921029759218223, 0.7331705402531216, 0.8375902243012158, 0.8676724326080212, 0.952872812438859, 0.9607407009491611, 0.8690281798213235, 0.7464999323915353, 0.635340708838696, 0.527435575502132, 0.5020376260243427, 0.8352045476537078, 0.5236437871113133, 0.740187321066831, 0.6511889416298872, 0.7057602881180496, 0.7816998830405436, 0.7081787221046907, 0.9524588697073446, 0.5135214193156884, 0.819107555527264, 0.8394616410846986, 0.6983418659474152, 0.9300815213261634, 0.765379220770962, 0.8832927540831483, 0.6397968559896783, 0.6307885562633466, 0.6243960435977307, 
0.8983567875378664, 0.7394741210700552, 0.6139383538357537, 0.9342263664895698, 0.6083713019891525, 0.9695106893503138, 0.9042516383406076, 0.6948760804982662, 0.6125178444551923, 0.8624571500738387, 0.7612194100896719, 0.9026997798741612, 0.5846281208250365, 0.5907880897973495, 0.5470987128376599, 0.6913947590334706, 0.5557666711635025, 0.749186339660893, 0.9469789142222573, 0.6704085657444505, 0.931252814491027, 0.6834717195126212, 0.9775332668457983, 0.5421832108704444, 0.8799751955542345, 0.6313757538058999, 0.900565057055833, 0.7646676040552842, 0.6895233665102731, 0.9675131263006869, 0.5960027219997419, 0.7101149428280267, 0.6579295333164019, 0.6397824595997621, 0.6927334168044533, 0.9388940229755562, 0.6066445277715575, 0.6841064619291677, 0.9592365284537095, 0.5090230084565892, 0.9069938068823447, 0.5355540106569048, 0.5018026272303946, 0.6414543070734959, 0.5373889134105951, 0.8927295866976588, 0.7161590302729053, 0.8708221800568803, 0.5972919627664417, 0.9691132990283955, 0.695485429463514, 0.7640371125167134, 0.9072009780844209, 0.5718691359262729, 0.7867451976664088, 0.721095040313823, 0.8369326333072787, 0.6380336990196198, 0.7399092576875842, 0.6736624729894246, 0.7924547270226392, 0.55868469076503, 0.9591190933591147, 0.6714667829676866, 0.9842869874592254, 0.5411637199805095, 0.6568275721223935, 0.7474251946105642, 0.7572459610394608, 0.9817734897169774, 0.810208117235583, 0.8408891288269975, 0.8249510961398214, 0.7396111830302783, 0.680272212771687, 0.7442733965418862, 0.5788584482993259, 0.7273229060598572, 0.8161104312641383, 0.7903584006399587, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
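// Note: the run of 50000.0 entries closing the array above looks like
// padding/sentinel data, and h_B below appears to hold precomputed
// index/offset data for an embedded test problem (long runs of mostly
// even, monotonically increasing values followed by irregular index
// pairs). Both readings are inferred from the data layout only; the
// original source does not document these arrays.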
int h_B[] = {
0, 2, 4, 6, 8, 10, 12, 14, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 47, 49, 51, 53, 56, 58, 60, 62, 64, 66, 68, 70, 73, 75, 77, 79, 81, 83, 85, 87, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 486, 488, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 512, 514, 516, 518, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 682, 684, 686, 688, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 947, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1045, 1047, 1050, 1052, 1054, 1056, 1059, 1061, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1132, 1134, 1137, 1139, 1141, 1143, 1145, 1147, 1149, 1151, 1153, 1155, 1158, 1160, 1163, 1165, 1167, 1169, 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1205, 1207, 1210, 1212, 1214, 1216, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1235, 1237, 1239, 1241, 1244, 1246, 1248, 1250, 1253, 1255, 1258, 1260, 1263, 1265, 1268, 1270, 1273, 1275, 1277, 1279, 1281, 1283, 1286, 1288, 1291, 1293, 1296, 1298, 1301, 1303, 1306, 1308, 1311, 1313, 1316, 1318, 1321, 1323, 1326, 1328, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1348, 1350, 1353, 1355, 1361, 1363, 1365, 1367, 1369, 1371, 1374, 1376, 1378, 1380, 1382, 1384, 1387, 1389, 1391, 1393, 1396, 1398, 1401, 1403, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1430, 1432, 1435, 1437, 1439, 1441, 
1443, 1445, 1448, 1450, 1453, 1455, 1457, 1459, 1461, 1463, 1466, 1468, 1471, 1473, 1476, 1478, 1481, 1483, 1486, 1488, 1491, 1493, 1496, 1498, 1501, 1503, 1506, 1508, 1511, 1513, 1516, 1518, 1520, 1522, 1525, 1527, 1530, 1532, 1538, 1540, 1542, 1544, 1546, 1548, 1551, 1553, 1556, 1558, 1561, 1563, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1580, 1582, 1584, 1586, 1588, 1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1639, 1641, 1643, 1645, 1648, 1650, 1652, 1654, 1656, 1658, 1660, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1694, 1696, 1698, 1700, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1728, 1730, 1732, 1734, 1736, 1738, 1740, 1742, 1744, 1746, 1748, 1750, 1752, 1754, 1756, 1758, 1760, 1762, 1764, 1766, 1768, 1770, 1772, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1790, 1792, 1794, 1796, 1798, 1800, 1802, 1804, 1806, 1809, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1880, 1882, 1884, 1886, 1888, 1890, 1893, 1895, 1898, 1900, 1903, 1905, 1908, 1910, 1912, 1914, 1917, 1919, 1921, 1923, 1926, 1928, 1932, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1950, 1952, 1954, 1956, 1958, 1960, 1962, 1964, 1966, 1968, 1970, 1973, 1975, 1978, 1980, 1983, 1985, 1988, 1990, 1993, 1995, 1997, 1999, 2002, 2004, 2007, 2009, 2014, 2016, 2019, 2021, 2024, 2026, 2029, 2031, 2034, 2036, 2039, 2041, 2044, 2046, 2048, 2050, 2052, 2054, 2057, 2059, 2062, 2064, 2066, 2068, 2071, 2073, 2075, 2077, 2079, 2081, 2083, 2085, 2087, 2089, 2092, 2094, 2099, 2101, 2104, 2106, 2109, 2111, 2113, 2115, 2117, 2119, 2121, 2123, 2125, 2127, 2129, 2131, 2133, 2135, 2137, 2139, 2141, 2143, 2145, 2147, 2149, 2151, 2153, 2155, 2157, 2159, 2161, 2163, 2165, 2167, 2170, 2172, 2174, 2176, 2179, 2181, 2184, 2186, 2192, 2194, 2196, 2198, 2200, 2202, 2205, 2207, 2210, 2212, 2215, 2217, 2220, 2222, 2224, 2226, 2228, 2230, 2233, 2235, 2238, 2240, 2243, 2245, 2248, 2250, 2253, 2255, 2262, 2264, 2266, 2268, 2270, 2272, 2274, 2276, 2279, 2281, 2284, 2286, 2292, 2294, 2296, 2298, 2301, 2303, 2306, 2308, 2317, 2319, 2322, 2324, 2327, 2329, 2332, 2334, 2337, 2339, 2343, 2345, 2347, 2349, 2352, 2354, 2356, 2358, 2360, 2362, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 2396, 2398, 2400, 2402, 2404, 2406, 2408, 2410, 2412, 2414, 2416, 2418, 2420, 2422, 2424, 2426, 2428, 2430, 2432, 2434, 2436, 2438, 2441, 2443, 2445, 2447, 2450, 2452, 2454, 2456, 2458, 2460, 2462, 2464, 2466, 2468, 2470, 2472, 2474, 2476, 2479, 2481, 2483, 2485, 2487, 2489, 2491, 2493, 2495, 2497, 2499, 2501, 2503, 2505, 2507, 2509, 2512, 2514, 2516, 2518, 2520, 2522, 2524, 2526, 2528, 2530, 2533, 2535, 2537, 2539, 2541, 2543, 2545, 2547, 2549, 2551, 2553, 2555, 2557, 2559, 2561, 2563, 2566, 2568, 2570, 2572, 2574, 2576, 2578, 2580, 2582, 2584, 2586, 2588, 2590, 2592, 2594, 2596, 2598, 2600, 2602, 2604, 2606, 2608, 2610, 2612, 2614, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 2638, 2640, 2642, 2644, 2646, 2648, 2650, 2652, 2654, 2656, 2658, 2660, 2662, 2664, 2666, 2668, 2670, 2672, 2674, 2676, 2678, 2680, 2682, 2684, 2686, 2688, 2690, 2692, 2694, 2696, 2699, 2701, 2703, 2705, 2708, 2710, 2712, 2714, 2716, 2718, 2720, 2722, 2724, 2726, 2729, 2731, 2734, 2736, 2738, 2740, 
2742, 2744, 2746, 2748, 2750, 2752, 2754, 2756, 2759, 2761, 2763, 2765, 2767, 2769, 2771, 2773, 2776, 2778, 2780, 2782, 2785, 2787, 2789, 2791, 2793, 2795, 2797, 2799, 2801, 2803, 2805, 2807, 2810, 2812, 2814, 2816, 2819, 2821, 2823, 2825, 2827, 2829, 2831, 2833, 2835, 2837, 2840, 2842, 2844, 2846, 2848, 2850, 2852, 2854, 2856, 2858, 2861, 2863, 2865, 2867, 2869, 2871, 2873, 2875, 2877, 2879, 2881, 2883, 2885, 2887, 2889, 2891, 2893, 2895, 2898, 2900, 2903, 2905, 2908, 2910, 2913, 2915, 2917, 2919, 2921, 2923, 2926, 2928, 2931, 2933, 2935, 2937, 2940, 2942, 2945, 2947, 2950, 2952, 2954, 2956, 2958, 2960, 2963, 2965, 2967, 2969, 2971, 2973, 2976, 2978, 2980, 2982, 2984, 2986, 2989, 2991, 2994, 2996, 2999, 3001, 3005, 3007, 3009, 3011, 3015, 3017, 3019, 3021, 3023, 3025, 3028, 3030, 3033, 3035, 3038, 3040, 3043, 3045, 3048, 3050, 3053, 3055, 3061, 3063, 3065, 3067, 3069, 3071, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3139, 3141, 3145, 3147, 3150, 3152, 3155, 3157, 3160, 3162, 3165, 3167, 3170, 3172, 3175, 3177, 3180, 3182, 3184, 3186, 3188, 3190, 3193, 3195, 3198, 3200, 3203, 3205, 3208, 3210, 3212, 3214, 3216, 3218, 3221, 3223, 3226, 3228, 3231, 3233, 3236, 3238, 3241, 3243, 3246, 3248, 3251, 3253, 3256, 3258, 3261, 3263, 3269, 3271, 3274, 3276, 3279, 3281, 3284, 3286, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3316, 3318, 3320, 3322, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3350, 3352, 3355, 3357, 3359, 3361, 3364, 3366, 3369, 3371, 3374, 3376, 3379, 3381, 3384, 3386, 3389, 3391, 3394, 3396, 3399, 3401, 3404, 3406, 3409, 3411, 3414, 3416, 3419, 3421, 3424, 3426, 3428, 3430, 3433, 3435, 3437, 3439, 3442, 3444, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3469, 3471, 3473, 3475, 3480, 3482, 3484, 3486, 3488, 3490, 3493, 3495, 3498, 3500, 3503, 3505, 3508, 3510, 3512, 3514, 3516, 3518, 3521, 3523, 3525, 3527, 3530, 3532, 3535, 3537, 3542, 3544, 3546, 3548, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3596, 3598, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3623, 3625, 3628, 3630, 3632, 3634, 3637, 3639, 3641, 3643, 3647, 3649, 3651, 3653, 3656, 3658, 3660, 3662, 3665, 3667, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3752, 3754, 3757, 3759, 3762, 3764, 3767, 3769, 3772, 3774, 3777, 3779, 3782, 3784, 3787, 3789, 3792, 3794, 3797, 3799, 3802, 3804, 3808, 3810, 3812, 3814, 3817, 3819, 3822, 3824, 3827, 3829, 3832, 3834, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3854, 3856, 3858, 3860, 3863, 3865, 3869, 3871, 3873, 3875, 3878, 3880, 3883, 3885, 3888, 3890, 3893, 3895, 3897, 3899, 3901, 3903, 3905, 3907, 3909, 3911, 3913, 3915, 3917, 3919, 3921, 3923, 3925, 3927, 3929, 3931, 3933, 3935, 3937, 3939, 3941, 3943, 3945, 3947, 3949, 3951, 3953, 3955, 3958, 3960, 3963, 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979, 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3995, 3997, 3999, 4001, 4003, 4005, 4007, 4009, 4011, 4013, 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046, 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 
4076, 4078, 4080, 4082, 4084, 4086, 4088, 4090, 4092, 4094, 4096, 4098, 4100, 4102, 4104, 4106, 4108, 4110, 4112, 4114, 4116, 4118, 4120, 4122, 4124, 4126, 4128, 4130, 4132, 4134, 4136, 4138, 4140, 4142, 4144, 4146, 4148, 4150, 4152, 4154, 4156, 4158, 4160, 4162, 4164, 4166, 4168, 4170, 4172, 4174, 4176, 4178, 4181, 4183, 4185, 4187, 4189, 4191, 4193, 4195, 4197, 4199, 4201, 4203, 4205, 4207, 4209, 4211, 4213, 4215, 4217, 4219, 4221, 4223, 4225, 4227, 4229, 4231, 4233, 4235, 4237, 4239, 4241, 4243, 4246, 4248, 4250, 4252, 4254, 4256, 4258, 4260, 4262, 4264, 4266, 4268, 4270, 4272, 4274, 4276, 4278, 4280, 4282, 4284, 4286, 4288, 4290, 4292, 4294, 4296, 4298, 4300, 4302, 4304, 4306, 4308, 4310, 4312, 4314, 4316, 4318, 4320, 4322, 4324, 4326, 4328, 4330, 4332, 4335, 4337, 4339, 4341, 4344, 4346, 4348, 4350, 4353, 4355, 4357, 4359, 4361, 4363, 4366, 4368, 4371, 4373, 4376, 4378, 4380, 4382, 4384, 4386, 4389, 4391, 4393, 4395, 4397, 4399, 4402, 4404, 4407, 4409, 4412, 4414, 4417, 4419, 4421, 4423, 4433, 4435, 4438, 4440, 4446, 4448, 4450, 4452, 4454, 4456, 4459, 4461, 4464, 4466, 4469, 4471, 4474, 4476, 4479, 4481, 4484, 4486, 4489, 4491, 4494, 4496, 4499, 4501, 4504, 4506, 4509, 4511, 4514, 4516, 4518, 4520, 4522, 4524, 4527, 4529, 4532, 4534, 4537, 4539, 4542, 4544, 4546, 4548, 4550, 4552, 4554, 4556, 4558, 4560, 4562, 4564, 4566, 4568, 4570, 4572, 4574, 4576, 4578, 4580, 4582, 4584, 4586, 4588, 4590, 4592, 4594, 4596, 4598, 4600, 4602, 4604, 4607, 4609, 4611, 4613, 4615, 4617, 4619, 4621, 4624, 4626, 4628, 4630, 4632, 4634, 4636, 4638, 4642, 4644, 4646, 4648, 4651, 4653, 4656, 4658, 4664, 4666, 4669, 4671, 4677, 4679, 4682, 4684, 4690, 4692, 4694, 4696, 4699, 4701, 4703, 4705, 4709, 4711, 4713, 4715, 4717, 4719, 4721, 4723, 4725, 4727, 4729, 4731, 4734, 4736, 4738, 4740, 4742, 4744, 4746, 4748, 4752, 4754, 4756, 4758, 4760, 4762, 4764, 4766, 4768, 4770, 4772, 4774, 4778, 4780, 4782, 4784, 4787, 4789, 4792, 4794, 4800, 4802, 4805, 4807, 4810, 4812, 4815, 4817, 4819, 4821, 4823, 4825, 4828, 4830, 4832, 4834, 4836, 4838, 4841, 4843, 4846, 4848, 4851, 4853, 4856, 4858, 4860, 4862, 4864, 4866, 4869, 4871, 4873, 4875, 4877, 4879, 4881, 4883, 4885, 4887, 4890, 4892, 4895, 4897, 4900, 4902, 4904, 4906, 4908, 4910, 4912, 4914, 4916, 4918, 4920, 4922, 4924, 4926, 4928, 4930, 4932, 4934, 4936, 4938, 4940, 4942, 4944, 4946, 4948, 4950, 4952, 4954, 4956, 4958, 4961, 4963, 4965, 4967, 4969, 4971, 4973, 4975, 4977, 4979, 4981, 4983, 4986, 4988, 4990, 4992, 4994, 4996, 4999, 5001, 5004, 5006, 5009, 5011, 5017, 5019, 5022, 5024, 5027, 5029, 5031, 5033, 5035, 5037, 5040, 5042, 5048, 5050, 5053, 5055, 5057, 5059, 5061, 5063, 5065, 5067, 5069, 5071, 5073, 5075, 5077, 5079, 5081, 5083, 5085, 5087, 5089, 5091, 5093, 5095, 5097, 5099, 5101, 5103, 5105, 5107, 5109, 5111, 5114, 5116, 5120, 5122, 5124, 5126, 5128, 5130, 5133, 5135, 5138, 5140, 5143, 5145, 5148, 5150, 5152, 5154, 5156, 5158, 5161, 5163, 5165, 5167, 5169, 5171, 5174, 5176, 5179, 5181, 5184, 5186, 5189, 5191, 5193, 5195, 5198, 5200, 5202, 5204, 5208, 5210, 5212, 5214, 5217, 5219, 5221, 5223, 5226, 5228, 5230, 5232, 5235, 5237, 5240, 5242, 5244, 5246, 5248, 5250, 5252, 5254, 5256, 5258, 5260, 5262, 5264, 5266, 5268, 5270, 5272, 5274, 5276, 5278, 5280, 5282, 5285, 5287, 5289, 5291, 5294, 5296, 5298, 5300, 5302, 5304, 5306, 5308, 5311, 5313, 5315, 5317, 5320, 5322, 5324, 5326, 5328, 5330, 5332, 5334, 5336, 5338, 5340, 5342, 5345, 5347, 5349, 5351, 5353, 5355, 5357, 5359, 5361, 5363, 5365, 5367, 5369, 5371, 5374, 5376, 5378, 5380, 5382, 5384, 5387, 5389, 
5392, 5394, 5397, 5399, 5402, 5404, 5407, 5409, 5412, 5414, 5417, 5419, 5422, 5424, 5427, 5429, 5432, 5434, 5437, 5439, 5442, 5444, 5447, 5449, 5452, 5454, 5457, 5459, 5461, 5463, 5465, 5467, 5469, 5471, 5473, 5475, 5477, 5479, 5482, 5484, 5486, 5488, 5490, 5492, 5495, 5497, 5500, 5502, 5506, 5508, 5511, 5513, 5516, 5518, 5520, 5522, 5524, 5526, 5529, 5531, 5534, 5536, 5539, 5541, 5544, 5546, 5549, 5551, 5553, 5555, 5557, 5559, 5562, 5564, 5567, 5569, 5571, 5573, 5575, 5577, 5579, 5581, 5584, 5586, 5588, 5590, 5593, 5595, 5597, 5599, 5601, 5603, 5605, 5607, 5609, 5611, 5613, 5615, 5617, 5619, 5621, 5623, 5625, 5627, 5629, 5631, 5633, 5635, 5637, 5639, 5641, 5643, 5645, 5647, 5649, 5651, 5653, 5655, 5657, 5659, 5661, 5663, 5665, 5667, 5669, 5671, 5673, 5675, 5677, 5679, 5681, 5683, 5685, 5687, 5689, 5691, 5693, 5695, 5697, 5699, 5701, 5703, 5705, 5707, 5709, 5711, 5713, 5715, 5717, 5719, 5721, 5723, 5725, 5727, 5729, 5731, 5733, 5735, 5737, 5739, 5741, 5743, 5745, 5747, 5749, 5751, 5753, 5755, 5757, 5759, 5761, 5763, 5765, 5767, 5769, 5771, 5773, 5775, 5777, 5779, 5781, 5783, 5785, 5787, 5789, 5791, 5793, 5795, 5797, 5799, 5801, 5803, 5805, 5807, 5809, 5811, 5813, 5815, 5817, 5819, 5821, 5823, 5826, 5828, 5830, 5832, 5835, 5837, 5839, 5841, 5843, 5845, 5847, 5849, 5851, 5853, 5855, 5857, 5859, 5861, 5863, 5865, 5867, 5869, 5871, 5873, 5875, 5877, 5879, 5881, 5884, 5886, 5889, 5891, 5893, 5895, 5898, 5900, 5902, 5904, 5907, 5909, 5911, 5913, 5915, 5917, 5919, 5921, 5923, 5925, 5927, 5929, 5932, 5934, 5937, 5939, 5941, 5943, 5945, 5947, 5949, 5951, 5953, 5955, 5957, 5959, 5961, 5963, 5965, 5967, 5969, 5971, 5973, 5975, 5977, 5979, 5981, 5983, 5986, 5988, 5990, 5992, 5994, 5996, 5998, 6000, 6002, 6004, 6007, 6009, 6011, 6013, 6015, 6017, 6019, 6021, 6023, 6025, 6027, 6029, 6031, 6033, 6036, 6038, 6041, 6043, 6045, 6047, 6049, 6051, 6053, 6055, 6058, 6060, 6064, 6066, 6068, 6070, 6074, 6076, 6079, 6081, 6084, 6086, 6089, 6091, 6094, 6096, 6098, 6100, 6102, 6104, 6107, 6109, 6111, 6113, 6115, 6117, 6120, 6122, 6124, 6126, 6129, 6131, 6134, 6136, 6142, 6144, 6146, 6148, 6150, 6152, 6155, 6157, 6159, 6161, 6163, 6165, 6167, 6169, 6171, 6173, 6175, 6177, 6179, 6181, 6183, 6185, 6187, 6189, 6191, 6193, 6196, 6198, 6200, 6202, 6204, 6206, 6208, 6210, 6213, 6215, 6218, 6220, 6223, 6225, 6228, 6230, 6233, 6235, 6238, 6240, 6243, 6245, 6248, 6250, 6256, 6258, 6261, 6263, 6266, 6268, 6271, 6273, 6276, 6278, 6281, 6283, 6286, 6288, 6290, 6292, 6294, 6296, 6299, 6301, 6303, 6305, 6307, 6309, 6311, 6313, 6316, 6318, 6324, 6326, 6329, 6331, 6334, 6336, 6338, 6340, 6343, 6345, 6347, 6349, 6353, 6355, 6358, 6360, 6363, 6365, 6368, 6370, 6373, 6375, 6378, 6380, 6383, 6385, 6388, 6390, 6393, 6395, 6398, 6400, 6402, 6404, 6406, 6408, 6410, 6412, 6414, 6416, 6419, 6421, 6424, 6426, 6429, 6431, 6434, 6436, 6438, 6440, 6442, 6444, 6447, 6449, 6452, 6454, 6456, 6458, 6460, 6462, 6465, 6467, 6469, 6471, 6473, 6475, 6478, 6480, 6482, 6484, 6486, 6488, 6490, 6492, 6494, 6496, 6498, 6500, 6502, 6504, 6506, 6508, 6510, 6512, 6514, 6516, 6518, 6520, 6522, 6524, 6526, 6528, 6530, 6532, 6534, 6536, 6538, 6540, 6542, 6544, 6546, 6548, 6550, 6552, 6554, 6556, 6558, 6560, 6562, 6564, 6566, 6568, 6570, 6572, 6574, 6576, 6578, 6580, 6582, 6584, 6586, 6588, 6590, 6592, 6594, 6596, 6599, 6601, 6603, 6605, 6608, 6610, 6612, 6614, 6616, 6618, 6620, 6622, 6625, 6627, 6630, 6632, 6634, 6636, 6638, 6640, 6642, 6644, 6647, 6649, 6651, 6653, 6655, 6657, 6659, 6661, 6664, 6666, 6668, 6670, 6672, 6674, 6677, 6679, 6682, 6684, 6687, 6689, 
6692, 6694, 6697, 6699, 6702, 6704, 6707, 6709, 6712, 6714, 6717, 6719, 6722, 6724, 6727, 6729, 6732, 6734, 6736, 6738, 6740, 6742, 6744, 6746, 6748, 6750, 6752, 6754, 6756, 6758, 6760, 6762, 6765, 6767, 6769, 6771, 6773, 6775, 6777, 6779, 6781, 6783, 6786, 6788, 6791, 6793, 6796, 6798, 6801, 6803, 6805, 6807, 6810, 6812, 6814, 6816, 6819, 6821, 6825, 6827, 6829, 6831, 6834, 6836, 6839, 6841, 6844, 6846, 6849, 6851, 6854, 6856, 6859, 6861, 6863, 6865, 6867, 6869, 1537, 1535, 1136, 1408, 1406, 1136, 5391, 5386, 5391, 5386, 5391, 5386, 5411, 5494, 5416, 5494, 6701, 6706, 6255, 6253, 6387, 6382, 6387, 6382, 6701, 6706, 6706, 6701, 5416, 5411, 5426, 5421, 5416, 5411, 5426, 5421, 5416, 5411, 3060, 3058, 3268, 3268, 5416, 5411, 6800, 6141, 6139, 6141, 6139, 6255, 6253, 7064, 7066, 7068, 7070, 7072, 7074, 7076, 7078, 6800, 4429, 4427, 4442, 4437, 4660, 4655, 4660, 4655, 4673, 4668, 4676, 4674, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4689, 4687, 4689, 4687, 4660, 4655, 4809, 4804, 4429, 4427, 4442, 4437, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4429, 4427, 4442, 4437, 4443, 4445, 4429, 4427, 4442, 4437, 4443, 4445, 4416, 4416, 4442, 4437, 4388, 4388, 4429, 4427, 4432, 4430, 4429, 4427, 4432, 4430, 4797, 4799, 4708, 4698, 4799, 4797, 5456, 5451, 5456, 5451, 5416, 5411, 5047, 5045, 5047, 5045, 5391, 5386, 5391, 5386, 5416, 5411, 5416, 5411, 3220, 2478, 2511, 2478, 2511, 3220, 3477, 3479, 815, 3831, 3826, 3831, 3826, 816, 3831, 3826, 3796, 3791, 3060, 3058, 3060, 3058, 2758, 2511, 2511, 2478, 2478, 3290, 1300, 1295, 1310, 1305, 1320, 1315, 1300, 1295, 1310, 1305, 1204, 1310, 1305, 1204, 1320, 1315, 1320, 1315, 1360, 1358, 1360, 1358, 1405, 1400, 1408, 1406, 1360, 1358, 1360, 1358, 1405, 1400, 1408, 1406, 1360, 1358, 1360, 1358, 1405, 1400, 1408, 1406, 1360, 1358, 1360, 1358, 1408, 1406, 1272, 1267, 1272, 1267, 1272, 1267, 1272, 1267, 1320, 1315, 1320, 1315, 1360, 1358, 1360, 1358, 1408, 1406, 1408, 1406, 1537, 1535, 1537, 1535, 1987, 1987, 1838, 1838, 2070, 2098, 2001, 2013, 2001, 2013, 2070, 2098, 2260, 2258, 2191, 2189, 2191, 2189, 2260, 2258, 2291, 2289, 2313, 2311, 2313, 2311, 2314, 2291, 2289, 2313, 2311, 2313, 2311, 2316, 2289, 2291, 2291, 2289, 2313, 2311, 2313, 2311, 2316, 2314, 3060, 3058, 3278, 3273, 3278, 3273, 3268, 3266, 2930, 2930, 2925, 3060, 3058, 3164, 3159, 3164, 3159, 3235, 3235, 2758, 2925, 2925, 2988, 2988, 3060, 3058, 3014, 3014, 3060, 3058, 3144, 3144, 3268, 3266, 3268, 3266, 3290, 3403, 3398, 3403, 3398, 3550, 3552, 3477, 3479, 3479, 3477, 3541, 3541, 3552, 3550, 3887, 3892, 3831, 3826, 3831, 3826, 3579, 3578, 3887, 3892, 3796, 3791, 3796, 3791, 3796, 3791, 3821, 3816, 3821, 3816, 3831, 3826, 3796, 3791, 3796, 3791, 3821, 3816, 3831, 3826, 3831, 3826, 5406, 5406, 6093, 6088, 6093, 6088, 6255, 6253, 5016, 5014, 5391, 5386, 5391, 5386, 4445, 4443, 4687, 4689, 4689, 4687, 4429, 4427, 4442, 4437, 4429, 4427, 4442, 4437, 4429, 4427, 4432, 4430, 4429, 4427, 4432, 4430, 4445, 4443, 4429, 4427, 4432, 4430, 4429, 4427, 4432, 4430, 4445, 4443, 4689, 4687, 4698, 4708, 4663, 4661, 4663, 4661, 4673, 4668, 4676, 4674, 4663, 4661, 4663, 4661, 4673, 4668, 4676, 4674, 4663, 4661, 4663, 4661, 4676, 4674, 4689, 4687, 4698, 4708, 4797, 4799, 4799, 4797, 4809, 4804, 4799, 4797, 4799, 4797, 4809, 4804, 4777, 4799, 4797, 4799, 4797, 4777, 4799, 4797, 4799, 4797, 5016, 5014, 5008, 5008, 5045, 5047, 5016, 5014, 5016, 5014, 5047, 5045, 5047, 5045, 5119, 5119, 5207, 5207, 5391, 5386, 5391, 5386, 5373, 5373, 5456, 5451, 5456, 5451, 5481, 5481, 5505, 5505, 6320, 
6315, 6320, 6315, 6323, 6321, 6320, 6315, 6320, 6315, 6323, 6321, 6731, 6726, 6731, 6726, 9144, 9146, 9148, 9150, 6255, 6253, 9166, 9168, 9170, 9172, 9174, 9176, 9178, 9180, 9182, 9184, 9186, 9188, 6141, 6139, 6141, 6139, 6255, 6253, 6320, 6315, 6320, 6315, 6323, 6321, 6315, 6320, 6320, 6315, 6320, 6315, 6320, 6315, 6323, 6321, 6320, 6315, 6320, 6315, 6323, 6321, 6451, 6446, 6451, 6446, 6711, 6711, 9355, 9357, 9359, 9361, 9363, 9365, 9367, 9369, 9371, 9373, 9375, 9377, 9379, 9381, 9383, 9385, 9387, 9389, 9391, 9393, 9396, 9398, 9400, 9402, 6141, 6139, 6141, 6139, 6073, 6073, 6141, 6139, 6141, 6139, 6255, 6253, 6255, 6253, 6255, 6253, 6323, 6321, 6323, 6321, 6323, 6321, 6352, 6352, 6858, 6858, 9573, 9575, 9578, 9580, 9582, 9584, 9586, 9588, 9590, 9592, 9594, 9596, 9665, 9667, 9670, 9672, 9678, 9680, 9682, 9684, 9687, 9689, 9692, 9694, 9700, 9702, 9705, 9707, 9710, 9712, 9715, 9717, 9577, 9674, 9577, 9675, 9677, 9674, 9697, 9697, 9726, 9724, 9721, 9726, 9724, 9723, 9726, 9724, 9699, 9699, 9723, 9721, 9697, 9699, 9697, 9699, 9723, 9721, 9723, 9721, 9726, 9724, 9677, 9675, 9677, 9675, 9699, 9697, 9699, 9697, 9723, 9721, 9726, 9724, 9723, 9721, 9726, 9724, 13, 14, 15, 13648, 13650, 13652, 13654, 13656, 13658, 13660, 13662, 13664, 13666, 13668, 13670, 13672, 13674, 13676, 13678, 13680, 13682, 13684, 13686, 13688, 13690, 13692, 13694, 13696, 13698, 13700, 13702, 13704, 13706, 13708, 13710, 13712, 13714, 13716, 13718, 13720, 13722, 13724, 13726, 13728, 13730, 13732, 13734, 13736, 13738, 13740, 13742, 13744, 13746, 13748, 13750, 13752, 13754, 13756, 13758, 13760, 13762, 13764, 13766, 13768, 13770, 13772, 13774, 13776, 13778, 13780, 13782, 13784, 13786, 13788, 13790, 13792, 13794, 13796, 13798, 13800, 13802, 13804, 13806, 13808, 13810, 13812, 13814, 13816, 13818, 13820, 13822, 13824, 13826, 13828, 13830, 13832, 13834, 13836, 13838, 13840, 13842, 13844, 13846, 13848, 13850, 13852, 13854, 13856, 13858, 13860, 13862, 13864, 13866, 13868, 13870, 13872, 13874, 13876, 13878, 13880, 13882, 13884, 13886, 13888, 13890, 13892, 13894, 13896, 13898, 13900, 13902, 13904, 13906, 13908, 13910, 13912, 13914, 13916, 13918, 13920, 13922, 13924, 13926, 13928, 13930, 13932, 13934, 13936, 13938, 13940, 13942, 13944, 13946, 13948, 13950, 13952, 13954, 13956, 13958, 13960, 13962, 13964, 13966, 13968, 13970, 13972, 13974, 13976, 13978, 13980, 13982, 13984, 13986, 13988, 13990, 13992, 13994, 13996, 13998, 14000, 14002, 14004, 14006, 14008, 14010, 14012, 14014, 14016, 14018, 14020, 14022, 14024, 14026, 14028, 14030, 14032, 14034, 14036, 14038, 14040, 14042, 14044, 14046, 14048, 14050, 14052, 14054, 14056, 14058, 14060, 14062, 14064, 14066, 14068, 14070, 14072, 14074, 14076, 14078, 14080, 14082, 14084, 14086, 14088, 14090, 14092, 14094, 14096, 14098, 14100, 14102, 14104, 14106, 14108, 14110, 14112, 14114, 14116, 14118, 14120, 14122, 14124, 14126, 14128, 14130, 14132, 14134, 14136, 14138, 14140, 14142, 14144, 14146, 14148, 14150, 14152, 14154, 14156, 14158, 14160, 14162, 14164, 14166, 14168, 14170, 14172, 14174, 14176, 14178, 14180, 14182, 14184, 14186, 14188, 14190, 14192, 14194, 14196, 14198, 14200, 14202, 14204, 14206, 14208, 14210, 14212, 14214, 14216, 14218, 14220, 14222, 14224, 14226, 14228, 14230, 14232, 14234, 14236, 14238, 14240, 14242, 14244, 14246, 14248, 14250, 14252, 14254, 14256, 14258, 14260, 14262, 14264, 14266, 14268, 14270, 14272, 14274, 14276, 14278, 14280, 14282, 14284, 14286, 14288, 14290, 14292, 14294, 14296, 14298, 14300, 14302, 14304, 14306, 14308, 14310, 14312, 14314, 14316, 14318, 14320, 14322, 14324, 
27443, 27445, 27447, 27451, 27453, 27457, 27461, 27463, 27466, 27471, 27473, 27476, 27478, 27481, 27483, 27486, 27489, 27490, 27492, 27496, 27499, 27501, 27504, 27506, 27508, 27511, 27513, 27531, 27535, 25879, 25885, 27553, 27558, 27561, 27564, 27566, 27569, 25915, 25917, 25921, 27581, 25935, 25939, 25945, 27592, 25951, 27597, 27603, 25962, 25966, 27608, 27611, 27612, 25980, 27617, 27623, 27625, 27627, 27629, 27632, 27637, 27640, 27642, 27644, 27647, 26020, 27654, 27656, 27658, 27660, 27663, 27665, 27667, 27670, 27675, 27678, 27681, 27683, 27686, 26064, 27691, 27694, 27697, 27700, 27714, 27718, 27723, 27726, 27728, 27730, 27733, 27736, 27741, 27743, 27745, 27748, 27753, 27755, 27757, 27760, 27763, 27768, 27770, 27772, 27776, 27778, 27782, 27784, 27786, 27790, 27792, 27796, 27800, 27803, 27806, 27808, 26196, 27813, 27816, 27819, 27821, 26211, 27831, 27833, 27836, 27840, 27842, 27845, 27847, 27850, 27856, 27861, 27864, 27867, 27869, 27873, 27879, 27886, 27889, 27891, 27892, 27894, 26293, 27901, 27903, 27904, 27906, 27912, 27926, 27930, 27934, 27938, 27945, 27947, 27960, 27963, 27970, 27972, 27974, 27976, 27978, 27981, 27984, 27989, 27991, 27993, 27996, 27999, 28004, 28006, 28008, 28010, 28013, 28019, 28021, 28023, 28025, 28028, 28038, 28043, 28045, 28047, 28049, 28051, 28054, 28062, 28064, 28066, 28068, 28071, 28079, 28084, 28086, 28088, 28090, 28093, 28097, 28100, 28104, 28107, 28111, 28113, 28116, 28121, 28126, 28129, 28133, 28135, 28138, 28142, 28146, 28149, 28152, 28155, 28159, 28163, 28167, 28170, 28173, 28175, 28178, 28183, 28187, 28192, 28194, 28197, 28198, 28201, 28204, 28206, 28210, 28212, 28213, 28215, 28218, 28220, 28223, 28224, 28227, 28232, 28239, 28243, 28247, 28249, 28253, 28257, 28259, 28265, 28268, 28270, 28271, 28273, 26765, 28280, 28282, 28283, 28285, 28288, 28293, 28295, 28306, 28310, 28314, 28320, 28323, 28336, 28342, 28344, 28359, 28367, 26898, 26901, 28380, 28382, 28383, 28394, 28399, 28402, 28407, 28409, 28413, 28417, 28431, 28433, 28439, 28447, 26993, 26997, 27003, 28457, 27009, 28462, 28466, 28470, 28474, 28477, 28479, 28481, 28486, 28489, 28491, 28494, 27052, 28499, 28511, 28516, 28530, 28532, 28538, 28543, 28546, 28559, 28565, 28570, 28573, 28575, 28577, 28585, 28591, 27176, 28594, 28602, 28605, 28607, 28614, 28619, 28626, 28639, 27241, 28643, 27247, 28648, 28664, 28669, 28674, 28677, 27289, 27292, 28692, 28695, 28697, 27313, 27360, 27362, 27364, 26319, 26364, 26372, 26324, 27921, 26372, 26327, 27924, 25283, 25289, 27941, 27965, 27967, 27369, 26319, 26364, 26372, 26324, 27921, 26372, 26327, 27924, 25283, 25289, 27370, 27965, 27967, 27371, 27711, 28340, 27708, 27706, 27710, 28563, 27725, 27708, 27706, 27710, 27725, 28370, 27711, 28426, 28424, 27712, 28340, 28348, 27708, 27706, 27710, 27130, 28563, 27725, 27720, 27711, 28426, 28424, 27712, 28340, 28348, 27708, 27706, 27710, 27130, 28563, 27725, 28370, 27720, 28673, 28686, 28690, 26916, 28388, 28390, 28392, 28601, 28663, 28661, 28672, 26916, 28388, 28390, 28392, 28601, 28663, 28661, 28672, 28673, 28690, 25706, 28703, 28686, 28690, 28706, 28370, 27722, 27414, 27418, 27423, 25119, 27438, 25754, 25759, 27450, 27456, 28151, 27465, 27775, 27781, 28185, 28200, 27470, 26627, 27485, 27488, 28182, 28185, 27495, 25152, 28256, 25155, 27519, 27517, 27521, 28708, 28710, 28679, 28676, 28679, 28676, 27533, 25870, 27538, 27540, 28679, 28676, 28712, 28716, 27547, 27595, 27599, 27578, 27610, 27615, 27619, 27621, 27552, 28718, 28719, 27557, 27576, 27595, 27599, 27578, 27610, 27615, 27619, 27579, 28483, 28720, 28721, 28412, 25199, 
25198, 27595, 27599, 27601, 27610, 27615, 27619, 27621, 27636, 27653, 27674, 27711, 27712, 27121, 27704, 27708, 27706, 27710, 27711, 27712, 28370, 27720, 27722, 27725, 26119, 27740, 26132, 27752, 26148, 27767, 28182, 27858, 28191, 28196, 27863, 27775, 27781, 27789, 27795, 28136, 27858, 28191, 28196, 27863, 27775, 27781, 27789, 27795, 27826, 27828, 27830, 28327, 27839, 26245, 26243, 27855, 28136, 27858, 28196, 28191, 27863, 28209, 27872, 27877, 25261, 27882, 27884, 27899, 27910, 27911, 26317, 26319, 26364, 25278, 27919, 26372, 26324, 27921, 26372, 26327, 27924, 25283, 25286, 25289, 27941, 27965, 27967, 27944, 26372, 27950, 26372, 26364, 27953, 26372, 26370, 26368, 27958, 27965, 27967, 27969, 26405, 27988, 26421, 28003, 28016, 28018, 28031, 28033, 28083, 28036, 26458, 28041, 26465, 28057, 28061, 28059, 28074, 28078, 28076, 28083, 28096, 28103, 28110, 28115, 28120, 28125, 28128, 28236, 28136, 28185, 28141, 26591, 28151, 26602, 28162, 26617, 28256, 26627, 28182, 28185, 28186, 28191, 28196, 28203, 28209, 26688, 28222, 28226, 28231, 28236, 28241, 28246, 28251, 28256, 28262, 26750, 28278, 28724, 28292, 26797, 26795, 28300, 28301, 28303, 28305, 28725, 28312, 28726, 28317, 28318, 28322, 26835, 28327, 28328, 28330, 28331, 28333, 28335, 26852, 28340, 28348, 28349, 28350, 28351, 28352, 28371, 28406, 26986, 28371, 28406, 26986, 28371, 28363, 28362, 28406, 26986, 28370, 28373, 28372, 26904, 26916, 28388, 28390, 28392, 28473, 28398, 28455, 28460, 28444, 28465, 28412, 28422, 28421, 28426, 28424, 28427, 28429, 28437, 28443, 26986, 26986, 28455, 28460, 28444, 28465, 28473, 28485, 28455, 28460, 28465, 28464, 28473, 28485, 28504, 28503, 28508, 28506, 28509, 28515, 28521, 28520, 28525, 28523, 28526, 28528, 28536, 28542, 27121, 27138, 28555, 28553, 28557, 27130, 28563, 27138, 28569, 28580, 28581, 28584, 28588, 28597, 28599, 28601, 28611, 28613, 28618, 28623, 28625, 28629, 28631, 28633, 28686, 28637, 28636, 28647, 28651, 28653, 28655, 28657, 28659, 28663, 28661, 28672, 28673, 28688, 28690, 28694, 28736, 28686, 28688, 28690, 28694, 28740, 27352, 27358, 27352, 27358, 27352, 27358, 27334, 27332, 27358, 28729, 28727, 28729, 28734, 28732, 28734, 27352, 27358, 9, 10, 11, 12, 13, 14, 15, 28808, 28809, 28816, 28827, 28836, 28850, 28851, 28856, 28858, 28860, 28865, 28870, 28874, 28891, 28896, 28904, 28917, 28922, 28936, 28941, 28970, 28972, 28974, 29027, 29032, 29051, 29053, 29056, 29057, 29060, 29074, 29079, 29081, 29084, 29086, 29108, 29112, 29118, 29119, 29120, 28753, 29121, 29122, 29123, 29124, 29125, 29126, 29127, 29128, 28924, 29129, 28926, 29130, 28927, 29131, 29132, 29133, 29134, 28933, 29135, 29136, 29137, 29138, 29139, 29140, 29141, 29142, 28924, 29143, 28926, 29144, 28927, 29145, 29146, 29147, 29148, 28933, 29149, 29150, 28755, 29151, 29152, 29153, 29154, 29155, 28755, 29156, 29157, 29158, 28862, 29159, 29160, 29161, 29162, 29163, 29164, 29165, 28755, 28346, 29166, 29167, 29168, 29169, 29170, 28862, 29171, 29172, 28861, 29173, 29174, 29175, 29176, 29177, 29178, 29042, 28346, 29179, 29180, 29181, 29182, 29183, 28862, 29184, 29185, 29186, 28545, 28861, 29187, 29188, 29189, 29190, 28385, 29191, 29192, 29193, 29194, 29195, 29196, 29197, 29109, 29198, 29048, 29049, 28385, 29199, 29200, 29201, 29202, 29203, 29204, 29205, 29109, 29206, 29207, 29208, 27407, 27398, 28761, 27407, 27404, 29209, 29211, 29212, 29214, 29215, 28765, 29216, 28767, 29217, 29218, 28768, 28770, 27430, 25120, 29219, 27436, 29220, 28774, 25757, 29221, 29016, 25762, 29222, 29223, 28778, 28780, 29224, 27459, 29225, 29020, 28783, 29226, 27875, 29227, 
28879, 28784, 29228, 26167, 28180, 29229, 29001, 29230, 28987, 29231, 27475, 28788, 29232, 28790, 29233, 28791, 29234, 28792, 28180, 29235, 28997, 29236, 29237, 28794, 27498, 28797, 29238, 29239, 28799, 27510, 28802, 29240, 29241, 29242, 29243, 29246, 29247, 29114, 29116, 29114, 29248, 29249, 29114, 29116, 28803, 29250, 28804, 29251, 29252, 29253, 29114, 29254, 29255, 29114, 29116, 27586, 27588, 29258, 28820, 29259, 28822, 29260, 29261, 28823, 27606, 28826, 29262, 29263, 28830, 29264, 29265, 28831, 29266, 27555, 29269, 27571, 27568, 25177, 28813, 27586, 27588, 29270, 28820, 29271, 28822, 29272, 29273, 28823, 28826, 27606, 29274, 29275, 28830, 29276, 29277, 28831, 29278, 28485, 29281, 28496, 28493, 25507, 29078, 27588, 27586, 29282, 29283, 28820, 29284, 28822, 29285, 29286, 28823, 28826, 27606, 29287, 29288, 28830, 29289, 29290, 28831, 27631, 25217, 25216, 27634, 29291, 27649, 27646, 25233, 25221, 28841, 29292, 27688, 27685, 25233, 25232, 28855, 27669, 25228, 25227, 27672, 29293, 27688, 27685, 25233, 28855, 29294, 29295, 27696, 29296, 29297, 29298, 29299, 29300, 29301, 29302, 29303, 29304, 29305, 28862, 29306, 28864, 28867, 29307, 29308, 28869, 28943, 29309, 29310, 28873, 28876, 29311, 29312, 28983, 29313, 28997, 29314, 28189, 29315, 29000, 29316, 28907, 27866, 29317, 29318, 28879, 28877, 29319, 26167, 29320, 28884, 28877, 29321, 27798, 28180, 29322, 28997, 29323, 28189, 29324, 29000, 29325, 28907, 27866, 29326, 29327, 28879, 28881, 29328, 26167, 29329, 28884, 28886, 29330, 27798, 28888, 28889, 28890, 28893, 28894, 28895, 29331, 29332, 29333, 28898, 29334, 28899, 28900, 29335, 28902, 28903, 29336, 29337, 29338, 28983, 29339, 28997, 29340, 27859, 29341, 28189, 29342, 28907, 27866, 29343, 29344, 28909, 28910, 29345, 27875, 29346, 25262, 29347, 29348, 29349, 28913, 28914, 28916, 29350, 28919, 28921, 29351, 29352, 27914, 29353, 29354, 29355, 29356, 29357, 29358, 29359, 29360, 29361, 29362, 29363, 28924, 29364, 28925, 29365, 28926, 29366, 28927, 29367, 29368, 29369, 29370, 28933, 29371, 26360, 26357, 29372, 29373, 29374, 29375, 29376, 29377, 29378, 29379, 28930, 28931, 29380, 29381, 29382, 28933, 28935, 28938, 29383, 29384, 28940, 28943, 29385, 29386, 28945, 28948, 28947, 29387, 29388, 28950, 28953, 28952, 29389, 29390, 28034, 29391, 28968, 28089, 29392, 26461, 29393, 29394, 26468, 29395, 28957, 28960, 28959, 29396, 29397, 29398, 28962, 28965, 28964, 29399, 29400, 29401, 28081, 29402, 28968, 28089, 29403, 29404, 29405, 28131, 28977, 29406, 28118, 29407, 28123, 29408, 28980, 29409, 28131, 29410, 28983, 29411, 28997, 29412, 29413, 28984, 28144, 28986, 29414, 29415, 28987, 28172, 28989, 29416, 29417, 28990, 28165, 28992, 29418, 29419, 28993, 28172, 28995, 29420, 28180, 29421, 28997, 29422, 29016, 29423, 28189, 29424, 29000, 29425, 29001, 28200, 29003, 29426, 29427, 29005, 29007, 29009, 29428, 29011, 29429, 29013, 29430, 28229, 29431, 28234, 29432, 29016, 29433, 29434, 29017, 29019, 29435, 29436, 29020, 29022, 29437, 29438, 29023, 29024, 29026, 29439, 29029, 29031, 28290, 29441, 29034, 29035, 29442, 29443, 29444, 29445, 29446, 29447, 28308, 29041, 29037, 29449, 29038, 29451, 29452, 29039, 29453, 28325, 29454, 29455, 29456, 29457, 29458, 29459, 29460, 29041, 29461, 29462, 29042, 28346, 29463, 29464, 29465, 29466, 29467, 29468, 29469, 29470, 29471, 29472, 29473, 29044, 29474, 29475, 29476, 29477, 29478, 29045, 29479, 29480, 29481, 29046, 29482, 29047, 29048, 29049, 28385, 29483, 29484, 29485, 29486, 29487, 29488, 28401, 28406, 28453, 28451, 29489, 29065, 29490, 29067, 29491, 29492, 28445, 29054, 
28483, 25503, 25502, 28485, 29493, 28496, 28493, 25507, 29078, 29494, 29495, 29496, 29497, 29498, 29499, 29058, 28435, 29500, 29501, 29502, 29503, 28453, 28451, 29504, 29065, 29505, 29067, 29506, 29507, 28445, 29069, 29508, 28476, 28483, 25503, 25502, 29509, 28496, 28493, 25507, 29078, 28453, 28451, 29510, 29065, 29511, 29067, 29512, 29513, 28468, 29069, 29514, 28476, 28483, 25503, 25502, 29515, 28496, 28493, 25507, 29078, 29516, 29517, 29518, 29519, 29520, 29082, 28513, 29521, 29522, 29523, 29524, 29525, 29526, 29527, 29082, 28534, 29528, 29529, 28545, 29530, 29531, 29532, 29533, 29534, 29535, 28561, 29536, 29537, 29538, 28567, 28572, 28574, 29091, 28579, 29539, 29540, 28582, 29541, 28587, 29542, 28589, 29094, 27180, 29095, 29543, 29544, 29545, 28606, 28604, 28609, 29546, 29547, 29548, 28616, 28621, 29549, 29550, 28627, 29551, 29552, 29553, 28684, 29554, 29114, 29555, 29556, 29116, 29117, 29103, 29104, 29105, 29106, 29557, 27254, 29558, 29559, 29560, 29561, 29562, 29563, 29564, 29109, 29565, 28684, 29566, 29567, 29568, 29114, 29569, 28679, 28676, 28684, 29571, 29572, 29573, 29114, 29574, 29116, 29117, 29570, 29575, 29244, 29245, 29210, 29576, 29213, 29577, 29244, 29245, 29570, 29578, 29575, 29579, 29570, 29580, 29575, 29581, 29256, 29582, 29583, 29257, 29584, 29585, 29586, 29587, 29588, 29589, 29590, 29570, 29591, 29575, 29592, 14, 15, 29640, 29641, 29643, 29646, 29649, 29651, 29653, 29658, 29659, 29661, 29664, 29667, 29669, 29671, 29676, 28420, 29679, 29680, 29607, 29685, 29686, 29689, 29607, 28861, 28420, 29693, 29697, 29698, 29700, 29704, 29707, 28420, 29710, 29714, 29715, 29717, 29721, 29607, 29725, 29726, 29731, 29737, 27393, 29739, 29741, 29742, 29743, 29749, 27395, 29751, 29755, 29756, 29757, 29758, 29759, 29765, 29767, 29770, 29771, 29772, 29773, 29775, 29777, 29778, 29780, 29781, 29784, 29785, 29787, 29789, 29790, 29792, 29794, 29795, 29797, 29798, 29800, 29802, 29804, 29805, 29807, 29809, 29811, 29812, 29814, 29817, 29818, 29819, 29822, 29823, 29824, 29826, 29829, 29831, 29832, 29833, 29834, 29836, 29837, 29838, 29840, 29844, 29845, 29847, 29848, 29849, 29850, 29852, 29854, 29857, 29858, 29859, 28828, 29862, 29865, 29866, 29867, 25901, 29869, 29870, 29871, 25176, 29872, 29873, 29874, 29876, 29878, 29881, 29882, 29883, 28828, 29886, 29889, 29890, 29891, 25929, 29893, 29894, 29895, 25506, 29896, 29897, 29898, 29899, 29901, 29903, 29906, 29907, 29908, 28828, 29911, 29914, 29915, 29916, 29917, 29918, 26007, 29920, 29921, 29922, 29923, 29924, 29926, 29927, 29928, 29929, 29930, 29931, 29932, 29933, 29934, 26050, 29936, 29937, 29938, 25232, 29939, 29607, 29942, 28859, 29945, 29609, 28861, 29953, 29955, 29956, 28866, 29959, 29960, 28871, 29963, 29964, 28875, 29967, 29969, 29971, 29973, 29975, 29976, 29979, 29980, 29982, 29984, 29985, 29987, 29988, 29990, 29992, 29994, 29996, 29997, 30000, 30001, 30003, 30005, 30006, 30008, 30009, 30010, 30011, 28892, 30012, 30013, 30014, 28897, 30018, 30020, 30021, 30023, 28905, 30024, 30025, 30028, 30030, 30032, 30034, 30036, 30037, 30040, 30041, 30043, 30045, 30049, 30050, 30051, 28918, 30053, 30054, 26308, 30057, 30059, 30063, 30066, 30069, 30071, 30073, 30075, 30080, 30082, 30083, 30085, 30088, 30092, 30093, 30097, 30098, 30099, 28937, 30102, 30103, 28942, 30106, 30107, 30108, 30111, 30112, 30113, 30116, 30118, 30119, 30121, 30124, 30126, 30127, 30128, 30130, 30132, 30133, 30134, 30136, 30138, 30140, 30141, 28971, 28973, 28975, 30145, 30146, 30148, 30150, 30152, 30154, 30156, 30158, 30161, 30162, 30163, 30166, 30167, 30168, 30171, 30172, 30173, 
30176, 30177, 30178, 30180, 30182, 30184, 30186, 30188, 30190, 30191, 30192, 30195, 30196, 30197, 30199, 30201, 30203, 30205, 30207, 30210, 30211, 30214, 30215, 30218, 30219, 30220, 29028, 30222, 30223, 26780, 30224, 30226, 30227, 30228, 30234, 30235, 30236, 30238, 30241, 30243, 30251, 30254, 30255, 30267, 30269, 30273, 28371, 30275, 30277, 30279, 30280, 30281, 30282, 29625, 30289, 28405, 30290, 30291, 30292, 30294, 30296, 30297, 30299, 30300, 30301, 30302, 30303, 30304, 28476, 30306, 30307, 30308, 25506, 30309, 28420, 30310, 30312, 30316, 30317, 29629, 30322, 30323, 30325, 30327, 30328, 30330, 30331, 30333, 30334, 30335, 30336, 30338, 30339, 30340, 25506, 30341, 30342, 30343, 30345, 30347, 30348, 30350, 30351, 30353, 30354, 30355, 30356, 30358, 30359, 30360, 25506, 30361, 28502, 30362, 30364, 30367, 30368, 28519, 30370, 30372, 30376, 30377, 29633, 30380, 28549, 30383, 30387, 30391, 30392, 30393, 30394, 30395, 30398, 30400, 30402, 30403, 30404, 30405, 30409, 30410, 30411, 30415, 30416, 30419, 30423, 30425, 30426, 30428, 30429, 30430, 30431, 30432, 30433, 30435, 30441, 28667, 30443, 30445, 30449, 30451, 30452, 29636, 30453, 30457, 30459, 30460, 29638, 29656, 29674, 29684, 29690, 29951, 29706, 29708, 29723, 29727, 30461, 30462, 29730, 29730, 29736, 29734, 29754, 30463, 29762, 30464, 29736, 29734, 29748, 29746, 29754, 30465, 29762, 30467, 30469, 30470, 30440, 30438, 30437, 30448, 30471, 30456, 30473, 30440, 30438, 30437, 30448, 30475, 30456, 30477, 30440, 30438, 29843, 30448, 30479, 30480, 30456, 30482, 29951, 29944, 29954, 29951, 29954, 29958, 29962, 29966, 30016, 30250, 30048, 30248, 30246, 30056, 30246, 30240, 30246, 30078, 30095, 30101, 30105, 30110, 30115, 30217, 30217, 30248, 30246, 30233, 30231, 30246, 30240, 30246, 30250, 30248, 30246, 30257, 30263, 30262, 30259, 30263, 30263, 30262, 30266, 30266, 30265, 30272, 30272, 30271, 30320, 30321, 30399, 30408, 30399, 30408, 30399, 30408, 30408, 30285, 30320, 30321, 30382, 30389, 30408, 30407, 30418, 30413, 30422, 30399, 30408, 30413, 30418, 30422, 30408, 30407, 30413, 30418, 30422, 30440, 30438, 30437, 30448, 30490, 30456, 30492, 30489, 30485, 30485, 30489, 30485, 30487, 30489, 14, 15, 30511, 30514, 30518, 30519, 30520, 30527, 30533, 30538, 30544, 30546, 30549, 29769, 30556, 30559, 30561, 29783, 29788, 29793, 30573, 29816, 29821, 30601, 30606, 30608, 30611, 30613, 30617, 30614, 30619, 30624, 30626, 30629, 30631, 30635, 30632, 30637, 30643, 30645, 30648, 30652, 30653, 30655, 30658, 30660, 30663, 30667, 30671, 30668, 30673, 30675, 30677, 30678, 30682, 30685, 30688, 30694, 29978, 29983, 30706, 29999, 30004, 30716, 30720, 30725, 30733, 30039, 30737, 30741, 30744, 30081, 30757, 30763, 30766, 30768, 30771, 30776, 30777, 30779, 30783, 30789, 30790, 30791, 30160, 30165, 30170, 30175, 30194, 30209, 30213, 30835, 30838, 30855, 30278, 30862, 30864, 30866, 30873, 30877, 30881, 30878, 30883, 30888, 30889, 30897, 30903, 30900, 30905, 30913, 30919, 30916, 30921, 30926, 30931, 30933, 30390, 30945, 30947, 30414, 30964, 30968, 30970, 30975, 30496, 30061, 28701, 28723, 29645, 29648, 30062, 29654, 29652, 30072, 29650, 30976, 30503, 28723, 30062, 30061, 28702, 29666, 29663, 29672, 29670, 30072, 29668, 30977, 30510, 29712, 29716, 30512, 29682, 30978, 30517, 29716, 30515, 29688, 30979, 30517, 30980, 29695, 29699, 30522, 29702, 30981, 30525, 30982, 30526, 29712, 29716, 30529, 29719, 30983, 30532, 30984, 30535, 30987, 29728, 30450, 30988, 29729, 29732, 30541, 30989, 30990, 29740, 30991, 29753, 30450, 30993, 29761, 30974, 30458, 29732, 30541, 30995, 30996, 29740, 
29744, 30541, 30997, 30998, 29752, 30999, 29753, 30450, 31001, 29761, 30974, 30458, 29764, 29764, 29766, 30204, 29768, 29776, 30204, 30564, 30567, 30570, 29968, 30204, 29799, 30193, 29801, 29806, 29808, 30204, 29810, 30155, 30204, 29813, 30204, 29815, 29820, 29825, 29828, 30450, 30974, 30458, 30962, 30961, 30959, 31005, 31006, 31007, 30444, 31008, 30446, 30450, 31010, 30454, 30974, 30458, 30962, 30961, 30959, 31012, 31013, 31014, 30444, 31015, 30446, 30450, 31017, 30454, 30974, 30458, 30962, 30961, 29841, 29839, 31019, 31020, 31021, 30444, 31022, 30446, 30450, 31025, 30454, 30974, 30458, 29855, 29853, 30605, 29863, 30610, 29879, 29877, 30623, 29887, 30628, 31027, 29952, 29904, 29902, 30642, 29912, 30647, 29941, 30256, 30850, 31028, 29947, 31029, 30679, 29949, 30256, 30850, 31030, 29952, 31031, 30679, 31032, 31033, 31034, 29968, 30204, 29970, 29974, 29972, 30697, 30700, 29989, 30204, 29991, 29995, 29993, 30709, 30712, 30714, 30718, 31035, 30722, 30721, 31036, 30022, 30027, 30029, 30204, 30031, 30035, 30033, 30736, 31037, 30739, 30742, 31038, 31039, 31040, 31041, 30058, 31042, 31043, 30061, 30062, 28723, 30065, 30068, 28722, 30076, 30074, 30072, 30070, 31044, 30753, 30087, 30759, 31045, 30760, 31046, 31047, 31048, 31049, 30775, 30117, 30781, 30785, 30788, 30139, 30155, 30204, 30147, 30151, 30149, 30153, 30155, 30204, 30193, 30157, 30204, 30159, 30164, 30169, 30174, 30179, 30181, 30204, 30183, 30185, 30189, 30187, 30193, 30818, 30198, 30200, 30204, 30202, 30206, 30204, 30208, 31050, 31051, 30833, 30836, 29440, 30840, 31052, 31053, 30842, 31054, 31055, 31056, 29448, 30252, 29450, 31057, 30242, 31058, 30237, 30244, 31059, 31060, 31061, 30252, 30870, 30295, 30872, 30314, 30256, 30850, 30870, 30295, 30872, 30314, 30258, 31062, 30385, 31063, 31064, 30260, 31065, 31066, 30261, 31067, 31068, 31069, 30264, 31070, 31071, 30274, 31072, 30268, 30385, 31073, 31074, 30274, 31075, 30385, 31076, 30865, 31077, 31078, 31079, 31080, 31081, 31082, 30283, 30860, 31083, 31084, 30870, 30295, 30872, 30314, 30318, 30886, 30385, 30865, 30870, 30295, 30872, 30314, 30318, 30886, 31085, 30385, 31086, 30893, 30326, 30895, 30896, 30909, 30346, 30911, 30912, 30366, 30369, 30924, 30374, 30378, 30929, 31087, 30385, 31088, 30935, 31089, 31090, 31091, 31092, 30424, 31093, 30396, 30939, 30957, 30955, 31094, 31095, 31096, 31097, 30424, 31098, 30420, 30957, 30955, 31099, 31100, 31101, 31102, 30424, 31103, 30420, 30957, 30955, 30962, 30961, 30959, 31104, 31105, 31106, 30444, 31107, 30446, 30450, 31109, 30454, 30974, 30458, 30472, 30474, 30472, 30474, 30466, 30468, 30472, 30474, 30472, 30474, 30476, 30478, 31024, 30483, 31111, 31112, 31113, 31114, 30491, 30493, 31115, 31116, 31117, 30491, 30493, 7, 8, 9, 10, 11, 12, 13, 14, 15, 30616, 30634, 31158, 31160, 31162, 31164, 30670, 30681, 30684, 30687, 31183, 31189, 30762, 30765, 31216, 30880, 31223, 30902, 31227, 30918, 31242, 31243, 31244, 31245, 31246, 31247, 31248, 31249, 31250, 31251, 31252, 31254, 31255, 31256, 31257, 31258, 31259, 31260, 31261, 31262, 31263, 31264, 31266, 31267, 29677, 31268, 31269, 31270, 31272, 29691, 31273, 31274, 31275, 31277, 29691, 31123, 31279, 29692, 31280, 31281, 31282, 31284, 31286, 31287, 29709, 31288, 31289, 31290, 31292, 29724, 31294, 31296, 31240, 31297, 31299, 31300, 31301, 31302, 31127, 31304, 31306, 31307, 29760, 30548, 31309, 31310, 31311, 31312, 31313, 31314, 31127, 31316, 31317, 31318, 31319, 31128, 31321, 31323, 31324, 29760, 30548, 31326, 31327, 31328, 29763, 31329, 29763, 31330, 31331, 31332, 31333, 30554, 31132, 31334, 31335, 31133, 31134, 
30563, 31336, 30566, 31337, 30569, 31338, 31339, 31340, 31341, 31342, 31343, 30574, 31344, 31345, 31346, 31347, 31348, 31349, 31350, 31351, 31352, 30582, 31353, 30585, 31354, 31355, 31240, 31356, 31357, 31358, 31359, 31360, 31361, 31362, 31238, 31365, 31367, 31240, 31368, 31370, 31371, 31372, 31373, 31374, 31375, 31376, 31238, 31379, 31381, 31240, 31382, 31384, 31385, 31386, 31387, 31388, 31389, 31390, 31391, 31238, 31394, 31396, 31240, 31397, 31399, 31400, 31401, 31402, 31403, 29851, 31404, 31405, 29861, 29860, 31406, 31145, 30612, 31407, 31408, 29875, 31409, 31410, 29885, 29884, 31411, 31152, 30630, 31171, 31413, 31414, 31415, 30639, 31416, 31417, 29910, 29909, 31418, 31159, 31165, 31165, 31419, 29940, 31420, 31421, 29950, 31169, 31423, 31425, 31426, 29948, 31427, 31428, 29950, 31171, 31430, 31432, 31436, 31437, 31438, 31439, 31440, 31175, 30696, 31441, 30699, 31442, 31443, 31444, 31445, 31446, 31447, 31178, 30708, 31448, 30711, 31449, 31450, 31181, 31451, 31182, 31453, 31454, 31456, 31457, 31458, 31459, 31460, 31461, 31462, 31184, 30735, 31463, 31186, 31465, 31187, 31466, 31188, 31467, 31469, 31471, 31472, 31474, 31475, 31476, 31477, 31478, 31479, 31480, 31481, 31482, 31483, 31485, 30091, 31486, 31487, 31489, 31193, 31194, 31494, 31495, 31196, 31195, 31197, 31496, 31198, 31497, 31498, 31499, 30144, 30143, 30142, 31500, 31501, 31502, 31503, 31504, 31505, 31506, 31507, 31508, 31509, 31510, 31511, 30801, 31512, 30804, 31513, 30807, 31514, 30810, 31515, 31516, 31517, 31518, 31519, 31520, 31521, 31522, 31523, 30821, 31524, 31525, 31526, 31527, 31528, 31529, 31530, 30829, 30831, 31533, 31209, 31534, 31210, 31535, 31536, 31539, 31540, 31543, 31544, 31545, 31547, 31549, 31550, 31551, 31554, 31555, 31556, 30293, 31557, 31217, 31558, 30884, 31559, 31560, 31561, 31562, 30293, 31563, 31217, 31564, 30884, 31565, 31567, 31568, 31570, 31573, 31574, 31577, 31578, 31580, 31582, 31583, 31584, 31217, 31586, 31211, 31588, 31590, 31212, 31591, 30951, 31593, 31595, 31597, 31598, 31599, 31601, 31602, 30293, 31603, 31217, 31604, 30884, 31605, 31606, 30288, 31214, 31607, 31608, 31609, 31610, 30293, 31611, 31217, 31612, 30884, 31613, 31614, 30319, 31616, 31618, 31619, 30324, 31620, 31621, 31622, 31623, 30344, 31624, 31625, 31626, 30922, 31627, 31628, 31629, 30927, 31630, 31631, 30379, 31233, 31633, 31635, 31235, 31636, 30937, 30942, 31640, 31642, 31643, 31644, 31645, 31235, 31646, 30942, 30951, 31650, 31652, 31653, 31654, 31235, 31655, 30949, 30951, 31659, 31661, 31662, 31663, 31664, 31665, 31666, 31667, 31238, 31670, 31672, 31240, 31673, 31675, 31676, 31677, 31678, 31679, 31680, 31681, 31682, 31683, 31684, 31685, 31686, 31687, 31688, 31689, 31690, 31691, 31696, 31697, 31701, 31702, 8, 9, 10, 11, 12, 13, 14, 15, 31147, 31154, 31167, 31219, 31225, 31229, 31733, 31735, 31737, 31739, 31741, 31744, 31746, 31748, 31750, 31752, 31756, 31757, 31271, 31761, 31762, 31276, 31766, 31767, 31769, 31770, 31283, 31285, 31776, 31777, 31291, 31781, 31293, 31295, 31784, 31298, 31787, 31790, 31305, 31794, 31795, 31308, 31797, 31799, 31802, 31804, 31807, 31322, 31811, 31812, 31325, 31814, 31816, 31818, 31820, 31823, 31824, 31825, 31827, 31828, 31829, 31831, 31833, 31835, 31838, 31840, 31842, 31845, 31847, 31850, 31852, 31855, 31857, 31859, 31862, 31863, 31366, 31866, 31369, 31869, 31871, 31874, 31875, 31380, 31878, 31383, 31881, 31883, 31885, 31887, 31888, 31395, 31891, 31398, 31894, 31898, 31896, 31901, 31902, 31904, 31905, 31908, 31906, 31911, 31912, 31914, 31915, 31916, 31920, 31918, 31923, 31924, 31926, 30651, 30657, 31927, 
30666, 30662, 31928, 30666, 31930, 31931, 31933, 31934, 31424, 31938, 31939, 31941, 31942, 31431, 31719, 31720, 31721, 31945, 31948, 31950, 31951, 31953, 31955, 31958, 31960, 31961, 31963, 31966, 31968, 31969, 31722, 31973, 31976, 31978, 31979, 31981, 31983, 31985, 31990, 31992, 31994, 31996, 31998, 32001, 30084, 31724, 31725, 32005, 32006, 32007, 32009, 32010, 32011, 32013, 32015, 32017, 32018, 32019, 32020, 32023, 32026, 32029, 32032, 32034, 32036, 32038, 32040, 32044, 32046, 32048, 32050, 32053, 32056, 32057, 32059, 32061, 32063, 32065, 32068, 32069, 32072, 32076, 32074, 30876, 32078, 32080, 32081, 32085, 32083, 30876, 32087, 32089, 32090, 32093, 31572, 31576, 31581, 30876, 32102, 32104, 31589, 32107, 32109, 32112, 32117, 32115, 30876, 32119, 32121, 32122, 32124, 32125, 32130, 32128, 32132, 30876, 32134, 32135, 32137, 32141, 32139, 30337, 32146, 32144, 30357, 32150, 32151, 32154, 32155, 32157, 32158, 31634, 32161, 32163, 32164, 32165, 32166, 32168, 32170, 32172, 32173, 32174, 32176, 32178, 32180, 32181, 32182, 32184, 32186, 32189, 32190, 31671, 32193, 31674, 32196, 31732, 31743, 31754, 32000, 32004, 32071, 31988, 32067, 32073, 32000, 32004, 32067, 32073, 32071, 31988, 32064, 32000, 32004, 32092, 32095, 32097, 32101, 32127, 31617, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32230, 32233, 32235, 32238, 31755, 31278, 31768, 31775, 32258, 31793, 31810, 32295, 32297, 32301, 32304, 32308, 32311, 32316, 32320, 31900, 32323, 30618, 32326, 31910, 32329, 30636, 31412, 32333, 31922, 32337, 32338, 32340, 32341, 32343, 30672, 31929, 31422, 31937, 31429, 32354, 32355, 32356, 32370, 32378, 32381, 32384, 32383, 32385, 32386, 32390, 32395, 32416, 32418, 32422, 32423, 30882, 32079, 32428, 32429, 30882, 32088, 32437, 31587, 32445, 32446, 30882, 32120, 32453, 32455, 30882, 32133, 32460, 32461, 30904, 32463, 32464, 30920, 32149, 32153, 31632, 32475, 32481, 32486, 32488, 32492, 32495, 32496, 32497, 32242, 32245, 32251, 32250, 32256, 32254, 32266, 31789, 31791, 32266, 31801, 31803, 31806, 31808, 32275, 32348, 32353, 32055, 31822, 31464, 32280, 32043, 31975, 32283, 32282, 31834, 31832, 31830, 32288, 32043, 31837, 31841, 32043, 31849, 32028, 31844, 31853, 31851, 32348, 32296, 31864, 32303, 31876, 32310, 31889, 32318, 32348, 32179, 31658, 31657, 32162, 31639, 31638, 32348, 32353, 32498, 32499, 31493, 31492, 32014, 32012, 32359, 32043, 31947, 31954, 31952, 32364, 32043, 31957, 31964, 31962, 32055, 32052, 31464, 32375, 32377, 32368, 32367, 32500, 32501, 32502, 32503, 32064, 32504, 32505, 31493, 31492, 32014, 32012, 32373, 32043, 31975, 31980, 31464, 32375, 32377, 32376, 32506, 32507, 32508, 32509, 32510, 32511, 32512, 31493, 31492, 32014, 32012, 32043, 32022, 32025, 32028, 32031, 32039, 32037, 32035, 32033, 32408, 32043, 32042, 32049, 32055, 32052, 31532, 31531, 32415, 32414, 32066, 32067, 32062, 32073, 32471, 32513, 32434, 32514, 32434, 32515, 32435, 32516, 32436, 32440, 32108, 31638, 31639, 32110, 32111, 31658, 31657, 32114, 32191, 32494, 32517, 32451, 32518, 31615, 32471, 32162, 31639, 31638, 32171, 31649, 31648, 32179, 31658, 31657, 32191, 32494, 15, 32528, 32530, 32537, 32538, 32547, 32549, 32551, 32553, 32556, 32336, 32339, 32342, 32562, 32571, 32574, 32578, 32580, 32582, 32583, 32586, 32587, 32589, 32592, 32593, 32454, 32597, 32600, 32601, 32603, 32604, 32241, 32533, 32616, 32244, 32533, 32617, 32249, 32618, 32619, 32253, 32620, 32621, 32536, 32536, 32622, 32623, 32624, 32625, 32626, 32627, 32628, 32629, 32630, 31899, 31909, 32345, 32631, 32564, 32632, 32566, 32633, 32634, 32635, 32636, 32637, 32638, 32639, 32640, 
32641, 32642, 32643, 32644, 32645, 32646, 32647, 32648, 32649, 32650, 32651, 32652, 32653, 31899, 31909, 32345, 32554, 32654, 32539, 32655, 32298, 32656, 32541, 32657, 32305, 32658, 32543, 32659, 32313, 32660, 32545, 32661, 31899, 31909, 32345, 32662, 32554, 32663, 32664, 32665, 32487, 32666, 32667, 32668, 32477, 31921, 32345, 32669, 32564, 32350, 32670, 32566, 31433, 31434, 32673, 32674, 31435, 32577, 32675, 32676, 32677, 32678, 32679, 32680, 32681, 32682, 32683, 32684, 32685, 32686, 32687, 32688, 32689, 32690, 32691, 32692, 32693, 32698, 32694, 32696, 31972, 32701, 32702, 32577, 32703, 32704, 32705, 32706, 32707, 32708, 32709, 32710, 32711, 32712, 32713, 32715, 32720, 32721, 31491, 31490, 32577, 32722, 32723, 32724, 32725, 32726, 32727, 32728, 32729, 32730, 32731, 32732, 32733, 32734, 32735, 32736, 32737, 32738, 32739, 32740, 32741, 32742, 32743, 32744, 32745, 32064, 32746, 32077, 32426, 32747, 32607, 32086, 32432, 32749, 32433, 32751, 32753, 32755, 32756, 32590, 32757, 32758, 32759, 32477, 32760, 32761, 32762, 32763, 32487, 32764, 32765, 32612, 32766, 32118, 32449, 32768, 32131, 32457, 32770, 32142, 32147, 32468, 32466, 32771, 32607, 32772, 32773, 32774, 32477, 32775, 32776, 32777, 32482, 32778, 32779, 32780, 32487, 32489, 32781, 32612, 32782, 9, 10, 11, 12, 13, 14, 15, 32529, 32531, 32814, 32815, 32817, 32818, 32820, 32821, 32823, 32824, 32826, 32827, 32786, 32787, 31903, 32837, 32789, 31913, 32838, 32791, 32839, 32841, 32843, 32844, 32846, 32848, 32850, 32852, 32855, 32859, 32861, 32863, 31903, 32865, 32789, 31913, 32866, 32791, 32867, 32868, 32870, 32872, 32874, 32876, 32878, 32880, 32882, 31903, 32884, 32789, 31913, 32885, 32791, 32886, 32888, 32890, 32892, 32894, 32896, 31925, 32897, 32796, 32560, 32558, 32898, 32900, 32901, 32903, 32572, 32003, 32904, 32905, 32908, 32906, 32909, 32910, 32799, 32912, 32915, 32917, 32920, 32922, 32924, 32926, 32930, 32572, 32003, 32932, 32935, 32936, 32799, 32938, 32942, 32944, 32946, 32572, 32003, 32950, 32951, 32948, 32952, 32953, 32799, 32955, 32957, 32960, 32962, 32964, 32968, 32970, 32972, 32071, 32977, 32975, 32979, 32802, 32980, 32982, 32983, 32804, 32984, 32748, 32986, 32750, 32752, 32754, 32809, 32991, 32993, 32995, 32998, 33000, 33003, 33005, 32807, 33006, 32767, 33008, 32809, 33009, 32769, 33011, 32811, 33012, 32813, 33013, 33014, 33016, 33018, 33020, 33022, 33024, 33026, 33028, 33029, 33031, 32199, 32207, 32835, 32830, 32201, 32835, 32833, 32203, 32215, 32205, 32207, 32209, 32211, 33002, 32213, 33002, 32213, 32215, 14, 15, 33040, 33041, 33043, 33045, 33052, 33053, 33054, 33056, 33057, 33059, 32840, 32842, 33065, 33067, 33068, 33069, 33072, 33074, 33075, 33077, 33079, 33087, 33089, 33090, 33092, 32887, 33099, 33101, 33102, 33103, 32899, 32902, 33108, 33109, 33110, 33116, 33117, 33119, 33123, 33124, 33125, 33126, 33127, 33130, 33131, 33134, 33135, 33136, 33137, 33142, 33143, 33145, 33147, 33151, 33152, 33155, 32981, 33159, 33166, 32990, 33174, 33178, 33182, 33184, 33185, 33015, 33064, 33149, 33049, 33047, 33168, 31694, 33196, 32198, 33197, 32206, 33170, 31695, 33198, 33199, 33200, 33201, 33202, 33203, 33188, 31698, 33192, 31700, 33030, 33204, 32214, 33064, 33149, 33205, 32204, 32873, 33206, 32206, 32877, 33207, 32208, 32881, 33208, 32210, 33095, 31692, 33097, 31693, 33168, 31694, 33170, 31695, 33209, 33210, 32212, 33122, 33132, 33149, 33161, 33163, 33164, 33165, 33168, 31694, 33190, 33170, 31695, 33211, 33212, 32212, 33176, 33180, 33188, 31698, 33190, 31699, 33192, 31700, 33030, 33213, 32214, 13, 14, 15, 33222, 33224, 33232, 33234, 33237, 
33239, 33242, 33243, 33250, 33258, 33139, 33266, 33269, 32672, 32613, 33251, 33229, 33282, 33255, 32719, 32615, 32614, 33265, 33283, 33071, 32858, 33284, 33285, 33219, 33218, 33286, 33287, 33289, 33291, 33292, 33293, 33294, 32200, 33297, 32202, 33226, 33227, 33300, 33301, 33302, 33303, 33304, 33306, 32672, 32671, 33251, 33229, 33307, 33255, 32719, 32718, 33265, 33308, 33071, 32858, 33236, 33310, 33311, 33313, 33314, 33316, 33317, 33319, 33241, 33320, 33321, 33322, 33323, 33246, 33247, 33324, 33325, 33326, 33327, 33330, 32672, 32671, 33251, 33331, 33120, 33118, 33255, 32700, 32699, 33259, 33332, 32941, 33261, 32719, 32718, 33265, 33333, 32967, 33271, 33272, 33273, 33334, 33335, 33336, 33337, 33274, 33275, 33338, 33339, 33340, 33341, 33342, 33345, 33276, 33346, 33277, 33347, 33279, 33278, 33281, 33348, 33349, 33350, 33351, 33352, 33353, 33354, 33356, 33367, 33368, 33372, 33373, 33374, 33375, 33376, 33378, 33379, 33380, 33381, 33382, 33140, 33267, 33384, 33385, 33388, 33389, 33386, 33288, 33290, 33397, 33399, 33225, 33223, 33400, 33401, 33305, 33408, 33409, 33410, 33411, 33413, 33414, 33415, 33416, 33140, 33267, 33418, 33419, 33235, 33233, 33420, 33309, 33312, 33315, 33318, 33240, 33238, 33428, 33433, 33434, 33329, 33440, 33441, 33442, 33444, 33445, 33446, 33447, 33448, 33449, 33128, 33451, 33452, 33453, 33454, 33455, 33140, 33457, 33267, 33458, 33459, 33460, 33465, 33466, 33344, 33473, 33475, 33477, 33478, 33479, 33355, 33483, 33391, 33395, 33405, 33483, 33403, 33483, 33432, 33430, 33438, 33483, 33436, 33471, 33483, 33468, 33485, 33483, 33481, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33490, 33491, 33114, 33494, 33496, 33500, 33501, 33502, 33488, 33504, 33296, 33299, 33511, 33512, 33516, 33114, 33519, 33521, 33524, 33525, 33526, 33528, 33529, 33535, 33536, 33488, 33541, 33114, 33443, 33547, 33550, 33450, 33553, 33556, 33558, 33456, 33567, 33514, 33571, 33572, 33508, 33507, 33573, 33514, 33515, 33574, 33575, 33576, 33539, 33534, 33533, 33532, 33531, 33539, 33577, 33578, 33579, 33539, 33540, 33580, 33581, 33582, 33462, 33461, 33560, 33463, 33563, 33464, 33564, 33583, 33584, 33585, 33476, 33474, 33570, 33586, 33587, 33588, 14, 15, 33602, 33604, 33499, 33383, 33600, 33608, 33506, 33612, 33615, 33523, 33417, 33600, 33621, 33623, 33625, 33627, 33628, 33549, 33555, 33635, 33600, 33495, 33637, 33610, 33640, 33641, 33611, 33643, 33644, 33646, 33520, 33648, 33649, 33650, 33651, 33652, 33653, 33654, 33657, 33658, 33660, 33552, 33662, 33663, 33664, 33665, 33666, 33667, 33668, 33670, 33569, 33672, 33673, 33674, 33676, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33493, 33683, 33684, 33518, 33690, 33691, 33543, 33700, 33701, 33682, 33686, 33703, 33706, 33639, 33513, 33708, 33710, 33689, 33530, 33712, 33714, 33537, 33717, 33538, 33719, 33546, 33697, 33721, 33698, 33722, 33724, 33726, 33728, 33730, 33733, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33744, 33753, 33746, 33754, 33755, 33705, 33758, 33759, 33747, 33761, 33749, 33762, 33763, 33765, 33767, 33768, 33750, 33769, 33770, 33772, 33751, 33773, 33776, 33777, 33778, 9, 10, 11, 12, 13, 14, 15, 33792, 33794, 33702, 33796, 33797, 33707, 33800, 33802, 33711, 33716, 33718, 33808, 33812, 33813, 33815, 33771, 33771, 33771, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33827, 33825, 33839, 33752, 33799, 33831, 33840, 33760, 33807, 33766, 33804, 33836, 33841, 33809, 33816, 33814, 33857, 33859, 33860, 33856, 33861, 33863, 33864, 33865, 33866, 33867, 33869, 33870, 33871, 13, 14, 15, 33872, 33875, 33876, 33878, 33881, 33883, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33888, 33874, 
33890, 33891, 33892, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33893, 33907, 33905, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33920, 33921, 33922, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33936, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33952, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
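/* h_C: hardcoded, machine-generated host-side integer table. The h_ prefix
   matches the host-array naming convention used for hA_* elsewhere in this
   file; its exact role is an inference from that convention only. */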
int h_C[] = {
1, 3, 5, 7, 9, 11, 13, 15, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 48, 50, 52, 54, 57, 59, 61, 63, 65, 67, 69, 71, 74, 76, 78, 80, 82, 84, 86, 88, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 487, 489, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 513, 515, 517, 519, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 683, 685, 687, 689, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852, 854, 856, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 948, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1039, 1041, 1046, 1048, 1051, 1053, 1055, 1057, 1060, 1062, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1089, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, 1135, 1138, 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1154, 1156, 1159, 1161, 1164, 1166, 1168, 1170, 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1206, 1208, 1211, 1213, 1215, 1217, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1236, 1238, 1240, 1242, 1245, 1247, 1249, 1251, 1254, 1256, 1259, 1261, 1264, 1266, 1269, 1271, 1274, 1276, 1278, 1280, 1282, 1284, 1287, 1289, 1292, 1294, 1297, 1299, 1302, 1304, 1307, 1309, 1312, 1314, 1317, 1319, 1322, 1324, 1327, 1329, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1349, 1351, 1354, 1356, 1362, 1364, 1366, 1368, 1370, 1372, 1375, 1377, 1379, 1381, 1383, 1385, 1388, 1390, 1392, 1394, 1397, 1399, 1402, 1404, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1431, 1433, 1436, 1438, 1440, 
1442, 1444, 1446, 1449, 1451, 1454, 1456, 1458, 1460, 1462, 1464, 1467, 1469, 1472, 1474, 1477, 1479, 1482, 1484, 1487, 1489, 1492, 1494, 1497, 1499, 1502, 1504, 1507, 1509, 1512, 1514, 1517, 1519, 1521, 1523, 1526, 1528, 1531, 1533, 1539, 1541, 1543, 1545, 1547, 1549, 1552, 1554, 1557, 1559, 1562, 1564, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1640, 1642, 1644, 1646, 1649, 1651, 1653, 1655, 1657, 1659, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1693, 1695, 1697, 1699, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1727, 1729, 1731, 1733, 1735, 1737, 1739, 1741, 1743, 1745, 1747, 1749, 1751, 1753, 1755, 1757, 1759, 1761, 1763, 1765, 1767, 1769, 1771, 1773, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1791, 1793, 1795, 1797, 1799, 1801, 1803, 1805, 1807, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868, 1870, 1872, 1874, 1876, 1878, 1881, 1883, 1885, 1887, 1889, 1891, 1894, 1896, 1899, 1901, 1904, 1906, 1909, 1911, 1913, 1915, 1918, 1920, 1922, 1924, 1927, 1929, 1933, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1951, 1953, 1955, 1957, 1959, 1961, 1963, 1965, 1967, 1969, 1971, 1974, 1976, 1979, 1981, 1984, 1986, 1989, 1991, 1994, 1996, 1998, 2000, 2003, 2005, 2008, 2010, 2015, 2017, 2020, 2022, 2025, 2027, 2030, 2032, 2035, 2037, 2040, 2042, 2045, 2047, 2049, 2051, 2053, 2055, 2058, 2060, 2063, 2065, 2067, 2069, 2072, 2074, 2076, 2078, 2080, 2082, 2084, 2086, 2088, 2090, 2093, 2095, 2100, 2102, 2105, 2107, 2110, 2112, 2114, 2116, 2118, 2120, 2122, 2124, 2126, 2128, 2130, 2132, 2134, 2136, 2138, 2140, 2142, 2144, 2146, 2148, 2150, 2152, 2154, 2156, 2158, 2160, 2162, 2164, 2166, 2168, 2171, 2173, 2175, 2177, 2180, 2182, 2185, 2187, 2193, 2195, 2197, 2199, 2201, 2203, 2206, 2208, 2211, 2213, 2216, 2218, 2221, 2223, 2225, 2227, 2229, 2231, 2234, 2236, 2239, 2241, 2244, 2246, 2249, 2251, 2254, 2256, 2263, 2265, 2267, 2269, 2271, 2273, 2275, 2277, 2280, 2282, 2285, 2287, 2293, 2295, 2297, 2299, 2302, 2304, 2307, 2309, 2318, 2320, 2323, 2325, 2328, 2330, 2333, 2335, 2338, 2340, 2344, 2346, 2348, 2350, 2353, 2355, 2357, 2359, 2361, 2363, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 2397, 2399, 2401, 2403, 2405, 2407, 2409, 2411, 2413, 2415, 2417, 2419, 2421, 2423, 2425, 2427, 2429, 2431, 2433, 2435, 2437, 2439, 2442, 2444, 2446, 2448, 2451, 2453, 2455, 2457, 2459, 2461, 2463, 2465, 2467, 2469, 2471, 2473, 2475, 2477, 2480, 2482, 2484, 2486, 2488, 2490, 2492, 2494, 2496, 2498, 2500, 2502, 2504, 2506, 2508, 2510, 2513, 2515, 2517, 2519, 2521, 2523, 2525, 2527, 2529, 2531, 2534, 2536, 2538, 2540, 2542, 2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558, 2560, 2562, 2564, 2567, 2569, 2571, 2573, 2575, 2577, 2579, 2581, 2583, 2585, 2587, 2589, 2591, 2593, 2595, 2597, 2599, 2601, 2603, 2605, 2607, 2609, 2611, 2613, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2643, 2645, 2647, 2649, 2651, 2653, 2655, 2657, 2659, 2661, 2663, 2665, 2667, 2669, 2671, 2673, 2675, 2677, 2679, 2681, 2683, 2685, 2687, 2689, 2691, 2693, 2695, 2697, 2700, 2702, 2704, 2706, 2709, 2711, 2713, 2715, 2717, 2719, 2721, 2723, 2725, 2727, 2730, 2732, 2735, 2737, 2739, 
24572, 24571, 10530, 24573, 24574, 24575, 24577, 24579, 24581, 24580, 24583, 24582, 24622, 24621, 24624, 24623, 24590, 24589, 24584, 24628, 24591, 24629, 24632, 24631, 24585, 10553, 22886, 22779, 24595, 24588, 24623, 24624, 24626, 24590, 24589, 24628, 24591, 24629, 24632, 24631, 24592, 10569, 22886, 22779, 24595, 24596, 24618, 24617, 24626, 24625, 24628, 24627, 24619, 24632, 24631, 24633, 24635, 24620, 19306, 24596, 24621, 24623, 24597, 24626, 24625, 24628, 24627, 24629, 24632, 24631, 24598, 24635, 24636, 19312, 22790, 24600, 24602, 24604, 10606, 24607, 24606, 10609, 24608, 24610, 24612, 10613, 24615, 24614, 10616, 24622, 24616, 24618, 24617, 24626, 24625, 24628, 24627, 24619, 24632, 24631, 24633, 24635, 24620, 19306, 22886, 19307, 24622, 24621, 24624, 24623, 24626, 24625, 24628, 24627, 24629, 24632, 24631, 24633, 24635, 24636, 19312, 22886, 19313, 24642, 24641, 22897, 10654, 10655, 24644, 24646, 24645, 24648, 24647, 24649, 10662, 24650, 24653, 24652, 24651, 24655, 24654, 19328, 22935, 24659, 24658, 24664, 24661, 24660, 19335, 22948, 24666, 24665, 24664, 24668, 24667, 20666, 22963, 24708, 24690, 10687, 10688, 10689, 10690, 24671, 24693, 24706, 24705, 23109, 10696, 10697, 10698, 10699, 24708, 24690, 22979, 10703, 10704, 10705, 10706, 10707, 10708, 10709, 10710, 24671, 24693, 24708, 24690, 22979, 10716, 10717, 24690, 24708, 10720, 10721, 10722, 10723, 10724, 24721, 24699, 24675, 24725, 24676, 24718, 24677, 10732, 10733, 10734, 10735, 24680, 24674, 20698, 10739, 24699, 24675, 24676, 24718, 24677, 10745, 10746, 24680, 24674, 20703, 10750, 24721, 24699, 24675, 24725, 24676, 24718, 24677, 10758, 10759, 10760, 10761, 24680, 24678, 24681, 10765, 24700, 24699, 10768, 10769, 24701, 24679, 24704, 24680, 24681, 10775, 24684, 24683, 23126, 10779, 10780, 10781, 10782, 10783, 10784, 10785, 10786, 24685, 24687, 10789, 24715, 24706, 24705, 23109, 10794, 10795, 10796, 10797, 24708, 24690, 10800, 10801, 10802, 10803, 10804, 10805, 10806, 10807, 10808, 10809, 10810, 24692, 24691, 24693, 10814, 10815, 10816, 10817, 24694, 24696, 24698, 24697, 24700, 24699, 24725, 24727, 24728, 24729, 10828, 10829, 10830, 24702, 24701, 24704, 24703, 24734, 10836, 24708, 24709, 10839, 10840, 10841, 10842, 10843, 10844, 10845, 10846, 10847, 10848, 10849, 10850, 10851, 24713, 24715, 24706, 24705, 23109, 10857, 10858, 10859, 10860, 24709, 24708, 23126, 10864, 10865, 10866, 10867, 23137, 10869, 10870, 24712, 24713, 24715, 24721, 24724, 24723, 24725, 24716, 24718, 24717, 24720, 24719, 10883, 10884, 24721, 24724, 24723, 24725, 24727, 24729, 24728, 24731, 24730, 24732, 10895, 10896, 24734, 10898, 24736, 24738, 24737, 24746, 24749, 24748, 10905, 10906, 10907, 24751, 24739, 24740, 24754, 24753, 10913, 24741, 24742, 24744, 24746, 24749, 24748, 10920, 10921, 10922, 24751, 24750, 24754, 24753, 24752, 10928, 24776, 24775, 24794, 10932, 10933, 24786, 24788, 10936, 24756, 24755, 10939, 24794, 24793, 24757, 10943, 24759, 10945, 24781, 24783, 10948, 24785, 24786, 24788, 24790, 24791, 24776, 24775, 24794, 10957, 10958, 10959, 24761, 24764, 24763, 10963, 24765, 24767, 24769, 24772, 10968, 25431, 24781, 24783, 10973, 24785, 24770, 24773, 24772, 10978, 21037, 24776, 24775, 24793, 23311, 10984, 10985, 24778, 10987, 24780, 24781, 24783, 10991, 24785, 24786, 24788, 24790, 24791, 10997, 24794, 24793, 24795, 11001, 24920, 11003, 11004, 24922, 24924, 24923, 24926, 24925, 11010, 11011, 24907, 11013, 11014, 11015, 11016, 11017, 11018, 11019, 11020, 11021, 11022, 24938, 24799, 24955, 24939, 24799, 24955, 24847, 24845, 24940, 24942, 24944, 24800, 24955, 
11036, 11037, 24847, 24846, 24848, 24850, 11042, 11043, 24942, 24944, 24801, 11047, 24803, 24804, 11050, 24806, 24807, 11053, 24809, 24811, 11056, 24812, 24814, 11059, 24816, 24815, 11062, 11063, 24817, 11065, 24819, 24821, 24822, 24823, 24824, 24826, 11072, 11073, 24847, 24846, 24845, 24848, 24850, 24936, 24935, 24828, 24940, 24939, 24938, 11085, 11086, 11087, 11088, 24830, 24829, 24894, 24859, 24831, 24833, 24858, 24886, 24857, 11098, 11099, 24910, 24909, 24908, 24911, 11104, 19533, 11106, 19534, 24914, 21214, 24917, 21491, 11112, 11113, 11114, 24835, 11116, 11117, 24837, 24840, 24839, 24842, 24841, 11123, 11124, 24843, 11126, 11127, 11128, 11129, 24847, 24846, 24845, 24848, 24850, 11135, 24852, 24853, 24855, 24858, 24857, 24859, 11142, 24860, 11144, 11145, 11146, 24861, 11148, 11149, 21303, 21306, 11152, 24864, 24866, 24865, 21320, 21323, 11158, 24869, 24871, 24870, 25497, 24872, 25499, 24873, 24875, 24874, 24876, 24878, 24881, 24880, 24882, 23615, 24886, 24885, 11178, 11179, 24888, 24887, 24890, 24889, 24892, 24891, 24893, 24894, 11190, 11191, 24897, 24896, 24895, 24899, 24898, 24901, 24900, 24902, 24904, 24903, 24905, 11205, 11206, 11207, 24906, 24910, 24909, 24908, 24911, 11213, 19533, 11215, 19534, 24914, 21456, 24917, 21491, 11221, 11222, 11223, 24922, 24926, 24925, 11227, 11228, 24907, 11230, 11231, 24910, 24909, 24908, 24911, 11236, 19533, 11238, 19534, 24914, 21484, 24917, 21491, 11244, 11245, 11246, 24920, 11248, 11249, 24922, 24924, 24923, 24926, 24925, 11255, 11256, 24927, 11258, 11259, 11260, 11261, 24930, 24929, 24928, 24931, 24933, 24936, 24935, 24937, 24940, 24939, 24938, 11273, 11274, 24953, 11276, 24955, 23778, 24942, 23784, 24944, 24945, 24947, 24949, 11285, 24952, 24951, 11288, 11289, 24953, 24954, 24955, 11293, 24957, 24956, 21619, 11297, 11298, 21626, 24961, 24960, 24962, 24963, 11304, 24964, 11306, 24967, 24966, 24969, 24968, 24970, 11312, 24972, 11314, 11315, 11316, 11317, 24974, 24975, 25001, 25002, 24977, 11327, 11328, 11329, 11330, 24978, 24980, 11344, 24981, 11346, 24983, 24985, 24984, 11350, 24986, 24988, 24989, 24990, 25044, 25045, 24992, 24991, 21729, 11360, 11361, 11362, 21761, 24994, 24993, 24995, 24996, 11368, 11369, 23941, 11371, 11372, 24998, 24997, 21754, 11376, 11377, 21761, 25002, 25001, 25003, 25004, 11383, 11384, 11385, 11386, 11387, 11388, 25006, 11390, 25007, 25009, 25010, 11394, 11395, 25012, 25014, 25015, 25017, 25018, 11401, 11402, 11403, 11404, 11405, 11406, 25020, 25022, 11424, 25023, 11426, 25024, 25025, 25027, 11430, 25028, 11432, 25029, 25030, 11435, 11436, 25033, 25032, 11439, 25034, 25036, 25037, 25038, 25039, 25040, 25041, 25043, 25044, 25045, 25047, 11451, 11452, 24096, 21922, 25053, 25052, 25051, 25054, 25056, 25058, 25057, 24121, 11463, 11464, 25060, 11466, 25063, 25062, 25064, 25066, 25065, 25067, 25068, 11474, 25070, 25069, 11489, 25071, 25072, 25074, 25075, 11494, 11495, 25077, 25079, 25080, 25082, 25085, 25084, 11502, 11503, 11504, 25087, 25086, 25089, 25088, 25090, 11510, 25091, 25274, 25274, 25618, 25620, 25622, 25613, 25603, 24346, 25614, 25613, 25613, 25603, 25614, 25613, 25613, 25603, 25159, 25625, 25161, 25627, 25614, 25613, 25274, 25276, 22075, 24301, 25555, 25556, 24310, 25586, 25587, 22098, 12453, 12454, 25613, 25603, 25604, 25631, 12468, 12469, 25614, 25613, 25615, 25635, 15, 9728, 9729, 9730, 9731, 9732, 9733, 9734, 9735, 9736, 9737, 9738, 9739, 9745, 9746, 9747, 9749, 9750, 9751, 9752, 9753, 9754, 9755, 9756, 9757, 9758, 9759, 9760, 9761, 9762, 9763, 9764, 9765, 9770, 9771, 9772, 9773, 9774, 9775, 9776, 9777, 9778, 
9779, 9780, 9781, 9782, 9783, 9784, 9785, 9786, 9796, 9797, 9798, 25714, 25717, 25719, 25721, 25724, 9812, 9813, 9814, 9815, 9816, 9817, 9818, 9819, 9820, 9821, 9822, 9823, 9824, 9825, 9826, 9829, 9830, 9833, 9834, 9835, 25747, 25749, 9843, 25752, 9845, 9846, 9850, 9851, 9853, 9854, 9855, 9856, 9857, 9858, 9859, 9860, 9861, 9862, 9863, 9864, 9865, 9866, 9867, 9868, 9869, 9870, 9871, 9872, 9873, 9874, 9875, 9877, 9878, 9879, 9880, 9881, 9882, 9883, 25795, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 25804, 25807, 25809, 25812, 9904, 9905, 25817, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 25828, 9919, 9920, 9921, 9922, 9923, 9926, 9927, 9928, 9929, 9930, 9931, 25841, 9934, 9935, 9936, 9937, 9938, 9941, 9942, 9943, 9944, 9945, 9946, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 9956, 9957, 9958, 9960, 9961, 9962, 9963, 9965, 9967, 9968, 9969, 9970, 9971, 9972, 9989, 9990, 9994, 9995, 9996, 9997, 9998, 10000, 10001, 10010, 10011, 10012, 25895, 10015, 10016, 10017, 10018, 10019, 10021, 10022, 10023, 10025, 10026, 10028, 10029, 10030, 10031, 10032, 10033, 25914, 10037, 10039, 10040, 10041, 10043, 10044, 10045, 10052, 10054, 10055, 10056, 25934, 10060, 25938, 10064, 25942, 10067, 10069, 10072, 10073, 10074, 10075, 10076, 10078, 10079, 10080, 10083, 10084, 10085, 10086, 10087, 10088, 10090, 25965, 10094, 25969, 10098, 10099, 10100, 10103, 10104, 10105, 10106, 10108, 10109, 10110, 10113, 10114, 10115, 10116, 10117, 10118, 10119, 10120, 10122, 10123, 10125, 10126, 10127, 10128, 10129, 26001, 10132, 10133, 10134, 10135, 10136, 10138, 10139, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 26019, 10152, 10153, 10154, 10155, 10156, 10157, 10158, 10159, 10160, 10161, 10162, 26033, 10165, 10166, 10168, 10169, 10171, 10172, 10173, 10174, 10175, 26044, 10178, 10179, 10180, 10181, 10182, 10184, 10185, 10186, 10188, 10189, 10191, 10192, 10193, 10194, 10195, 10196, 26063, 10200, 10203, 10204, 10205, 10206, 10207, 10208, 10209, 10210, 10211, 10212, 10213, 10214, 10215, 10216, 10217, 10218, 10219, 10220, 10221, 10222, 10224, 10226, 26093, 10229, 10230, 10231, 10232, 10233, 10234, 10235, 10236, 10237, 10238, 10239, 10240, 10241, 10242, 10243, 10244, 10245, 10246, 10247, 10248, 10249, 10250, 10251, 10252, 10253, 10255, 10256, 10257, 10258, 10259, 10260, 10261, 10262, 10263, 10264, 10265, 10266, 10268, 10269, 10270, 10271, 10272, 10273, 10274, 10275, 10276, 10277, 10278, 10279, 10280, 10281, 10282, 10284, 10285, 10286, 10287, 10288, 10289, 10290, 10291, 10292, 10293, 10294, 10295, 10296, 10297, 10298, 10299, 10300, 10301, 10304, 10305, 10306, 10307, 10308, 10309, 10310, 10311, 10312, 10313, 10314, 10315, 10316, 10317, 10318, 10319, 10320, 10321, 10322, 10323, 10324, 26190, 10327, 10328, 10329, 10330, 10331, 10333, 10334, 10335, 10336, 10337, 10338, 10339, 26205, 10342, 10343, 10344, 10345, 10346, 10348, 10349, 10350, 10351, 10352, 10353, 10354, 10356, 10357, 10359, 10360, 10361, 10362, 10363, 10364, 10365, 10366, 10367, 10368, 10369, 10370, 10371, 10372, 10373, 10374, 10375, 10376, 10377, 10378, 10380, 10382, 10383, 10384, 10385, 10386, 10387, 10391, 26254, 10392, 10393, 10394, 10395, 10396, 10397, 10398, 10399, 10400, 10401, 10402, 10403, 10404, 10405, 10406, 10407, 10408, 10411, 10412, 10415, 10416, 10417, 10418, 10419, 10420, 10421, 10422, 10423, 10424, 26286, 10428, 10429, 10430, 10431, 10432, 10434, 10435, 10436, 10437, 10438, 10439, 26301, 10443, 10444, 10445, 10446, 10447, 10449, 10450, 10452, 10453, 10454, 10455, 10456, 10458, 10464, 10466, 10467, 10468, 10470, 10471, 10473, 10474, 10475, 10476, 
10477, 26334, 10481, 10482, 10483, 26339, 10487, 10488, 10489, 26344, 10493, 10494, 10495, 26349, 10498, 10500, 10501, 10502, 10503, 10504, 10506, 10507, 10509, 10510, 10511, 10513, 10514, 10515, 10517, 10519, 10521, 10522, 10523, 10524, 10525, 26379, 10528, 10529, 10531, 10532, 10533, 10534, 10535, 10536, 10537, 10538, 10539, 10540, 10541, 10542, 10543, 10544, 10545, 10546, 10547, 10548, 10549, 10550, 10551, 10552, 10554, 10555, 10556, 10557, 10558, 10559, 10560, 10561, 10562, 10563, 10564, 10565, 10566, 10567, 10568, 10570, 10571, 10572, 10573, 10574, 10575, 10576, 10577, 10578, 10579, 10580, 10581, 10582, 10583, 10584, 10585, 10586, 10587, 10588, 10589, 10590, 10591, 10592, 10593, 10594, 10595, 10596, 10597, 10598, 10599, 10600, 10601, 10602, 10603, 10604, 10605, 10607, 10608, 10610, 10611, 10612, 10614, 10615, 10617, 10618, 10619, 10620, 10621, 10622, 10623, 10624, 10625, 10626, 10627, 10628, 10629, 10630, 10631, 10632, 10633, 10634, 10635, 10636, 10637, 10638, 10639, 10640, 10641, 10642, 10643, 10644, 10645, 10646, 10647, 10648, 10649, 10650, 10651, 10652, 10653, 26507, 10656, 10657, 10658, 10659, 10660, 10661, 10663, 10664, 10665, 10666, 10667, 10668, 10669, 10670, 10671, 10672, 10673, 10674, 10675, 10676, 10677, 10678, 10679, 10680, 10681, 10682, 10683, 10684, 10685, 10686, 26541, 10691, 10692, 10693, 10694, 10695, 26549, 26551, 10700, 10701, 10702, 26556, 26558, 26561, 10711, 10712, 10713, 10714, 10715, 26569, 10718, 10719, 26573, 26576, 10725, 10726, 10727, 10728, 10729, 10730, 10731, 26585, 26587, 10736, 10737, 10738, 10740, 10741, 10742, 10743, 10744, 26598, 10747, 10748, 10749, 10751, 10752, 10753, 10754, 10755, 10756, 10757, 26611, 26613, 10762, 10763, 10764, 10766, 10767, 26621, 10770, 10771, 10772, 10773, 10774, 10776, 10777, 10778, 26632, 26634, 26637, 10787, 10788, 10790, 10791, 10792, 10793, 26647, 26649, 10798, 10799, 26653, 26656, 26658, 26661, 10811, 10812, 10813, 26668, 10818, 10819, 10820, 10821, 10822, 10823, 10824, 10825, 10826, 10827, 26681, 10831, 10832, 10833, 10834, 10835, 10837, 10838, 26692, 26695, 26697, 26699, 26702, 10852, 10853, 10854, 10855, 10856, 26710, 26712, 10861, 10862, 10863, 26717, 26719, 10868, 10871, 26722, 10872, 10873, 10874, 10875, 10876, 10877, 10878, 10879, 10880, 10881, 10882, 26736, 10885, 10886, 10887, 10888, 10889, 10890, 10891, 10892, 10893, 10894, 26748, 10897, 10899, 10900, 10901, 10902, 10903, 10904, 26758, 10908, 10909, 10910, 10911, 10912, 10914, 10915, 10916, 10917, 10918, 10919, 26773, 10923, 10924, 10925, 10926, 10927, 10929, 10930, 10931, 10934, 10935, 10937, 10938, 10940, 10941, 10942, 10944, 10946, 10947, 10949, 10950, 10951, 10952, 10953, 10954, 10955, 10956, 10960, 10961, 10962, 10964, 10965, 10966, 10967, 10971, 10972, 10974, 10975, 10976, 10977, 10979, 10980, 10981, 10982, 10983, 10986, 10988, 10989, 10990, 10992, 10993, 10994, 10995, 10996, 10998, 10999, 11000, 11002, 26855, 11005, 11006, 11007, 11008, 11009, 26862, 11012, 26865, 26867, 26869, 26871, 26873, 11023, 11024, 11025, 11026, 11027, 11028, 11029, 11030, 11031, 11032, 11033, 11034, 11035, 26888, 11038, 11039, 11040, 11041, 26894, 11044, 11045, 11046, 11048, 11049, 11051, 11052, 11054, 11055, 11057, 11058, 11060, 11061, 26914, 11064, 11066, 11067, 11068, 11069, 11070, 11071, 26924, 11074, 11075, 11076, 11077, 11078, 11079, 11080, 11081, 11082, 11083, 11084, 26937, 26939, 11089, 11090, 11091, 11092, 11093, 11094, 11095, 11096, 11097, 26950, 11100, 11101, 11102, 11103, 11105, 11107, 11108, 11109, 11110, 11111, 26965, 11115, 26968, 11118, 11119, 11120, 11121, 
11122, 26975, 11125, 26978, 26980, 11130, 11131, 11132, 11133, 11134, 11136, 11137, 11138, 11139, 11140, 11141, 11143, 26996, 11147, 27000, 11150, 11151, 11153, 11154, 11155, 11156, 11157, 11159, 11160, 11161, 11164, 11167, 11168, 11169, 11170, 11171, 11172, 11173, 11174, 11175, 11176, 11177, 27028, 11180, 11181, 11183, 11184, 11186, 11187, 11188, 11189, 27038, 11192, 11193, 11194, 11196, 11197, 11199, 11200, 11201, 11202, 11203, 11204, 27051, 11208, 11209, 11210, 11211, 11212, 11214, 11216, 11217, 11218, 11219, 11220, 27068, 11224, 11225, 11226, 27073, 11229, 27076, 11232, 11233, 11234, 11235, 11237, 11239, 11240, 11241, 11242, 11243, 27091, 11247, 27094, 11250, 11251, 11252, 11253, 11254, 27101, 11257, 27104, 27106, 11262, 11263, 11264, 11265, 11266, 11267, 11268, 11269, 11270, 11271, 11272, 27119, 11275, 11277, 11278, 11279, 11280, 11281, 11282, 11283, 11284, 11286, 11287, 27134, 11290, 11291, 11292, 11294, 11295, 11296, 27143, 11299, 11300, 11301, 11302, 11303, 11305, 11307, 11308, 11309, 11310, 11311, 11313, 27160, 27162, 11322, 11323, 11324, 11325, 11326, 27169, 27171, 11342, 11343, 11345, 11347, 11348, 11349, 11351, 11352, 11353, 11354, 11355, 11356, 11357, 11358, 11359, 27191, 11363, 11364, 11365, 11366, 11367, 27199, 11370, 27202, 11373, 11374, 11375, 27207, 11378, 11379, 11380, 11381, 11382, 27214, 27216, 27218, 11389, 11391, 11392, 11393, 27225, 11396, 11397, 11398, 11399, 11400, 27233, 27236, 11422, 11423, 11425, 11427, 11428, 11429, 11431, 11433, 11434, 27251, 11437, 11438, 11440, 11441, 11442, 11443, 11444, 11445, 11446, 11447, 11448, 11449, 11450, 27267, 11453, 11454, 11455, 11456, 11457, 11458, 11459, 11460, 11461, 11462, 27279, 11465, 11467, 11468, 11469, 11470, 11471, 11472, 11473, 11475, 11476, 11490, 11491, 11492, 11493, 27298, 11496, 11497, 11498, 11499, 11500, 11501, 27307, 11505, 11506, 11507, 11508, 11509, 11511, 11544, 11563, 11679, 11680, 24347, 11686, 11687, 11754, 11755, 11756, 11757, 11776, 11777, 11778, 11781, 11786, 11787, 25889, 25888, 25926, 25925, 12026, 12027, 26784, 26809, 26820, 12389, 12394, 12395, 12396, 12424, 12425, 12426, 12427, 27348, 12455, 12456, 12457, 27354, 12470, 12471, 12472, 7, 8, 9, 10, 11, 12, 13, 14, 15, 27366, 27368, 27374, 27377, 27379, 27382, 27386, 27390, 27397, 27401, 27403, 27406, 27411, 25715, 25722, 25725, 27421, 27425, 27427, 27429, 27433, 27435, 27439, 27442, 27444, 27446, 27448, 27452, 27454, 27458, 27462, 27464, 27467, 27472, 27474, 27477, 27479, 27482, 25805, 25813, 25818, 27491, 27493, 27497, 27500, 27502, 27505, 27507, 27509, 27512, 27514, 25863, 25868, 27545, 27550, 27554, 27559, 27562, 27565, 27567, 27570, 27573, 27574, 27577, 27582, 27585, 27587, 27591, 27593, 27596, 27598, 27604, 27605, 27607, 25970, 25974, 27613, 27616, 27618, 27624, 27626, 27628, 27630, 27633, 27638, 27641, 27643, 27645, 27648, 27651, 27655, 27657, 27659, 27661, 27664, 27666, 27668, 27671, 27676, 27679, 27682, 27684, 27687, 27690, 27692, 27695, 27698, 27701, 27715, 27719, 27724, 27727, 27729, 27731, 27734, 27737, 27742, 27744, 27746, 27749, 27754, 27756, 27758, 27761, 27764, 27769, 27771, 27773, 27777, 27779, 27783, 27785, 27787, 27791, 27793, 27797, 27801, 27804, 27807, 27809, 27811, 27814, 27817, 27820, 27822, 27824, 27832, 27834, 27837, 27841, 27843, 27846, 27848, 27851, 27857, 27860, 27865, 27868, 27870, 27874, 27880, 27887, 27890, 26287, 27893, 27895, 27897, 27902, 26302, 27905, 27907, 27913, 27927, 27931, 27935, 27939, 27946, 27948, 27961, 27964, 27971, 27973, 27975, 27977, 27979, 27982, 27985, 27990, 27992, 27994, 27997, 28000, 28005, 28007, 
28009, 28011, 28014, 28020, 28022, 28024, 28026, 28029, 28039, 28044, 28046, 28048, 28050, 28052, 28055, 28063, 28065, 28067, 28069, 28072, 28080, 28085, 28087, 26514, 28091, 28094, 28098, 28101, 28105, 28108, 28112, 26542, 28117, 28122, 26562, 28130, 28134, 26574, 28139, 28143, 28147, 28150, 28153, 28156, 28160, 28164, 28168, 28171, 28174, 28176, 28179, 26638, 28188, 28193, 26654, 26662, 28199, 26669, 28205, 28207, 28211, 26682, 28214, 28216, 28219, 26693, 26700, 26703, 28228, 28233, 28238, 28244, 28248, 28250, 28254, 28258, 28260, 28266, 28269, 26759, 28272, 28274, 28276, 28281, 26774, 28284, 28286, 28289, 28294, 28296, 28307, 28311, 28315, 28321, 28324, 28337, 28343, 28345, 28360, 28368, 28375, 28377, 26907, 26910, 28384, 28395, 28400, 28403, 28408, 28410, 28414, 28418, 28432, 28434, 28440, 28448, 28450, 28452, 28456, 28458, 28461, 28463, 28467, 28471, 28475, 28478, 28480, 28482, 28487, 28490, 28492, 28495, 28498, 28500, 28512, 28517, 28531, 28533, 28539, 28544, 28547, 28560, 28566, 28571, 27149, 28576, 28578, 28586, 27174, 28593, 28595, 28603, 27192, 28608, 28615, 28620, 27220, 27239, 28641, 27245, 28645, 28649, 28665, 28670, 28675, 28678, 28681, 28683, 28693, 28696, 28698, 28700, 25648, 27361, 27363, 27916, 27951, 27956, 27920, 27957, 27956, 27922, 27923, 27928, 27936, 27940, 26382, 27966, 27943, 27916, 27951, 27956, 27920, 27957, 27956, 27922, 27923, 27928, 27936, 27940, 26382, 27966, 27968, 26066, 28339, 25662, 25661, 27372, 27387, 28564, 25665, 25664, 27372, 28564, 27375, 26066, 28425, 28423, 26067, 28339, 28347, 27707, 27705, 27384, 28558, 27387, 27380, 27383, 26066, 28425, 28423, 26067, 28339, 28347, 27707, 27705, 27384, 28558, 27387, 27391, 27388, 27391, 25687, 25687, 25688, 28386, 28387, 27392, 28391, 28600, 28662, 28660, 28671, 28386, 28387, 28389, 28391, 28600, 28662, 28660, 28671, 25707, 25708, 27408, 28704, 25707, 25708, 28707, 25709, 27409, 27413, 27417, 27422, 27431, 27437, 27862, 27862, 27449, 27455, 27460, 27881, 27774, 27780, 28184, 27468, 27469, 27480, 27484, 27487, 28181, 28184, 27494, 27503, 28255, 27515, 27518, 27516, 27520, 28709, 28711, 27523, 27522, 27527, 27526, 27532, 27536, 27537, 27539, 27542, 27541, 28713, 28717, 27546, 27594, 25955, 27548, 27609, 27614, 25984, 27620, 25890, 11807, 11808, 27556, 27575, 27594, 25955, 27600, 27609, 27614, 25984, 27620, 25927, 11836, 11837, 27580, 27590, 27589, 27594, 25955, 27600, 27609, 27614, 25984, 27620, 27635, 27652, 27673, 26066, 26067, 27702, 27703, 27707, 27705, 27709, 26088, 26090, 27717, 28551, 27721, 28564, 27738, 27739, 27750, 27751, 27765, 27766, 28181, 28184, 28190, 28195, 27862, 27774, 27780, 27788, 27794, 28181, 28184, 28190, 28195, 27862, 27774, 27780, 27788, 27794, 27825, 27827, 27829, 28319, 27838, 27853, 27852, 27854, 28181, 28184, 28195, 28190, 27862, 28208, 27871, 27876, 27878, 27881, 27883, 27898, 27909, 26311, 27915, 27916, 27951, 27917, 27918, 27956, 27920, 27957, 27956, 27922, 27923, 27928, 27932, 27936, 27940, 26351, 27942, 27943, 27956, 27949, 27956, 27951, 27952, 27956, 27955, 27954, 27957, 26382, 27966, 27968, 27986, 27987, 28001, 28002, 28015, 28017, 28030, 28032, 28082, 28035, 28037, 28040, 28042, 28056, 28060, 28058, 28073, 28077, 28075, 28082, 28095, 28102, 28109, 28114, 28119, 28124, 28127, 28132, 28181, 28184, 28140, 28148, 28255, 28157, 28161, 28169, 28255, 28177, 28181, 28184, 28240, 28190, 28195, 28202, 28208, 28217, 28221, 28225, 28230, 28235, 28240, 28245, 28261, 28255, 28261, 28263, 28277, 12199, 28291, 28298, 28297, 28299, 26800, 28302, 28304, 12210, 26815, 12215, 28316, 26824, 
26829, 28326, 26836, 26838, 28329, 26842, 28332, 28334, 28338, 28339, 28347, 28430, 28436, 28430, 28436, 28353, 28354, 28355, 28356, 28357, 28358, 28361, 28554, 28552, 28364, 28365, 28369, 28554, 28552, 28378, 28386, 28387, 28389, 28391, 28472, 28397, 28454, 28459, 27013, 27015, 28411, 26957, 26955, 28425, 28423, 26963, 28428, 28436, 28442, 28551, 28564, 28454, 28459, 27013, 27015, 28472, 28449, 28454, 28459, 27015, 27013, 28472, 28484, 27060, 27058, 28507, 28505, 27066, 28514, 27083, 27081, 28524, 28522, 27089, 28527, 28535, 28541, 28550, 28551, 28554, 28552, 28556, 28558, 28562, 28564, 28568, 27157, 28635, 28583, 28691, 28596, 28598, 28600, 28610, 28612, 28617, 28622, 28624, 28628, 28630, 28632, 28634, 27234, 27231, 28646, 28650, 28652, 28654, 28656, 28658, 28662, 28660, 28671, 28685, 28687, 28689, 27281, 28737, 28685, 28687, 28689, 27305, 28741, 28738, 28742, 28738, 28742, 28738, 28742, 28715, 28714, 28742, 28728, 28730, 28728, 28730, 28731, 28733, 28738, 28742, 9, 10, 11, 12, 13, 14, 15, 27560, 27563, 27583, 25975, 27639, 27677, 27680, 27693, 27699, 27716, 27732, 27747, 27759, 27810, 27823, 27849, 27896, 27908, 27980, 27995, 28092, 28099, 28106, 28275, 28287, 28396, 28404, 28415, 28419, 28441, 28488, 28501, 28518, 28540, 28548, 28666, 28682, 11538, 11539, 11540, 28752, 11542, 11543, 11545, 11546, 11547, 11548, 11549, 11550, 27925, 11552, 27933, 11554, 27937, 11556, 11557, 11558, 11559, 28932, 11561, 11562, 11564, 11565, 11566, 11567, 11568, 11569, 27925, 11571, 27933, 11573, 27937, 11575, 11576, 11577, 11578, 28932, 11581, 11582, 28341, 11584, 11585, 11586, 11587, 11588, 28341, 11591, 11592, 11593, 28754, 11595, 11597, 11600, 11601, 11602, 11603, 11604, 28341, 29043, 11607, 11608, 11609, 11610, 11611, 28756, 11613, 11614, 28757, 11616, 11618, 11619, 11620, 11621, 11622, 28341, 29043, 11625, 11626, 11627, 11628, 11629, 28758, 11631, 11632, 11634, 29085, 28759, 11637, 11642, 11643, 11644, 29050, 11646, 11647, 11648, 11649, 11650, 11651, 11652, 27394, 11655, 28379, 28381, 29050, 11659, 11660, 11661, 11662, 11663, 11664, 11665, 28668, 11668, 11669, 11670, 28763, 28760, 27399, 28763, 28762, 11676, 11681, 11682, 11688, 11689, 28764, 11691, 28766, 11693, 11694, 27419, 28769, 28771, 28772, 11699, 28773, 11701, 25750, 28775, 11704, 25758, 28776, 11707, 11708, 28777, 28779, 11711, 28781, 11713, 28785, 28782, 11716, 28911, 11718, 28878, 28880, 11721, 28882, 28982, 11724, 25786, 11726, 28785, 11728, 28786, 28787, 11731, 28789, 11733, 25810, 11735, 28981, 28996, 11738, 26635, 11740, 11741, 28793, 28795, 28796, 11745, 11746, 28798, 28800, 28801, 11750, 11751, 11752, 11753, 11758, 11759, 27524, 27525, 27528, 11763, 11764, 27528, 27529, 27530, 11768, 27534, 11770, 11771, 11772, 27543, 11774, 11775, 27543, 27544, 28805, 28814, 11790, 28815, 11792, 28821, 11794, 11795, 27549, 28824, 28806, 11799, 11801, 28829, 11803, 11804, 27551, 11806, 28807, 11810, 28812, 28811, 28810, 27572, 28817, 28814, 11819, 28815, 11821, 28821, 11823, 11824, 27602, 28825, 28824, 11828, 11830, 28829, 11832, 11833, 27622, 11835, 29055, 11839, 29077, 29076, 29075, 27584, 28818, 28817, 11848, 11849, 28819, 11851, 28821, 11853, 11854, 27602, 28825, 28824, 11858, 11860, 28829, 11862, 11863, 27622, 28834, 28833, 28832, 28835, 11869, 28840, 28839, 28838, 28837, 27650, 11876, 28845, 28844, 28843, 28842, 27662, 28848, 28847, 28846, 28849, 11886, 28854, 28853, 28852, 27689, 11893, 11894, 28857, 11898, 11899, 11900, 11901, 11902, 11903, 11904, 11906, 11908, 11909, 29087, 11911, 28863, 27735, 11915, 11916, 28868, 27998, 11920, 11921, 28872, 
27762, 11925, 11926, 28906, 11928, 26635, 11930, 28998, 11932, 28999, 11934, 26252, 28908, 11937, 11938, 28878, 28880, 11941, 28882, 11943, 28883, 28885, 11946, 28887, 28906, 11949, 26635, 11951, 28998, 11953, 28999, 11955, 26252, 28908, 11958, 11959, 28878, 28880, 11962, 28882, 11964, 28883, 28885, 11967, 28887, 27799, 27802, 27805, 27812, 27815, 27818, 11977, 11978, 11979, 26219, 11981, 26222, 27835, 11984, 28901, 27844, 11988, 11989, 11990, 28906, 11992, 26635, 11994, 28999, 11996, 28998, 11998, 26252, 28908, 12001, 12002, 29004, 29006, 12005, 28911, 12007, 28912, 12009, 12010, 12011, 27885, 27888, 28915, 12016, 27900, 28920, 12020, 12021, 28923, 12023, 12024, 12025, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035, 27925, 12037, 27929, 12039, 27933, 12041, 27937, 12043, 12044, 12045, 12046, 28932, 12048, 28929, 28928, 12051, 12052, 12053, 12054, 12055, 12056, 12057, 12058, 27959, 27962, 12061, 12062, 12063, 28932, 28934, 27983, 12068, 12069, 28939, 27998, 12073, 12074, 28944, 28012, 28946, 12078, 12079, 28949, 28027, 28951, 12083, 12084, 28966, 12086, 28967, 28969, 12089, 28954, 12091, 12092, 28955, 12094, 28956, 28053, 28958, 12098, 12099, 12100, 28961, 28070, 28963, 12104, 12105, 12106, 28966, 12108, 28967, 28969, 12112, 12114, 12116, 28976, 26539, 12119, 28978, 12121, 28979, 12123, 26559, 12125, 28981, 12127, 28982, 12129, 26635, 12131, 12132, 28137, 28985, 28145, 12136, 12137, 28252, 28988, 28154, 12141, 12142, 28158, 28991, 28166, 12146, 12147, 28252, 29021, 28994, 12151, 28996, 12153, 26635, 12155, 26641, 12157, 28998, 12159, 28999, 12161, 26659, 29002, 26666, 12165, 12166, 29004, 29006, 29008, 12170, 29010, 12172, 29012, 12174, 29014, 12176, 29015, 12178, 28237, 12180, 12181, 28242, 29018, 12184, 12185, 28252, 29021, 12188, 12189, 28264, 28267, 29025, 12194, 28279, 29030, 29033, 12200, 26788, 26791, 12203, 12204, 12205, 12206, 12207, 12208, 29036, 26811, 28309, 12213, 28313, 12216, 12217, 28319, 12219, 29040, 12221, 12222, 12223, 12224, 12225, 12226, 12227, 26848, 12229, 12230, 28341, 29043, 12233, 12234, 12235, 12236, 12237, 12238, 12239, 12240, 12241, 12242, 12243, 28366, 12245, 12246, 12247, 12248, 12249, 28366, 12251, 12253, 12254, 28374, 12256, 28376, 28379, 28381, 29050, 12261, 12262, 12263, 12264, 12265, 12267, 29052, 29087, 29063, 29062, 12273, 29064, 12275, 29066, 12277, 12278, 29068, 28446, 29073, 29072, 29071, 29055, 12285, 29077, 29076, 29075, 28416, 12293, 12294, 12295, 12296, 12297, 12298, 28430, 29059, 12301, 12303, 12304, 12305, 29063, 29062, 12308, 29064, 12310, 29066, 12312, 12313, 29068, 28446, 12316, 29061, 29073, 29072, 29071, 12321, 29077, 29076, 29075, 28497, 29063, 29062, 12329, 29064, 12331, 29066, 12333, 12334, 29068, 28469, 12337, 29070, 29073, 29072, 29071, 12342, 29077, 29076, 29075, 28497, 12349, 12350, 12351, 12352, 12353, 28510, 29080, 12356, 12358, 12359, 12360, 12361, 12362, 12363, 28529, 29083, 12366, 12368, 29085, 12371, 12372, 12373, 12374, 12375, 12376, 29087, 12378, 12379, 12380, 29088, 29089, 29090, 27151, 29092, 12386, 12387, 29115, 12390, 29093, 12392, 29115, 28590, 29096, 28592, 12400, 12401, 12402, 29098, 29097, 29099, 12406, 12407, 12408, 29100, 29101, 12411, 12412, 29102, 12414, 12415, 12416, 29113, 12418, 28635, 12420, 12421, 29115, 28699, 28638, 28640, 28642, 28644, 12432, 29107, 12434, 12435, 12436, 12437, 12438, 12439, 12440, 28668, 12443, 29113, 12445, 12446, 12447, 28691, 12449, 29111, 29110, 29113, 12461, 12462, 12463, 28691, 12465, 29115, 28699, 27317, 27318, 27319, 28705, 27319, 12579, 28705, 12585, 28735, 28739, 
28735, 12644, 28739, 12650, 28735, 12664, 28739, 12670, 28735, 12685, 12686, 28739, 12692, 13048, 13049, 13062, 13063, 13076, 13077, 28735, 13091, 28739, 13097, 14, 15, 11541, 29642, 29644, 29647, 11551, 11553, 11555, 11560, 29660, 29662, 29665, 11570, 11572, 11574, 11579, 29628, 11583, 29681, 25663, 11590, 29687, 11594, 25669, 29634, 29628, 29694, 11605, 11606, 29701, 11612, 11615, 29628, 29711, 11623, 11624, 29718, 11630, 28366, 11635, 11636, 11645, 29738, 29635, 11654, 11656, 11657, 11658, 29750, 29635, 11667, 11671, 11672, 11673, 11674, 11675, 11690, 11692, 11695, 11696, 11697, 11698, 11700, 11702, 11703, 11705, 11706, 11709, 11710, 11712, 11714, 11715, 11717, 11719, 11720, 11722, 11723, 11725, 11727, 11729, 11730, 11732, 11734, 11736, 11737, 11739, 11742, 11743, 11744, 11747, 11748, 11749, 29827, 29830, 11760, 11761, 11762, 29835, 11765, 11766, 11767, 11769, 11773, 29846, 11784, 11785, 11788, 11789, 11791, 11793, 11796, 11797, 11798, 29603, 11802, 11805, 29267, 11809, 29600, 11812, 11813, 11814, 29601, 11816, 11817, 11818, 11820, 11822, 11825, 11826, 11827, 29603, 11831, 11834, 29279, 11838, 29627, 11841, 11842, 11843, 29602, 11845, 11846, 11847, 29900, 11850, 11852, 11855, 11856, 11857, 29603, 11861, 11864, 11865, 11866, 11867, 11868, 29604, 11871, 11872, 11873, 11874, 11875, 11877, 11878, 11879, 11880, 11881, 11882, 11883, 11884, 11885, 29605, 11888, 11889, 11890, 29606, 11892, 28366, 11896, 29608, 29946, 27713, 29634, 11910, 11912, 11913, 29610, 11917, 11918, 29611, 11922, 11923, 29612, 11927, 11929, 11931, 11933, 11935, 11936, 11939, 11940, 11942, 11944, 11945, 11947, 11948, 11950, 11952, 11954, 11956, 11957, 11960, 11961, 11963, 11965, 11966, 11968, 11969, 11970, 11971, 29613, 11973, 11974, 11975, 29614, 11980, 11982, 11983, 11985, 29615, 11987, 30026, 11991, 11993, 11995, 11997, 11999, 12000, 12003, 12004, 12006, 12008, 12012, 12013, 12014, 29616, 12017, 12018, 29617, 12022, 30060, 30064, 30067, 12036, 12038, 12040, 12042, 12047, 12049, 12050, 30086, 30089, 12059, 12060, 12064, 12065, 12066, 29618, 12070, 12071, 29619, 12075, 12076, 12077, 12080, 12081, 12082, 12085, 12087, 12088, 12090, 12093, 12095, 12096, 12097, 30131, 12101, 12102, 12103, 30137, 12107, 12109, 12110, 29620, 29621, 29622, 12117, 12118, 12120, 12122, 12124, 12126, 12128, 12130, 12133, 12134, 12135, 12138, 12139, 12140, 12143, 12144, 12145, 12148, 12149, 12150, 12152, 12154, 12156, 12158, 12160, 12162, 12163, 12164, 12167, 12168, 12169, 12171, 12173, 12175, 12177, 12179, 12182, 12183, 12186, 12187, 12190, 12191, 12192, 29623, 12195, 12196, 29624, 12198, 12201, 12202, 30229, 12209, 12211, 12212, 12214, 12218, 12220, 12228, 12231, 12232, 12244, 30270, 12250, 29634, 30276, 12255, 12257, 12258, 12259, 12260, 28393, 12268, 29626, 12270, 12271, 12272, 12274, 12276, 30298, 12279, 12280, 12281, 12282, 12283, 12284, 29627, 12287, 12288, 12289, 29630, 12291, 29628, 30311, 30313, 12299, 12300, 28438, 12306, 12307, 12309, 12311, 30329, 12314, 12315, 12317, 12318, 12319, 12320, 12322, 12323, 12324, 29630, 12326, 12327, 12328, 12330, 12332, 30349, 12335, 12336, 12338, 12339, 12340, 12341, 12343, 12344, 12345, 29630, 12347, 29631, 30363, 30365, 12354, 12355, 29632, 30371, 30373, 12364, 12365, 28537, 12369, 29634, 30384, 12377, 12381, 12382, 12383, 12384, 12385, 12388, 12391, 12393, 12397, 12398, 12399, 12403, 12404, 12405, 12409, 12410, 12413, 12417, 12419, 30427, 12422, 12423, 12428, 12429, 12430, 12431, 12433, 30442, 29635, 12442, 12444, 12448, 12450, 12451, 28680, 12460, 12464, 12466, 12467, 29637, 29655, 29673, 29683, 
29722, 30381, 29705, 30381, 29722, 30381, 12536, 12537, 30447, 30455, 29735, 29733, 30447, 12555, 30455, 12560, 29735, 29733, 29747, 29745, 30447, 12578, 30455, 12584, 12627, 12630, 30439, 30439, 30436, 30447, 12643, 30455, 12649, 30439, 30439, 30436, 30447, 12663, 30455, 12669, 30439, 30439, 29842, 30447, 12684, 30481, 30455, 12691, 30381, 29943, 30388, 30381, 30388, 29957, 29961, 29965, 30015, 30249, 30047, 30247, 30245, 30055, 30245, 30239, 30245, 30077, 30094, 30100, 30104, 30109, 30114, 30212, 30216, 30247, 30245, 30232, 30230, 30245, 30239, 30245, 30249, 30247, 30245, 30315, 30388, 30386, 30315, 30381, 30388, 30386, 30381, 30388, 30386, 30381, 30388, 30386, 30381, 30388, 30284, 30286, 30284, 30286, 30284, 30286, 30286, 30284, 30381, 30388, 30381, 30388, 30439, 30406, 30417, 30412, 30421, 30406, 30439, 30412, 30417, 30421, 30439, 30406, 30412, 30417, 30421, 30439, 30439, 30436, 30447, 13090, 30455, 13096, 30488, 30484, 30484, 30488, 30484, 30486, 30488, 14, 15, 11580, 11589, 11596, 11598, 11599, 11617, 11633, 11653, 11666, 30547, 30550, 30553, 29774, 29779, 29782, 30562, 30565, 30568, 29803, 30581, 30584, 30602, 30607, 11800, 29268, 11811, 11815, 30615, 30620, 30625, 11829, 29280, 11840, 11844, 30633, 30638, 30644, 11859, 30649, 11870, 30654, 30656, 30659, 30661, 30664, 11887, 11891, 30669, 11895, 11897, 11905, 11907, 11914, 11919, 11924, 29977, 30695, 30698, 29998, 30707, 30710, 11972, 11976, 11986, 30038, 30734, 30046, 12015, 12019, 30754, 30090, 12067, 12072, 30769, 30772, 30122, 30125, 30780, 30784, 12111, 12113, 12115, 30800, 30803, 30806, 30809, 30820, 30828, 30830, 12193, 12197, 12252, 30858, 12266, 12269, 30867, 30874, 12286, 12290, 30879, 12292, 12302, 30890, 30898, 12325, 30901, 30906, 30914, 12346, 30917, 12348, 12357, 12367, 12370, 30936, 30946, 30948, 30950, 12441, 30969, 12452, 12475, 29639, 30756, 30497, 30756, 30498, 30499, 30756, 30502, 30501, 30750, 30500, 12487, 29657, 30756, 30756, 30756, 30504, 30506, 30505, 30509, 30508, 30750, 30507, 12499, 29675, 30528, 30530, 29678, 30513, 12506, 29720, 30530, 29713, 30516, 12512, 29720, 12515, 30521, 30523, 29696, 30524, 12522, 29703, 12524, 30534, 30528, 30530, 29713, 30531, 12531, 29720, 12534, 30534, 12538, 30966, 30967, 12542, 30971, 30536, 30540, 12546, 12547, 30539, 12550, 30966, 30967, 12556, 30971, 30973, 30972, 30536, 30540, 12563, 12564, 30539, 30542, 30540, 12569, 12570, 30545, 12573, 30966, 30967, 12580, 30971, 30973, 30972, 30934, 30934, 30551, 30825, 30552, 30557, 30825, 29786, 29791, 29796, 30571, 30825, 30690, 30819, 30572, 30575, 30576, 30825, 30577, 30578, 30825, 30579, 30825, 30580, 30583, 30586, 30587, 30597, 30600, 30599, 30434, 30960, 30958, 12634, 12635, 12636, 30965, 12639, 30966, 30589, 12645, 30971, 30590, 30589, 30434, 30960, 30958, 12654, 12655, 12656, 30965, 12659, 30966, 30591, 12665, 30971, 30594, 30593, 30434, 30960, 30596, 30595, 12675, 12676, 12677, 30965, 12680, 30966, 30597, 12687, 30971, 30600, 30599, 30604, 30603, 29856, 30609, 29864, 30622, 30621, 29880, 30627, 29888, 12715, 30934, 30641, 30640, 29905, 30646, 29913, 30885, 30851, 30253, 12740, 30676, 12743, 30386, 30885, 30851, 30253, 12750, 30934, 12753, 30386, 12756, 12758, 12760, 30689, 30825, 30690, 30692, 30691, 29981, 29986, 30701, 30825, 30702, 30704, 30703, 30002, 30007, 30713, 30717, 12785, 30019, 30017, 12788, 30723, 30727, 30728, 30825, 30729, 30731, 30730, 30042, 12801, 30738, 30052, 12806, 12807, 12808, 12809, 30745, 12811, 12812, 30756, 30756, 30756, 30747, 30748, 30746, 30752, 30751, 30750, 30749, 12823, 30079, 30756, 
30758, 12829, 30096, 12832, 12834, 12836, 12838, 30774, 30773, 30129, 30135, 30787, 30786, 30792, 30825, 30793, 30795, 30794, 30796, 30797, 30825, 30819, 30798, 30825, 30799, 30802, 30805, 30808, 30811, 30812, 30825, 30813, 30814, 30816, 30815, 30819, 30817, 30822, 30823, 30825, 30824, 30826, 30825, 30827, 12889, 12891, 30832, 30221, 30839, 30225, 12898, 12899, 30841, 12901, 12902, 12903, 30843, 30844, 30846, 12907, 30847, 12909, 30845, 30848, 12912, 12913, 12914, 30849, 30869, 30868, 30871, 30885, 30851, 30253, 30869, 30868, 30871, 30885, 30887, 12937, 30853, 12939, 12940, 30887, 12942, 12943, 30932, 12945, 12946, 12947, 30932, 12949, 12950, 30852, 12952, 30932, 30853, 12955, 12956, 30854, 12960, 30856, 12963, 30386, 12966, 12967, 12969, 12970, 12971, 12972, 30861, 30859, 12975, 12976, 30869, 30868, 30871, 30885, 30887, 30315, 30934, 30386, 30869, 30868, 30871, 30885, 30887, 30315, 13004, 30934, 13006, 30892, 30891, 30894, 30332, 30908, 30907, 30910, 30352, 30923, 30925, 30375, 30928, 30930, 30375, 13030, 30934, 13033, 30386, 13036, 13037, 13039, 13041, 30953, 13043, 30940, 30938, 30941, 30397, 13051, 13052, 13054, 13056, 30953, 13058, 30952, 30943, 30401, 13065, 13066, 13068, 13070, 30953, 13072, 30952, 30956, 30954, 30434, 30960, 30958, 13081, 13082, 13083, 30965, 13086, 30966, 30967, 13092, 30971, 30973, 30972, 30985, 30986, 30992, 30994, 31000, 31002, 31003, 31004, 31009, 31011, 31016, 31018, 31023, 31026, 13210, 13215, 13347, 13353, 31108, 31110, 13382, 13387, 13392, 31108, 31110, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31146, 31153, 30650, 31161, 31163, 30665, 31166, 31172, 31173, 31174, 30726, 30755, 31191, 31192, 30875, 31218, 30899, 31224, 30915, 31228, 12476, 12477, 12478, 12479, 12480, 12481, 12482, 12483, 12484, 12485, 12486, 12488, 12489, 12490, 12491, 12492, 12493, 12494, 12495, 12496, 12497, 12498, 12500, 12501, 31120, 12503, 12504, 12505, 12507, 31121, 12509, 12510, 12511, 12513, 31122, 30534, 12517, 31124, 12519, 12520, 12521, 12523, 12525, 12526, 31125, 12528, 12529, 12530, 12532, 31126, 12535, 12539, 31239, 12541, 12543, 12544, 12545, 31303, 30537, 12549, 12551, 12552, 31130, 31129, 12557, 12558, 12559, 12561, 12562, 31315, 30537, 12566, 12567, 12568, 31320, 30543, 12572, 12574, 12575, 31130, 31129, 12581, 12582, 12583, 31168, 12587, 31168, 12589, 12590, 12591, 12592, 31131, 30555, 12595, 12596, 30558, 30560, 31135, 12600, 31136, 12602, 31137, 12604, 12605, 12606, 12607, 12608, 12609, 31138, 12611, 12612, 12613, 12614, 12615, 12616, 12617, 12618, 12619, 31139, 12621, 31140, 12623, 12624, 30598, 12626, 12628, 12629, 12631, 12632, 12633, 31363, 30963, 12638, 12640, 30588, 12642, 12646, 12647, 12648, 12651, 12652, 12653, 31377, 30963, 12658, 12660, 30592, 12662, 12666, 12667, 12668, 12671, 12672, 12673, 12674, 31392, 30963, 12679, 12681, 30598, 12683, 12688, 12689, 12690, 12693, 12694, 31141, 12696, 12697, 31143, 31142, 12700, 29868, 31144, 12704, 12705, 31148, 12707, 12708, 31150, 31149, 12711, 29892, 31151, 30932, 12717, 12718, 12719, 31155, 12721, 12722, 31157, 31156, 12725, 29919, 29925, 29935, 12735, 31220, 12737, 12738, 31168, 30674, 12742, 12744, 12745, 31220, 12747, 12748, 31170, 30932, 12752, 12754, 12761, 12762, 12763, 12764, 12765, 30693, 31176, 12768, 31177, 12770, 12771, 12772, 12773, 12774, 12775, 30705, 31179, 12778, 31180, 12780, 12781, 30715, 12783, 30719, 12786, 12787, 12789, 12791, 12792, 12793, 12794, 12795, 12796, 30732, 31185, 12799, 30044, 12802, 30740, 12804, 30743, 31468, 31470, 12810, 31473, 12813, 12814, 12815, 12816, 12817, 12818, 12819, 12820, 12821, 
12822, 12824, 31190, 12826, 12828, 12830, 30767, 30770, 12839, 12840, 30123, 30120, 30778, 12844, 30782, 12846, 12847, 12848, 31201, 31200, 31199, 12852, 12853, 12854, 12855, 12856, 12857, 12858, 12859, 12860, 12861, 12862, 12863, 31202, 12865, 31203, 12867, 31204, 12869, 31205, 12871, 12872, 12873, 12874, 12875, 12876, 12877, 12878, 12879, 31206, 12881, 12882, 12883, 12884, 12885, 12886, 12887, 31207, 31208, 12892, 30834, 12894, 30837, 12896, 12897, 12900, 31541, 12904, 12905, 12906, 12908, 12910, 12911, 31552, 12915, 12916, 12917, 31215, 12919, 30287, 12923, 31220, 12925, 12926, 12927, 12928, 31215, 12930, 30287, 12934, 31220, 12936, 12938, 31569, 12941, 12944, 31575, 12948, 31579, 12951, 12953, 12954, 31585, 30287, 12959, 30932, 12962, 12964, 30857, 31592, 31237, 31594, 31596, 12973, 12974, 31600, 12977, 12978, 31215, 12980, 30287, 12984, 31220, 12986, 12987, 31213, 30863, 12990, 12991, 12992, 12993, 31215, 12995, 30305, 12999, 31220, 13001, 13002, 31221, 13005, 13007, 13008, 31222, 13010, 13012, 13014, 13015, 31226, 13017, 13019, 13021, 31230, 13023, 13024, 13025, 31231, 13027, 13028, 31232, 30932, 13032, 13034, 30944, 31637, 31234, 31236, 13042, 13044, 13045, 13046, 13047, 30944, 31647, 31236, 31237, 13057, 13059, 13060, 13061, 30944, 31656, 31236, 31237, 13071, 13073, 13074, 13075, 13078, 13079, 13080, 31668, 30963, 13085, 13087, 31239, 13089, 13093, 13094, 13095, 13116, 13117, 13123, 13125, 13131, 13133, 13176, 13178, 13182, 13184, 13188, 13190, 13194, 13196, 13357, 13359, 13396, 13398, 8, 9, 10, 11, 12, 13, 14, 15, 31712, 31713, 31718, 31727, 31729, 31731, 31734, 31736, 31738, 31740, 31742, 31745, 31747, 31749, 31751, 31753, 12502, 31758, 31760, 12508, 31763, 31765, 12514, 12516, 12518, 31771, 31773, 31774, 12527, 31778, 31780, 12533, 31782, 31783, 12540, 31786, 31788, 12548, 31792, 12553, 12554, 31796, 31798, 31800, 12565, 31805, 12571, 31809, 12576, 12577, 31813, 31815, 12586, 12588, 31821, 12593, 12594, 31826, 12597, 12598, 12599, 12601, 12603, 31836, 31839, 12610, 31843, 31846, 31848, 12620, 12622, 12625, 31858, 31860, 31364, 12637, 31865, 12641, 31868, 31870, 31872, 31378, 12657, 31877, 12661, 31880, 31882, 31884, 31886, 31393, 12678, 31890, 12682, 31893, 31895, 12695, 31897, 12698, 12699, 12701, 12702, 12706, 31907, 12709, 12710, 12712, 12713, 12716, 12720, 31919, 12723, 12724, 12726, 31714, 31715, 12729, 31717, 31716, 12732, 31717, 12736, 31932, 12739, 12741, 31936, 12746, 31940, 12749, 12751, 31944, 30680, 30683, 30686, 31946, 31949, 12766, 12767, 12769, 31956, 31959, 12776, 12777, 12779, 12782, 12784, 31970, 30724, 31974, 31977, 12797, 12798, 12800, 12803, 12805, 31991, 31993, 31995, 31997, 31999, 12825, 31723, 30761, 30764, 12835, 12837, 32008, 12841, 12842, 12843, 12845, 32016, 12849, 12850, 12851, 32021, 32024, 32027, 32030, 12864, 12866, 12868, 12870, 32041, 32045, 32047, 12880, 32051, 32054, 12888, 12890, 12893, 12895, 31537, 31542, 31546, 31548, 31553, 12918, 32075, 31726, 12921, 12924, 32082, 12929, 32084, 31726, 12932, 12935, 31566, 31571, 32094, 32096, 32099, 31726, 12958, 12961, 32106, 12965, 12968, 32113, 12979, 32116, 31726, 12982, 12985, 32123, 12988, 12989, 12994, 32129, 12996, 31726, 13000, 32136, 13003, 13009, 32140, 31728, 13016, 32145, 31730, 13022, 32152, 13026, 32156, 13029, 13031, 32160, 13035, 13038, 13040, 31641, 32167, 32169, 13050, 13053, 13055, 31651, 32177, 13064, 13067, 13069, 31660, 32185, 32187, 31669, 13084, 32192, 13088, 32195, 32197, 31241, 31253, 31265, 31484, 31488, 31971, 31452, 31455, 31455, 31484, 31488, 31986, 31986, 31989, 31987, 
31986, 31484, 31488, 32091, 32100, 32100, 32100, 32126, 32138, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32231, 32234, 32236, 32239, 32240, 32247, 32248, 32252, 31785, 32263, 32272, 31856, 31861, 31867, 31873, 31879, 32312, 31892, 32319, 32321, 32324, 32224, 32325, 32327, 32330, 32225, 32331, 32332, 32334, 12727, 12728, 12730, 12731, 12733, 32226, 32344, 32347, 32349, 32352, 12755, 12757, 12759, 12790, 32379, 32382, 12827, 32002, 12831, 12833, 32391, 32396, 31538, 32419, 32421, 12920, 32227, 32425, 32427, 12931, 32227, 32431, 12957, 32439, 32444, 12981, 32227, 32448, 32452, 12997, 32227, 32456, 32459, 13011, 32228, 32462, 13018, 32229, 32465, 32467, 32470, 32476, 32175, 32183, 32188, 32194, 13098, 13100, 13102, 31759, 31764, 32255, 31772, 32255, 31779, 32259, 32260, 32261, 32265, 32267, 32268, 32269, 32270, 32274, 31817, 31819, 32411, 32278, 32413, 32279, 32411, 32371, 32400, 32281, 32286, 32285, 32284, 32358, 32411, 32287, 32289, 32411, 32292, 32291, 32290, 32294, 32293, 31854, 32302, 32299, 32302, 32306, 32309, 32314, 32317, 31917, 32483, 32485, 32484, 32472, 32474, 32473, 31935, 31943, 13228, 13230, 32388, 32387, 32393, 32392, 32358, 32411, 32357, 32361, 32360, 32363, 32411, 32362, 32366, 32365, 32411, 32410, 32413, 32412, 31984, 31967, 31965, 13257, 13258, 13259, 13260, 32369, 13263, 13265, 32388, 32387, 32393, 32392, 32372, 32411, 32371, 32374, 32413, 32412, 31984, 31982, 13281, 13282, 13283, 13284, 13285, 13287, 13289, 32388, 32387, 32393, 32392, 32411, 32398, 32399, 32400, 32401, 32405, 32404, 32403, 32402, 32407, 32411, 32406, 32409, 32411, 32410, 32413, 32412, 32060, 32058, 32417, 32420, 32420, 32420, 32159, 13331, 32098, 13334, 32098, 13336, 32098, 13338, 32098, 32105, 32441, 32442, 32474, 32478, 32483, 32485, 32484, 32443, 32490, 32493, 13363, 32450, 13368, 32458, 32159, 32472, 32474, 32473, 32478, 32480, 32479, 32483, 32485, 32484, 32490, 32493, 15, 32232, 32237, 32264, 32273, 32322, 12703, 32328, 12714, 32335, 32557, 32559, 32561, 12734, 32380, 32573, 32397, 32070, 32424, 12922, 32430, 12933, 32438, 32447, 12983, 32596, 12998, 32143, 13013, 32148, 13020, 32532, 32243, 13105, 32535, 32246, 13108, 32534, 13110, 13111, 32535, 13113, 13114, 32262, 32257, 13119, 13120, 13121, 13124, 13126, 13127, 13128, 13129, 13132, 32546, 32550, 32563, 13141, 32276, 13143, 32277, 13145, 13146, 13147, 13148, 13149, 13150, 13151, 13152, 13153, 13154, 13155, 13156, 13157, 13158, 13159, 13160, 13161, 13162, 13163, 13164, 13165, 32546, 32550, 32563, 32346, 13174, 32300, 13177, 32540, 13180, 32300, 13183, 32542, 13186, 32307, 13189, 32544, 13192, 32315, 13195, 32546, 32550, 32563, 13204, 32346, 13206, 13207, 13208, 32610, 13211, 13212, 13213, 32608, 32555, 32563, 13222, 32346, 32565, 13225, 32351, 32567, 32568, 13233, 13234, 32569, 32389, 13237, 13238, 13240, 13241, 13242, 13243, 13244, 13245, 13246, 13247, 13248, 13249, 13250, 13251, 13252, 13253, 13254, 13255, 13256, 13261, 32695, 32697, 32570, 13267, 13268, 32389, 13270, 13271, 13273, 13274, 13275, 13276, 13277, 13278, 13279, 13280, 32714, 32716, 13290, 13291, 32576, 32575, 32389, 13295, 13296, 13298, 13299, 13300, 13301, 13302, 13303, 13304, 13305, 13306, 13307, 13308, 13309, 13310, 13311, 13312, 13313, 13314, 13315, 13316, 13318, 13319, 13320, 32579, 13322, 32581, 32584, 13326, 32469, 32585, 32588, 13332, 32598, 13335, 13337, 13339, 13341, 32103, 13343, 13344, 13345, 32608, 13348, 13349, 13350, 13351, 32610, 13354, 13355, 32491, 13358, 32591, 32594, 13364, 32595, 32598, 13369, 32599, 32602, 32606, 32605, 13376, 32469, 13378, 13379, 13380, 32608, 13383, 
13384, 13385, 32609, 13388, 13389, 13390, 32610, 32611, 13394, 32491, 13397, 9, 10, 11, 12, 13, 14, 15, 32784, 32785, 13103, 13104, 13106, 13107, 13109, 32822, 13112, 32825, 13115, 13118, 32262, 32271, 32788, 13135, 32548, 32790, 13138, 32552, 13140, 13142, 13144, 32845, 32847, 32849, 32851, 32853, 32856, 32860, 32862, 32864, 32788, 13167, 32548, 32790, 13170, 32552, 13172, 13173, 13175, 13179, 13181, 13185, 13187, 13191, 13193, 32788, 13198, 32548, 32790, 13201, 32552, 13203, 13205, 32891, 13209, 32895, 13214, 32792, 13217, 32795, 32794, 32793, 13221, 13223, 13224, 13226, 32797, 32798, 13231, 13232, 13235, 32907, 13236, 32911, 32394, 32913, 32916, 32918, 32921, 32923, 32925, 32927, 32931, 32797, 32798, 13266, 13269, 32937, 32394, 32939, 32943, 32945, 32947, 32797, 32798, 13292, 13293, 32949, 13294, 32954, 32394, 32956, 32958, 32961, 32963, 32965, 32969, 32971, 32973, 32800, 13321, 32976, 13323, 32801, 13325, 13327, 13328, 32803, 13330, 32985, 13333, 32987, 32988, 32989, 32805, 13342, 32994, 13346, 32999, 13352, 13356, 13360, 32806, 13362, 33007, 13365, 32808, 13367, 33010, 13370, 32810, 13372, 32812, 13374, 13375, 13377, 33019, 13381, 33023, 13386, 33027, 13391, 13393, 13395, 32831, 32828, 32834, 32829, 32831, 32834, 32832, 32836, 33032, 32871, 32875, 32879, 32883, 33001, 33004, 33001, 33004, 33032, 14, 15, 13099, 13101, 32816, 32819, 13122, 13130, 13134, 13136, 13137, 13139, 33061, 33062, 33066, 32854, 32857, 33070, 13166, 13168, 13169, 13171, 32869, 13197, 13199, 13200, 13202, 33094, 13216, 13218, 13219, 13220, 33105, 33107, 13227, 13229, 33111, 13239, 32914, 32919, 32928, 32929, 13262, 13264, 32933, 13272, 32940, 32717, 13286, 13288, 33138, 13297, 33144, 33146, 32966, 13317, 32978, 13324, 33157, 13329, 13340, 33167, 13361, 13366, 13371, 13373, 33186, 33187, 33121, 33148, 33048, 33046, 32992, 33169, 13423, 33050, 13425, 33051, 32997, 33171, 13429, 13430, 13431, 13433, 13434, 13435, 33017, 33189, 33025, 33193, 33194, 13446, 33195, 33063, 33148, 13467, 33080, 33081, 13470, 33082, 33083, 13473, 33084, 33085, 13476, 33086, 32889, 33096, 32893, 33098, 32992, 33169, 32997, 33171, 13492, 13493, 33172, 33121, 33148, 33148, 33160, 33162, 33179, 33179, 32992, 33169, 32996, 32997, 33171, 13532, 13533, 33172, 33175, 33179, 33017, 33189, 33021, 33191, 33025, 33193, 33194, 13549, 33195, 13, 14, 15, 33055, 33058, 33073, 33076, 33088, 33091, 33100, 33244, 33113, 32934, 33264, 32959, 32974, 33249, 33248, 33115, 33228, 13404, 33254, 33263, 33217, 33216, 33141, 13411, 33231, 33230, 13417, 13418, 33044, 33042, 13421, 13422, 13424, 13426, 13427, 13428, 33295, 33220, 33298, 33221, 33060, 33106, 13441, 13442, 13443, 13444, 13445, 13447, 33249, 33248, 33115, 33228, 13453, 33254, 33263, 33262, 33141, 13459, 33231, 33230, 33078, 13468, 13469, 13471, 13472, 13474, 13475, 13477, 33093, 13481, 13482, 13483, 13484, 33104, 33106, 13488, 13489, 13490, 13491, 13494, 33249, 33248, 33115, 13499, 33253, 33252, 33254, 33257, 33256, 33129, 13507, 33260, 33133, 33263, 33262, 33141, 13514, 33268, 33154, 33156, 33158, 13521, 13522, 13523, 13524, 33177, 33179, 13527, 13528, 13529, 13530, 13531, 13534, 33173, 13536, 33177, 13538, 33183, 33181, 33280, 13542, 13543, 13544, 13545, 13546, 13547, 13548, 13550, 33245, 33112, 33153, 13399, 13400, 13401, 13403, 13405, 13406, 13407, 13408, 13409, 33370, 33371, 13413, 13414, 13419, 13420, 33387, 33392, 33393, 13432, 13436, 33361, 33360, 13439, 13440, 33407, 13448, 13449, 13450, 13452, 13454, 13455, 13456, 13457, 33370, 33371, 13461, 13462, 33363, 33362, 13466, 33421, 33423, 33425, 33427, 
33365, 33364, 13480, 13486, 13487, 33439, 13495, 13496, 13497, 13500, 13501, 13502, 13503, 13504, 13505, 33369, 13508, 13509, 13510, 13511, 13512, 33370, 13515, 33371, 13518, 13519, 13520, 13525, 13526, 33472, 13535, 13537, 13539, 13540, 13541, 33487, 33469, 33390, 33394, 33404, 33482, 33402, 33482, 33431, 33429, 33437, 33469, 33435, 33470, 33469, 33467, 33484, 33482, 33480, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33270, 33492, 33489, 33377, 33497, 13410, 13412, 33503, 33366, 33505, 33509, 33510, 13437, 13438, 33517, 33489, 33412, 33522, 13458, 13460, 33527, 13464, 13465, 13478, 13479, 33366, 33542, 33489, 33544, 33548, 13506, 33551, 33554, 13513, 13516, 33557, 33568, 33566, 13558, 13559, 33396, 33396, 13563, 33566, 33406, 13567, 13568, 13569, 33566, 33426, 33424, 33422, 33422, 33566, 13582, 13583, 13584, 33566, 33328, 13588, 13589, 13590, 33562, 33561, 33559, 33562, 33562, 33562, 33343, 13604, 13605, 13606, 33566, 33565, 33486, 13611, 13612, 13613, 14, 15, 13402, 33498, 33605, 33606, 33150, 13416, 33609, 33613, 13451, 33618, 33619, 33150, 33622, 33624, 13485, 13498, 33545, 33630, 33633, 33634, 33150, 33603, 13555, 33396, 13560, 13561, 33398, 13564, 13566, 33647, 33616, 13574, 13576, 13577, 13578, 13579, 13580, 33655, 13585, 13587, 33661, 33631, 13597, 13598, 13599, 13600, 13601, 13602, 13603, 33671, 33636, 13608, 13609, 13610, 33677, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33680, 33607, 13415, 33688, 33620, 13463, 33695, 13517, 13552, 33681, 33685, 13557, 13562, 33704, 33687, 33645, 13571, 33617, 33692, 33713, 33715, 33693, 33656, 33694, 33659, 33696, 33629, 13594, 33632, 33723, 33725, 33727, 33669, 13607, 33675, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33601, 13553, 33745, 13556, 33638, 33756, 13565, 33709, 33614, 13572, 33748, 13575, 33764, 13581, 13586, 33720, 33626, 13592, 13593, 13595, 33699, 33774, 33729, 33731, 33734, 9, 10, 11, 12, 13, 14, 15, 13551, 13554, 33795, 33757, 33642, 33798, 13570, 13573, 33803, 33805, 33806, 13591, 13596, 33775, 33732, 33810, 33810, 33810, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33828, 33793, 13615, 33824, 33829, 33801, 13620, 33830, 33834, 33833, 33832, 33811, 13626, 33835, 33838, 33837, 13614, 13616, 13617, 33826, 13619, 13621, 13622, 13623, 13624, 13625, 13627, 13628, 13629, 13, 14, 15, 33858, 13618, 33862, 33879, 33868, 33884, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33873, 33889, 33877, 33880, 33882, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33908, 33906, 33904, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13630, 13631, 13632, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33937, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33938, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
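// h_Op: host-side boolean flag array, generated alongside the index data above; every entry visible in this initializer is 1 (true).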
bool h_Op[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
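/* End of the generated constant tables. The long 0/1 table above is
 * presumably the host-side gate-selector data (1 = multiply, 0 = add)
 * that feeds the Op argument of ac() below. */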
#define THREADS_PER_BLOCK 16
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 13648
#define SIZE_OF_AC 20336
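/*
 * Sizing (derived from the constants above): with THREADS_PER_BLOCK = 16
 * and BLOCKS_PER_GRID = 1 there are 16 threads in total, so
 *   SIZE_OF_IN / 16 = 13648 / 16 = 853  input values per thread,
 *   SIZE_OF_AC / 16 = 20336 / 16 = 1271 gate outputs per thread,
 * and 853 + 1271 = 2124 is exactly the per-thread row size of the
 * shared array R in ac() below.
 */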
/*
 * ac() evaluates a fixed arithmetic circuit emitted by a code generator:
 * each thread stages its 853 input values into shared memory, then
 * (n_iter times) evaluates the gates, where Op[k] selects between
 * multiply and add and B[k]/C[k] index the two operands inside R.
 */
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* One R slot per input (853) and per gate (1271) for each of the 16
 * threads. Caution: 2124*16 floats is ~133 KB of static shared memory,
 * well above the 48 KB static limit, so this likely will not build for
 * real GPU targets as written. */
__shared__ float R[2124*THREADS_PER_BLOCK];
const int t = THREADS_PER_BLOCK;
__shared__ float final;
final = 0;  /* every thread writes the same value (benign race);
             * presumably accumulates the circuit output further below */
/* Stage this thread's 853 input values from A into shared memory. */
#pragma unroll
for (int k = 0; k < 853; k++)
    R[i + k*t] = A[i + k*t];
__syncthreads();  /* all inputs staged before any gate is evaluated */
for (int iter = 0; iter < n_iter; iter++) {
/* First wave of gates: gate k writes slot 853+k; Op selects multiply
 * vs. add over the two operands addressed through the index tables B, C. */
for (int k = 0; k < 240; k++)
    R[i + (853 + k)*t] = Op[i + k*t] ? R[B[i + k*t]] * R[C[i + k*t]]
                                     : R[B[i + k*t]] + R[C[i + k*t]];
__syncthreads();  /* make the first wave's results visible to all threads */
/* Second wave of gates (240 onward), evaluated after the barrier so
 * they can consume first-wave results produced by other threads. */
for (int k = 240; k < 312; k++)
    R[i + (853 + k)*t] = Op[i + k*t] ? R[B[i + k*t]] * R[C[i + k*t]]
                                     : R[B[i + k*t]] + R[C[i + k*t]];
R[i + 1165*t] = Op[i + 312*t] ? R[B[i + 312*t]] * R[C[i + 312*t]] : R[B[i + 312*t]] + R[C[i + 312*t]];
R[i + 1166*t] = Op[i + 313*t] ? R[B[i + 313*t]] * R[C[i + 313*t]] : R[B[i + 313*t]] + R[C[i + 313*t]];
R[i + 1167*t] = Op[i + 314*t] ? R[B[i + 314*t]] * R[C[i + 314*t]] : R[B[i + 314*t]] + R[C[i + 314*t]];
R[i + 1168*t] = Op[i + 315*t] ? R[B[i + 315*t]] * R[C[i + 315*t]] : R[B[i + 315*t]] + R[C[i + 315*t]];
R[i + 1169*t] = Op[i + 316*t] ? R[B[i + 316*t]] * R[C[i + 316*t]] : R[B[i + 316*t]] + R[C[i + 316*t]];
R[i + 1170*t] = Op[i + 317*t] ? R[B[i + 317*t]] * R[C[i + 317*t]] : R[B[i + 317*t]] + R[C[i + 317*t]];
R[i + 1171*t] = Op[i + 318*t] ? R[B[i + 318*t]] * R[C[i + 318*t]] : R[B[i + 318*t]] + R[C[i + 318*t]];
R[i + 1172*t] = Op[i + 319*t] ? R[B[i + 319*t]] * R[C[i + 319*t]] : R[B[i + 319*t]] + R[C[i + 319*t]];
R[i + 1173*t] = Op[i + 320*t] ? R[B[i + 320*t]] * R[C[i + 320*t]] : R[B[i + 320*t]] + R[C[i + 320*t]];
R[i + 1174*t] = Op[i + 321*t] ? R[B[i + 321*t]] * R[C[i + 321*t]] : R[B[i + 321*t]] + R[C[i + 321*t]];
R[i + 1175*t] = Op[i + 322*t] ? R[B[i + 322*t]] * R[C[i + 322*t]] : R[B[i + 322*t]] + R[C[i + 322*t]];
R[i + 1176*t] = Op[i + 323*t] ? R[B[i + 323*t]] * R[C[i + 323*t]] : R[B[i + 323*t]] + R[C[i + 323*t]];
R[i + 1177*t] = Op[i + 324*t] ? R[B[i + 324*t]] * R[C[i + 324*t]] : R[B[i + 324*t]] + R[C[i + 324*t]];
R[i + 1178*t] = Op[i + 325*t] ? R[B[i + 325*t]] * R[C[i + 325*t]] : R[B[i + 325*t]] + R[C[i + 325*t]];
R[i + 1179*t] = Op[i + 326*t] ? R[B[i + 326*t]] * R[C[i + 326*t]] : R[B[i + 326*t]] + R[C[i + 326*t]];
R[i + 1180*t] = Op[i + 327*t] ? R[B[i + 327*t]] * R[C[i + 327*t]] : R[B[i + 327*t]] + R[C[i + 327*t]];
R[i + 1181*t] = Op[i + 328*t] ? R[B[i + 328*t]] * R[C[i + 328*t]] : R[B[i + 328*t]] + R[C[i + 328*t]];
R[i + 1182*t] = Op[i + 329*t] ? R[B[i + 329*t]] * R[C[i + 329*t]] : R[B[i + 329*t]] + R[C[i + 329*t]];
R[i + 1183*t] = Op[i + 330*t] ? R[B[i + 330*t]] * R[C[i + 330*t]] : R[B[i + 330*t]] + R[C[i + 330*t]];
R[i + 1184*t] = Op[i + 331*t] ? R[B[i + 331*t]] * R[C[i + 331*t]] : R[B[i + 331*t]] + R[C[i + 331*t]];
R[i + 1185*t] = Op[i + 332*t] ? R[B[i + 332*t]] * R[C[i + 332*t]] : R[B[i + 332*t]] + R[C[i + 332*t]];
R[i + 1186*t] = Op[i + 333*t] ? R[B[i + 333*t]] * R[C[i + 333*t]] : R[B[i + 333*t]] + R[C[i + 333*t]];
R[i + 1187*t] = Op[i + 334*t] ? R[B[i + 334*t]] * R[C[i + 334*t]] : R[B[i + 334*t]] + R[C[i + 334*t]];
R[i + 1188*t] = Op[i + 335*t] ? R[B[i + 335*t]] * R[C[i + 335*t]] : R[B[i + 335*t]] + R[C[i + 335*t]];
R[i + 1189*t] = Op[i + 336*t] ? R[B[i + 336*t]] * R[C[i + 336*t]] : R[B[i + 336*t]] + R[C[i + 336*t]];
R[i + 1190*t] = Op[i + 337*t] ? R[B[i + 337*t]] * R[C[i + 337*t]] : R[B[i + 337*t]] + R[C[i + 337*t]];
R[i + 1191*t] = Op[i + 338*t] ? R[B[i + 338*t]] * R[C[i + 338*t]] : R[B[i + 338*t]] + R[C[i + 338*t]];
R[i + 1192*t] = Op[i + 339*t] ? R[B[i + 339*t]] * R[C[i + 339*t]] : R[B[i + 339*t]] + R[C[i + 339*t]];
R[i + 1193*t] = Op[i + 340*t] ? R[B[i + 340*t]] * R[C[i + 340*t]] : R[B[i + 340*t]] + R[C[i + 340*t]];
R[i + 1194*t] = Op[i + 341*t] ? R[B[i + 341*t]] * R[C[i + 341*t]] : R[B[i + 341*t]] + R[C[i + 341*t]];
R[i + 1195*t] = Op[i + 342*t] ? R[B[i + 342*t]] * R[C[i + 342*t]] : R[B[i + 342*t]] + R[C[i + 342*t]];
R[i + 1196*t] = Op[i + 343*t] ? R[B[i + 343*t]] * R[C[i + 343*t]] : R[B[i + 343*t]] + R[C[i + 343*t]];
R[i + 1197*t] = Op[i + 344*t] ? R[B[i + 344*t]] * R[C[i + 344*t]] : R[B[i + 344*t]] + R[C[i + 344*t]];
R[i + 1198*t] = Op[i + 345*t] ? R[B[i + 345*t]] * R[C[i + 345*t]] : R[B[i + 345*t]] + R[C[i + 345*t]];
R[i + 1199*t] = Op[i + 346*t] ? R[B[i + 346*t]] * R[C[i + 346*t]] : R[B[i + 346*t]] + R[C[i + 346*t]];
R[i + 1200*t] = Op[i + 347*t] ? R[B[i + 347*t]] * R[C[i + 347*t]] : R[B[i + 347*t]] + R[C[i + 347*t]];
R[i + 1201*t] = Op[i + 348*t] ? R[B[i + 348*t]] * R[C[i + 348*t]] : R[B[i + 348*t]] + R[C[i + 348*t]];
R[i + 1202*t] = Op[i + 349*t] ? R[B[i + 349*t]] * R[C[i + 349*t]] : R[B[i + 349*t]] + R[C[i + 349*t]];
R[i + 1203*t] = Op[i + 350*t] ? R[B[i + 350*t]] * R[C[i + 350*t]] : R[B[i + 350*t]] + R[C[i + 350*t]];
R[i + 1204*t] = Op[i + 351*t] ? R[B[i + 351*t]] * R[C[i + 351*t]] : R[B[i + 351*t]] + R[C[i + 351*t]];
R[i + 1205*t] = Op[i + 352*t] ? R[B[i + 352*t]] * R[C[i + 352*t]] : R[B[i + 352*t]] + R[C[i + 352*t]];
R[i + 1206*t] = Op[i + 353*t] ? R[B[i + 353*t]] * R[C[i + 353*t]] : R[B[i + 353*t]] + R[C[i + 353*t]];
R[i + 1207*t] = Op[i + 354*t] ? R[B[i + 354*t]] * R[C[i + 354*t]] : R[B[i + 354*t]] + R[C[i + 354*t]];
R[i + 1208*t] = Op[i + 355*t] ? R[B[i + 355*t]] * R[C[i + 355*t]] : R[B[i + 355*t]] + R[C[i + 355*t]];
R[i + 1209*t] = Op[i + 356*t] ? R[B[i + 356*t]] * R[C[i + 356*t]] : R[B[i + 356*t]] + R[C[i + 356*t]];
R[i + 1210*t] = Op[i + 357*t] ? R[B[i + 357*t]] * R[C[i + 357*t]] : R[B[i + 357*t]] + R[C[i + 357*t]];
R[i + 1211*t] = Op[i + 358*t] ? R[B[i + 358*t]] * R[C[i + 358*t]] : R[B[i + 358*t]] + R[C[i + 358*t]];
R[i + 1212*t] = Op[i + 359*t] ? R[B[i + 359*t]] * R[C[i + 359*t]] : R[B[i + 359*t]] + R[C[i + 359*t]];
R[i + 1213*t] = Op[i + 360*t] ? R[B[i + 360*t]] * R[C[i + 360*t]] : R[B[i + 360*t]] + R[C[i + 360*t]];
R[i + 1214*t] = Op[i + 361*t] ? R[B[i + 361*t]] * R[C[i + 361*t]] : R[B[i + 361*t]] + R[C[i + 361*t]];
R[i + 1215*t] = Op[i + 362*t] ? R[B[i + 362*t]] * R[C[i + 362*t]] : R[B[i + 362*t]] + R[C[i + 362*t]];
R[i + 1216*t] = Op[i + 363*t] ? R[B[i + 363*t]] * R[C[i + 363*t]] : R[B[i + 363*t]] + R[C[i + 363*t]];
R[i + 1217*t] = Op[i + 364*t] ? R[B[i + 364*t]] * R[C[i + 364*t]] : R[B[i + 364*t]] + R[C[i + 364*t]];
R[i + 1218*t] = Op[i + 365*t] ? R[B[i + 365*t]] * R[C[i + 365*t]] : R[B[i + 365*t]] + R[C[i + 365*t]];
R[i + 1219*t] = Op[i + 366*t] ? R[B[i + 366*t]] * R[C[i + 366*t]] : R[B[i + 366*t]] + R[C[i + 366*t]];
R[i + 1220*t] = Op[i + 367*t] ? R[B[i + 367*t]] * R[C[i + 367*t]] : R[B[i + 367*t]] + R[C[i + 367*t]];
R[i + 1221*t] = Op[i + 368*t] ? R[B[i + 368*t]] * R[C[i + 368*t]] : R[B[i + 368*t]] + R[C[i + 368*t]];
R[i + 1222*t] = Op[i + 369*t] ? R[B[i + 369*t]] * R[C[i + 369*t]] : R[B[i + 369*t]] + R[C[i + 369*t]];
R[i + 1223*t] = Op[i + 370*t] ? R[B[i + 370*t]] * R[C[i + 370*t]] : R[B[i + 370*t]] + R[C[i + 370*t]];
R[i + 1224*t] = Op[i + 371*t] ? R[B[i + 371*t]] * R[C[i + 371*t]] : R[B[i + 371*t]] + R[C[i + 371*t]];
R[i + 1225*t] = Op[i + 372*t] ? R[B[i + 372*t]] * R[C[i + 372*t]] : R[B[i + 372*t]] + R[C[i + 372*t]];
R[i + 1226*t] = Op[i + 373*t] ? R[B[i + 373*t]] * R[C[i + 373*t]] : R[B[i + 373*t]] + R[C[i + 373*t]];
R[i + 1227*t] = Op[i + 374*t] ? R[B[i + 374*t]] * R[C[i + 374*t]] : R[B[i + 374*t]] + R[C[i + 374*t]];
R[i + 1228*t] = Op[i + 375*t] ? R[B[i + 375*t]] * R[C[i + 375*t]] : R[B[i + 375*t]] + R[C[i + 375*t]];
R[i + 1229*t] = Op[i + 376*t] ? R[B[i + 376*t]] * R[C[i + 376*t]] : R[B[i + 376*t]] + R[C[i + 376*t]];
R[i + 1230*t] = Op[i + 377*t] ? R[B[i + 377*t]] * R[C[i + 377*t]] : R[B[i + 377*t]] + R[C[i + 377*t]];
R[i + 1231*t] = Op[i + 378*t] ? R[B[i + 378*t]] * R[C[i + 378*t]] : R[B[i + 378*t]] + R[C[i + 378*t]];
R[i + 1232*t] = Op[i + 379*t] ? R[B[i + 379*t]] * R[C[i + 379*t]] : R[B[i + 379*t]] + R[C[i + 379*t]];
__syncthreads();
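// Level boundary: the statements above form one dependency level of the DAG;
// the __syncthreads() barrier is presumably needed so the R[] entries just
// written become visible to every thread before the next level reads them
// (this assumes R[] lives in block-shared memory).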
R[i + 1233*t] = Op[i + 380*t] ? R[B[i + 380*t]] * R[C[i + 380*t]] : R[B[i + 380*t]] + R[C[i + 380*t]];
R[i + 1234*t] = Op[i + 381*t] ? R[B[i + 381*t]] * R[C[i + 381*t]] : R[B[i + 381*t]] + R[C[i + 381*t]];
R[i + 1235*t] = Op[i + 382*t] ? R[B[i + 382*t]] * R[C[i + 382*t]] : R[B[i + 382*t]] + R[C[i + 382*t]];
R[i + 1236*t] = Op[i + 383*t] ? R[B[i + 383*t]] * R[C[i + 383*t]] : R[B[i + 383*t]] + R[C[i + 383*t]];
R[i + 1237*t] = Op[i + 384*t] ? R[B[i + 384*t]] * R[C[i + 384*t]] : R[B[i + 384*t]] + R[C[i + 384*t]];
R[i + 1238*t] = Op[i + 385*t] ? R[B[i + 385*t]] * R[C[i + 385*t]] : R[B[i + 385*t]] + R[C[i + 385*t]];
R[i + 1239*t] = Op[i + 386*t] ? R[B[i + 386*t]] * R[C[i + 386*t]] : R[B[i + 386*t]] + R[C[i + 386*t]];
R[i + 1240*t] = Op[i + 387*t] ? R[B[i + 387*t]] * R[C[i + 387*t]] : R[B[i + 387*t]] + R[C[i + 387*t]];
R[i + 1241*t] = Op[i + 388*t] ? R[B[i + 388*t]] * R[C[i + 388*t]] : R[B[i + 388*t]] + R[C[i + 388*t]];
R[i + 1242*t] = Op[i + 389*t] ? R[B[i + 389*t]] * R[C[i + 389*t]] : R[B[i + 389*t]] + R[C[i + 389*t]];
R[i + 1243*t] = Op[i + 390*t] ? R[B[i + 390*t]] * R[C[i + 390*t]] : R[B[i + 390*t]] + R[C[i + 390*t]];
R[i + 1244*t] = Op[i + 391*t] ? R[B[i + 391*t]] * R[C[i + 391*t]] : R[B[i + 391*t]] + R[C[i + 391*t]];
R[i + 1245*t] = Op[i + 392*t] ? R[B[i + 392*t]] * R[C[i + 392*t]] : R[B[i + 392*t]] + R[C[i + 392*t]];
R[i + 1246*t] = Op[i + 393*t] ? R[B[i + 393*t]] * R[C[i + 393*t]] : R[B[i + 393*t]] + R[C[i + 393*t]];
R[i + 1247*t] = Op[i + 394*t] ? R[B[i + 394*t]] * R[C[i + 394*t]] : R[B[i + 394*t]] + R[C[i + 394*t]];
R[i + 1248*t] = Op[i + 395*t] ? R[B[i + 395*t]] * R[C[i + 395*t]] : R[B[i + 395*t]] + R[C[i + 395*t]];
R[i + 1249*t] = Op[i + 396*t] ? R[B[i + 396*t]] * R[C[i + 396*t]] : R[B[i + 396*t]] + R[C[i + 396*t]];
R[i + 1250*t] = Op[i + 397*t] ? R[B[i + 397*t]] * R[C[i + 397*t]] : R[B[i + 397*t]] + R[C[i + 397*t]];
R[i + 1251*t] = Op[i + 398*t] ? R[B[i + 398*t]] * R[C[i + 398*t]] : R[B[i + 398*t]] + R[C[i + 398*t]];
R[i + 1252*t] = Op[i + 399*t] ? R[B[i + 399*t]] * R[C[i + 399*t]] : R[B[i + 399*t]] + R[C[i + 399*t]];
R[i + 1253*t] = Op[i + 400*t] ? R[B[i + 400*t]] * R[C[i + 400*t]] : R[B[i + 400*t]] + R[C[i + 400*t]];
R[i + 1254*t] = Op[i + 401*t] ? R[B[i + 401*t]] * R[C[i + 401*t]] : R[B[i + 401*t]] + R[C[i + 401*t]];
R[i + 1255*t] = Op[i + 402*t] ? R[B[i + 402*t]] * R[C[i + 402*t]] : R[B[i + 402*t]] + R[C[i + 402*t]];
R[i + 1256*t] = Op[i + 403*t] ? R[B[i + 403*t]] * R[C[i + 403*t]] : R[B[i + 403*t]] + R[C[i + 403*t]];
R[i + 1257*t] = Op[i + 404*t] ? R[B[i + 404*t]] * R[C[i + 404*t]] : R[B[i + 404*t]] + R[C[i + 404*t]];
R[i + 1258*t] = Op[i + 405*t] ? R[B[i + 405*t]] * R[C[i + 405*t]] : R[B[i + 405*t]] + R[C[i + 405*t]];
R[i + 1259*t] = Op[i + 406*t] ? R[B[i + 406*t]] * R[C[i + 406*t]] : R[B[i + 406*t]] + R[C[i + 406*t]];
R[i + 1260*t] = Op[i + 407*t] ? R[B[i + 407*t]] * R[C[i + 407*t]] : R[B[i + 407*t]] + R[C[i + 407*t]];
R[i + 1261*t] = Op[i + 408*t] ? R[B[i + 408*t]] * R[C[i + 408*t]] : R[B[i + 408*t]] + R[C[i + 408*t]];
R[i + 1262*t] = Op[i + 409*t] ? R[B[i + 409*t]] * R[C[i + 409*t]] : R[B[i + 409*t]] + R[C[i + 409*t]];
R[i + 1263*t] = Op[i + 410*t] ? R[B[i + 410*t]] * R[C[i + 410*t]] : R[B[i + 410*t]] + R[C[i + 410*t]];
R[i + 1264*t] = Op[i + 411*t] ? R[B[i + 411*t]] * R[C[i + 411*t]] : R[B[i + 411*t]] + R[C[i + 411*t]];
R[i + 1265*t] = Op[i + 412*t] ? R[B[i + 412*t]] * R[C[i + 412*t]] : R[B[i + 412*t]] + R[C[i + 412*t]];
R[i + 1266*t] = Op[i + 413*t] ? R[B[i + 413*t]] * R[C[i + 413*t]] : R[B[i + 413*t]] + R[C[i + 413*t]];
R[i + 1267*t] = Op[i + 414*t] ? R[B[i + 414*t]] * R[C[i + 414*t]] : R[B[i + 414*t]] + R[C[i + 414*t]];
R[i + 1268*t] = Op[i + 415*t] ? R[B[i + 415*t]] * R[C[i + 415*t]] : R[B[i + 415*t]] + R[C[i + 415*t]];
R[i + 1269*t] = Op[i + 416*t] ? R[B[i + 416*t]] * R[C[i + 416*t]] : R[B[i + 416*t]] + R[C[i + 416*t]];
R[i + 1270*t] = Op[i + 417*t] ? R[B[i + 417*t]] * R[C[i + 417*t]] : R[B[i + 417*t]] + R[C[i + 417*t]];
R[i + 1271*t] = Op[i + 418*t] ? R[B[i + 418*t]] * R[C[i + 418*t]] : R[B[i + 418*t]] + R[C[i + 418*t]];
R[i + 1272*t] = Op[i + 419*t] ? R[B[i + 419*t]] * R[C[i + 419*t]] : R[B[i + 419*t]] + R[C[i + 419*t]];
R[i + 1273*t] = Op[i + 420*t] ? R[B[i + 420*t]] * R[C[i + 420*t]] : R[B[i + 420*t]] + R[C[i + 420*t]];
R[i + 1274*t] = Op[i + 421*t] ? R[B[i + 421*t]] * R[C[i + 421*t]] : R[B[i + 421*t]] + R[C[i + 421*t]];
R[i + 1275*t] = Op[i + 422*t] ? R[B[i + 422*t]] * R[C[i + 422*t]] : R[B[i + 422*t]] + R[C[i + 422*t]];
R[i + 1276*t] = Op[i + 423*t] ? R[B[i + 423*t]] * R[C[i + 423*t]] : R[B[i + 423*t]] + R[C[i + 423*t]];
R[i + 1277*t] = Op[i + 424*t] ? R[B[i + 424*t]] * R[C[i + 424*t]] : R[B[i + 424*t]] + R[C[i + 424*t]];
R[i + 1278*t] = Op[i + 425*t] ? R[B[i + 425*t]] * R[C[i + 425*t]] : R[B[i + 425*t]] + R[C[i + 425*t]];
R[i + 1279*t] = Op[i + 426*t] ? R[B[i + 426*t]] * R[C[i + 426*t]] : R[B[i + 426*t]] + R[C[i + 426*t]];
R[i + 1280*t] = Op[i + 427*t] ? R[B[i + 427*t]] * R[C[i + 427*t]] : R[B[i + 427*t]] + R[C[i + 427*t]];
R[i + 1281*t] = Op[i + 428*t] ? R[B[i + 428*t]] * R[C[i + 428*t]] : R[B[i + 428*t]] + R[C[i + 428*t]];
R[i + 1282*t] = Op[i + 429*t] ? R[B[i + 429*t]] * R[C[i + 429*t]] : R[B[i + 429*t]] + R[C[i + 429*t]];
R[i + 1283*t] = Op[i + 430*t] ? R[B[i + 430*t]] * R[C[i + 430*t]] : R[B[i + 430*t]] + R[C[i + 430*t]];
R[i + 1284*t] = Op[i + 431*t] ? R[B[i + 431*t]] * R[C[i + 431*t]] : R[B[i + 431*t]] + R[C[i + 431*t]];
R[i + 1285*t] = Op[i + 432*t] ? R[B[i + 432*t]] * R[C[i + 432*t]] : R[B[i + 432*t]] + R[C[i + 432*t]];
R[i + 1286*t] = Op[i + 433*t] ? R[B[i + 433*t]] * R[C[i + 433*t]] : R[B[i + 433*t]] + R[C[i + 433*t]];
R[i + 1287*t] = Op[i + 434*t] ? R[B[i + 434*t]] * R[C[i + 434*t]] : R[B[i + 434*t]] + R[C[i + 434*t]];
R[i + 1288*t] = Op[i + 435*t] ? R[B[i + 435*t]] * R[C[i + 435*t]] : R[B[i + 435*t]] + R[C[i + 435*t]];
R[i + 1289*t] = Op[i + 436*t] ? R[B[i + 436*t]] * R[C[i + 436*t]] : R[B[i + 436*t]] + R[C[i + 436*t]];
R[i + 1290*t] = Op[i + 437*t] ? R[B[i + 437*t]] * R[C[i + 437*t]] : R[B[i + 437*t]] + R[C[i + 437*t]];
R[i + 1291*t] = Op[i + 438*t] ? R[B[i + 438*t]] * R[C[i + 438*t]] : R[B[i + 438*t]] + R[C[i + 438*t]];
R[i + 1292*t] = Op[i + 439*t] ? R[B[i + 439*t]] * R[C[i + 439*t]] : R[B[i + 439*t]] + R[C[i + 439*t]];
R[i + 1293*t] = Op[i + 440*t] ? R[B[i + 440*t]] * R[C[i + 440*t]] : R[B[i + 440*t]] + R[C[i + 440*t]];
R[i + 1294*t] = Op[i + 441*t] ? R[B[i + 441*t]] * R[C[i + 441*t]] : R[B[i + 441*t]] + R[C[i + 441*t]];
R[i + 1295*t] = Op[i + 442*t] ? R[B[i + 442*t]] * R[C[i + 442*t]] : R[B[i + 442*t]] + R[C[i + 442*t]];
R[i + 1296*t] = Op[i + 443*t] ? R[B[i + 443*t]] * R[C[i + 443*t]] : R[B[i + 443*t]] + R[C[i + 443*t]];
R[i + 1297*t] = Op[i + 444*t] ? R[B[i + 444*t]] * R[C[i + 444*t]] : R[B[i + 444*t]] + R[C[i + 444*t]];
R[i + 1298*t] = Op[i + 445*t] ? R[B[i + 445*t]] * R[C[i + 445*t]] : R[B[i + 445*t]] + R[C[i + 445*t]];
R[i + 1299*t] = Op[i + 446*t] ? R[B[i + 446*t]] * R[C[i + 446*t]] : R[B[i + 446*t]] + R[C[i + 446*t]];
R[i + 1300*t] = Op[i + 447*t] ? R[B[i + 447*t]] * R[C[i + 447*t]] : R[B[i + 447*t]] + R[C[i + 447*t]];
R[i + 1301*t] = Op[i + 448*t] ? R[B[i + 448*t]] * R[C[i + 448*t]] : R[B[i + 448*t]] + R[C[i + 448*t]];
R[i + 1302*t] = Op[i + 449*t] ? R[B[i + 449*t]] * R[C[i + 449*t]] : R[B[i + 449*t]] + R[C[i + 449*t]];
R[i + 1303*t] = Op[i + 450*t] ? R[B[i + 450*t]] * R[C[i + 450*t]] : R[B[i + 450*t]] + R[C[i + 450*t]];
R[i + 1304*t] = Op[i + 451*t] ? R[B[i + 451*t]] * R[C[i + 451*t]] : R[B[i + 451*t]] + R[C[i + 451*t]];
R[i + 1305*t] = Op[i + 452*t] ? R[B[i + 452*t]] * R[C[i + 452*t]] : R[B[i + 452*t]] + R[C[i + 452*t]];
R[i + 1306*t] = Op[i + 453*t] ? R[B[i + 453*t]] * R[C[i + 453*t]] : R[B[i + 453*t]] + R[C[i + 453*t]];
R[i + 1307*t] = Op[i + 454*t] ? R[B[i + 454*t]] * R[C[i + 454*t]] : R[B[i + 454*t]] + R[C[i + 454*t]];
R[i + 1308*t] = Op[i + 455*t] ? R[B[i + 455*t]] * R[C[i + 455*t]] : R[B[i + 455*t]] + R[C[i + 455*t]];
R[i + 1309*t] = Op[i + 456*t] ? R[B[i + 456*t]] * R[C[i + 456*t]] : R[B[i + 456*t]] + R[C[i + 456*t]];
R[i + 1310*t] = Op[i + 457*t] ? R[B[i + 457*t]] * R[C[i + 457*t]] : R[B[i + 457*t]] + R[C[i + 457*t]];
R[i + 1311*t] = Op[i + 458*t] ? R[B[i + 458*t]] * R[C[i + 458*t]] : R[B[i + 458*t]] + R[C[i + 458*t]];
R[i + 1312*t] = Op[i + 459*t] ? R[B[i + 459*t]] * R[C[i + 459*t]] : R[B[i + 459*t]] + R[C[i + 459*t]];
R[i + 1313*t] = Op[i + 460*t] ? R[B[i + 460*t]] * R[C[i + 460*t]] : R[B[i + 460*t]] + R[C[i + 460*t]];
R[i + 1314*t] = Op[i + 461*t] ? R[B[i + 461*t]] * R[C[i + 461*t]] : R[B[i + 461*t]] + R[C[i + 461*t]];
R[i + 1315*t] = Op[i + 462*t] ? R[B[i + 462*t]] * R[C[i + 462*t]] : R[B[i + 462*t]] + R[C[i + 462*t]];
R[i + 1316*t] = Op[i + 463*t] ? R[B[i + 463*t]] * R[C[i + 463*t]] : R[B[i + 463*t]] + R[C[i + 463*t]];
R[i + 1317*t] = Op[i + 464*t] ? R[B[i + 464*t]] * R[C[i + 464*t]] : R[B[i + 464*t]] + R[C[i + 464*t]];
R[i + 1318*t] = Op[i + 465*t] ? R[B[i + 465*t]] * R[C[i + 465*t]] : R[B[i + 465*t]] + R[C[i + 465*t]];
R[i + 1319*t] = Op[i + 466*t] ? R[B[i + 466*t]] * R[C[i + 466*t]] : R[B[i + 466*t]] + R[C[i + 466*t]];
R[i + 1320*t] = Op[i + 467*t] ? R[B[i + 467*t]] * R[C[i + 467*t]] : R[B[i + 467*t]] + R[C[i + 467*t]];
R[i + 1321*t] = Op[i + 468*t] ? R[B[i + 468*t]] * R[C[i + 468*t]] : R[B[i + 468*t]] + R[C[i + 468*t]];
R[i + 1322*t] = Op[i + 469*t] ? R[B[i + 469*t]] * R[C[i + 469*t]] : R[B[i + 469*t]] + R[C[i + 469*t]];
R[i + 1323*t] = Op[i + 470*t] ? R[B[i + 470*t]] * R[C[i + 470*t]] : R[B[i + 470*t]] + R[C[i + 470*t]];
R[i + 1324*t] = Op[i + 471*t] ? R[B[i + 471*t]] * R[C[i + 471*t]] : R[B[i + 471*t]] + R[C[i + 471*t]];
R[i + 1325*t] = Op[i + 472*t] ? R[B[i + 472*t]] * R[C[i + 472*t]] : R[B[i + 472*t]] + R[C[i + 472*t]];
R[i + 1326*t] = Op[i + 473*t] ? R[B[i + 473*t]] * R[C[i + 473*t]] : R[B[i + 473*t]] + R[C[i + 473*t]];
R[i + 1327*t] = Op[i + 474*t] ? R[B[i + 474*t]] * R[C[i + 474*t]] : R[B[i + 474*t]] + R[C[i + 474*t]];
R[i + 1328*t] = Op[i + 475*t] ? R[B[i + 475*t]] * R[C[i + 475*t]] : R[B[i + 475*t]] + R[C[i + 475*t]];
R[i + 1329*t] = Op[i + 476*t] ? R[B[i + 476*t]] * R[C[i + 476*t]] : R[B[i + 476*t]] + R[C[i + 476*t]];
R[i + 1330*t] = Op[i + 477*t] ? R[B[i + 477*t]] * R[C[i + 477*t]] : R[B[i + 477*t]] + R[C[i + 477*t]];
R[i + 1331*t] = Op[i + 478*t] ? R[B[i + 478*t]] * R[C[i + 478*t]] : R[B[i + 478*t]] + R[C[i + 478*t]];
R[i + 1332*t] = Op[i + 479*t] ? R[B[i + 479*t]] * R[C[i + 479*t]] : R[B[i + 479*t]] + R[C[i + 479*t]];
R[i + 1333*t] = Op[i + 480*t] ? R[B[i + 480*t]] * R[C[i + 480*t]] : R[B[i + 480*t]] + R[C[i + 480*t]];
R[i + 1334*t] = Op[i + 481*t] ? R[B[i + 481*t]] * R[C[i + 481*t]] : R[B[i + 481*t]] + R[C[i + 481*t]];
R[i + 1335*t] = Op[i + 482*t] ? R[B[i + 482*t]] * R[C[i + 482*t]] : R[B[i + 482*t]] + R[C[i + 482*t]];
R[i + 1336*t] = Op[i + 483*t] ? R[B[i + 483*t]] * R[C[i + 483*t]] : R[B[i + 483*t]] + R[C[i + 483*t]];
R[i + 1337*t] = Op[i + 484*t] ? R[B[i + 484*t]] * R[C[i + 484*t]] : R[B[i + 484*t]] + R[C[i + 484*t]];
R[i + 1338*t] = Op[i + 485*t] ? R[B[i + 485*t]] * R[C[i + 485*t]] : R[B[i + 485*t]] + R[C[i + 485*t]];
R[i + 1339*t] = Op[i + 486*t] ? R[B[i + 486*t]] * R[C[i + 486*t]] : R[B[i + 486*t]] + R[C[i + 486*t]];
R[i + 1340*t] = Op[i + 487*t] ? R[B[i + 487*t]] * R[C[i + 487*t]] : R[B[i + 487*t]] + R[C[i + 487*t]];
R[i + 1341*t] = Op[i + 488*t] ? R[B[i + 488*t]] * R[C[i + 488*t]] : R[B[i + 488*t]] + R[C[i + 488*t]];
R[i + 1342*t] = Op[i + 489*t] ? R[B[i + 489*t]] * R[C[i + 489*t]] : R[B[i + 489*t]] + R[C[i + 489*t]];
R[i + 1343*t] = Op[i + 490*t] ? R[B[i + 490*t]] * R[C[i + 490*t]] : R[B[i + 490*t]] + R[C[i + 490*t]];
R[i + 1344*t] = Op[i + 491*t] ? R[B[i + 491*t]] * R[C[i + 491*t]] : R[B[i + 491*t]] + R[C[i + 491*t]];
R[i + 1345*t] = Op[i + 492*t] ? R[B[i + 492*t]] * R[C[i + 492*t]] : R[B[i + 492*t]] + R[C[i + 492*t]];
R[i + 1346*t] = Op[i + 493*t] ? R[B[i + 493*t]] * R[C[i + 493*t]] : R[B[i + 493*t]] + R[C[i + 493*t]];
R[i + 1347*t] = Op[i + 494*t] ? R[B[i + 494*t]] * R[C[i + 494*t]] : R[B[i + 494*t]] + R[C[i + 494*t]];
R[i + 1348*t] = Op[i + 495*t] ? R[B[i + 495*t]] * R[C[i + 495*t]] : R[B[i + 495*t]] + R[C[i + 495*t]];
R[i + 1349*t] = Op[i + 496*t] ? R[B[i + 496*t]] * R[C[i + 496*t]] : R[B[i + 496*t]] + R[C[i + 496*t]];
R[i + 1350*t] = Op[i + 497*t] ? R[B[i + 497*t]] * R[C[i + 497*t]] : R[B[i + 497*t]] + R[C[i + 497*t]];
R[i + 1351*t] = Op[i + 498*t] ? R[B[i + 498*t]] * R[C[i + 498*t]] : R[B[i + 498*t]] + R[C[i + 498*t]];
R[i + 1352*t] = Op[i + 499*t] ? R[B[i + 499*t]] * R[C[i + 499*t]] : R[B[i + 499*t]] + R[C[i + 499*t]];
R[i + 1353*t] = Op[i + 500*t] ? R[B[i + 500*t]] * R[C[i + 500*t]] : R[B[i + 500*t]] + R[C[i + 500*t]];
R[i + 1354*t] = Op[i + 501*t] ? R[B[i + 501*t]] * R[C[i + 501*t]] : R[B[i + 501*t]] + R[C[i + 501*t]];
R[i + 1355*t] = Op[i + 502*t] ? R[B[i + 502*t]] * R[C[i + 502*t]] : R[B[i + 502*t]] + R[C[i + 502*t]];
R[i + 1356*t] = Op[i + 503*t] ? R[B[i + 503*t]] * R[C[i + 503*t]] : R[B[i + 503*t]] + R[C[i + 503*t]];
R[i + 1357*t] = Op[i + 504*t] ? R[B[i + 504*t]] * R[C[i + 504*t]] : R[B[i + 504*t]] + R[C[i + 504*t]];
R[i + 1358*t] = Op[i + 505*t] ? R[B[i + 505*t]] * R[C[i + 505*t]] : R[B[i + 505*t]] + R[C[i + 505*t]];
R[i + 1359*t] = Op[i + 506*t] ? R[B[i + 506*t]] * R[C[i + 506*t]] : R[B[i + 506*t]] + R[C[i + 506*t]];
R[i + 1360*t] = Op[i + 507*t] ? R[B[i + 507*t]] * R[C[i + 507*t]] : R[B[i + 507*t]] + R[C[i + 507*t]];
R[i + 1361*t] = Op[i + 508*t] ? R[B[i + 508*t]] * R[C[i + 508*t]] : R[B[i + 508*t]] + R[C[i + 508*t]];
R[i + 1362*t] = Op[i + 509*t] ? R[B[i + 509*t]] * R[C[i + 509*t]] : R[B[i + 509*t]] + R[C[i + 509*t]];
R[i + 1363*t] = Op[i + 510*t] ? R[B[i + 510*t]] * R[C[i + 510*t]] : R[B[i + 510*t]] + R[C[i + 510*t]];
R[i + 1364*t] = Op[i + 511*t] ? R[B[i + 511*t]] * R[C[i + 511*t]] : R[B[i + 511*t]] + R[C[i + 511*t]];
R[i + 1365*t] = Op[i + 512*t] ? R[B[i + 512*t]] * R[C[i + 512*t]] : R[B[i + 512*t]] + R[C[i + 512*t]];
R[i + 1366*t] = Op[i + 513*t] ? R[B[i + 513*t]] * R[C[i + 513*t]] : R[B[i + 513*t]] + R[C[i + 513*t]];
R[i + 1367*t] = Op[i + 514*t] ? R[B[i + 514*t]] * R[C[i + 514*t]] : R[B[i + 514*t]] + R[C[i + 514*t]];
R[i + 1368*t] = Op[i + 515*t] ? R[B[i + 515*t]] * R[C[i + 515*t]] : R[B[i + 515*t]] + R[C[i + 515*t]];
R[i + 1369*t] = Op[i + 516*t] ? R[B[i + 516*t]] * R[C[i + 516*t]] : R[B[i + 516*t]] + R[C[i + 516*t]];
R[i + 1370*t] = Op[i + 517*t] ? R[B[i + 517*t]] * R[C[i + 517*t]] : R[B[i + 517*t]] + R[C[i + 517*t]];
R[i + 1371*t] = Op[i + 518*t] ? R[B[i + 518*t]] * R[C[i + 518*t]] : R[B[i + 518*t]] + R[C[i + 518*t]];
R[i + 1372*t] = Op[i + 519*t] ? R[B[i + 519*t]] * R[C[i + 519*t]] : R[B[i + 519*t]] + R[C[i + 519*t]];
R[i + 1373*t] = Op[i + 520*t] ? R[B[i + 520*t]] * R[C[i + 520*t]] : R[B[i + 520*t]] + R[C[i + 520*t]];
R[i + 1374*t] = Op[i + 521*t] ? R[B[i + 521*t]] * R[C[i + 521*t]] : R[B[i + 521*t]] + R[C[i + 521*t]];
R[i + 1375*t] = Op[i + 522*t] ? R[B[i + 522*t]] * R[C[i + 522*t]] : R[B[i + 522*t]] + R[C[i + 522*t]];
R[i + 1376*t] = Op[i + 523*t] ? R[B[i + 523*t]] * R[C[i + 523*t]] : R[B[i + 523*t]] + R[C[i + 523*t]];
R[i + 1377*t] = Op[i + 524*t] ? R[B[i + 524*t]] * R[C[i + 524*t]] : R[B[i + 524*t]] + R[C[i + 524*t]];
R[i + 1378*t] = Op[i + 525*t] ? R[B[i + 525*t]] * R[C[i + 525*t]] : R[B[i + 525*t]] + R[C[i + 525*t]];
R[i + 1379*t] = Op[i + 526*t] ? R[B[i + 526*t]] * R[C[i + 526*t]] : R[B[i + 526*t]] + R[C[i + 526*t]];
R[i + 1380*t] = Op[i + 527*t] ? R[B[i + 527*t]] * R[C[i + 527*t]] : R[B[i + 527*t]] + R[C[i + 527*t]];
R[i + 1381*t] = Op[i + 528*t] ? R[B[i + 528*t]] * R[C[i + 528*t]] : R[B[i + 528*t]] + R[C[i + 528*t]];
R[i + 1382*t] = Op[i + 529*t] ? R[B[i + 529*t]] * R[C[i + 529*t]] : R[B[i + 529*t]] + R[C[i + 529*t]];
R[i + 1383*t] = Op[i + 530*t] ? R[B[i + 530*t]] * R[C[i + 530*t]] : R[B[i + 530*t]] + R[C[i + 530*t]];
__syncthreads();
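// Level boundary (see the comment at the first __syncthreads() above).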
R[i + 1384*t] = Op[i + 531*t] ? R[B[i + 531*t]] * R[C[i + 531*t]] : R[B[i + 531*t]] + R[C[i + 531*t]];
R[i + 1385*t] = Op[i + 532*t] ? R[B[i + 532*t]] * R[C[i + 532*t]] : R[B[i + 532*t]] + R[C[i + 532*t]];
R[i + 1386*t] = Op[i + 533*t] ? R[B[i + 533*t]] * R[C[i + 533*t]] : R[B[i + 533*t]] + R[C[i + 533*t]];
R[i + 1387*t] = Op[i + 534*t] ? R[B[i + 534*t]] * R[C[i + 534*t]] : R[B[i + 534*t]] + R[C[i + 534*t]];
R[i + 1388*t] = Op[i + 535*t] ? R[B[i + 535*t]] * R[C[i + 535*t]] : R[B[i + 535*t]] + R[C[i + 535*t]];
R[i + 1389*t] = Op[i + 536*t] ? R[B[i + 536*t]] * R[C[i + 536*t]] : R[B[i + 536*t]] + R[C[i + 536*t]];
R[i + 1390*t] = Op[i + 537*t] ? R[B[i + 537*t]] * R[C[i + 537*t]] : R[B[i + 537*t]] + R[C[i + 537*t]];
R[i + 1391*t] = Op[i + 538*t] ? R[B[i + 538*t]] * R[C[i + 538*t]] : R[B[i + 538*t]] + R[C[i + 538*t]];
R[i + 1392*t] = Op[i + 539*t] ? R[B[i + 539*t]] * R[C[i + 539*t]] : R[B[i + 539*t]] + R[C[i + 539*t]];
R[i + 1393*t] = Op[i + 540*t] ? R[B[i + 540*t]] * R[C[i + 540*t]] : R[B[i + 540*t]] + R[C[i + 540*t]];
R[i + 1394*t] = Op[i + 541*t] ? R[B[i + 541*t]] * R[C[i + 541*t]] : R[B[i + 541*t]] + R[C[i + 541*t]];
R[i + 1395*t] = Op[i + 542*t] ? R[B[i + 542*t]] * R[C[i + 542*t]] : R[B[i + 542*t]] + R[C[i + 542*t]];
R[i + 1396*t] = Op[i + 543*t] ? R[B[i + 543*t]] * R[C[i + 543*t]] : R[B[i + 543*t]] + R[C[i + 543*t]];
R[i + 1397*t] = Op[i + 544*t] ? R[B[i + 544*t]] * R[C[i + 544*t]] : R[B[i + 544*t]] + R[C[i + 544*t]];
R[i + 1398*t] = Op[i + 545*t] ? R[B[i + 545*t]] * R[C[i + 545*t]] : R[B[i + 545*t]] + R[C[i + 545*t]];
R[i + 1399*t] = Op[i + 546*t] ? R[B[i + 546*t]] * R[C[i + 546*t]] : R[B[i + 546*t]] + R[C[i + 546*t]];
R[i + 1400*t] = Op[i + 547*t] ? R[B[i + 547*t]] * R[C[i + 547*t]] : R[B[i + 547*t]] + R[C[i + 547*t]];
R[i + 1401*t] = Op[i + 548*t] ? R[B[i + 548*t]] * R[C[i + 548*t]] : R[B[i + 548*t]] + R[C[i + 548*t]];
R[i + 1402*t] = Op[i + 549*t] ? R[B[i + 549*t]] * R[C[i + 549*t]] : R[B[i + 549*t]] + R[C[i + 549*t]];
R[i + 1403*t] = Op[i + 550*t] ? R[B[i + 550*t]] * R[C[i + 550*t]] : R[B[i + 550*t]] + R[C[i + 550*t]];
R[i + 1404*t] = Op[i + 551*t] ? R[B[i + 551*t]] * R[C[i + 551*t]] : R[B[i + 551*t]] + R[C[i + 551*t]];
R[i + 1405*t] = Op[i + 552*t] ? R[B[i + 552*t]] * R[C[i + 552*t]] : R[B[i + 552*t]] + R[C[i + 552*t]];
R[i + 1406*t] = Op[i + 553*t] ? R[B[i + 553*t]] * R[C[i + 553*t]] : R[B[i + 553*t]] + R[C[i + 553*t]];
R[i + 1407*t] = Op[i + 554*t] ? R[B[i + 554*t]] * R[C[i + 554*t]] : R[B[i + 554*t]] + R[C[i + 554*t]];
R[i + 1408*t] = Op[i + 555*t] ? R[B[i + 555*t]] * R[C[i + 555*t]] : R[B[i + 555*t]] + R[C[i + 555*t]];
R[i + 1409*t] = Op[i + 556*t] ? R[B[i + 556*t]] * R[C[i + 556*t]] : R[B[i + 556*t]] + R[C[i + 556*t]];
R[i + 1410*t] = Op[i + 557*t] ? R[B[i + 557*t]] * R[C[i + 557*t]] : R[B[i + 557*t]] + R[C[i + 557*t]];
R[i + 1411*t] = Op[i + 558*t] ? R[B[i + 558*t]] * R[C[i + 558*t]] : R[B[i + 558*t]] + R[C[i + 558*t]];
R[i + 1412*t] = Op[i + 559*t] ? R[B[i + 559*t]] * R[C[i + 559*t]] : R[B[i + 559*t]] + R[C[i + 559*t]];
R[i + 1413*t] = Op[i + 560*t] ? R[B[i + 560*t]] * R[C[i + 560*t]] : R[B[i + 560*t]] + R[C[i + 560*t]];
R[i + 1414*t] = Op[i + 561*t] ? R[B[i + 561*t]] * R[C[i + 561*t]] : R[B[i + 561*t]] + R[C[i + 561*t]];
R[i + 1415*t] = Op[i + 562*t] ? R[B[i + 562*t]] * R[C[i + 562*t]] : R[B[i + 562*t]] + R[C[i + 562*t]];
R[i + 1416*t] = Op[i + 563*t] ? R[B[i + 563*t]] * R[C[i + 563*t]] : R[B[i + 563*t]] + R[C[i + 563*t]];
R[i + 1417*t] = Op[i + 564*t] ? R[B[i + 564*t]] * R[C[i + 564*t]] : R[B[i + 564*t]] + R[C[i + 564*t]];
R[i + 1418*t] = Op[i + 565*t] ? R[B[i + 565*t]] * R[C[i + 565*t]] : R[B[i + 565*t]] + R[C[i + 565*t]];
R[i + 1419*t] = Op[i + 566*t] ? R[B[i + 566*t]] * R[C[i + 566*t]] : R[B[i + 566*t]] + R[C[i + 566*t]];
R[i + 1420*t] = Op[i + 567*t] ? R[B[i + 567*t]] * R[C[i + 567*t]] : R[B[i + 567*t]] + R[C[i + 567*t]];
R[i + 1421*t] = Op[i + 568*t] ? R[B[i + 568*t]] * R[C[i + 568*t]] : R[B[i + 568*t]] + R[C[i + 568*t]];
R[i + 1422*t] = Op[i + 569*t] ? R[B[i + 569*t]] * R[C[i + 569*t]] : R[B[i + 569*t]] + R[C[i + 569*t]];
R[i + 1423*t] = Op[i + 570*t] ? R[B[i + 570*t]] * R[C[i + 570*t]] : R[B[i + 570*t]] + R[C[i + 570*t]];
R[i + 1424*t] = Op[i + 571*t] ? R[B[i + 571*t]] * R[C[i + 571*t]] : R[B[i + 571*t]] + R[C[i + 571*t]];
R[i + 1425*t] = Op[i + 572*t] ? R[B[i + 572*t]] * R[C[i + 572*t]] : R[B[i + 572*t]] + R[C[i + 572*t]];
R[i + 1426*t] = Op[i + 573*t] ? R[B[i + 573*t]] * R[C[i + 573*t]] : R[B[i + 573*t]] + R[C[i + 573*t]];
R[i + 1427*t] = Op[i + 574*t] ? R[B[i + 574*t]] * R[C[i + 574*t]] : R[B[i + 574*t]] + R[C[i + 574*t]];
R[i + 1428*t] = Op[i + 575*t] ? R[B[i + 575*t]] * R[C[i + 575*t]] : R[B[i + 575*t]] + R[C[i + 575*t]];
R[i + 1429*t] = Op[i + 576*t] ? R[B[i + 576*t]] * R[C[i + 576*t]] : R[B[i + 576*t]] + R[C[i + 576*t]];
R[i + 1430*t] = Op[i + 577*t] ? R[B[i + 577*t]] * R[C[i + 577*t]] : R[B[i + 577*t]] + R[C[i + 577*t]];
R[i + 1431*t] = Op[i + 578*t] ? R[B[i + 578*t]] * R[C[i + 578*t]] : R[B[i + 578*t]] + R[C[i + 578*t]];
R[i + 1432*t] = Op[i + 579*t] ? R[B[i + 579*t]] * R[C[i + 579*t]] : R[B[i + 579*t]] + R[C[i + 579*t]];
R[i + 1433*t] = Op[i + 580*t] ? R[B[i + 580*t]] * R[C[i + 580*t]] : R[B[i + 580*t]] + R[C[i + 580*t]];
R[i + 1434*t] = Op[i + 581*t] ? R[B[i + 581*t]] * R[C[i + 581*t]] : R[B[i + 581*t]] + R[C[i + 581*t]];
R[i + 1435*t] = Op[i + 582*t] ? R[B[i + 582*t]] * R[C[i + 582*t]] : R[B[i + 582*t]] + R[C[i + 582*t]];
R[i + 1436*t] = Op[i + 583*t] ? R[B[i + 583*t]] * R[C[i + 583*t]] : R[B[i + 583*t]] + R[C[i + 583*t]];
R[i + 1437*t] = Op[i + 584*t] ? R[B[i + 584*t]] * R[C[i + 584*t]] : R[B[i + 584*t]] + R[C[i + 584*t]];
R[i + 1438*t] = Op[i + 585*t] ? R[B[i + 585*t]] * R[C[i + 585*t]] : R[B[i + 585*t]] + R[C[i + 585*t]];
R[i + 1439*t] = Op[i + 586*t] ? R[B[i + 586*t]] * R[C[i + 586*t]] : R[B[i + 586*t]] + R[C[i + 586*t]];
R[i + 1440*t] = Op[i + 587*t] ? R[B[i + 587*t]] * R[C[i + 587*t]] : R[B[i + 587*t]] + R[C[i + 587*t]];
R[i + 1441*t] = Op[i + 588*t] ? R[B[i + 588*t]] * R[C[i + 588*t]] : R[B[i + 588*t]] + R[C[i + 588*t]];
R[i + 1442*t] = Op[i + 589*t] ? R[B[i + 589*t]] * R[C[i + 589*t]] : R[B[i + 589*t]] + R[C[i + 589*t]];
R[i + 1443*t] = Op[i + 590*t] ? R[B[i + 590*t]] * R[C[i + 590*t]] : R[B[i + 590*t]] + R[C[i + 590*t]];
R[i + 1444*t] = Op[i + 591*t] ? R[B[i + 591*t]] * R[C[i + 591*t]] : R[B[i + 591*t]] + R[C[i + 591*t]];
R[i + 1445*t] = Op[i + 592*t] ? R[B[i + 592*t]] * R[C[i + 592*t]] : R[B[i + 592*t]] + R[C[i + 592*t]];
R[i + 1446*t] = Op[i + 593*t] ? R[B[i + 593*t]] * R[C[i + 593*t]] : R[B[i + 593*t]] + R[C[i + 593*t]];
R[i + 1447*t] = Op[i + 594*t] ? R[B[i + 594*t]] * R[C[i + 594*t]] : R[B[i + 594*t]] + R[C[i + 594*t]];
R[i + 1448*t] = Op[i + 595*t] ? R[B[i + 595*t]] * R[C[i + 595*t]] : R[B[i + 595*t]] + R[C[i + 595*t]];
R[i + 1449*t] = Op[i + 596*t] ? R[B[i + 596*t]] * R[C[i + 596*t]] : R[B[i + 596*t]] + R[C[i + 596*t]];
R[i + 1450*t] = Op[i + 597*t] ? R[B[i + 597*t]] * R[C[i + 597*t]] : R[B[i + 597*t]] + R[C[i + 597*t]];
R[i + 1451*t] = Op[i + 598*t] ? R[B[i + 598*t]] * R[C[i + 598*t]] : R[B[i + 598*t]] + R[C[i + 598*t]];
R[i + 1452*t] = Op[i + 599*t] ? R[B[i + 599*t]] * R[C[i + 599*t]] : R[B[i + 599*t]] + R[C[i + 599*t]];
R[i + 1453*t] = Op[i + 600*t] ? R[B[i + 600*t]] * R[C[i + 600*t]] : R[B[i + 600*t]] + R[C[i + 600*t]];
R[i + 1454*t] = Op[i + 601*t] ? R[B[i + 601*t]] * R[C[i + 601*t]] : R[B[i + 601*t]] + R[C[i + 601*t]];
R[i + 1455*t] = Op[i + 602*t] ? R[B[i + 602*t]] * R[C[i + 602*t]] : R[B[i + 602*t]] + R[C[i + 602*t]];
R[i + 1456*t] = Op[i + 603*t] ? R[B[i + 603*t]] * R[C[i + 603*t]] : R[B[i + 603*t]] + R[C[i + 603*t]];
R[i + 1457*t] = Op[i + 604*t] ? R[B[i + 604*t]] * R[C[i + 604*t]] : R[B[i + 604*t]] + R[C[i + 604*t]];
R[i + 1458*t] = Op[i + 605*t] ? R[B[i + 605*t]] * R[C[i + 605*t]] : R[B[i + 605*t]] + R[C[i + 605*t]];
R[i + 1459*t] = Op[i + 606*t] ? R[B[i + 606*t]] * R[C[i + 606*t]] : R[B[i + 606*t]] + R[C[i + 606*t]];
R[i + 1460*t] = Op[i + 607*t] ? R[B[i + 607*t]] * R[C[i + 607*t]] : R[B[i + 607*t]] + R[C[i + 607*t]];
R[i + 1461*t] = Op[i + 608*t] ? R[B[i + 608*t]] * R[C[i + 608*t]] : R[B[i + 608*t]] + R[C[i + 608*t]];
R[i + 1462*t] = Op[i + 609*t] ? R[B[i + 609*t]] * R[C[i + 609*t]] : R[B[i + 609*t]] + R[C[i + 609*t]];
R[i + 1463*t] = Op[i + 610*t] ? R[B[i + 610*t]] * R[C[i + 610*t]] : R[B[i + 610*t]] + R[C[i + 610*t]];
R[i + 1464*t] = Op[i + 611*t] ? R[B[i + 611*t]] * R[C[i + 611*t]] : R[B[i + 611*t]] + R[C[i + 611*t]];
R[i + 1465*t] = Op[i + 612*t] ? R[B[i + 612*t]] * R[C[i + 612*t]] : R[B[i + 612*t]] + R[C[i + 612*t]];
R[i + 1466*t] = Op[i + 613*t] ? R[B[i + 613*t]] * R[C[i + 613*t]] : R[B[i + 613*t]] + R[C[i + 613*t]];
R[i + 1467*t] = Op[i + 614*t] ? R[B[i + 614*t]] * R[C[i + 614*t]] : R[B[i + 614*t]] + R[C[i + 614*t]];
R[i + 1468*t] = Op[i + 615*t] ? R[B[i + 615*t]] * R[C[i + 615*t]] : R[B[i + 615*t]] + R[C[i + 615*t]];
R[i + 1469*t] = Op[i + 616*t] ? R[B[i + 616*t]] * R[C[i + 616*t]] : R[B[i + 616*t]] + R[C[i + 616*t]];
R[i + 1470*t] = Op[i + 617*t] ? R[B[i + 617*t]] * R[C[i + 617*t]] : R[B[i + 617*t]] + R[C[i + 617*t]];
R[i + 1471*t] = Op[i + 618*t] ? R[B[i + 618*t]] * R[C[i + 618*t]] : R[B[i + 618*t]] + R[C[i + 618*t]];
R[i + 1472*t] = Op[i + 619*t] ? R[B[i + 619*t]] * R[C[i + 619*t]] : R[B[i + 619*t]] + R[C[i + 619*t]];
R[i + 1473*t] = Op[i + 620*t] ? R[B[i + 620*t]] * R[C[i + 620*t]] : R[B[i + 620*t]] + R[C[i + 620*t]];
R[i + 1474*t] = Op[i + 621*t] ? R[B[i + 621*t]] * R[C[i + 621*t]] : R[B[i + 621*t]] + R[C[i + 621*t]];
R[i + 1475*t] = Op[i + 622*t] ? R[B[i + 622*t]] * R[C[i + 622*t]] : R[B[i + 622*t]] + R[C[i + 622*t]];
R[i + 1476*t] = Op[i + 623*t] ? R[B[i + 623*t]] * R[C[i + 623*t]] : R[B[i + 623*t]] + R[C[i + 623*t]];
R[i + 1477*t] = Op[i + 624*t] ? R[B[i + 624*t]] * R[C[i + 624*t]] : R[B[i + 624*t]] + R[C[i + 624*t]];
R[i + 1478*t] = Op[i + 625*t] ? R[B[i + 625*t]] * R[C[i + 625*t]] : R[B[i + 625*t]] + R[C[i + 625*t]];
R[i + 1479*t] = Op[i + 626*t] ? R[B[i + 626*t]] * R[C[i + 626*t]] : R[B[i + 626*t]] + R[C[i + 626*t]];
R[i + 1480*t] = Op[i + 627*t] ? R[B[i + 627*t]] * R[C[i + 627*t]] : R[B[i + 627*t]] + R[C[i + 627*t]];
R[i + 1481*t] = Op[i + 628*t] ? R[B[i + 628*t]] * R[C[i + 628*t]] : R[B[i + 628*t]] + R[C[i + 628*t]];
R[i + 1482*t] = Op[i + 629*t] ? R[B[i + 629*t]] * R[C[i + 629*t]] : R[B[i + 629*t]] + R[C[i + 629*t]];
R[i + 1483*t] = Op[i + 630*t] ? R[B[i + 630*t]] * R[C[i + 630*t]] : R[B[i + 630*t]] + R[C[i + 630*t]];
R[i + 1484*t] = Op[i + 631*t] ? R[B[i + 631*t]] * R[C[i + 631*t]] : R[B[i + 631*t]] + R[C[i + 631*t]];
R[i + 1485*t] = Op[i + 632*t] ? R[B[i + 632*t]] * R[C[i + 632*t]] : R[B[i + 632*t]] + R[C[i + 632*t]];
R[i + 1486*t] = Op[i + 633*t] ? R[B[i + 633*t]] * R[C[i + 633*t]] : R[B[i + 633*t]] + R[C[i + 633*t]];
R[i + 1487*t] = Op[i + 634*t] ? R[B[i + 634*t]] * R[C[i + 634*t]] : R[B[i + 634*t]] + R[C[i + 634*t]];
R[i + 1488*t] = Op[i + 635*t] ? R[B[i + 635*t]] * R[C[i + 635*t]] : R[B[i + 635*t]] + R[C[i + 635*t]];
R[i + 1489*t] = Op[i + 636*t] ? R[B[i + 636*t]] * R[C[i + 636*t]] : R[B[i + 636*t]] + R[C[i + 636*t]];
R[i + 1490*t] = Op[i + 637*t] ? R[B[i + 637*t]] * R[C[i + 637*t]] : R[B[i + 637*t]] + R[C[i + 637*t]];
R[i + 1491*t] = Op[i + 638*t] ? R[B[i + 638*t]] * R[C[i + 638*t]] : R[B[i + 638*t]] + R[C[i + 638*t]];
R[i + 1492*t] = Op[i + 639*t] ? R[B[i + 639*t]] * R[C[i + 639*t]] : R[B[i + 639*t]] + R[C[i + 639*t]];
R[i + 1493*t] = Op[i + 640*t] ? R[B[i + 640*t]] * R[C[i + 640*t]] : R[B[i + 640*t]] + R[C[i + 640*t]];
R[i + 1494*t] = Op[i + 641*t] ? R[B[i + 641*t]] * R[C[i + 641*t]] : R[B[i + 641*t]] + R[C[i + 641*t]];
R[i + 1495*t] = Op[i + 642*t] ? R[B[i + 642*t]] * R[C[i + 642*t]] : R[B[i + 642*t]] + R[C[i + 642*t]];
R[i + 1496*t] = Op[i + 643*t] ? R[B[i + 643*t]] * R[C[i + 643*t]] : R[B[i + 643*t]] + R[C[i + 643*t]];
R[i + 1497*t] = Op[i + 644*t] ? R[B[i + 644*t]] * R[C[i + 644*t]] : R[B[i + 644*t]] + R[C[i + 644*t]];
R[i + 1498*t] = Op[i + 645*t] ? R[B[i + 645*t]] * R[C[i + 645*t]] : R[B[i + 645*t]] + R[C[i + 645*t]];
R[i + 1499*t] = Op[i + 646*t] ? R[B[i + 646*t]] * R[C[i + 646*t]] : R[B[i + 646*t]] + R[C[i + 646*t]];
R[i + 1500*t] = Op[i + 647*t] ? R[B[i + 647*t]] * R[C[i + 647*t]] : R[B[i + 647*t]] + R[C[i + 647*t]];
R[i + 1501*t] = Op[i + 648*t] ? R[B[i + 648*t]] * R[C[i + 648*t]] : R[B[i + 648*t]] + R[C[i + 648*t]];
R[i + 1502*t] = Op[i + 649*t] ? R[B[i + 649*t]] * R[C[i + 649*t]] : R[B[i + 649*t]] + R[C[i + 649*t]];
R[i + 1503*t] = Op[i + 650*t] ? R[B[i + 650*t]] * R[C[i + 650*t]] : R[B[i + 650*t]] + R[C[i + 650*t]];
R[i + 1504*t] = Op[i + 651*t] ? R[B[i + 651*t]] * R[C[i + 651*t]] : R[B[i + 651*t]] + R[C[i + 651*t]];
R[i + 1505*t] = Op[i + 652*t] ? R[B[i + 652*t]] * R[C[i + 652*t]] : R[B[i + 652*t]] + R[C[i + 652*t]];
R[i + 1506*t] = Op[i + 653*t] ? R[B[i + 653*t]] * R[C[i + 653*t]] : R[B[i + 653*t]] + R[C[i + 653*t]];
R[i + 1507*t] = Op[i + 654*t] ? R[B[i + 654*t]] * R[C[i + 654*t]] : R[B[i + 654*t]] + R[C[i + 654*t]];
R[i + 1508*t] = Op[i + 655*t] ? R[B[i + 655*t]] * R[C[i + 655*t]] : R[B[i + 655*t]] + R[C[i + 655*t]];
R[i + 1509*t] = Op[i + 656*t] ? R[B[i + 656*t]] * R[C[i + 656*t]] : R[B[i + 656*t]] + R[C[i + 656*t]];
R[i + 1510*t] = Op[i + 657*t] ? R[B[i + 657*t]] * R[C[i + 657*t]] : R[B[i + 657*t]] + R[C[i + 657*t]];
R[i + 1511*t] = Op[i + 658*t] ? R[B[i + 658*t]] * R[C[i + 658*t]] : R[B[i + 658*t]] + R[C[i + 658*t]];
R[i + 1512*t] = Op[i + 659*t] ? R[B[i + 659*t]] * R[C[i + 659*t]] : R[B[i + 659*t]] + R[C[i + 659*t]];
R[i + 1513*t] = Op[i + 660*t] ? R[B[i + 660*t]] * R[C[i + 660*t]] : R[B[i + 660*t]] + R[C[i + 660*t]];
R[i + 1514*t] = Op[i + 661*t] ? R[B[i + 661*t]] * R[C[i + 661*t]] : R[B[i + 661*t]] + R[C[i + 661*t]];
R[i + 1515*t] = Op[i + 662*t] ? R[B[i + 662*t]] * R[C[i + 662*t]] : R[B[i + 662*t]] + R[C[i + 662*t]];
R[i + 1516*t] = Op[i + 663*t] ? R[B[i + 663*t]] * R[C[i + 663*t]] : R[B[i + 663*t]] + R[C[i + 663*t]];
R[i + 1517*t] = Op[i + 664*t] ? R[B[i + 664*t]] * R[C[i + 664*t]] : R[B[i + 664*t]] + R[C[i + 664*t]];
R[i + 1518*t] = Op[i + 665*t] ? R[B[i + 665*t]] * R[C[i + 665*t]] : R[B[i + 665*t]] + R[C[i + 665*t]];
R[i + 1519*t] = Op[i + 666*t] ? R[B[i + 666*t]] * R[C[i + 666*t]] : R[B[i + 666*t]] + R[C[i + 666*t]];
R[i + 1520*t] = Op[i + 667*t] ? R[B[i + 667*t]] * R[C[i + 667*t]] : R[B[i + 667*t]] + R[C[i + 667*t]];
R[i + 1521*t] = Op[i + 668*t] ? R[B[i + 668*t]] * R[C[i + 668*t]] : R[B[i + 668*t]] + R[C[i + 668*t]];
R[i + 1522*t] = Op[i + 669*t] ? R[B[i + 669*t]] * R[C[i + 669*t]] : R[B[i + 669*t]] + R[C[i + 669*t]];
__syncthreads();
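// Level boundary (see the comment at the first __syncthreads() above).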
R[i + 1523*t] = Op[i + 670*t] ? R[B[i + 670*t]] * R[C[i + 670*t]] : R[B[i + 670*t]] + R[C[i + 670*t]];
R[i + 1524*t] = Op[i + 671*t] ? R[B[i + 671*t]] * R[C[i + 671*t]] : R[B[i + 671*t]] + R[C[i + 671*t]];
R[i + 1525*t] = Op[i + 672*t] ? R[B[i + 672*t]] * R[C[i + 672*t]] : R[B[i + 672*t]] + R[C[i + 672*t]];
R[i + 1526*t] = Op[i + 673*t] ? R[B[i + 673*t]] * R[C[i + 673*t]] : R[B[i + 673*t]] + R[C[i + 673*t]];
R[i + 1527*t] = Op[i + 674*t] ? R[B[i + 674*t]] * R[C[i + 674*t]] : R[B[i + 674*t]] + R[C[i + 674*t]];
R[i + 1528*t] = Op[i + 675*t] ? R[B[i + 675*t]] * R[C[i + 675*t]] : R[B[i + 675*t]] + R[C[i + 675*t]];
R[i + 1529*t] = Op[i + 676*t] ? R[B[i + 676*t]] * R[C[i + 676*t]] : R[B[i + 676*t]] + R[C[i + 676*t]];
R[i + 1530*t] = Op[i + 677*t] ? R[B[i + 677*t]] * R[C[i + 677*t]] : R[B[i + 677*t]] + R[C[i + 677*t]];
R[i + 1531*t] = Op[i + 678*t] ? R[B[i + 678*t]] * R[C[i + 678*t]] : R[B[i + 678*t]] + R[C[i + 678*t]];
R[i + 1532*t] = Op[i + 679*t] ? R[B[i + 679*t]] * R[C[i + 679*t]] : R[B[i + 679*t]] + R[C[i + 679*t]];
R[i + 1533*t] = Op[i + 680*t] ? R[B[i + 680*t]] * R[C[i + 680*t]] : R[B[i + 680*t]] + R[C[i + 680*t]];
R[i + 1534*t] = Op[i + 681*t] ? R[B[i + 681*t]] * R[C[i + 681*t]] : R[B[i + 681*t]] + R[C[i + 681*t]];
R[i + 1535*t] = Op[i + 682*t] ? R[B[i + 682*t]] * R[C[i + 682*t]] : R[B[i + 682*t]] + R[C[i + 682*t]];
R[i + 1536*t] = Op[i + 683*t] ? R[B[i + 683*t]] * R[C[i + 683*t]] : R[B[i + 683*t]] + R[C[i + 683*t]];
R[i + 1537*t] = Op[i + 684*t] ? R[B[i + 684*t]] * R[C[i + 684*t]] : R[B[i + 684*t]] + R[C[i + 684*t]];
R[i + 1538*t] = Op[i + 685*t] ? R[B[i + 685*t]] * R[C[i + 685*t]] : R[B[i + 685*t]] + R[C[i + 685*t]];
R[i + 1539*t] = Op[i + 686*t] ? R[B[i + 686*t]] * R[C[i + 686*t]] : R[B[i + 686*t]] + R[C[i + 686*t]];
R[i + 1540*t] = Op[i + 687*t] ? R[B[i + 687*t]] * R[C[i + 687*t]] : R[B[i + 687*t]] + R[C[i + 687*t]];
R[i + 1541*t] = Op[i + 688*t] ? R[B[i + 688*t]] * R[C[i + 688*t]] : R[B[i + 688*t]] + R[C[i + 688*t]];
R[i + 1542*t] = Op[i + 689*t] ? R[B[i + 689*t]] * R[C[i + 689*t]] : R[B[i + 689*t]] + R[C[i + 689*t]];
R[i + 1543*t] = Op[i + 690*t] ? R[B[i + 690*t]] * R[C[i + 690*t]] : R[B[i + 690*t]] + R[C[i + 690*t]];
R[i + 1544*t] = Op[i + 691*t] ? R[B[i + 691*t]] * R[C[i + 691*t]] : R[B[i + 691*t]] + R[C[i + 691*t]];
R[i + 1545*t] = Op[i + 692*t] ? R[B[i + 692*t]] * R[C[i + 692*t]] : R[B[i + 692*t]] + R[C[i + 692*t]];
R[i + 1546*t] = Op[i + 693*t] ? R[B[i + 693*t]] * R[C[i + 693*t]] : R[B[i + 693*t]] + R[C[i + 693*t]];
R[i + 1547*t] = Op[i + 694*t] ? R[B[i + 694*t]] * R[C[i + 694*t]] : R[B[i + 694*t]] + R[C[i + 694*t]];
R[i + 1548*t] = Op[i + 695*t] ? R[B[i + 695*t]] * R[C[i + 695*t]] : R[B[i + 695*t]] + R[C[i + 695*t]];
R[i + 1549*t] = Op[i + 696*t] ? R[B[i + 696*t]] * R[C[i + 696*t]] : R[B[i + 696*t]] + R[C[i + 696*t]];
R[i + 1550*t] = Op[i + 697*t] ? R[B[i + 697*t]] * R[C[i + 697*t]] : R[B[i + 697*t]] + R[C[i + 697*t]];
R[i + 1551*t] = Op[i + 698*t] ? R[B[i + 698*t]] * R[C[i + 698*t]] : R[B[i + 698*t]] + R[C[i + 698*t]];
R[i + 1552*t] = Op[i + 699*t] ? R[B[i + 699*t]] * R[C[i + 699*t]] : R[B[i + 699*t]] + R[C[i + 699*t]];
R[i + 1553*t] = Op[i + 700*t] ? R[B[i + 700*t]] * R[C[i + 700*t]] : R[B[i + 700*t]] + R[C[i + 700*t]];
R[i + 1554*t] = Op[i + 701*t] ? R[B[i + 701*t]] * R[C[i + 701*t]] : R[B[i + 701*t]] + R[C[i + 701*t]];
R[i + 1555*t] = Op[i + 702*t] ? R[B[i + 702*t]] * R[C[i + 702*t]] : R[B[i + 702*t]] + R[C[i + 702*t]];
R[i + 1556*t] = Op[i + 703*t] ? R[B[i + 703*t]] * R[C[i + 703*t]] : R[B[i + 703*t]] + R[C[i + 703*t]];
R[i + 1557*t] = Op[i + 704*t] ? R[B[i + 704*t]] * R[C[i + 704*t]] : R[B[i + 704*t]] + R[C[i + 704*t]];
R[i + 1558*t] = Op[i + 705*t] ? R[B[i + 705*t]] * R[C[i + 705*t]] : R[B[i + 705*t]] + R[C[i + 705*t]];
R[i + 1559*t] = Op[i + 706*t] ? R[B[i + 706*t]] * R[C[i + 706*t]] : R[B[i + 706*t]] + R[C[i + 706*t]];
R[i + 1560*t] = Op[i + 707*t] ? R[B[i + 707*t]] * R[C[i + 707*t]] : R[B[i + 707*t]] + R[C[i + 707*t]];
R[i + 1561*t] = Op[i + 708*t] ? R[B[i + 708*t]] * R[C[i + 708*t]] : R[B[i + 708*t]] + R[C[i + 708*t]];
R[i + 1562*t] = Op[i + 709*t] ? R[B[i + 709*t]] * R[C[i + 709*t]] : R[B[i + 709*t]] + R[C[i + 709*t]];
R[i + 1563*t] = Op[i + 710*t] ? R[B[i + 710*t]] * R[C[i + 710*t]] : R[B[i + 710*t]] + R[C[i + 710*t]];
R[i + 1564*t] = Op[i + 711*t] ? R[B[i + 711*t]] * R[C[i + 711*t]] : R[B[i + 711*t]] + R[C[i + 711*t]];
R[i + 1565*t] = Op[i + 712*t] ? R[B[i + 712*t]] * R[C[i + 712*t]] : R[B[i + 712*t]] + R[C[i + 712*t]];
R[i + 1566*t] = Op[i + 713*t] ? R[B[i + 713*t]] * R[C[i + 713*t]] : R[B[i + 713*t]] + R[C[i + 713*t]];
R[i + 1567*t] = Op[i + 714*t] ? R[B[i + 714*t]] * R[C[i + 714*t]] : R[B[i + 714*t]] + R[C[i + 714*t]];
R[i + 1568*t] = Op[i + 715*t] ? R[B[i + 715*t]] * R[C[i + 715*t]] : R[B[i + 715*t]] + R[C[i + 715*t]];
R[i + 1569*t] = Op[i + 716*t] ? R[B[i + 716*t]] * R[C[i + 716*t]] : R[B[i + 716*t]] + R[C[i + 716*t]];
R[i + 1570*t] = Op[i + 717*t] ? R[B[i + 717*t]] * R[C[i + 717*t]] : R[B[i + 717*t]] + R[C[i + 717*t]];
R[i + 1571*t] = Op[i + 718*t] ? R[B[i + 718*t]] * R[C[i + 718*t]] : R[B[i + 718*t]] + R[C[i + 718*t]];
R[i + 1572*t] = Op[i + 719*t] ? R[B[i + 719*t]] * R[C[i + 719*t]] : R[B[i + 719*t]] + R[C[i + 719*t]];
R[i + 1573*t] = Op[i + 720*t] ? R[B[i + 720*t]] * R[C[i + 720*t]] : R[B[i + 720*t]] + R[C[i + 720*t]];
R[i + 1574*t] = Op[i + 721*t] ? R[B[i + 721*t]] * R[C[i + 721*t]] : R[B[i + 721*t]] + R[C[i + 721*t]];
R[i + 1575*t] = Op[i + 722*t] ? R[B[i + 722*t]] * R[C[i + 722*t]] : R[B[i + 722*t]] + R[C[i + 722*t]];
R[i + 1576*t] = Op[i + 723*t] ? R[B[i + 723*t]] * R[C[i + 723*t]] : R[B[i + 723*t]] + R[C[i + 723*t]];
R[i + 1577*t] = Op[i + 724*t] ? R[B[i + 724*t]] * R[C[i + 724*t]] : R[B[i + 724*t]] + R[C[i + 724*t]];
R[i + 1578*t] = Op[i + 725*t] ? R[B[i + 725*t]] * R[C[i + 725*t]] : R[B[i + 725*t]] + R[C[i + 725*t]];
R[i + 1579*t] = Op[i + 726*t] ? R[B[i + 726*t]] * R[C[i + 726*t]] : R[B[i + 726*t]] + R[C[i + 726*t]];
R[i + 1580*t] = Op[i + 727*t] ? R[B[i + 727*t]] * R[C[i + 727*t]] : R[B[i + 727*t]] + R[C[i + 727*t]];
R[i + 1581*t] = Op[i + 728*t] ? R[B[i + 728*t]] * R[C[i + 728*t]] : R[B[i + 728*t]] + R[C[i + 728*t]];
R[i + 1582*t] = Op[i + 729*t] ? R[B[i + 729*t]] * R[C[i + 729*t]] : R[B[i + 729*t]] + R[C[i + 729*t]];
R[i + 1583*t] = Op[i + 730*t] ? R[B[i + 730*t]] * R[C[i + 730*t]] : R[B[i + 730*t]] + R[C[i + 730*t]];
R[i + 1584*t] = Op[i + 731*t] ? R[B[i + 731*t]] * R[C[i + 731*t]] : R[B[i + 731*t]] + R[C[i + 731*t]];
R[i + 1585*t] = Op[i + 732*t] ? R[B[i + 732*t]] * R[C[i + 732*t]] : R[B[i + 732*t]] + R[C[i + 732*t]];
R[i + 1586*t] = Op[i + 733*t] ? R[B[i + 733*t]] * R[C[i + 733*t]] : R[B[i + 733*t]] + R[C[i + 733*t]];
R[i + 1587*t] = Op[i + 734*t] ? R[B[i + 734*t]] * R[C[i + 734*t]] : R[B[i + 734*t]] + R[C[i + 734*t]];
R[i + 1588*t] = Op[i + 735*t] ? R[B[i + 735*t]] * R[C[i + 735*t]] : R[B[i + 735*t]] + R[C[i + 735*t]];
R[i + 1589*t] = Op[i + 736*t] ? R[B[i + 736*t]] * R[C[i + 736*t]] : R[B[i + 736*t]] + R[C[i + 736*t]];
R[i + 1590*t] = Op[i + 737*t] ? R[B[i + 737*t]] * R[C[i + 737*t]] : R[B[i + 737*t]] + R[C[i + 737*t]];
R[i + 1591*t] = Op[i + 738*t] ? R[B[i + 738*t]] * R[C[i + 738*t]] : R[B[i + 738*t]] + R[C[i + 738*t]];
R[i + 1592*t] = Op[i + 739*t] ? R[B[i + 739*t]] * R[C[i + 739*t]] : R[B[i + 739*t]] + R[C[i + 739*t]];
R[i + 1593*t] = Op[i + 740*t] ? R[B[i + 740*t]] * R[C[i + 740*t]] : R[B[i + 740*t]] + R[C[i + 740*t]];
R[i + 1594*t] = Op[i + 741*t] ? R[B[i + 741*t]] * R[C[i + 741*t]] : R[B[i + 741*t]] + R[C[i + 741*t]];
R[i + 1595*t] = Op[i + 742*t] ? R[B[i + 742*t]] * R[C[i + 742*t]] : R[B[i + 742*t]] + R[C[i + 742*t]];
R[i + 1596*t] = Op[i + 743*t] ? R[B[i + 743*t]] * R[C[i + 743*t]] : R[B[i + 743*t]] + R[C[i + 743*t]];
R[i + 1597*t] = Op[i + 744*t] ? R[B[i + 744*t]] * R[C[i + 744*t]] : R[B[i + 744*t]] + R[C[i + 744*t]];
R[i + 1598*t] = Op[i + 745*t] ? R[B[i + 745*t]] * R[C[i + 745*t]] : R[B[i + 745*t]] + R[C[i + 745*t]];
R[i + 1599*t] = Op[i + 746*t] ? R[B[i + 746*t]] * R[C[i + 746*t]] : R[B[i + 746*t]] + R[C[i + 746*t]];
R[i + 1600*t] = Op[i + 747*t] ? R[B[i + 747*t]] * R[C[i + 747*t]] : R[B[i + 747*t]] + R[C[i + 747*t]];
R[i + 1601*t] = Op[i + 748*t] ? R[B[i + 748*t]] * R[C[i + 748*t]] : R[B[i + 748*t]] + R[C[i + 748*t]];
R[i + 1602*t] = Op[i + 749*t] ? R[B[i + 749*t]] * R[C[i + 749*t]] : R[B[i + 749*t]] + R[C[i + 749*t]];
__syncthreads();
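// Level boundary (see the comment at the first __syncthreads() above).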
R[i + 1603*t] = Op[i + 750*t] ? R[B[i + 750*t]] * R[C[i + 750*t]] : R[B[i + 750*t]] + R[C[i + 750*t]];
R[i + 1604*t] = Op[i + 751*t] ? R[B[i + 751*t]] * R[C[i + 751*t]] : R[B[i + 751*t]] + R[C[i + 751*t]];
R[i + 1605*t] = Op[i + 752*t] ? R[B[i + 752*t]] * R[C[i + 752*t]] : R[B[i + 752*t]] + R[C[i + 752*t]];
R[i + 1606*t] = Op[i + 753*t] ? R[B[i + 753*t]] * R[C[i + 753*t]] : R[B[i + 753*t]] + R[C[i + 753*t]];
R[i + 1607*t] = Op[i + 754*t] ? R[B[i + 754*t]] * R[C[i + 754*t]] : R[B[i + 754*t]] + R[C[i + 754*t]];
R[i + 1608*t] = Op[i + 755*t] ? R[B[i + 755*t]] * R[C[i + 755*t]] : R[B[i + 755*t]] + R[C[i + 755*t]];
R[i + 1609*t] = Op[i + 756*t] ? R[B[i + 756*t]] * R[C[i + 756*t]] : R[B[i + 756*t]] + R[C[i + 756*t]];
R[i + 1610*t] = Op[i + 757*t] ? R[B[i + 757*t]] * R[C[i + 757*t]] : R[B[i + 757*t]] + R[C[i + 757*t]];
R[i + 1611*t] = Op[i + 758*t] ? R[B[i + 758*t]] * R[C[i + 758*t]] : R[B[i + 758*t]] + R[C[i + 758*t]];
R[i + 1612*t] = Op[i + 759*t] ? R[B[i + 759*t]] * R[C[i + 759*t]] : R[B[i + 759*t]] + R[C[i + 759*t]];
R[i + 1613*t] = Op[i + 760*t] ? R[B[i + 760*t]] * R[C[i + 760*t]] : R[B[i + 760*t]] + R[C[i + 760*t]];
R[i + 1614*t] = Op[i + 761*t] ? R[B[i + 761*t]] * R[C[i + 761*t]] : R[B[i + 761*t]] + R[C[i + 761*t]];
R[i + 1615*t] = Op[i + 762*t] ? R[B[i + 762*t]] * R[C[i + 762*t]] : R[B[i + 762*t]] + R[C[i + 762*t]];
R[i + 1616*t] = Op[i + 763*t] ? R[B[i + 763*t]] * R[C[i + 763*t]] : R[B[i + 763*t]] + R[C[i + 763*t]];
R[i + 1617*t] = Op[i + 764*t] ? R[B[i + 764*t]] * R[C[i + 764*t]] : R[B[i + 764*t]] + R[C[i + 764*t]];
R[i + 1618*t] = Op[i + 765*t] ? R[B[i + 765*t]] * R[C[i + 765*t]] : R[B[i + 765*t]] + R[C[i + 765*t]];
R[i + 1619*t] = Op[i + 766*t] ? R[B[i + 766*t]] * R[C[i + 766*t]] : R[B[i + 766*t]] + R[C[i + 766*t]];
R[i + 1620*t] = Op[i + 767*t] ? R[B[i + 767*t]] * R[C[i + 767*t]] : R[B[i + 767*t]] + R[C[i + 767*t]];
R[i + 1621*t] = Op[i + 768*t] ? R[B[i + 768*t]] * R[C[i + 768*t]] : R[B[i + 768*t]] + R[C[i + 768*t]];
R[i + 1622*t] = Op[i + 769*t] ? R[B[i + 769*t]] * R[C[i + 769*t]] : R[B[i + 769*t]] + R[C[i + 769*t]];
R[i + 1623*t] = Op[i + 770*t] ? R[B[i + 770*t]] * R[C[i + 770*t]] : R[B[i + 770*t]] + R[C[i + 770*t]];
R[i + 1624*t] = Op[i + 771*t] ? R[B[i + 771*t]] * R[C[i + 771*t]] : R[B[i + 771*t]] + R[C[i + 771*t]];
R[i + 1625*t] = Op[i + 772*t] ? R[B[i + 772*t]] * R[C[i + 772*t]] : R[B[i + 772*t]] + R[C[i + 772*t]];
R[i + 1626*t] = Op[i + 773*t] ? R[B[i + 773*t]] * R[C[i + 773*t]] : R[B[i + 773*t]] + R[C[i + 773*t]];
R[i + 1627*t] = Op[i + 774*t] ? R[B[i + 774*t]] * R[C[i + 774*t]] : R[B[i + 774*t]] + R[C[i + 774*t]];
R[i + 1628*t] = Op[i + 775*t] ? R[B[i + 775*t]] * R[C[i + 775*t]] : R[B[i + 775*t]] + R[C[i + 775*t]];
R[i + 1629*t] = Op[i + 776*t] ? R[B[i + 776*t]] * R[C[i + 776*t]] : R[B[i + 776*t]] + R[C[i + 776*t]];
R[i + 1630*t] = Op[i + 777*t] ? R[B[i + 777*t]] * R[C[i + 777*t]] : R[B[i + 777*t]] + R[C[i + 777*t]];
R[i + 1631*t] = Op[i + 778*t] ? R[B[i + 778*t]] * R[C[i + 778*t]] : R[B[i + 778*t]] + R[C[i + 778*t]];
R[i + 1632*t] = Op[i + 779*t] ? R[B[i + 779*t]] * R[C[i + 779*t]] : R[B[i + 779*t]] + R[C[i + 779*t]];
R[i + 1633*t] = Op[i + 780*t] ? R[B[i + 780*t]] * R[C[i + 780*t]] : R[B[i + 780*t]] + R[C[i + 780*t]];
R[i + 1634*t] = Op[i + 781*t] ? R[B[i + 781*t]] * R[C[i + 781*t]] : R[B[i + 781*t]] + R[C[i + 781*t]];
R[i + 1635*t] = Op[i + 782*t] ? R[B[i + 782*t]] * R[C[i + 782*t]] : R[B[i + 782*t]] + R[C[i + 782*t]];
R[i + 1636*t] = Op[i + 783*t] ? R[B[i + 783*t]] * R[C[i + 783*t]] : R[B[i + 783*t]] + R[C[i + 783*t]];
R[i + 1637*t] = Op[i + 784*t] ? R[B[i + 784*t]] * R[C[i + 784*t]] : R[B[i + 784*t]] + R[C[i + 784*t]];
R[i + 1638*t] = Op[i + 785*t] ? R[B[i + 785*t]] * R[C[i + 785*t]] : R[B[i + 785*t]] + R[C[i + 785*t]];
R[i + 1639*t] = Op[i + 786*t] ? R[B[i + 786*t]] * R[C[i + 786*t]] : R[B[i + 786*t]] + R[C[i + 786*t]];
R[i + 1640*t] = Op[i + 787*t] ? R[B[i + 787*t]] * R[C[i + 787*t]] : R[B[i + 787*t]] + R[C[i + 787*t]];
R[i + 1641*t] = Op[i + 788*t] ? R[B[i + 788*t]] * R[C[i + 788*t]] : R[B[i + 788*t]] + R[C[i + 788*t]];
R[i + 1642*t] = Op[i + 789*t] ? R[B[i + 789*t]] * R[C[i + 789*t]] : R[B[i + 789*t]] + R[C[i + 789*t]];
R[i + 1643*t] = Op[i + 790*t] ? R[B[i + 790*t]] * R[C[i + 790*t]] : R[B[i + 790*t]] + R[C[i + 790*t]];
R[i + 1644*t] = Op[i + 791*t] ? R[B[i + 791*t]] * R[C[i + 791*t]] : R[B[i + 791*t]] + R[C[i + 791*t]];
R[i + 1645*t] = Op[i + 792*t] ? R[B[i + 792*t]] * R[C[i + 792*t]] : R[B[i + 792*t]] + R[C[i + 792*t]];
R[i + 1646*t] = Op[i + 793*t] ? R[B[i + 793*t]] * R[C[i + 793*t]] : R[B[i + 793*t]] + R[C[i + 793*t]];
R[i + 1647*t] = Op[i + 794*t] ? R[B[i + 794*t]] * R[C[i + 794*t]] : R[B[i + 794*t]] + R[C[i + 794*t]];
R[i + 1648*t] = Op[i + 795*t] ? R[B[i + 795*t]] * R[C[i + 795*t]] : R[B[i + 795*t]] + R[C[i + 795*t]];
R[i + 1649*t] = Op[i + 796*t] ? R[B[i + 796*t]] * R[C[i + 796*t]] : R[B[i + 796*t]] + R[C[i + 796*t]];
R[i + 1650*t] = Op[i + 797*t] ? R[B[i + 797*t]] * R[C[i + 797*t]] : R[B[i + 797*t]] + R[C[i + 797*t]];
R[i + 1651*t] = Op[i + 798*t] ? R[B[i + 798*t]] * R[C[i + 798*t]] : R[B[i + 798*t]] + R[C[i + 798*t]];
R[i + 1652*t] = Op[i + 799*t] ? R[B[i + 799*t]] * R[C[i + 799*t]] : R[B[i + 799*t]] + R[C[i + 799*t]];
R[i + 1653*t] = Op[i + 800*t] ? R[B[i + 800*t]] * R[C[i + 800*t]] : R[B[i + 800*t]] + R[C[i + 800*t]];
R[i + 1654*t] = Op[i + 801*t] ? R[B[i + 801*t]] * R[C[i + 801*t]] : R[B[i + 801*t]] + R[C[i + 801*t]];
R[i + 1655*t] = Op[i + 802*t] ? R[B[i + 802*t]] * R[C[i + 802*t]] : R[B[i + 802*t]] + R[C[i + 802*t]];
R[i + 1656*t] = Op[i + 803*t] ? R[B[i + 803*t]] * R[C[i + 803*t]] : R[B[i + 803*t]] + R[C[i + 803*t]];
R[i + 1657*t] = Op[i + 804*t] ? R[B[i + 804*t]] * R[C[i + 804*t]] : R[B[i + 804*t]] + R[C[i + 804*t]];
R[i + 1658*t] = Op[i + 805*t] ? R[B[i + 805*t]] * R[C[i + 805*t]] : R[B[i + 805*t]] + R[C[i + 805*t]];
R[i + 1659*t] = Op[i + 806*t] ? R[B[i + 806*t]] * R[C[i + 806*t]] : R[B[i + 806*t]] + R[C[i + 806*t]];
R[i + 1660*t] = Op[i + 807*t] ? R[B[i + 807*t]] * R[C[i + 807*t]] : R[B[i + 807*t]] + R[C[i + 807*t]];
R[i + 1661*t] = Op[i + 808*t] ? R[B[i + 808*t]] * R[C[i + 808*t]] : R[B[i + 808*t]] + R[C[i + 808*t]];
R[i + 1662*t] = Op[i + 809*t] ? R[B[i + 809*t]] * R[C[i + 809*t]] : R[B[i + 809*t]] + R[C[i + 809*t]];
R[i + 1663*t] = Op[i + 810*t] ? R[B[i + 810*t]] * R[C[i + 810*t]] : R[B[i + 810*t]] + R[C[i + 810*t]];
R[i + 1664*t] = Op[i + 811*t] ? R[B[i + 811*t]] * R[C[i + 811*t]] : R[B[i + 811*t]] + R[C[i + 811*t]];
R[i + 1665*t] = Op[i + 812*t] ? R[B[i + 812*t]] * R[C[i + 812*t]] : R[B[i + 812*t]] + R[C[i + 812*t]];
R[i + 1666*t] = Op[i + 813*t] ? R[B[i + 813*t]] * R[C[i + 813*t]] : R[B[i + 813*t]] + R[C[i + 813*t]];
R[i + 1667*t] = Op[i + 814*t] ? R[B[i + 814*t]] * R[C[i + 814*t]] : R[B[i + 814*t]] + R[C[i + 814*t]];
R[i + 1668*t] = Op[i + 815*t] ? R[B[i + 815*t]] * R[C[i + 815*t]] : R[B[i + 815*t]] + R[C[i + 815*t]];
R[i + 1669*t] = Op[i + 816*t] ? R[B[i + 816*t]] * R[C[i + 816*t]] : R[B[i + 816*t]] + R[C[i + 816*t]];
R[i + 1670*t] = Op[i + 817*t] ? R[B[i + 817*t]] * R[C[i + 817*t]] : R[B[i + 817*t]] + R[C[i + 817*t]];
R[i + 1671*t] = Op[i + 818*t] ? R[B[i + 818*t]] * R[C[i + 818*t]] : R[B[i + 818*t]] + R[C[i + 818*t]];
R[i + 1672*t] = Op[i + 819*t] ? R[B[i + 819*t]] * R[C[i + 819*t]] : R[B[i + 819*t]] + R[C[i + 819*t]];
R[i + 1673*t] = Op[i + 820*t] ? R[B[i + 820*t]] * R[C[i + 820*t]] : R[B[i + 820*t]] + R[C[i + 820*t]];
R[i + 1674*t] = Op[i + 821*t] ? R[B[i + 821*t]] * R[C[i + 821*t]] : R[B[i + 821*t]] + R[C[i + 821*t]];
R[i + 1675*t] = Op[i + 822*t] ? R[B[i + 822*t]] * R[C[i + 822*t]] : R[B[i + 822*t]] + R[C[i + 822*t]];
R[i + 1676*t] = Op[i + 823*t] ? R[B[i + 823*t]] * R[C[i + 823*t]] : R[B[i + 823*t]] + R[C[i + 823*t]];
R[i + 1677*t] = Op[i + 824*t] ? R[B[i + 824*t]] * R[C[i + 824*t]] : R[B[i + 824*t]] + R[C[i + 824*t]];
R[i + 1678*t] = Op[i + 825*t] ? R[B[i + 825*t]] * R[C[i + 825*t]] : R[B[i + 825*t]] + R[C[i + 825*t]];
R[i + 1679*t] = Op[i + 826*t] ? R[B[i + 826*t]] * R[C[i + 826*t]] : R[B[i + 826*t]] + R[C[i + 826*t]];
R[i + 1680*t] = Op[i + 827*t] ? R[B[i + 827*t]] * R[C[i + 827*t]] : R[B[i + 827*t]] + R[C[i + 827*t]];
R[i + 1681*t] = Op[i + 828*t] ? R[B[i + 828*t]] * R[C[i + 828*t]] : R[B[i + 828*t]] + R[C[i + 828*t]];
R[i + 1682*t] = Op[i + 829*t] ? R[B[i + 829*t]] * R[C[i + 829*t]] : R[B[i + 829*t]] + R[C[i + 829*t]];
R[i + 1683*t] = Op[i + 830*t] ? R[B[i + 830*t]] * R[C[i + 830*t]] : R[B[i + 830*t]] + R[C[i + 830*t]];
R[i + 1684*t] = Op[i + 831*t] ? R[B[i + 831*t]] * R[C[i + 831*t]] : R[B[i + 831*t]] + R[C[i + 831*t]];
R[i + 1685*t] = Op[i + 832*t] ? R[B[i + 832*t]] * R[C[i + 832*t]] : R[B[i + 832*t]] + R[C[i + 832*t]];
R[i + 1686*t] = Op[i + 833*t] ? R[B[i + 833*t]] * R[C[i + 833*t]] : R[B[i + 833*t]] + R[C[i + 833*t]];
R[i + 1687*t] = Op[i + 834*t] ? R[B[i + 834*t]] * R[C[i + 834*t]] : R[B[i + 834*t]] + R[C[i + 834*t]];
R[i + 1688*t] = Op[i + 835*t] ? R[B[i + 835*t]] * R[C[i + 835*t]] : R[B[i + 835*t]] + R[C[i + 835*t]];
R[i + 1689*t] = Op[i + 836*t] ? R[B[i + 836*t]] * R[C[i + 836*t]] : R[B[i + 836*t]] + R[C[i + 836*t]];
R[i + 1690*t] = Op[i + 837*t] ? R[B[i + 837*t]] * R[C[i + 837*t]] : R[B[i + 837*t]] + R[C[i + 837*t]];
R[i + 1691*t] = Op[i + 838*t] ? R[B[i + 838*t]] * R[C[i + 838*t]] : R[B[i + 838*t]] + R[C[i + 838*t]];
R[i + 1692*t] = Op[i + 839*t] ? R[B[i + 839*t]] * R[C[i + 839*t]] : R[B[i + 839*t]] + R[C[i + 839*t]];
R[i + 1693*t] = Op[i + 840*t] ? R[B[i + 840*t]] * R[C[i + 840*t]] : R[B[i + 840*t]] + R[C[i + 840*t]];
R[i + 1694*t] = Op[i + 841*t] ? R[B[i + 841*t]] * R[C[i + 841*t]] : R[B[i + 841*t]] + R[C[i + 841*t]];
R[i + 1695*t] = Op[i + 842*t] ? R[B[i + 842*t]] * R[C[i + 842*t]] : R[B[i + 842*t]] + R[C[i + 842*t]];
R[i + 1696*t] = Op[i + 843*t] ? R[B[i + 843*t]] * R[C[i + 843*t]] : R[B[i + 843*t]] + R[C[i + 843*t]];
R[i + 1697*t] = Op[i + 844*t] ? R[B[i + 844*t]] * R[C[i + 844*t]] : R[B[i + 844*t]] + R[C[i + 844*t]];
R[i + 1698*t] = Op[i + 845*t] ? R[B[i + 845*t]] * R[C[i + 845*t]] : R[B[i + 845*t]] + R[C[i + 845*t]];
R[i + 1699*t] = Op[i + 846*t] ? R[B[i + 846*t]] * R[C[i + 846*t]] : R[B[i + 846*t]] + R[C[i + 846*t]];
R[i + 1700*t] = Op[i + 847*t] ? R[B[i + 847*t]] * R[C[i + 847*t]] : R[B[i + 847*t]] + R[C[i + 847*t]];
R[i + 1701*t] = Op[i + 848*t] ? R[B[i + 848*t]] * R[C[i + 848*t]] : R[B[i + 848*t]] + R[C[i + 848*t]];
R[i + 1702*t] = Op[i + 849*t] ? R[B[i + 849*t]] * R[C[i + 849*t]] : R[B[i + 849*t]] + R[C[i + 849*t]];
R[i + 1703*t] = Op[i + 850*t] ? R[B[i + 850*t]] * R[C[i + 850*t]] : R[B[i + 850*t]] + R[C[i + 850*t]];
R[i + 1704*t] = Op[i + 851*t] ? R[B[i + 851*t]] * R[C[i + 851*t]] : R[B[i + 851*t]] + R[C[i + 851*t]];
R[i + 1705*t] = Op[i + 852*t] ? R[B[i + 852*t]] * R[C[i + 852*t]] : R[B[i + 852*t]] + R[C[i + 852*t]];
R[i + 1706*t] = Op[i + 853*t] ? R[B[i + 853*t]] * R[C[i + 853*t]] : R[B[i + 853*t]] + R[C[i + 853*t]];
R[i + 1707*t] = Op[i + 854*t] ? R[B[i + 854*t]] * R[C[i + 854*t]] : R[B[i + 854*t]] + R[C[i + 854*t]];
R[i + 1708*t] = Op[i + 855*t] ? R[B[i + 855*t]] * R[C[i + 855*t]] : R[B[i + 855*t]] + R[C[i + 855*t]];
R[i + 1709*t] = Op[i + 856*t] ? R[B[i + 856*t]] * R[C[i + 856*t]] : R[B[i + 856*t]] + R[C[i + 856*t]];
__syncthreads();
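    // Each generated statement below evaluates one node of an expression DAG:
    // Op[] selects multiply vs. add, and B[]/C[] hold operand indices into the
    // result buffer R. The __syncthreads() barriers separate dependency levels,
    // so results written in one level are visible before the next level reads them.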
R[i + 1710*t] = Op[i + 857*t] ? R[B[i + 857*t]] * R[C[i + 857*t]] : R[B[i + 857*t]] + R[C[i + 857*t]];
R[i + 1711*t] = Op[i + 858*t] ? R[B[i + 858*t]] * R[C[i + 858*t]] : R[B[i + 858*t]] + R[C[i + 858*t]];
R[i + 1712*t] = Op[i + 859*t] ? R[B[i + 859*t]] * R[C[i + 859*t]] : R[B[i + 859*t]] + R[C[i + 859*t]];
R[i + 1713*t] = Op[i + 860*t] ? R[B[i + 860*t]] * R[C[i + 860*t]] : R[B[i + 860*t]] + R[C[i + 860*t]];
R[i + 1714*t] = Op[i + 861*t] ? R[B[i + 861*t]] * R[C[i + 861*t]] : R[B[i + 861*t]] + R[C[i + 861*t]];
R[i + 1715*t] = Op[i + 862*t] ? R[B[i + 862*t]] * R[C[i + 862*t]] : R[B[i + 862*t]] + R[C[i + 862*t]];
R[i + 1716*t] = Op[i + 863*t] ? R[B[i + 863*t]] * R[C[i + 863*t]] : R[B[i + 863*t]] + R[C[i + 863*t]];
R[i + 1717*t] = Op[i + 864*t] ? R[B[i + 864*t]] * R[C[i + 864*t]] : R[B[i + 864*t]] + R[C[i + 864*t]];
R[i + 1718*t] = Op[i + 865*t] ? R[B[i + 865*t]] * R[C[i + 865*t]] : R[B[i + 865*t]] + R[C[i + 865*t]];
R[i + 1719*t] = Op[i + 866*t] ? R[B[i + 866*t]] * R[C[i + 866*t]] : R[B[i + 866*t]] + R[C[i + 866*t]];
R[i + 1720*t] = Op[i + 867*t] ? R[B[i + 867*t]] * R[C[i + 867*t]] : R[B[i + 867*t]] + R[C[i + 867*t]];
R[i + 1721*t] = Op[i + 868*t] ? R[B[i + 868*t]] * R[C[i + 868*t]] : R[B[i + 868*t]] + R[C[i + 868*t]];
R[i + 1722*t] = Op[i + 869*t] ? R[B[i + 869*t]] * R[C[i + 869*t]] : R[B[i + 869*t]] + R[C[i + 869*t]];
R[i + 1723*t] = Op[i + 870*t] ? R[B[i + 870*t]] * R[C[i + 870*t]] : R[B[i + 870*t]] + R[C[i + 870*t]];
R[i + 1724*t] = Op[i + 871*t] ? R[B[i + 871*t]] * R[C[i + 871*t]] : R[B[i + 871*t]] + R[C[i + 871*t]];
R[i + 1725*t] = Op[i + 872*t] ? R[B[i + 872*t]] * R[C[i + 872*t]] : R[B[i + 872*t]] + R[C[i + 872*t]];
R[i + 1726*t] = Op[i + 873*t] ? R[B[i + 873*t]] * R[C[i + 873*t]] : R[B[i + 873*t]] + R[C[i + 873*t]];
R[i + 1727*t] = Op[i + 874*t] ? R[B[i + 874*t]] * R[C[i + 874*t]] : R[B[i + 874*t]] + R[C[i + 874*t]];
R[i + 1728*t] = Op[i + 875*t] ? R[B[i + 875*t]] * R[C[i + 875*t]] : R[B[i + 875*t]] + R[C[i + 875*t]];
R[i + 1729*t] = Op[i + 876*t] ? R[B[i + 876*t]] * R[C[i + 876*t]] : R[B[i + 876*t]] + R[C[i + 876*t]];
R[i + 1730*t] = Op[i + 877*t] ? R[B[i + 877*t]] * R[C[i + 877*t]] : R[B[i + 877*t]] + R[C[i + 877*t]];
R[i + 1731*t] = Op[i + 878*t] ? R[B[i + 878*t]] * R[C[i + 878*t]] : R[B[i + 878*t]] + R[C[i + 878*t]];
R[i + 1732*t] = Op[i + 879*t] ? R[B[i + 879*t]] * R[C[i + 879*t]] : R[B[i + 879*t]] + R[C[i + 879*t]];
R[i + 1733*t] = Op[i + 880*t] ? R[B[i + 880*t]] * R[C[i + 880*t]] : R[B[i + 880*t]] + R[C[i + 880*t]];
R[i + 1734*t] = Op[i + 881*t] ? R[B[i + 881*t]] * R[C[i + 881*t]] : R[B[i + 881*t]] + R[C[i + 881*t]];
R[i + 1735*t] = Op[i + 882*t] ? R[B[i + 882*t]] * R[C[i + 882*t]] : R[B[i + 882*t]] + R[C[i + 882*t]];
R[i + 1736*t] = Op[i + 883*t] ? R[B[i + 883*t]] * R[C[i + 883*t]] : R[B[i + 883*t]] + R[C[i + 883*t]];
R[i + 1737*t] = Op[i + 884*t] ? R[B[i + 884*t]] * R[C[i + 884*t]] : R[B[i + 884*t]] + R[C[i + 884*t]];
R[i + 1738*t] = Op[i + 885*t] ? R[B[i + 885*t]] * R[C[i + 885*t]] : R[B[i + 885*t]] + R[C[i + 885*t]];
R[i + 1739*t] = Op[i + 886*t] ? R[B[i + 886*t]] * R[C[i + 886*t]] : R[B[i + 886*t]] + R[C[i + 886*t]];
R[i + 1740*t] = Op[i + 887*t] ? R[B[i + 887*t]] * R[C[i + 887*t]] : R[B[i + 887*t]] + R[C[i + 887*t]];
R[i + 1741*t] = Op[i + 888*t] ? R[B[i + 888*t]] * R[C[i + 888*t]] : R[B[i + 888*t]] + R[C[i + 888*t]];
R[i + 1742*t] = Op[i + 889*t] ? R[B[i + 889*t]] * R[C[i + 889*t]] : R[B[i + 889*t]] + R[C[i + 889*t]];
R[i + 1743*t] = Op[i + 890*t] ? R[B[i + 890*t]] * R[C[i + 890*t]] : R[B[i + 890*t]] + R[C[i + 890*t]];
R[i + 1744*t] = Op[i + 891*t] ? R[B[i + 891*t]] * R[C[i + 891*t]] : R[B[i + 891*t]] + R[C[i + 891*t]];
R[i + 1745*t] = Op[i + 892*t] ? R[B[i + 892*t]] * R[C[i + 892*t]] : R[B[i + 892*t]] + R[C[i + 892*t]];
R[i + 1746*t] = Op[i + 893*t] ? R[B[i + 893*t]] * R[C[i + 893*t]] : R[B[i + 893*t]] + R[C[i + 893*t]];
R[i + 1747*t] = Op[i + 894*t] ? R[B[i + 894*t]] * R[C[i + 894*t]] : R[B[i + 894*t]] + R[C[i + 894*t]];
R[i + 1748*t] = Op[i + 895*t] ? R[B[i + 895*t]] * R[C[i + 895*t]] : R[B[i + 895*t]] + R[C[i + 895*t]];
R[i + 1749*t] = Op[i + 896*t] ? R[B[i + 896*t]] * R[C[i + 896*t]] : R[B[i + 896*t]] + R[C[i + 896*t]];
R[i + 1750*t] = Op[i + 897*t] ? R[B[i + 897*t]] * R[C[i + 897*t]] : R[B[i + 897*t]] + R[C[i + 897*t]];
R[i + 1751*t] = Op[i + 898*t] ? R[B[i + 898*t]] * R[C[i + 898*t]] : R[B[i + 898*t]] + R[C[i + 898*t]];
R[i + 1752*t] = Op[i + 899*t] ? R[B[i + 899*t]] * R[C[i + 899*t]] : R[B[i + 899*t]] + R[C[i + 899*t]];
R[i + 1753*t] = Op[i + 900*t] ? R[B[i + 900*t]] * R[C[i + 900*t]] : R[B[i + 900*t]] + R[C[i + 900*t]];
R[i + 1754*t] = Op[i + 901*t] ? R[B[i + 901*t]] * R[C[i + 901*t]] : R[B[i + 901*t]] + R[C[i + 901*t]];
R[i + 1755*t] = Op[i + 902*t] ? R[B[i + 902*t]] * R[C[i + 902*t]] : R[B[i + 902*t]] + R[C[i + 902*t]];
R[i + 1756*t] = Op[i + 903*t] ? R[B[i + 903*t]] * R[C[i + 903*t]] : R[B[i + 903*t]] + R[C[i + 903*t]];
R[i + 1757*t] = Op[i + 904*t] ? R[B[i + 904*t]] * R[C[i + 904*t]] : R[B[i + 904*t]] + R[C[i + 904*t]];
R[i + 1758*t] = Op[i + 905*t] ? R[B[i + 905*t]] * R[C[i + 905*t]] : R[B[i + 905*t]] + R[C[i + 905*t]];
R[i + 1759*t] = Op[i + 906*t] ? R[B[i + 906*t]] * R[C[i + 906*t]] : R[B[i + 906*t]] + R[C[i + 906*t]];
R[i + 1760*t] = Op[i + 907*t] ? R[B[i + 907*t]] * R[C[i + 907*t]] : R[B[i + 907*t]] + R[C[i + 907*t]];
R[i + 1761*t] = Op[i + 908*t] ? R[B[i + 908*t]] * R[C[i + 908*t]] : R[B[i + 908*t]] + R[C[i + 908*t]];
R[i + 1762*t] = Op[i + 909*t] ? R[B[i + 909*t]] * R[C[i + 909*t]] : R[B[i + 909*t]] + R[C[i + 909*t]];
R[i + 1763*t] = Op[i + 910*t] ? R[B[i + 910*t]] * R[C[i + 910*t]] : R[B[i + 910*t]] + R[C[i + 910*t]];
R[i + 1764*t] = Op[i + 911*t] ? R[B[i + 911*t]] * R[C[i + 911*t]] : R[B[i + 911*t]] + R[C[i + 911*t]];
R[i + 1765*t] = Op[i + 912*t] ? R[B[i + 912*t]] * R[C[i + 912*t]] : R[B[i + 912*t]] + R[C[i + 912*t]];
R[i + 1766*t] = Op[i + 913*t] ? R[B[i + 913*t]] * R[C[i + 913*t]] : R[B[i + 913*t]] + R[C[i + 913*t]];
R[i + 1767*t] = Op[i + 914*t] ? R[B[i + 914*t]] * R[C[i + 914*t]] : R[B[i + 914*t]] + R[C[i + 914*t]];
R[i + 1768*t] = Op[i + 915*t] ? R[B[i + 915*t]] * R[C[i + 915*t]] : R[B[i + 915*t]] + R[C[i + 915*t]];
R[i + 1769*t] = Op[i + 916*t] ? R[B[i + 916*t]] * R[C[i + 916*t]] : R[B[i + 916*t]] + R[C[i + 916*t]];
R[i + 1770*t] = Op[i + 917*t] ? R[B[i + 917*t]] * R[C[i + 917*t]] : R[B[i + 917*t]] + R[C[i + 917*t]];
R[i + 1771*t] = Op[i + 918*t] ? R[B[i + 918*t]] * R[C[i + 918*t]] : R[B[i + 918*t]] + R[C[i + 918*t]];
R[i + 1772*t] = Op[i + 919*t] ? R[B[i + 919*t]] * R[C[i + 919*t]] : R[B[i + 919*t]] + R[C[i + 919*t]];
R[i + 1773*t] = Op[i + 920*t] ? R[B[i + 920*t]] * R[C[i + 920*t]] : R[B[i + 920*t]] + R[C[i + 920*t]];
R[i + 1774*t] = Op[i + 921*t] ? R[B[i + 921*t]] * R[C[i + 921*t]] : R[B[i + 921*t]] + R[C[i + 921*t]];
R[i + 1775*t] = Op[i + 922*t] ? R[B[i + 922*t]] * R[C[i + 922*t]] : R[B[i + 922*t]] + R[C[i + 922*t]];
R[i + 1776*t] = Op[i + 923*t] ? R[B[i + 923*t]] * R[C[i + 923*t]] : R[B[i + 923*t]] + R[C[i + 923*t]];
R[i + 1777*t] = Op[i + 924*t] ? R[B[i + 924*t]] * R[C[i + 924*t]] : R[B[i + 924*t]] + R[C[i + 924*t]];
R[i + 1778*t] = Op[i + 925*t] ? R[B[i + 925*t]] * R[C[i + 925*t]] : R[B[i + 925*t]] + R[C[i + 925*t]];
R[i + 1779*t] = Op[i + 926*t] ? R[B[i + 926*t]] * R[C[i + 926*t]] : R[B[i + 926*t]] + R[C[i + 926*t]];
R[i + 1780*t] = Op[i + 927*t] ? R[B[i + 927*t]] * R[C[i + 927*t]] : R[B[i + 927*t]] + R[C[i + 927*t]];
R[i + 1781*t] = Op[i + 928*t] ? R[B[i + 928*t]] * R[C[i + 928*t]] : R[B[i + 928*t]] + R[C[i + 928*t]];
R[i + 1782*t] = Op[i + 929*t] ? R[B[i + 929*t]] * R[C[i + 929*t]] : R[B[i + 929*t]] + R[C[i + 929*t]];
R[i + 1783*t] = Op[i + 930*t] ? R[B[i + 930*t]] * R[C[i + 930*t]] : R[B[i + 930*t]] + R[C[i + 930*t]];
R[i + 1784*t] = Op[i + 931*t] ? R[B[i + 931*t]] * R[C[i + 931*t]] : R[B[i + 931*t]] + R[C[i + 931*t]];
R[i + 1785*t] = Op[i + 932*t] ? R[B[i + 932*t]] * R[C[i + 932*t]] : R[B[i + 932*t]] + R[C[i + 932*t]];
R[i + 1786*t] = Op[i + 933*t] ? R[B[i + 933*t]] * R[C[i + 933*t]] : R[B[i + 933*t]] + R[C[i + 933*t]];
R[i + 1787*t] = Op[i + 934*t] ? R[B[i + 934*t]] * R[C[i + 934*t]] : R[B[i + 934*t]] + R[C[i + 934*t]];
R[i + 1788*t] = Op[i + 935*t] ? R[B[i + 935*t]] * R[C[i + 935*t]] : R[B[i + 935*t]] + R[C[i + 935*t]];
R[i + 1789*t] = Op[i + 936*t] ? R[B[i + 936*t]] * R[C[i + 936*t]] : R[B[i + 936*t]] + R[C[i + 936*t]];
R[i + 1790*t] = Op[i + 937*t] ? R[B[i + 937*t]] * R[C[i + 937*t]] : R[B[i + 937*t]] + R[C[i + 937*t]];
R[i + 1791*t] = Op[i + 938*t] ? R[B[i + 938*t]] * R[C[i + 938*t]] : R[B[i + 938*t]] + R[C[i + 938*t]];
R[i + 1792*t] = Op[i + 939*t] ? R[B[i + 939*t]] * R[C[i + 939*t]] : R[B[i + 939*t]] + R[C[i + 939*t]];
R[i + 1793*t] = Op[i + 940*t] ? R[B[i + 940*t]] * R[C[i + 940*t]] : R[B[i + 940*t]] + R[C[i + 940*t]];
R[i + 1794*t] = Op[i + 941*t] ? R[B[i + 941*t]] * R[C[i + 941*t]] : R[B[i + 941*t]] + R[C[i + 941*t]];
R[i + 1795*t] = Op[i + 942*t] ? R[B[i + 942*t]] * R[C[i + 942*t]] : R[B[i + 942*t]] + R[C[i + 942*t]];
R[i + 1796*t] = Op[i + 943*t] ? R[B[i + 943*t]] * R[C[i + 943*t]] : R[B[i + 943*t]] + R[C[i + 943*t]];
__syncthreads();
R[i + 1797*t] = Op[i + 944*t] ? R[B[i + 944*t]] * R[C[i + 944*t]] : R[B[i + 944*t]] + R[C[i + 944*t]];
R[i + 1798*t] = Op[i + 945*t] ? R[B[i + 945*t]] * R[C[i + 945*t]] : R[B[i + 945*t]] + R[C[i + 945*t]];
R[i + 1799*t] = Op[i + 946*t] ? R[B[i + 946*t]] * R[C[i + 946*t]] : R[B[i + 946*t]] + R[C[i + 946*t]];
R[i + 1800*t] = Op[i + 947*t] ? R[B[i + 947*t]] * R[C[i + 947*t]] : R[B[i + 947*t]] + R[C[i + 947*t]];
R[i + 1801*t] = Op[i + 948*t] ? R[B[i + 948*t]] * R[C[i + 948*t]] : R[B[i + 948*t]] + R[C[i + 948*t]];
R[i + 1802*t] = Op[i + 949*t] ? R[B[i + 949*t]] * R[C[i + 949*t]] : R[B[i + 949*t]] + R[C[i + 949*t]];
R[i + 1803*t] = Op[i + 950*t] ? R[B[i + 950*t]] * R[C[i + 950*t]] : R[B[i + 950*t]] + R[C[i + 950*t]];
R[i + 1804*t] = Op[i + 951*t] ? R[B[i + 951*t]] * R[C[i + 951*t]] : R[B[i + 951*t]] + R[C[i + 951*t]];
R[i + 1805*t] = Op[i + 952*t] ? R[B[i + 952*t]] * R[C[i + 952*t]] : R[B[i + 952*t]] + R[C[i + 952*t]];
R[i + 1806*t] = Op[i + 953*t] ? R[B[i + 953*t]] * R[C[i + 953*t]] : R[B[i + 953*t]] + R[C[i + 953*t]];
R[i + 1807*t] = Op[i + 954*t] ? R[B[i + 954*t]] * R[C[i + 954*t]] : R[B[i + 954*t]] + R[C[i + 954*t]];
R[i + 1808*t] = Op[i + 955*t] ? R[B[i + 955*t]] * R[C[i + 955*t]] : R[B[i + 955*t]] + R[C[i + 955*t]];
R[i + 1809*t] = Op[i + 956*t] ? R[B[i + 956*t]] * R[C[i + 956*t]] : R[B[i + 956*t]] + R[C[i + 956*t]];
R[i + 1810*t] = Op[i + 957*t] ? R[B[i + 957*t]] * R[C[i + 957*t]] : R[B[i + 957*t]] + R[C[i + 957*t]];
R[i + 1811*t] = Op[i + 958*t] ? R[B[i + 958*t]] * R[C[i + 958*t]] : R[B[i + 958*t]] + R[C[i + 958*t]];
R[i + 1812*t] = Op[i + 959*t] ? R[B[i + 959*t]] * R[C[i + 959*t]] : R[B[i + 959*t]] + R[C[i + 959*t]];
R[i + 1813*t] = Op[i + 960*t] ? R[B[i + 960*t]] * R[C[i + 960*t]] : R[B[i + 960*t]] + R[C[i + 960*t]];
R[i + 1814*t] = Op[i + 961*t] ? R[B[i + 961*t]] * R[C[i + 961*t]] : R[B[i + 961*t]] + R[C[i + 961*t]];
R[i + 1815*t] = Op[i + 962*t] ? R[B[i + 962*t]] * R[C[i + 962*t]] : R[B[i + 962*t]] + R[C[i + 962*t]];
R[i + 1816*t] = Op[i + 963*t] ? R[B[i + 963*t]] * R[C[i + 963*t]] : R[B[i + 963*t]] + R[C[i + 963*t]];
R[i + 1817*t] = Op[i + 964*t] ? R[B[i + 964*t]] * R[C[i + 964*t]] : R[B[i + 964*t]] + R[C[i + 964*t]];
R[i + 1818*t] = Op[i + 965*t] ? R[B[i + 965*t]] * R[C[i + 965*t]] : R[B[i + 965*t]] + R[C[i + 965*t]];
R[i + 1819*t] = Op[i + 966*t] ? R[B[i + 966*t]] * R[C[i + 966*t]] : R[B[i + 966*t]] + R[C[i + 966*t]];
R[i + 1820*t] = Op[i + 967*t] ? R[B[i + 967*t]] * R[C[i + 967*t]] : R[B[i + 967*t]] + R[C[i + 967*t]];
R[i + 1821*t] = Op[i + 968*t] ? R[B[i + 968*t]] * R[C[i + 968*t]] : R[B[i + 968*t]] + R[C[i + 968*t]];
R[i + 1822*t] = Op[i + 969*t] ? R[B[i + 969*t]] * R[C[i + 969*t]] : R[B[i + 969*t]] + R[C[i + 969*t]];
R[i + 1823*t] = Op[i + 970*t] ? R[B[i + 970*t]] * R[C[i + 970*t]] : R[B[i + 970*t]] + R[C[i + 970*t]];
R[i + 1824*t] = Op[i + 971*t] ? R[B[i + 971*t]] * R[C[i + 971*t]] : R[B[i + 971*t]] + R[C[i + 971*t]];
R[i + 1825*t] = Op[i + 972*t] ? R[B[i + 972*t]] * R[C[i + 972*t]] : R[B[i + 972*t]] + R[C[i + 972*t]];
R[i + 1826*t] = Op[i + 973*t] ? R[B[i + 973*t]] * R[C[i + 973*t]] : R[B[i + 973*t]] + R[C[i + 973*t]];
R[i + 1827*t] = Op[i + 974*t] ? R[B[i + 974*t]] * R[C[i + 974*t]] : R[B[i + 974*t]] + R[C[i + 974*t]];
R[i + 1828*t] = Op[i + 975*t] ? R[B[i + 975*t]] * R[C[i + 975*t]] : R[B[i + 975*t]] + R[C[i + 975*t]];
R[i + 1829*t] = Op[i + 976*t] ? R[B[i + 976*t]] * R[C[i + 976*t]] : R[B[i + 976*t]] + R[C[i + 976*t]];
R[i + 1830*t] = Op[i + 977*t] ? R[B[i + 977*t]] * R[C[i + 977*t]] : R[B[i + 977*t]] + R[C[i + 977*t]];
R[i + 1831*t] = Op[i + 978*t] ? R[B[i + 978*t]] * R[C[i + 978*t]] : R[B[i + 978*t]] + R[C[i + 978*t]];
R[i + 1832*t] = Op[i + 979*t] ? R[B[i + 979*t]] * R[C[i + 979*t]] : R[B[i + 979*t]] + R[C[i + 979*t]];
R[i + 1833*t] = Op[i + 980*t] ? R[B[i + 980*t]] * R[C[i + 980*t]] : R[B[i + 980*t]] + R[C[i + 980*t]];
R[i + 1834*t] = Op[i + 981*t] ? R[B[i + 981*t]] * R[C[i + 981*t]] : R[B[i + 981*t]] + R[C[i + 981*t]];
R[i + 1835*t] = Op[i + 982*t] ? R[B[i + 982*t]] * R[C[i + 982*t]] : R[B[i + 982*t]] + R[C[i + 982*t]];
R[i + 1836*t] = Op[i + 983*t] ? R[B[i + 983*t]] * R[C[i + 983*t]] : R[B[i + 983*t]] + R[C[i + 983*t]];
R[i + 1837*t] = Op[i + 984*t] ? R[B[i + 984*t]] * R[C[i + 984*t]] : R[B[i + 984*t]] + R[C[i + 984*t]];
R[i + 1838*t] = Op[i + 985*t] ? R[B[i + 985*t]] * R[C[i + 985*t]] : R[B[i + 985*t]] + R[C[i + 985*t]];
R[i + 1839*t] = Op[i + 986*t] ? R[B[i + 986*t]] * R[C[i + 986*t]] : R[B[i + 986*t]] + R[C[i + 986*t]];
R[i + 1840*t] = Op[i + 987*t] ? R[B[i + 987*t]] * R[C[i + 987*t]] : R[B[i + 987*t]] + R[C[i + 987*t]];
R[i + 1841*t] = Op[i + 988*t] ? R[B[i + 988*t]] * R[C[i + 988*t]] : R[B[i + 988*t]] + R[C[i + 988*t]];
R[i + 1842*t] = Op[i + 989*t] ? R[B[i + 989*t]] * R[C[i + 989*t]] : R[B[i + 989*t]] + R[C[i + 989*t]];
R[i + 1843*t] = Op[i + 990*t] ? R[B[i + 990*t]] * R[C[i + 990*t]] : R[B[i + 990*t]] + R[C[i + 990*t]];
R[i + 1844*t] = Op[i + 991*t] ? R[B[i + 991*t]] * R[C[i + 991*t]] : R[B[i + 991*t]] + R[C[i + 991*t]];
R[i + 1845*t] = Op[i + 992*t] ? R[B[i + 992*t]] * R[C[i + 992*t]] : R[B[i + 992*t]] + R[C[i + 992*t]];
R[i + 1846*t] = Op[i + 993*t] ? R[B[i + 993*t]] * R[C[i + 993*t]] : R[B[i + 993*t]] + R[C[i + 993*t]];
R[i + 1847*t] = Op[i + 994*t] ? R[B[i + 994*t]] * R[C[i + 994*t]] : R[B[i + 994*t]] + R[C[i + 994*t]];
R[i + 1848*t] = Op[i + 995*t] ? R[B[i + 995*t]] * R[C[i + 995*t]] : R[B[i + 995*t]] + R[C[i + 995*t]];
R[i + 1849*t] = Op[i + 996*t] ? R[B[i + 996*t]] * R[C[i + 996*t]] : R[B[i + 996*t]] + R[C[i + 996*t]];
__syncthreads();
R[i + 1850*t] = Op[i + 997*t] ? R[B[i + 997*t]] * R[C[i + 997*t]] : R[B[i + 997*t]] + R[C[i + 997*t]];
R[i + 1851*t] = Op[i + 998*t] ? R[B[i + 998*t]] * R[C[i + 998*t]] : R[B[i + 998*t]] + R[C[i + 998*t]];
R[i + 1852*t] = Op[i + 999*t] ? R[B[i + 999*t]] * R[C[i + 999*t]] : R[B[i + 999*t]] + R[C[i + 999*t]];
R[i + 1853*t] = Op[i + 1000*t] ? R[B[i + 1000*t]] * R[C[i + 1000*t]] : R[B[i + 1000*t]] + R[C[i + 1000*t]];
R[i + 1854*t] = Op[i + 1001*t] ? R[B[i + 1001*t]] * R[C[i + 1001*t]] : R[B[i + 1001*t]] + R[C[i + 1001*t]];
R[i + 1855*t] = Op[i + 1002*t] ? R[B[i + 1002*t]] * R[C[i + 1002*t]] : R[B[i + 1002*t]] + R[C[i + 1002*t]];
R[i + 1856*t] = Op[i + 1003*t] ? R[B[i + 1003*t]] * R[C[i + 1003*t]] : R[B[i + 1003*t]] + R[C[i + 1003*t]];
R[i + 1857*t] = Op[i + 1004*t] ? R[B[i + 1004*t]] * R[C[i + 1004*t]] : R[B[i + 1004*t]] + R[C[i + 1004*t]];
R[i + 1858*t] = Op[i + 1005*t] ? R[B[i + 1005*t]] * R[C[i + 1005*t]] : R[B[i + 1005*t]] + R[C[i + 1005*t]];
R[i + 1859*t] = Op[i + 1006*t] ? R[B[i + 1006*t]] * R[C[i + 1006*t]] : R[B[i + 1006*t]] + R[C[i + 1006*t]];
R[i + 1860*t] = Op[i + 1007*t] ? R[B[i + 1007*t]] * R[C[i + 1007*t]] : R[B[i + 1007*t]] + R[C[i + 1007*t]];
R[i + 1861*t] = Op[i + 1008*t] ? R[B[i + 1008*t]] * R[C[i + 1008*t]] : R[B[i + 1008*t]] + R[C[i + 1008*t]];
R[i + 1862*t] = Op[i + 1009*t] ? R[B[i + 1009*t]] * R[C[i + 1009*t]] : R[B[i + 1009*t]] + R[C[i + 1009*t]];
R[i + 1863*t] = Op[i + 1010*t] ? R[B[i + 1010*t]] * R[C[i + 1010*t]] : R[B[i + 1010*t]] + R[C[i + 1010*t]];
R[i + 1864*t] = Op[i + 1011*t] ? R[B[i + 1011*t]] * R[C[i + 1011*t]] : R[B[i + 1011*t]] + R[C[i + 1011*t]];
R[i + 1865*t] = Op[i + 1012*t] ? R[B[i + 1012*t]] * R[C[i + 1012*t]] : R[B[i + 1012*t]] + R[C[i + 1012*t]];
R[i + 1866*t] = Op[i + 1013*t] ? R[B[i + 1013*t]] * R[C[i + 1013*t]] : R[B[i + 1013*t]] + R[C[i + 1013*t]];
R[i + 1867*t] = Op[i + 1014*t] ? R[B[i + 1014*t]] * R[C[i + 1014*t]] : R[B[i + 1014*t]] + R[C[i + 1014*t]];
R[i + 1868*t] = Op[i + 1015*t] ? R[B[i + 1015*t]] * R[C[i + 1015*t]] : R[B[i + 1015*t]] + R[C[i + 1015*t]];
R[i + 1869*t] = Op[i + 1016*t] ? R[B[i + 1016*t]] * R[C[i + 1016*t]] : R[B[i + 1016*t]] + R[C[i + 1016*t]];
R[i + 1870*t] = Op[i + 1017*t] ? R[B[i + 1017*t]] * R[C[i + 1017*t]] : R[B[i + 1017*t]] + R[C[i + 1017*t]];
R[i + 1871*t] = Op[i + 1018*t] ? R[B[i + 1018*t]] * R[C[i + 1018*t]] : R[B[i + 1018*t]] + R[C[i + 1018*t]];
R[i + 1872*t] = Op[i + 1019*t] ? R[B[i + 1019*t]] * R[C[i + 1019*t]] : R[B[i + 1019*t]] + R[C[i + 1019*t]];
R[i + 1873*t] = Op[i + 1020*t] ? R[B[i + 1020*t]] * R[C[i + 1020*t]] : R[B[i + 1020*t]] + R[C[i + 1020*t]];
R[i + 1874*t] = Op[i + 1021*t] ? R[B[i + 1021*t]] * R[C[i + 1021*t]] : R[B[i + 1021*t]] + R[C[i + 1021*t]];
R[i + 1875*t] = Op[i + 1022*t] ? R[B[i + 1022*t]] * R[C[i + 1022*t]] : R[B[i + 1022*t]] + R[C[i + 1022*t]];
R[i + 1876*t] = Op[i + 1023*t] ? R[B[i + 1023*t]] * R[C[i + 1023*t]] : R[B[i + 1023*t]] + R[C[i + 1023*t]];
R[i + 1877*t] = Op[i + 1024*t] ? R[B[i + 1024*t]] * R[C[i + 1024*t]] : R[B[i + 1024*t]] + R[C[i + 1024*t]];
R[i + 1878*t] = Op[i + 1025*t] ? R[B[i + 1025*t]] * R[C[i + 1025*t]] : R[B[i + 1025*t]] + R[C[i + 1025*t]];
R[i + 1879*t] = Op[i + 1026*t] ? R[B[i + 1026*t]] * R[C[i + 1026*t]] : R[B[i + 1026*t]] + R[C[i + 1026*t]];
R[i + 1880*t] = Op[i + 1027*t] ? R[B[i + 1027*t]] * R[C[i + 1027*t]] : R[B[i + 1027*t]] + R[C[i + 1027*t]];
R[i + 1881*t] = Op[i + 1028*t] ? R[B[i + 1028*t]] * R[C[i + 1028*t]] : R[B[i + 1028*t]] + R[C[i + 1028*t]];
R[i + 1882*t] = Op[i + 1029*t] ? R[B[i + 1029*t]] * R[C[i + 1029*t]] : R[B[i + 1029*t]] + R[C[i + 1029*t]];
R[i + 1883*t] = Op[i + 1030*t] ? R[B[i + 1030*t]] * R[C[i + 1030*t]] : R[B[i + 1030*t]] + R[C[i + 1030*t]];
R[i + 1884*t] = Op[i + 1031*t] ? R[B[i + 1031*t]] * R[C[i + 1031*t]] : R[B[i + 1031*t]] + R[C[i + 1031*t]];
R[i + 1885*t] = Op[i + 1032*t] ? R[B[i + 1032*t]] * R[C[i + 1032*t]] : R[B[i + 1032*t]] + R[C[i + 1032*t]];
R[i + 1886*t] = Op[i + 1033*t] ? R[B[i + 1033*t]] * R[C[i + 1033*t]] : R[B[i + 1033*t]] + R[C[i + 1033*t]];
R[i + 1887*t] = Op[i + 1034*t] ? R[B[i + 1034*t]] * R[C[i + 1034*t]] : R[B[i + 1034*t]] + R[C[i + 1034*t]];
R[i + 1888*t] = Op[i + 1035*t] ? R[B[i + 1035*t]] * R[C[i + 1035*t]] : R[B[i + 1035*t]] + R[C[i + 1035*t]];
R[i + 1889*t] = Op[i + 1036*t] ? R[B[i + 1036*t]] * R[C[i + 1036*t]] : R[B[i + 1036*t]] + R[C[i + 1036*t]];
R[i + 1890*t] = Op[i + 1037*t] ? R[B[i + 1037*t]] * R[C[i + 1037*t]] : R[B[i + 1037*t]] + R[C[i + 1037*t]];
R[i + 1891*t] = Op[i + 1038*t] ? R[B[i + 1038*t]] * R[C[i + 1038*t]] : R[B[i + 1038*t]] + R[C[i + 1038*t]];
R[i + 1892*t] = Op[i + 1039*t] ? R[B[i + 1039*t]] * R[C[i + 1039*t]] : R[B[i + 1039*t]] + R[C[i + 1039*t]];
R[i + 1893*t] = Op[i + 1040*t] ? R[B[i + 1040*t]] * R[C[i + 1040*t]] : R[B[i + 1040*t]] + R[C[i + 1040*t]];
R[i + 1894*t] = Op[i + 1041*t] ? R[B[i + 1041*t]] * R[C[i + 1041*t]] : R[B[i + 1041*t]] + R[C[i + 1041*t]];
R[i + 1895*t] = Op[i + 1042*t] ? R[B[i + 1042*t]] * R[C[i + 1042*t]] : R[B[i + 1042*t]] + R[C[i + 1042*t]];
R[i + 1896*t] = Op[i + 1043*t] ? R[B[i + 1043*t]] * R[C[i + 1043*t]] : R[B[i + 1043*t]] + R[C[i + 1043*t]];
R[i + 1897*t] = Op[i + 1044*t] ? R[B[i + 1044*t]] * R[C[i + 1044*t]] : R[B[i + 1044*t]] + R[C[i + 1044*t]];
R[i + 1898*t] = Op[i + 1045*t] ? R[B[i + 1045*t]] * R[C[i + 1045*t]] : R[B[i + 1045*t]] + R[C[i + 1045*t]];
R[i + 1899*t] = Op[i + 1046*t] ? R[B[i + 1046*t]] * R[C[i + 1046*t]] : R[B[i + 1046*t]] + R[C[i + 1046*t]];
R[i + 1900*t] = Op[i + 1047*t] ? R[B[i + 1047*t]] * R[C[i + 1047*t]] : R[B[i + 1047*t]] + R[C[i + 1047*t]];
R[i + 1901*t] = Op[i + 1048*t] ? R[B[i + 1048*t]] * R[C[i + 1048*t]] : R[B[i + 1048*t]] + R[C[i + 1048*t]];
R[i + 1902*t] = Op[i + 1049*t] ? R[B[i + 1049*t]] * R[C[i + 1049*t]] : R[B[i + 1049*t]] + R[C[i + 1049*t]];
R[i + 1903*t] = Op[i + 1050*t] ? R[B[i + 1050*t]] * R[C[i + 1050*t]] : R[B[i + 1050*t]] + R[C[i + 1050*t]];
R[i + 1904*t] = Op[i + 1051*t] ? R[B[i + 1051*t]] * R[C[i + 1051*t]] : R[B[i + 1051*t]] + R[C[i + 1051*t]];
R[i + 1905*t] = Op[i + 1052*t] ? R[B[i + 1052*t]] * R[C[i + 1052*t]] : R[B[i + 1052*t]] + R[C[i + 1052*t]];
__syncthreads();
R[i + 1906*t] = Op[i + 1053*t] ? R[B[i + 1053*t]] * R[C[i + 1053*t]] : R[B[i + 1053*t]] + R[C[i + 1053*t]];
R[i + 1907*t] = Op[i + 1054*t] ? R[B[i + 1054*t]] * R[C[i + 1054*t]] : R[B[i + 1054*t]] + R[C[i + 1054*t]];
R[i + 1908*t] = Op[i + 1055*t] ? R[B[i + 1055*t]] * R[C[i + 1055*t]] : R[B[i + 1055*t]] + R[C[i + 1055*t]];
R[i + 1909*t] = Op[i + 1056*t] ? R[B[i + 1056*t]] * R[C[i + 1056*t]] : R[B[i + 1056*t]] + R[C[i + 1056*t]];
R[i + 1910*t] = Op[i + 1057*t] ? R[B[i + 1057*t]] * R[C[i + 1057*t]] : R[B[i + 1057*t]] + R[C[i + 1057*t]];
R[i + 1911*t] = Op[i + 1058*t] ? R[B[i + 1058*t]] * R[C[i + 1058*t]] : R[B[i + 1058*t]] + R[C[i + 1058*t]];
R[i + 1912*t] = Op[i + 1059*t] ? R[B[i + 1059*t]] * R[C[i + 1059*t]] : R[B[i + 1059*t]] + R[C[i + 1059*t]];
R[i + 1913*t] = Op[i + 1060*t] ? R[B[i + 1060*t]] * R[C[i + 1060*t]] : R[B[i + 1060*t]] + R[C[i + 1060*t]];
R[i + 1914*t] = Op[i + 1061*t] ? R[B[i + 1061*t]] * R[C[i + 1061*t]] : R[B[i + 1061*t]] + R[C[i + 1061*t]];
R[i + 1915*t] = Op[i + 1062*t] ? R[B[i + 1062*t]] * R[C[i + 1062*t]] : R[B[i + 1062*t]] + R[C[i + 1062*t]];
R[i + 1916*t] = Op[i + 1063*t] ? R[B[i + 1063*t]] * R[C[i + 1063*t]] : R[B[i + 1063*t]] + R[C[i + 1063*t]];
R[i + 1917*t] = Op[i + 1064*t] ? R[B[i + 1064*t]] * R[C[i + 1064*t]] : R[B[i + 1064*t]] + R[C[i + 1064*t]];
R[i + 1918*t] = Op[i + 1065*t] ? R[B[i + 1065*t]] * R[C[i + 1065*t]] : R[B[i + 1065*t]] + R[C[i + 1065*t]];
R[i + 1919*t] = Op[i + 1066*t] ? R[B[i + 1066*t]] * R[C[i + 1066*t]] : R[B[i + 1066*t]] + R[C[i + 1066*t]];
R[i + 1920*t] = Op[i + 1067*t] ? R[B[i + 1067*t]] * R[C[i + 1067*t]] : R[B[i + 1067*t]] + R[C[i + 1067*t]];
R[i + 1921*t] = Op[i + 1068*t] ? R[B[i + 1068*t]] * R[C[i + 1068*t]] : R[B[i + 1068*t]] + R[C[i + 1068*t]];
R[i + 1922*t] = Op[i + 1069*t] ? R[B[i + 1069*t]] * R[C[i + 1069*t]] : R[B[i + 1069*t]] + R[C[i + 1069*t]];
R[i + 1923*t] = Op[i + 1070*t] ? R[B[i + 1070*t]] * R[C[i + 1070*t]] : R[B[i + 1070*t]] + R[C[i + 1070*t]];
R[i + 1924*t] = Op[i + 1071*t] ? R[B[i + 1071*t]] * R[C[i + 1071*t]] : R[B[i + 1071*t]] + R[C[i + 1071*t]];
R[i + 1925*t] = Op[i + 1072*t] ? R[B[i + 1072*t]] * R[C[i + 1072*t]] : R[B[i + 1072*t]] + R[C[i + 1072*t]];
R[i + 1926*t] = Op[i + 1073*t] ? R[B[i + 1073*t]] * R[C[i + 1073*t]] : R[B[i + 1073*t]] + R[C[i + 1073*t]];
R[i + 1927*t] = Op[i + 1074*t] ? R[B[i + 1074*t]] * R[C[i + 1074*t]] : R[B[i + 1074*t]] + R[C[i + 1074*t]];
R[i + 1928*t] = Op[i + 1075*t] ? R[B[i + 1075*t]] * R[C[i + 1075*t]] : R[B[i + 1075*t]] + R[C[i + 1075*t]];
R[i + 1929*t] = Op[i + 1076*t] ? R[B[i + 1076*t]] * R[C[i + 1076*t]] : R[B[i + 1076*t]] + R[C[i + 1076*t]];
R[i + 1930*t] = Op[i + 1077*t] ? R[B[i + 1077*t]] * R[C[i + 1077*t]] : R[B[i + 1077*t]] + R[C[i + 1077*t]];
R[i + 1931*t] = Op[i + 1078*t] ? R[B[i + 1078*t]] * R[C[i + 1078*t]] : R[B[i + 1078*t]] + R[C[i + 1078*t]];
R[i + 1932*t] = Op[i + 1079*t] ? R[B[i + 1079*t]] * R[C[i + 1079*t]] : R[B[i + 1079*t]] + R[C[i + 1079*t]];
R[i + 1933*t] = Op[i + 1080*t] ? R[B[i + 1080*t]] * R[C[i + 1080*t]] : R[B[i + 1080*t]] + R[C[i + 1080*t]];
R[i + 1934*t] = Op[i + 1081*t] ? R[B[i + 1081*t]] * R[C[i + 1081*t]] : R[B[i + 1081*t]] + R[C[i + 1081*t]];
R[i + 1935*t] = Op[i + 1082*t] ? R[B[i + 1082*t]] * R[C[i + 1082*t]] : R[B[i + 1082*t]] + R[C[i + 1082*t]];
R[i + 1936*t] = Op[i + 1083*t] ? R[B[i + 1083*t]] * R[C[i + 1083*t]] : R[B[i + 1083*t]] + R[C[i + 1083*t]];
R[i + 1937*t] = Op[i + 1084*t] ? R[B[i + 1084*t]] * R[C[i + 1084*t]] : R[B[i + 1084*t]] + R[C[i + 1084*t]];
R[i + 1938*t] = Op[i + 1085*t] ? R[B[i + 1085*t]] * R[C[i + 1085*t]] : R[B[i + 1085*t]] + R[C[i + 1085*t]];
R[i + 1939*t] = Op[i + 1086*t] ? R[B[i + 1086*t]] * R[C[i + 1086*t]] : R[B[i + 1086*t]] + R[C[i + 1086*t]];
R[i + 1940*t] = Op[i + 1087*t] ? R[B[i + 1087*t]] * R[C[i + 1087*t]] : R[B[i + 1087*t]] + R[C[i + 1087*t]];
R[i + 1941*t] = Op[i + 1088*t] ? R[B[i + 1088*t]] * R[C[i + 1088*t]] : R[B[i + 1088*t]] + R[C[i + 1088*t]];
R[i + 1942*t] = Op[i + 1089*t] ? R[B[i + 1089*t]] * R[C[i + 1089*t]] : R[B[i + 1089*t]] + R[C[i + 1089*t]];
R[i + 1943*t] = Op[i + 1090*t] ? R[B[i + 1090*t]] * R[C[i + 1090*t]] : R[B[i + 1090*t]] + R[C[i + 1090*t]];
R[i + 1944*t] = Op[i + 1091*t] ? R[B[i + 1091*t]] * R[C[i + 1091*t]] : R[B[i + 1091*t]] + R[C[i + 1091*t]];
__syncthreads();
R[i + 1945*t] = Op[i + 1092*t] ? R[B[i + 1092*t]] * R[C[i + 1092*t]] : R[B[i + 1092*t]] + R[C[i + 1092*t]];
R[i + 1946*t] = Op[i + 1093*t] ? R[B[i + 1093*t]] * R[C[i + 1093*t]] : R[B[i + 1093*t]] + R[C[i + 1093*t]];
R[i + 1947*t] = Op[i + 1094*t] ? R[B[i + 1094*t]] * R[C[i + 1094*t]] : R[B[i + 1094*t]] + R[C[i + 1094*t]];
R[i + 1948*t] = Op[i + 1095*t] ? R[B[i + 1095*t]] * R[C[i + 1095*t]] : R[B[i + 1095*t]] + R[C[i + 1095*t]];
R[i + 1949*t] = Op[i + 1096*t] ? R[B[i + 1096*t]] * R[C[i + 1096*t]] : R[B[i + 1096*t]] + R[C[i + 1096*t]];
R[i + 1950*t] = Op[i + 1097*t] ? R[B[i + 1097*t]] * R[C[i + 1097*t]] : R[B[i + 1097*t]] + R[C[i + 1097*t]];
R[i + 1951*t] = Op[i + 1098*t] ? R[B[i + 1098*t]] * R[C[i + 1098*t]] : R[B[i + 1098*t]] + R[C[i + 1098*t]];
R[i + 1952*t] = Op[i + 1099*t] ? R[B[i + 1099*t]] * R[C[i + 1099*t]] : R[B[i + 1099*t]] + R[C[i + 1099*t]];
R[i + 1953*t] = Op[i + 1100*t] ? R[B[i + 1100*t]] * R[C[i + 1100*t]] : R[B[i + 1100*t]] + R[C[i + 1100*t]];
R[i + 1954*t] = Op[i + 1101*t] ? R[B[i + 1101*t]] * R[C[i + 1101*t]] : R[B[i + 1101*t]] + R[C[i + 1101*t]];
R[i + 1955*t] = Op[i + 1102*t] ? R[B[i + 1102*t]] * R[C[i + 1102*t]] : R[B[i + 1102*t]] + R[C[i + 1102*t]];
R[i + 1956*t] = Op[i + 1103*t] ? R[B[i + 1103*t]] * R[C[i + 1103*t]] : R[B[i + 1103*t]] + R[C[i + 1103*t]];
R[i + 1957*t] = Op[i + 1104*t] ? R[B[i + 1104*t]] * R[C[i + 1104*t]] : R[B[i + 1104*t]] + R[C[i + 1104*t]];
R[i + 1958*t] = Op[i + 1105*t] ? R[B[i + 1105*t]] * R[C[i + 1105*t]] : R[B[i + 1105*t]] + R[C[i + 1105*t]];
R[i + 1959*t] = Op[i + 1106*t] ? R[B[i + 1106*t]] * R[C[i + 1106*t]] : R[B[i + 1106*t]] + R[C[i + 1106*t]];
R[i + 1960*t] = Op[i + 1107*t] ? R[B[i + 1107*t]] * R[C[i + 1107*t]] : R[B[i + 1107*t]] + R[C[i + 1107*t]];
R[i + 1961*t] = Op[i + 1108*t] ? R[B[i + 1108*t]] * R[C[i + 1108*t]] : R[B[i + 1108*t]] + R[C[i + 1108*t]];
R[i + 1962*t] = Op[i + 1109*t] ? R[B[i + 1109*t]] * R[C[i + 1109*t]] : R[B[i + 1109*t]] + R[C[i + 1109*t]];
R[i + 1963*t] = Op[i + 1110*t] ? R[B[i + 1110*t]] * R[C[i + 1110*t]] : R[B[i + 1110*t]] + R[C[i + 1110*t]];
R[i + 1964*t] = Op[i + 1111*t] ? R[B[i + 1111*t]] * R[C[i + 1111*t]] : R[B[i + 1111*t]] + R[C[i + 1111*t]];
R[i + 1965*t] = Op[i + 1112*t] ? R[B[i + 1112*t]] * R[C[i + 1112*t]] : R[B[i + 1112*t]] + R[C[i + 1112*t]];
R[i + 1966*t] = Op[i + 1113*t] ? R[B[i + 1113*t]] * R[C[i + 1113*t]] : R[B[i + 1113*t]] + R[C[i + 1113*t]];
R[i + 1967*t] = Op[i + 1114*t] ? R[B[i + 1114*t]] * R[C[i + 1114*t]] : R[B[i + 1114*t]] + R[C[i + 1114*t]];
R[i + 1968*t] = Op[i + 1115*t] ? R[B[i + 1115*t]] * R[C[i + 1115*t]] : R[B[i + 1115*t]] + R[C[i + 1115*t]];
R[i + 1969*t] = Op[i + 1116*t] ? R[B[i + 1116*t]] * R[C[i + 1116*t]] : R[B[i + 1116*t]] + R[C[i + 1116*t]];
R[i + 1970*t] = Op[i + 1117*t] ? R[B[i + 1117*t]] * R[C[i + 1117*t]] : R[B[i + 1117*t]] + R[C[i + 1117*t]];
R[i + 1971*t] = Op[i + 1118*t] ? R[B[i + 1118*t]] * R[C[i + 1118*t]] : R[B[i + 1118*t]] + R[C[i + 1118*t]];
R[i + 1972*t] = Op[i + 1119*t] ? R[B[i + 1119*t]] * R[C[i + 1119*t]] : R[B[i + 1119*t]] + R[C[i + 1119*t]];
R[i + 1973*t] = Op[i + 1120*t] ? R[B[i + 1120*t]] * R[C[i + 1120*t]] : R[B[i + 1120*t]] + R[C[i + 1120*t]];
R[i + 1974*t] = Op[i + 1121*t] ? R[B[i + 1121*t]] * R[C[i + 1121*t]] : R[B[i + 1121*t]] + R[C[i + 1121*t]];
R[i + 1975*t] = Op[i + 1122*t] ? R[B[i + 1122*t]] * R[C[i + 1122*t]] : R[B[i + 1122*t]] + R[C[i + 1122*t]];
R[i + 1976*t] = Op[i + 1123*t] ? R[B[i + 1123*t]] * R[C[i + 1123*t]] : R[B[i + 1123*t]] + R[C[i + 1123*t]];
R[i + 1977*t] = Op[i + 1124*t] ? R[B[i + 1124*t]] * R[C[i + 1124*t]] : R[B[i + 1124*t]] + R[C[i + 1124*t]];
R[i + 1978*t] = Op[i + 1125*t] ? R[B[i + 1125*t]] * R[C[i + 1125*t]] : R[B[i + 1125*t]] + R[C[i + 1125*t]];
R[i + 1979*t] = Op[i + 1126*t] ? R[B[i + 1126*t]] * R[C[i + 1126*t]] : R[B[i + 1126*t]] + R[C[i + 1126*t]];
R[i + 1980*t] = Op[i + 1127*t] ? R[B[i + 1127*t]] * R[C[i + 1127*t]] : R[B[i + 1127*t]] + R[C[i + 1127*t]];
R[i + 1981*t] = Op[i + 1128*t] ? R[B[i + 1128*t]] * R[C[i + 1128*t]] : R[B[i + 1128*t]] + R[C[i + 1128*t]];
__syncthreads();
R[i + 1982*t] = Op[i + 1129*t] ? R[B[i + 1129*t]] * R[C[i + 1129*t]] : R[B[i + 1129*t]] + R[C[i + 1129*t]];
R[i + 1983*t] = Op[i + 1130*t] ? R[B[i + 1130*t]] * R[C[i + 1130*t]] : R[B[i + 1130*t]] + R[C[i + 1130*t]];
R[i + 1984*t] = Op[i + 1131*t] ? R[B[i + 1131*t]] * R[C[i + 1131*t]] : R[B[i + 1131*t]] + R[C[i + 1131*t]];
R[i + 1985*t] = Op[i + 1132*t] ? R[B[i + 1132*t]] * R[C[i + 1132*t]] : R[B[i + 1132*t]] + R[C[i + 1132*t]];
R[i + 1986*t] = Op[i + 1133*t] ? R[B[i + 1133*t]] * R[C[i + 1133*t]] : R[B[i + 1133*t]] + R[C[i + 1133*t]];
R[i + 1987*t] = Op[i + 1134*t] ? R[B[i + 1134*t]] * R[C[i + 1134*t]] : R[B[i + 1134*t]] + R[C[i + 1134*t]];
R[i + 1988*t] = Op[i + 1135*t] ? R[B[i + 1135*t]] * R[C[i + 1135*t]] : R[B[i + 1135*t]] + R[C[i + 1135*t]];
R[i + 1989*t] = Op[i + 1136*t] ? R[B[i + 1136*t]] * R[C[i + 1136*t]] : R[B[i + 1136*t]] + R[C[i + 1136*t]];
R[i + 1990*t] = Op[i + 1137*t] ? R[B[i + 1137*t]] * R[C[i + 1137*t]] : R[B[i + 1137*t]] + R[C[i + 1137*t]];
R[i + 1991*t] = Op[i + 1138*t] ? R[B[i + 1138*t]] * R[C[i + 1138*t]] : R[B[i + 1138*t]] + R[C[i + 1138*t]];
R[i + 1992*t] = Op[i + 1139*t] ? R[B[i + 1139*t]] * R[C[i + 1139*t]] : R[B[i + 1139*t]] + R[C[i + 1139*t]];
R[i + 1993*t] = Op[i + 1140*t] ? R[B[i + 1140*t]] * R[C[i + 1140*t]] : R[B[i + 1140*t]] + R[C[i + 1140*t]];
R[i + 1994*t] = Op[i + 1141*t] ? R[B[i + 1141*t]] * R[C[i + 1141*t]] : R[B[i + 1141*t]] + R[C[i + 1141*t]];
R[i + 1995*t] = Op[i + 1142*t] ? R[B[i + 1142*t]] * R[C[i + 1142*t]] : R[B[i + 1142*t]] + R[C[i + 1142*t]];
R[i + 1996*t] = Op[i + 1143*t] ? R[B[i + 1143*t]] * R[C[i + 1143*t]] : R[B[i + 1143*t]] + R[C[i + 1143*t]];
R[i + 1997*t] = Op[i + 1144*t] ? R[B[i + 1144*t]] * R[C[i + 1144*t]] : R[B[i + 1144*t]] + R[C[i + 1144*t]];
R[i + 1998*t] = Op[i + 1145*t] ? R[B[i + 1145*t]] * R[C[i + 1145*t]] : R[B[i + 1145*t]] + R[C[i + 1145*t]];
R[i + 1999*t] = Op[i + 1146*t] ? R[B[i + 1146*t]] * R[C[i + 1146*t]] : R[B[i + 1146*t]] + R[C[i + 1146*t]];
R[i + 2000*t] = Op[i + 1147*t] ? R[B[i + 1147*t]] * R[C[i + 1147*t]] : R[B[i + 1147*t]] + R[C[i + 1147*t]];
R[i + 2001*t] = Op[i + 1148*t] ? R[B[i + 1148*t]] * R[C[i + 1148*t]] : R[B[i + 1148*t]] + R[C[i + 1148*t]];
R[i + 2002*t] = Op[i + 1149*t] ? R[B[i + 1149*t]] * R[C[i + 1149*t]] : R[B[i + 1149*t]] + R[C[i + 1149*t]];
R[i + 2003*t] = Op[i + 1150*t] ? R[B[i + 1150*t]] * R[C[i + 1150*t]] : R[B[i + 1150*t]] + R[C[i + 1150*t]];
R[i + 2004*t] = Op[i + 1151*t] ? R[B[i + 1151*t]] * R[C[i + 1151*t]] : R[B[i + 1151*t]] + R[C[i + 1151*t]];
R[i + 2005*t] = Op[i + 1152*t] ? R[B[i + 1152*t]] * R[C[i + 1152*t]] : R[B[i + 1152*t]] + R[C[i + 1152*t]];
R[i + 2006*t] = Op[i + 1153*t] ? R[B[i + 1153*t]] * R[C[i + 1153*t]] : R[B[i + 1153*t]] + R[C[i + 1153*t]];
R[i + 2007*t] = Op[i + 1154*t] ? R[B[i + 1154*t]] * R[C[i + 1154*t]] : R[B[i + 1154*t]] + R[C[i + 1154*t]];
R[i + 2008*t] = Op[i + 1155*t] ? R[B[i + 1155*t]] * R[C[i + 1155*t]] : R[B[i + 1155*t]] + R[C[i + 1155*t]];
R[i + 2009*t] = Op[i + 1156*t] ? R[B[i + 1156*t]] * R[C[i + 1156*t]] : R[B[i + 1156*t]] + R[C[i + 1156*t]];
R[i + 2010*t] = Op[i + 1157*t] ? R[B[i + 1157*t]] * R[C[i + 1157*t]] : R[B[i + 1157*t]] + R[C[i + 1157*t]];
R[i + 2011*t] = Op[i + 1158*t] ? R[B[i + 1158*t]] * R[C[i + 1158*t]] : R[B[i + 1158*t]] + R[C[i + 1158*t]];
R[i + 2012*t] = Op[i + 1159*t] ? R[B[i + 1159*t]] * R[C[i + 1159*t]] : R[B[i + 1159*t]] + R[C[i + 1159*t]];
R[i + 2013*t] = Op[i + 1160*t] ? R[B[i + 1160*t]] * R[C[i + 1160*t]] : R[B[i + 1160*t]] + R[C[i + 1160*t]];
__syncthreads();
R[i + 2014*t] = Op[i + 1161*t] ? R[B[i + 1161*t]] * R[C[i + 1161*t]] : R[B[i + 1161*t]] + R[C[i + 1161*t]];
R[i + 2015*t] = Op[i + 1162*t] ? R[B[i + 1162*t]] * R[C[i + 1162*t]] : R[B[i + 1162*t]] + R[C[i + 1162*t]];
R[i + 2016*t] = Op[i + 1163*t] ? R[B[i + 1163*t]] * R[C[i + 1163*t]] : R[B[i + 1163*t]] + R[C[i + 1163*t]];
R[i + 2017*t] = Op[i + 1164*t] ? R[B[i + 1164*t]] * R[C[i + 1164*t]] : R[B[i + 1164*t]] + R[C[i + 1164*t]];
R[i + 2018*t] = Op[i + 1165*t] ? R[B[i + 1165*t]] * R[C[i + 1165*t]] : R[B[i + 1165*t]] + R[C[i + 1165*t]];
R[i + 2019*t] = Op[i + 1166*t] ? R[B[i + 1166*t]] * R[C[i + 1166*t]] : R[B[i + 1166*t]] + R[C[i + 1166*t]];
R[i + 2020*t] = Op[i + 1167*t] ? R[B[i + 1167*t]] * R[C[i + 1167*t]] : R[B[i + 1167*t]] + R[C[i + 1167*t]];
R[i + 2021*t] = Op[i + 1168*t] ? R[B[i + 1168*t]] * R[C[i + 1168*t]] : R[B[i + 1168*t]] + R[C[i + 1168*t]];
R[i + 2022*t] = Op[i + 1169*t] ? R[B[i + 1169*t]] * R[C[i + 1169*t]] : R[B[i + 1169*t]] + R[C[i + 1169*t]];
R[i + 2023*t] = Op[i + 1170*t] ? R[B[i + 1170*t]] * R[C[i + 1170*t]] : R[B[i + 1170*t]] + R[C[i + 1170*t]];
R[i + 2024*t] = Op[i + 1171*t] ? R[B[i + 1171*t]] * R[C[i + 1171*t]] : R[B[i + 1171*t]] + R[C[i + 1171*t]];
R[i + 2025*t] = Op[i + 1172*t] ? R[B[i + 1172*t]] * R[C[i + 1172*t]] : R[B[i + 1172*t]] + R[C[i + 1172*t]];
R[i + 2026*t] = Op[i + 1173*t] ? R[B[i + 1173*t]] * R[C[i + 1173*t]] : R[B[i + 1173*t]] + R[C[i + 1173*t]];
R[i + 2027*t] = Op[i + 1174*t] ? R[B[i + 1174*t]] * R[C[i + 1174*t]] : R[B[i + 1174*t]] + R[C[i + 1174*t]];
R[i + 2028*t] = Op[i + 1175*t] ? R[B[i + 1175*t]] * R[C[i + 1175*t]] : R[B[i + 1175*t]] + R[C[i + 1175*t]];
R[i + 2029*t] = Op[i + 1176*t] ? R[B[i + 1176*t]] * R[C[i + 1176*t]] : R[B[i + 1176*t]] + R[C[i + 1176*t]];
R[i + 2030*t] = Op[i + 1177*t] ? R[B[i + 1177*t]] * R[C[i + 1177*t]] : R[B[i + 1177*t]] + R[C[i + 1177*t]];
R[i + 2031*t] = Op[i + 1178*t] ? R[B[i + 1178*t]] * R[C[i + 1178*t]] : R[B[i + 1178*t]] + R[C[i + 1178*t]];
R[i + 2032*t] = Op[i + 1179*t] ? R[B[i + 1179*t]] * R[C[i + 1179*t]] : R[B[i + 1179*t]] + R[C[i + 1179*t]];
__syncthreads();
R[i + 2033*t] = Op[i + 1180*t] ? R[B[i + 1180*t]] * R[C[i + 1180*t]] : R[B[i + 1180*t]] + R[C[i + 1180*t]];
R[i + 2034*t] = Op[i + 1181*t] ? R[B[i + 1181*t]] * R[C[i + 1181*t]] : R[B[i + 1181*t]] + R[C[i + 1181*t]];
R[i + 2035*t] = Op[i + 1182*t] ? R[B[i + 1182*t]] * R[C[i + 1182*t]] : R[B[i + 1182*t]] + R[C[i + 1182*t]];
R[i + 2036*t] = Op[i + 1183*t] ? R[B[i + 1183*t]] * R[C[i + 1183*t]] : R[B[i + 1183*t]] + R[C[i + 1183*t]];
R[i + 2037*t] = Op[i + 1184*t] ? R[B[i + 1184*t]] * R[C[i + 1184*t]] : R[B[i + 1184*t]] + R[C[i + 1184*t]];
R[i + 2038*t] = Op[i + 1185*t] ? R[B[i + 1185*t]] * R[C[i + 1185*t]] : R[B[i + 1185*t]] + R[C[i + 1185*t]];
R[i + 2039*t] = Op[i + 1186*t] ? R[B[i + 1186*t]] * R[C[i + 1186*t]] : R[B[i + 1186*t]] + R[C[i + 1186*t]];
R[i + 2040*t] = Op[i + 1187*t] ? R[B[i + 1187*t]] * R[C[i + 1187*t]] : R[B[i + 1187*t]] + R[C[i + 1187*t]];
R[i + 2041*t] = Op[i + 1188*t] ? R[B[i + 1188*t]] * R[C[i + 1188*t]] : R[B[i + 1188*t]] + R[C[i + 1188*t]];
R[i + 2042*t] = Op[i + 1189*t] ? R[B[i + 1189*t]] * R[C[i + 1189*t]] : R[B[i + 1189*t]] + R[C[i + 1189*t]];
R[i + 2043*t] = Op[i + 1190*t] ? R[B[i + 1190*t]] * R[C[i + 1190*t]] : R[B[i + 1190*t]] + R[C[i + 1190*t]];
R[i + 2044*t] = Op[i + 1191*t] ? R[B[i + 1191*t]] * R[C[i + 1191*t]] : R[B[i + 1191*t]] + R[C[i + 1191*t]];
R[i + 2045*t] = Op[i + 1192*t] ? R[B[i + 1192*t]] * R[C[i + 1192*t]] : R[B[i + 1192*t]] + R[C[i + 1192*t]];
R[i + 2046*t] = Op[i + 1193*t] ? R[B[i + 1193*t]] * R[C[i + 1193*t]] : R[B[i + 1193*t]] + R[C[i + 1193*t]];
R[i + 2047*t] = Op[i + 1194*t] ? R[B[i + 1194*t]] * R[C[i + 1194*t]] : R[B[i + 1194*t]] + R[C[i + 1194*t]];
R[i + 2048*t] = Op[i + 1195*t] ? R[B[i + 1195*t]] * R[C[i + 1195*t]] : R[B[i + 1195*t]] + R[C[i + 1195*t]];
__syncthreads();
R[i + 2049*t] = Op[i + 1196*t] ? R[B[i + 1196*t]] * R[C[i + 1196*t]] : R[B[i + 1196*t]] + R[C[i + 1196*t]];
R[i + 2050*t] = Op[i + 1197*t] ? R[B[i + 1197*t]] * R[C[i + 1197*t]] : R[B[i + 1197*t]] + R[C[i + 1197*t]];
R[i + 2051*t] = Op[i + 1198*t] ? R[B[i + 1198*t]] * R[C[i + 1198*t]] : R[B[i + 1198*t]] + R[C[i + 1198*t]];
R[i + 2052*t] = Op[i + 1199*t] ? R[B[i + 1199*t]] * R[C[i + 1199*t]] : R[B[i + 1199*t]] + R[C[i + 1199*t]];
R[i + 2053*t] = Op[i + 1200*t] ? R[B[i + 1200*t]] * R[C[i + 1200*t]] : R[B[i + 1200*t]] + R[C[i + 1200*t]];
R[i + 2054*t] = Op[i + 1201*t] ? R[B[i + 1201*t]] * R[C[i + 1201*t]] : R[B[i + 1201*t]] + R[C[i + 1201*t]];
R[i + 2055*t] = Op[i + 1202*t] ? R[B[i + 1202*t]] * R[C[i + 1202*t]] : R[B[i + 1202*t]] + R[C[i + 1202*t]];
R[i + 2056*t] = Op[i + 1203*t] ? R[B[i + 1203*t]] * R[C[i + 1203*t]] : R[B[i + 1203*t]] + R[C[i + 1203*t]];
R[i + 2057*t] = Op[i + 1204*t] ? R[B[i + 1204*t]] * R[C[i + 1204*t]] : R[B[i + 1204*t]] + R[C[i + 1204*t]];
R[i + 2058*t] = Op[i + 1205*t] ? R[B[i + 1205*t]] * R[C[i + 1205*t]] : R[B[i + 1205*t]] + R[C[i + 1205*t]];
R[i + 2059*t] = Op[i + 1206*t] ? R[B[i + 1206*t]] * R[C[i + 1206*t]] : R[B[i + 1206*t]] + R[C[i + 1206*t]];
R[i + 2060*t] = Op[i + 1207*t] ? R[B[i + 1207*t]] * R[C[i + 1207*t]] : R[B[i + 1207*t]] + R[C[i + 1207*t]];
R[i + 2061*t] = Op[i + 1208*t] ? R[B[i + 1208*t]] * R[C[i + 1208*t]] : R[B[i + 1208*t]] + R[C[i + 1208*t]];
R[i + 2062*t] = Op[i + 1209*t] ? R[B[i + 1209*t]] * R[C[i + 1209*t]] : R[B[i + 1209*t]] + R[C[i + 1209*t]];
R[i + 2063*t] = Op[i + 1210*t] ? R[B[i + 1210*t]] * R[C[i + 1210*t]] : R[B[i + 1210*t]] + R[C[i + 1210*t]];
R[i + 2064*t] = Op[i + 1211*t] ? R[B[i + 1211*t]] * R[C[i + 1211*t]] : R[B[i + 1211*t]] + R[C[i + 1211*t]];
__syncthreads();
R[i + 2065*t] = Op[i + 1212*t] ? R[B[i + 1212*t]] * R[C[i + 1212*t]] : R[B[i + 1212*t]] + R[C[i + 1212*t]];
R[i + 2066*t] = Op[i + 1213*t] ? R[B[i + 1213*t]] * R[C[i + 1213*t]] : R[B[i + 1213*t]] + R[C[i + 1213*t]];
R[i + 2067*t] = Op[i + 1214*t] ? R[B[i + 1214*t]] * R[C[i + 1214*t]] : R[B[i + 1214*t]] + R[C[i + 1214*t]];
R[i + 2068*t] = Op[i + 1215*t] ? R[B[i + 1215*t]] * R[C[i + 1215*t]] : R[B[i + 1215*t]] + R[C[i + 1215*t]];
R[i + 2069*t] = Op[i + 1216*t] ? R[B[i + 1216*t]] * R[C[i + 1216*t]] : R[B[i + 1216*t]] + R[C[i + 1216*t]];
R[i + 2070*t] = Op[i + 1217*t] ? R[B[i + 1217*t]] * R[C[i + 1217*t]] : R[B[i + 1217*t]] + R[C[i + 1217*t]];
R[i + 2071*t] = Op[i + 1218*t] ? R[B[i + 1218*t]] * R[C[i + 1218*t]] : R[B[i + 1218*t]] + R[C[i + 1218*t]];
R[i + 2072*t] = Op[i + 1219*t] ? R[B[i + 1219*t]] * R[C[i + 1219*t]] : R[B[i + 1219*t]] + R[C[i + 1219*t]];
R[i + 2073*t] = Op[i + 1220*t] ? R[B[i + 1220*t]] * R[C[i + 1220*t]] : R[B[i + 1220*t]] + R[C[i + 1220*t]];
R[i + 2074*t] = Op[i + 1221*t] ? R[B[i + 1221*t]] * R[C[i + 1221*t]] : R[B[i + 1221*t]] + R[C[i + 1221*t]];
R[i + 2075*t] = Op[i + 1222*t] ? R[B[i + 1222*t]] * R[C[i + 1222*t]] : R[B[i + 1222*t]] + R[C[i + 1222*t]];
__syncthreads();
R[i + 2076*t] = Op[i + 1223*t] ? R[B[i + 1223*t]] * R[C[i + 1223*t]] : R[B[i + 1223*t]] + R[C[i + 1223*t]];
R[i + 2077*t] = Op[i + 1224*t] ? R[B[i + 1224*t]] * R[C[i + 1224*t]] : R[B[i + 1224*t]] + R[C[i + 1224*t]];
R[i + 2078*t] = Op[i + 1225*t] ? R[B[i + 1225*t]] * R[C[i + 1225*t]] : R[B[i + 1225*t]] + R[C[i + 1225*t]];
R[i + 2079*t] = Op[i + 1226*t] ? R[B[i + 1226*t]] * R[C[i + 1226*t]] : R[B[i + 1226*t]] + R[C[i + 1226*t]];
R[i + 2080*t] = Op[i + 1227*t] ? R[B[i + 1227*t]] * R[C[i + 1227*t]] : R[B[i + 1227*t]] + R[C[i + 1227*t]];
R[i + 2081*t] = Op[i + 1228*t] ? R[B[i + 1228*t]] * R[C[i + 1228*t]] : R[B[i + 1228*t]] + R[C[i + 1228*t]];
R[i + 2082*t] = Op[i + 1229*t] ? R[B[i + 1229*t]] * R[C[i + 1229*t]] : R[B[i + 1229*t]] + R[C[i + 1229*t]];
R[i + 2083*t] = Op[i + 1230*t] ? R[B[i + 1230*t]] * R[C[i + 1230*t]] : R[B[i + 1230*t]] + R[C[i + 1230*t]];
R[i + 2084*t] = Op[i + 1231*t] ? R[B[i + 1231*t]] * R[C[i + 1231*t]] : R[B[i + 1231*t]] + R[C[i + 1231*t]];
__syncthreads();
R[i + 2085*t] = Op[i + 1232*t] ? R[B[i + 1232*t]] * R[C[i + 1232*t]] : R[B[i + 1232*t]] + R[C[i + 1232*t]];
R[i + 2086*t] = Op[i + 1233*t] ? R[B[i + 1233*t]] * R[C[i + 1233*t]] : R[B[i + 1233*t]] + R[C[i + 1233*t]];
R[i + 2087*t] = Op[i + 1234*t] ? R[B[i + 1234*t]] * R[C[i + 1234*t]] : R[B[i + 1234*t]] + R[C[i + 1234*t]];
R[i + 2088*t] = Op[i + 1235*t] ? R[B[i + 1235*t]] * R[C[i + 1235*t]] : R[B[i + 1235*t]] + R[C[i + 1235*t]];
R[i + 2089*t] = Op[i + 1236*t] ? R[B[i + 1236*t]] * R[C[i + 1236*t]] : R[B[i + 1236*t]] + R[C[i + 1236*t]];
R[i + 2090*t] = Op[i + 1237*t] ? R[B[i + 1237*t]] * R[C[i + 1237*t]] : R[B[i + 1237*t]] + R[C[i + 1237*t]];
R[i + 2091*t] = Op[i + 1238*t] ? R[B[i + 1238*t]] * R[C[i + 1238*t]] : R[B[i + 1238*t]] + R[C[i + 1238*t]];
R[i + 2092*t] = Op[i + 1239*t] ? R[B[i + 1239*t]] * R[C[i + 1239*t]] : R[B[i + 1239*t]] + R[C[i + 1239*t]];
__syncthreads();
R[i + 2093*t] = Op[i + 1240*t] ? R[B[i + 1240*t]] * R[C[i + 1240*t]] : R[B[i + 1240*t]] + R[C[i + 1240*t]];
R[i + 2094*t] = Op[i + 1241*t] ? R[B[i + 1241*t]] * R[C[i + 1241*t]] : R[B[i + 1241*t]] + R[C[i + 1241*t]];
R[i + 2095*t] = Op[i + 1242*t] ? R[B[i + 1242*t]] * R[C[i + 1242*t]] : R[B[i + 1242*t]] + R[C[i + 1242*t]];
R[i + 2096*t] = Op[i + 1243*t] ? R[B[i + 1243*t]] * R[C[i + 1243*t]] : R[B[i + 1243*t]] + R[C[i + 1243*t]];
R[i + 2097*t] = Op[i + 1244*t] ? R[B[i + 1244*t]] * R[C[i + 1244*t]] : R[B[i + 1244*t]] + R[C[i + 1244*t]];
R[i + 2098*t] = Op[i + 1245*t] ? R[B[i + 1245*t]] * R[C[i + 1245*t]] : R[B[i + 1245*t]] + R[C[i + 1245*t]];
R[i + 2099*t] = Op[i + 1246*t] ? R[B[i + 1246*t]] * R[C[i + 1246*t]] : R[B[i + 1246*t]] + R[C[i + 1246*t]];
__syncthreads();
R[i + 2100*t] = Op[i + 1247*t] ? R[B[i + 1247*t]] * R[C[i + 1247*t]] : R[B[i + 1247*t]] + R[C[i + 1247*t]];
R[i + 2101*t] = Op[i + 1248*t] ? R[B[i + 1248*t]] * R[C[i + 1248*t]] : R[B[i + 1248*t]] + R[C[i + 1248*t]];
R[i + 2102*t] = Op[i + 1249*t] ? R[B[i + 1249*t]] * R[C[i + 1249*t]] : R[B[i + 1249*t]] + R[C[i + 1249*t]];
R[i + 2103*t] = Op[i + 1250*t] ? R[B[i + 1250*t]] * R[C[i + 1250*t]] : R[B[i + 1250*t]] + R[C[i + 1250*t]];
R[i + 2104*t] = Op[i + 1251*t] ? R[B[i + 1251*t]] * R[C[i + 1251*t]] : R[B[i + 1251*t]] + R[C[i + 1251*t]];
__syncthreads();
R[i + 2105*t] = Op[i + 1252*t] ? R[B[i + 1252*t]] * R[C[i + 1252*t]] : R[B[i + 1252*t]] + R[C[i + 1252*t]];
R[i + 2106*t] = Op[i + 1253*t] ? R[B[i + 1253*t]] * R[C[i + 1253*t]] : R[B[i + 1253*t]] + R[C[i + 1253*t]];
R[i + 2107*t] = Op[i + 1254*t] ? R[B[i + 1254*t]] * R[C[i + 1254*t]] : R[B[i + 1254*t]] + R[C[i + 1254*t]];
R[i + 2108*t] = Op[i + 1255*t] ? R[B[i + 1255*t]] * R[C[i + 1255*t]] : R[B[i + 1255*t]] + R[C[i + 1255*t]];
__syncthreads();
R[i + 2109*t] = Op[i + 1256*t] ? R[B[i + 1256*t]] * R[C[i + 1256*t]] : R[B[i + 1256*t]] + R[C[i + 1256*t]];
R[i + 2110*t] = Op[i + 1257*t] ? R[B[i + 1257*t]] * R[C[i + 1257*t]] : R[B[i + 1257*t]] + R[C[i + 1257*t]];
R[i + 2111*t] = Op[i + 1258*t] ? R[B[i + 1258*t]] * R[C[i + 1258*t]] : R[B[i + 1258*t]] + R[C[i + 1258*t]];
__syncthreads();
R[i + 2112*t] = Op[i + 1259*t] ? R[B[i + 1259*t]] * R[C[i + 1259*t]] : R[B[i + 1259*t]] + R[C[i + 1259*t]];
R[i + 2113*t] = Op[i + 1260*t] ? R[B[i + 1260*t]] * R[C[i + 1260*t]] : R[B[i + 1260*t]] + R[C[i + 1260*t]];
__syncthreads();
R[i + 2114*t] = Op[i + 1261*t] ? R[B[i + 1261*t]] * R[C[i + 1261*t]] : R[B[i + 1261*t]] + R[C[i + 1261*t]];
R[i + 2115*t] = Op[i + 1262*t] ? R[B[i + 1262*t]] * R[C[i + 1262*t]] : R[B[i + 1262*t]] + R[C[i + 1262*t]];
__syncthreads();
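    // Tail of the schedule: the remaining levels carry a single operation each
    // as the DAG narrows toward its root at R[i + 2123*t].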
R[i + 2116*t] = Op[i + 1263*t] ? R[B[i + 1263*t]] * R[C[i + 1263*t]] : R[B[i + 1263*t]] + R[C[i + 1263*t]];
__syncthreads();
R[i + 2117*t] = Op[i + 1264*t] ? R[B[i + 1264*t]] * R[C[i + 1264*t]] : R[B[i + 1264*t]] + R[C[i + 1264*t]];
__syncthreads();
R[i + 2118*t] = Op[i + 1265*t] ? R[B[i + 1265*t]] * R[C[i + 1265*t]] : R[B[i + 1265*t]] + R[C[i + 1265*t]];
__syncthreads();
R[i + 2119*t] = Op[i + 1266*t] ? R[B[i + 1266*t]] * R[C[i + 1266*t]] : R[B[i + 1266*t]] + R[C[i + 1266*t]];
__syncthreads();
R[i + 2120*t] = Op[i + 1267*t] ? R[B[i + 1267*t]] * R[C[i + 1267*t]] : R[B[i + 1267*t]] + R[C[i + 1267*t]];
__syncthreads();
R[i + 2121*t] = Op[i + 1268*t] ? R[B[i + 1268*t]] * R[C[i + 1268*t]] : R[B[i + 1268*t]] + R[C[i + 1268*t]];
__syncthreads();
R[i + 2122*t] = Op[i + 1269*t] ? R[B[i + 1269*t]] * R[C[i + 1269*t]] : R[B[i + 1269*t]] + R[C[i + 1269*t]];
__syncthreads();
R[i + 2123*t] = Op[i + 1270*t] ? R[B[i + 1270*t]] * R[C[i + 1270*t]] : R[B[i + 1270*t]] + R[C[i + 1270*t]];
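    // Thread 0 accumulates lane 0's root value into the running total for this
    // iteration of the enclosing loop.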
if (i==0) { final += R[2123*t]; }
__syncthreads();
}
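    // After the loop, thread 0 performs the single write-back of the
    // accumulated sum to global memory.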
    if (i==0) { A[0] = final; }
}
// ee5509a5e1cd2db53dec0c742e5606760577344a.cu (paired CUDA source for the hipified file above)
float h_A[] = {
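// Host-side operand array; the generated constants all appear to lie in [0.5, 1.0).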
0.9216671137735739, 0.9259668865552235, 0.529123634423551, 0.8139788790699016, 0.7667188580252835, 0.6234550692398635, 0.955397987834083, 0.820606072273355, 0.9747061244685017, 0.5449702555130261, 0.6805020971641711, 0.9497025422008357, 0.916780885019636, 0.8645539840766937, 0.8372153538679408, 0.9210798097887585, 0.6595966859568572, 0.7438568195279243, 0.6568512551894685, 0.8097578195479649, 0.9271535983770643, 0.6225497080406767, 0.6873301194093778, 0.6165056765248539, 0.8847068122982977, 0.7582141469711399, 0.7395767647465418, 0.6611136441137175, 0.9321300700185404, 0.6419739391654381, 0.5436808070646744, 0.5257441567517374, 0.6706684093062416, 0.7943614601884345, 0.9445448811114667, 0.5312288371931326, 0.8973316812303838, 0.8285475251020219, 0.7693197108835361, 0.7692744370710647, 0.8397756880782328, 0.9703134985681496, 0.8687405941754177, 0.9363545857880495, 0.9971404633836063, 0.9207732759728204, 0.8356686979855378, 0.8314220262500929, 0.547744432464938, 0.7759442960879432, 0.5263370782925765, 0.6803881976025952, 0.627475904047562, 0.7064388760240781, 0.6000810643243775, 0.9265503671339475, 0.5531206395588608, 0.887921048121384, 0.9010937444937734, 0.5222886419208206, 0.5408002622709964, 0.7157656389351329, 0.5534566516358301, 0.9874200627165446, 0.902134078950197, 0.9810895696041988, 0.5049801008537993, 0.8630281218547906, 0.5553828576975883, 0.7370295178258517, 0.9717126354169827, 0.6784222509046566, 0.6606141613167389, 0.7945288835244577, 0.5932746738017876, 0.579526427543219, 0.545635777066586, 0.821650045836283, 0.5878948697864732, 0.878825744943236, 0.7025796047455404, 0.661734297988421, 0.7242959957669841, 0.9445216605562303, 0.5862038606389584, 0.989491515003887, 0.5468072240497905, 0.9108474255881974, 0.9183192993991219, 0.8776473473897614, 0.8945740763574814, 0.8603233887156332, 0.5112918003847812, 0.7260404448534639, 0.9591112748672374, 0.5204596294799271, 0.7100385315722304, 0.5875454200982264, 0.520802987034169, 0.8215246387542896, 0.8684118875071221, 0.6509443952099467, 0.8134680830432439, 0.8291182949363824, 0.5415536691807257, 0.9293060891204017, 0.7357379484063507, 0.5134029010619998, 0.9065767834173741, 0.8889754524883648, 0.885554902534046, 0.8135773114746045, 0.7507035933733358, 0.5284169643697278, 0.7187493248530583, 0.7424576920280601, 0.5450304533254877, 0.5216399995240176, 0.7265275079522786, 0.9692646488347687, 0.5140448906837614, 0.6930188605704032, 0.5555224592587304, 0.6196737179149066, 0.7195427098268359, 0.7265524586466348, 0.875509488610236, 0.8716799654144672, 0.6937948496109221, 0.6092611535767769, 0.9575060269956277, 0.7930928096830722, 0.7430820800080244, 0.5648397020331211, 0.8163789560504975, 0.6536467088410594, 0.5667497711557377, 0.7536364909472333, 0.5092390071313797, 0.9782514793147645, 0.87301224200337, 0.6829247934029222, 0.9875506720579211, 0.7800669791854336, 0.5798425246214773, 0.6191230385752904, 0.787236650392571, 0.7164397555074665, 0.6677423724617377, 0.6634329991787249, 0.8789784075550298, 0.9692840871528745, 0.8109734658789058, 0.7401964749116179, 0.7134218759037075, 0.9764613662058619, 0.5297657139330443, 0.843031744454393, 0.7699392321560388, 0.9583443082091971, 0.7210666788283511, 0.5945747308328897, 0.7655611313797848, 0.8449268833421848, 0.7998900801072616, 0.9830499597490183, 0.6036895091322468, 0.738989585187496, 0.5895632014670855, 0.8613121714427324, 0.9919103539094855, 0.593438313175546, 0.7431973473569677, 0.5103158472741681, 0.8427014740106553, 0.7620666948473269, 0.5799523202442651, 0.6042796044936183, 
0.9889459487133816, 0.7617170428238299, 0.9255581042684362, 0.6854651420416511, 0.9936488123728695, 0.7202572469864783, 0.7832605275607804, 0.9955708328086449, 0.8226312181912858, 0.5979888508299722, 0.6260395174709371, 0.7159674193513927, 0.9076673168245484, 0.8183825711789987, 0.9480513960994938, 0.845866444028899, 0.678962705406913, 0.6309192306711322, 0.5580094211563917, 0.7685264809556049, 0.5281583766280201, 0.5318176871348835, 0.7153945173260163, 0.7429567502345875, 0.6586034253343385, 0.75659032532731, 0.7731171100750749, 0.5981597457685324, 0.9650925643493824, 0.7066532104908274, 0.9134520412985019, 0.6888809870860001, 0.578932953246084, 0.6124540605672533, 0.8312724994357055, 0.5546145091282197, 0.5118496645597177, 0.5715237205993112, 0.7012192941668298, 0.8104812991822465, 0.8347479021480744, 0.6888389880586117, 0.5664702930595494, 0.7515925277150961, 0.9868800403856945, 0.663228799418198, 0.5195478548818439, 0.9281935227880443, 0.6495138949760149, 0.6909473031119053, 0.8650818133700751, 0.6282504345591884, 0.6944054204566451, 0.6172038406378619, 0.7756573305180305, 0.8669145374089108, 0.7091713571431275, 0.8792517642678765, 0.5315426893779114, 0.7719252753114908, 0.9470746325584232, 0.7957401343201738, 0.9705405588132312, 0.7260450567694852, 0.7792730242791701, 0.8777807375633655, 0.5492221573183207, 0.8466724251718755, 0.6989780936145418, 0.8092525875209902, 0.7337837994303922, 0.5596767575740467, 0.8602305280164488, 0.5046308870008049, 0.7954458129919246, 0.5147137335403709, 0.8454022220616496, 0.5147191547496379, 0.6851042842185957, 0.6836101158652245, 0.8844857827761219, 0.6166753541760757, 0.8477945561086097, 0.628484142268962, 0.7495685706572516, 0.7680548060939838, 0.9566469334978671, 0.6503589652050329, 0.7472179616145207, 0.6288634584865183, 0.6326548578501511, 0.7906227576284608, 0.8260142746412232, 0.9491765788935989, 0.711457398407071, 0.991287019803617, 0.5417560807851582, 0.9254143264309848, 0.6825676210867061, 0.7443438727920569, 0.8493851687587468, 0.522513821750058, 0.5220927420051407, 0.978835991002152, 0.6871613315338474, 0.6349538584583161, 0.6475550427739141, 0.8636392611382702, 0.8850948933324643, 0.6825515796540094, 0.6985081665195144, 0.68084296153053, 0.5565383932317135, 0.9699515285812754, 0.6199029261430282, 0.9998680015036105, 0.7210524944998677, 0.8339989842537829, 0.8484867722549152, 0.8517054554443488, 0.9101549376613866, 0.5903272221375762, 0.7617145231525861, 0.7251646420332338, 0.7886260617775689, 0.87383062241891, 0.9317761225455787, 0.8566197671014135, 0.6129917013521511, 0.7806470630620417, 0.513056815634956, 0.7523203680518478, 0.8808355445125694, 0.8556640479972064, 0.5136383065316764, 0.8681602814458975, 0.6153878088366056, 0.923732056220931, 0.5248725268299727, 0.5009395105920962, 0.5924026752037398, 0.8454552957987089, 0.7377333644332547, 0.8731932536815259, 0.9575186243549065, 0.9666564122964525, 0.5455012086892286, 0.7985426786607335, 0.5270210402764428, 0.9004597805504706, 0.9658821604585754, 0.999222852085121, 0.8975177636480007, 0.9681857160668006, 0.8554459964025822, 0.9686989937279606, 0.5046942149226912, 0.8778030375089005, 0.635114202220084, 0.5232855798155707, 0.8294483214971278, 0.5103389101964448, 0.9973794454516229, 0.9573081645001518, 0.7131417903322546, 0.7069757531695728, 0.7043017530915971, 0.906850982078423, 0.611740565979522, 0.8706298487924232, 0.9731935121409188, 0.7276997849049915, 0.6997962810032685, 0.5098815609842537, 0.9012731825306322, 0.7487485742912507, 0.9854084389561553, 0.8854948007519725, 
0.6134004564374049, 0.528269588733693, 0.6557304790165518, 0.7722080111764996, 0.7317820915522507, 0.8988326255508774, 0.5246875938001455, 0.8098307118642044, 0.731508491590775, 0.5576317545391438, 0.8877296414917858, 0.9029762480003575, 0.6403361521196262, 0.52680067162606, 0.6840145981293477, 0.9700862866726572, 0.6831004523429902, 0.9163691084801209, 0.5900896915146651, 0.7851614053960677, 0.5740782708250063, 0.7139672338553928, 0.6931140494998568, 0.782284485286739, 0.9512951595097082, 0.5121309173729429, 0.6756201667289847, 0.6507232391970132, 0.8187839634290239, 0.573653283732743, 0.8118567344663915, 0.9283514729406346, 0.8415648013015882, 0.5760511668641464, 0.5881762957370895, 0.6509884090086566, 0.9475832069490029, 0.6815610432816507, 0.9904989888269977, 0.723943322552264, 0.505461770088973, 0.8990961255945016, 0.5372007463990541, 0.8016902111591504, 0.9864368863165764, 0.7453091874429847, 0.5690055631569877, 0.9797026918453455, 0.7664547423574721, 0.98942414087818, 0.6181781981649184, 0.7254158942124425, 0.7700601142048233, 0.5645101398548824, 0.8124458930738365, 0.7699811808420823, 0.6941704312701418, 0.5915464516534601, 0.5953823457974323, 0.5022592033632213, 0.9671181613518485, 0.7750304526941181, 0.9814128459386697, 0.5030236137883567, 0.9641591216227432, 0.5795317114758327, 0.7591104195840634, 0.5857770830800775, 0.7074064651460573, 0.7461307975277133, 0.8150459056124245, 0.7729732352186266, 0.8541080254663813, 0.7443874074497544, 0.9618056434660313, 0.7386932150778767, 0.954901680448417, 0.6128648375118482, 0.5242539013404158, 0.6586027615636157, 0.7314214767646057, 0.9855554440255082, 0.6961135710117814, 0.519104660932838, 0.9452664367122584, 0.6659270762788059, 0.5656472380693114, 0.7431741708025408, 0.6603661232751206, 0.8162968887218094, 0.9097148038629974, 0.785105087728482, 0.7338704429848057, 0.5328526938842912, 0.9280416967795566, 0.7750382551177579, 0.7053497347886374, 0.7067979218730323, 0.9988593445011217, 0.8993818641133612, 0.5504373566608851, 0.6699121806589375, 0.8568105801514609, 0.88125052203259, 0.9577039875150163, 0.5718199199744618, 0.791472085069657, 0.5983425738227612, 0.7939035038608163, 0.7635957958382101, 0.7879993746338181, 0.679563098433883, 0.9960043706730175, 0.9041312703635793, 0.5562178588247184, 0.710847366105956, 0.8658738747554198, 0.8016032501282371, 0.9499123569700078, 0.9918570997628936, 0.5385107518881784, 0.5179342808326803, 0.8920859953827034, 0.9488254927037983, 0.7983173068465046, 0.5057555630660096, 0.7403495798375939, 0.5238726414368522, 0.7815355028159165, 0.7250599941522018, 0.5925019205564901, 0.6351801799547223, 0.7330294920241971, 0.9107899157652668, 0.6362171652140044, 0.6576482449856796, 0.5023212278218689, 0.9742195317237607, 0.5638725681682097, 0.5071143408974959, 0.5938413117235017, 0.8570630937806789, 0.60346404625177, 0.9017586311306516, 0.7091151483497689, 0.62921705075889, 0.6613343494330254, 0.8889252498330327, 0.8112200813827187, 0.7477215061314588, 0.6627336489683948, 0.7811018466773055, 0.7824766348227208, 0.6909903399536467, 0.572028705838343, 0.5809569186955231, 0.7913894923017031, 0.7443589372253501, 0.8432496647478271, 0.6810443784679505, 0.9248146428343241, 0.9853504974176922, 0.5313564047850929, 0.5923432049833091, 0.8841924052057906, 0.7382460649832651, 0.9037545930579777, 0.9374561844921683, 0.7896933628044946, 0.5252335062757836, 0.721322102332089, 0.8219149646703166, 0.8849296245404927, 0.7642284089570559, 0.5462010143178277, 0.787895120453965, 0.6319558168800552, 0.5816126598012248, 0.624438040493389, 
0.8363759108444633, 0.725078998413833, 0.9242128313942214, 0.6097660709533614, 0.9995662022951413, 0.5587838751527439, 0.903834987314011, 0.9766237157376048, 0.768384795808029, 0.6089791514502723, 0.5732132038061345, 0.5683569540390572, 0.5652975171574637, 0.9900769337421751, 0.7924575146565511, 0.7434190688077589, 0.6675577291277932, 0.7885635441372425, 0.8651571746011999, 0.9035304495964904, 0.5450104256350956, 0.9356564125980775, 0.6514835892240267, 0.5724443643086463, 0.7879508688190562, 0.8436620160542458, 0.77437128267245, 0.5313398882209144, 0.8648281365242654, 0.8047414671038939, 0.7347816265933247, 0.8858010726280914, 0.5259226990183148, 0.8872623341938497, 0.8519765830605531, 0.6349685550842841, 0.7598322782827183, 0.8197699030818066, 0.5639812365536361, 0.966467844707396, 0.8619640327921279, 0.5797388683198478, 0.9847312970167896, 0.7103222910853257, 0.7807443137885584, 0.6389405470341019, 0.7873960823940143, 0.8981876412721721, 0.7880249273192739, 0.7085868091309155, 0.5034744619519913, 0.7315078201896503, 0.5006820087792563, 0.7006138911792099, 0.7117343046152138, 0.6610037232863523, 0.7703562811089484, 0.6305751896871108, 0.5184623818361409, 0.9537161253321361, 0.5445399512655904, 0.8617347555201884, 0.8399578196426051, 0.6491938444290165, 0.7527870377391765, 0.7424816621137633, 0.8725570924906392, 0.7470381648661563, 0.8014368840308512, 0.9025209019408988, 0.8893931209458334, 0.9983475922647447, 0.7300525937925054, 0.5334407896758244, 0.931506738539178, 0.9309087110029369, 0.8741803136008417, 0.9169414997068327, 0.9987307080198823, 0.7075264396015957, 0.5344982683855752, 0.9031953142232105, 0.6568856857250064, 0.9619936094043586, 0.5849982826977098, 0.9106915829375877, 0.6351942040540446, 0.6046812529538368, 0.5229802679565478, 0.6345783934652262, 0.8664753246135278, 0.7816396183297436, 0.6315209438186895, 0.7791579530309378, 0.5967141508166496, 0.5562899773210062, 0.6481190517046596, 0.649305111704004, 0.611980077705233, 0.7240501242111507, 0.5040289949325936, 0.5071734728070347, 0.6669192851322561, 0.6400862149407924, 0.9223372113733603, 0.9734272705632658, 0.7348043454185353, 0.591832800744664, 0.784244247409756, 0.8902534860830471, 0.7808660813782506, 0.7530868331005135, 0.5205061620073341, 0.5208092179527344, 0.574852137384517, 0.979144329609055, 0.8095869371077964, 0.8164963714086, 0.9941913510639124, 0.5596469423126396, 0.7486984063004066, 0.790672611834595, 0.8091458343493686, 0.7604629127649142, 0.9918671258509382, 0.9375025819877933, 0.5982274653239246, 0.9037214227682322, 0.642043485962257, 0.5850621611045898, 0.5755495849733616, 0.7872235171364034, 0.5092434855468917, 0.5474737636786109, 0.7375422554056691, 0.961290568010969, 0.7537073713096569, 0.8090647078766322, 0.828670748080933, 0.5862432877720123, 0.879380501607439, 0.7778807984580716, 0.629169712247287, 0.9312940816027969, 0.517432983917077, 0.5050180988386053, 0.6444799062759012, 0.7964198441329627, 0.6163508272144814, 0.597497478035176, 0.799070197179431, 0.5069245848603505, 0.7127532681225406, 0.7571198604810296, 0.9909127058847533, 0.933820996650019, 0.7447461744909085, 0.9315109289893784, 0.6856658609113162, 0.8876093077761951, 0.965597705391162, 0.6521707082798518, 0.9839606934349443, 0.9306755512869855, 0.7448809842763622, 0.6360117739161775, 0.853519725395664, 0.6321560640585013, 0.9539122628271877, 0.7396390229443454, 0.7852050439601828, 0.880075135138529, 0.8325135069175278, 0.7829768924023014, 0.9093282308214669, 0.6539237837663004, 0.8638838890607159, 0.5665054122281621, 0.677183169457008, 
0.6670031809594612, 0.7206659438128236, 0.8959562580974219, 0.5421658683140596, 0.6245011135711354, 0.7760281066083721, 0.9194578199106611, 0.5487369966099462, 0.8095413623007865, 0.6788807757602013, 0.8863193508808466, 0.6040090441662469, 0.8681869003935613, 0.6805616979557707, 0.7283823316445873, 0.7436766800404275, 0.7872678040395824, 0.8665639185415166, 0.5335171969290213, 0.8969439001871081, 0.5100738846305676, 0.7466574390262143, 0.5296965456812869, 0.8503668950382548, 0.8531394277069615, 0.7368328058998992, 0.9175410804561921, 0.7771257184061442, 0.911948177640515, 0.7167455602997854, 0.8360069632051086, 0.5177079429301323, 0.8303565306554884, 0.5748839566529362, 0.6679665487275244, 0.6565200873949109, 0.7024238461491299, 0.861149670030356, 0.9043800020792766, 0.9407371353809034, 0.5991380338305026, 0.9345140804441838, 0.7146707216889873, 0.6783081562916747, 0.5847432663129872, 0.8894055805536452, 0.8174027614627601, 0.6673091327811238, 0.9306728717727067, 0.7148127806619258, 0.932277249872112, 0.5155798547016849, 0.7908256854057454, 0.9573560737079864, 0.6710347200318898, 0.8721833726417756, 0.7185697334611609, 0.7826687857416825, 0.5927814765524002, 0.5370344497593589, 0.6739702957526515, 0.5178483750768806, 0.5698818089765529, 0.5002272324988125, 0.7715845506596647, 0.627315811552265, 0.9135496522717781, 0.7056267230233447, 0.990576835388455, 0.6357922913460203, 0.7627882193516673, 0.6061280450666033, 0.5867884488405632, 0.7529944748586043, 0.8861038716163026, 0.5749362143828816, 0.8971326031219784, 0.7934479094400515, 0.9821079728327391, 0.9925029365094504, 0.7608628625255107, 0.5819408295350038, 0.582229533905438, 0.5870800119760736, 0.7943625729729308, 0.7850005286764965, 0.6428435257974827, 0.7805353847731515, 0.9603822163600837, 0.8314305471043031, 0.6398775194024496, 0.8213085769706212, 0.6443078077656882, 0.5366919562578676, 0.9566447369001998, 0.8289736815535838, 0.7379516131652808, 0.6672341712829228, 0.9028438690407206, 0.5316111321340256, 0.741683134493758, 0.9618091861614229, 0.995096521770565, 0.9574724829853174, 0.7655070420703565, 0.5451245175643264, 0.6997504516706753, 0.9056378105106231, 0.5114755789287153, 0.5545250745590357, 0.565489840793902, 0.5050614128305649, 0.8119766876557201, 0.7553533737453226, 0.8548621313112779, 0.6954986189816639, 0.9134839109321897, 0.7305060938106878, 0.9187623714640085, 0.7043244705905043, 0.5549417165423842, 0.6579693643893074, 0.8094059450311376, 0.8529402660051191, 0.5141374064785692, 0.5610100431789452, 0.5039162594711493, 0.7549924322485755, 0.9293182192542326, 0.6200823008452536, 0.9897692028834567, 0.845290598964923, 0.6509520933871176, 0.5870783138716529, 0.9226154922612884, 0.5734030063202342, 0.7463168981547899, 0.8588454737063763, 0.5082912608979979, 0.5597469304167135, 0.5776532833110142, 0.7639041508860402, 0.9403836125494844, 0.9531124930329016, 0.9591290022779524, 0.609828308893869, 0.9458896021607384, 0.8901704689249386, 0.7117431991619552, 0.6376079599723906, 0.8608365264317535, 0.9074606539773098, 0.6267690826057057, 0.875029772235273, 0.7464587129405821, 0.7042744918321705, 0.898593176227544, 0.6055946900931157, 0.7068458666942409, 0.9626103266546835, 0.9054224754145239, 0.8403894925342543, 0.9103648987726622, 0.6732743631453464, 0.7473413818761461, 0.6578296382110436, 0.6829685318475627, 0.5613805810156309, 0.8527580853345949, 0.8205569084339316, 0.9851645869910722, 0.6102914146509679, 0.8484944290971078, 0.6504234441751715, 0.8632355382986991, 0.820482690744111, 0.5701899621404849, 0.5973875223640845, 
0.9517419099573426, 0.9332098835030636, 0.8850935305331309, 0.8570043298855421, 0.9521287839163322, 0.9745678237374664, 0.9894551111971632, 0.5318389940424684, 0.7305147271857177, 0.6055730400570342, 0.8117984227917701, 0.9622214714725345, 0.5699758873782265, 0.5494945509390948, 0.738575481142271, 0.7828964963379996, 0.605870495835157, 0.7692813925621698, 0.823314439417344, 0.9651385396726666, 0.6350600368350792, 0.7438882390353672, 0.5940221976978319, 0.9730684327996642, 0.7369034838007074, 0.9384167021597074, 0.9057389032233585, 0.8675917393565464, 0.9334878751298188, 0.6910717833173633, 0.9104160006286093, 0.8594354835366123, 0.6376050280249175, 0.9361528172239829, 0.6631958056987723, 0.6183294977910736, 0.6223437627643955, 0.7144101500514191, 0.9518816922166529, 0.7670373201523522, 0.7647492857576637, 0.5826298836263939, 0.9442140496405889, 0.9979583760031439, 0.8651208074394114, 0.5329668142232502, 0.7110255790475349, 0.5737664465117902, 0.6951563473599501, 0.6339426997850415, 0.7244342232188153, 0.7569684011600817, 0.7557006872787138, 0.7638444058364573, 0.978221621631057, 0.6801348489125162, 0.6565239722145642, 0.9274354957710176, 0.6195204702750413, 0.9858412041210194, 0.5419778935085784, 0.9358476839640878, 0.9772067261197692, 0.7470764417206719, 0.9533411453804412, 0.898065691455392, 0.692762260092944, 0.8636816850314042, 0.6208268291809766, 0.9060333797568805, 0.6101662260123728, 0.6908347005508357, 0.8168877573015518, 0.6637455576457161, 0.9080047036136256, 0.7948562879088221, 0.6807014395880531, 0.7843120622110102, 0.6622894802740545, 0.6683188944583764, 0.7485639701766951, 0.8811405502139662, 0.8364584633993899, 0.835253764353633, 0.7380775430735609, 0.9644542000356928, 0.6632370490678856, 0.6781189595837167, 0.7321792835038026, 0.6933904008412003, 0.8116497269032026, 0.9529774660366498, 0.5973188692883368, 0.9857016338320305, 0.9170297439592918, 0.7093198803574543, 0.9083216025108516, 0.6123663690578309, 0.9020612004616424, 0.8493778344557164, 0.9949422334566542, 0.7966525958370415, 0.6191402988246217, 0.9920100021609468, 0.5240338986326976, 0.7622603380748776, 0.6860554478030287, 0.9465412163733903, 0.9390245292365542, 0.7564727604220183, 0.6578509121299283, 0.6693134371053673, 0.6341862099164013, 0.6182778854231661, 0.56382675468219, 0.8311880705870431, 0.5878208052517822, 0.9847107196418174, 0.8654675464250812, 0.7798256087152433, 0.8762864130226986, 0.8520939344769818, 0.9146248806861705, 0.9501482174811509, 0.8492435666174758, 0.812307664200139, 0.7891422393020311, 0.9476540363817627, 0.6419615239402378, 0.6973273272588552, 0.8469754340873489, 0.7536054406020001, 0.7149110778559304, 0.580939875065916, 0.9578791711017789, 0.5334125740537462, 0.7733988902714124, 0.7836610967798014, 0.9371157171003535, 0.9949316143949323, 0.995970699390573, 0.6298814978264717, 0.822557585826974, 0.6946939667863967, 0.7159426980008361, 0.7493757442468818, 0.6457727042871291, 0.5027653632070037, 0.9384997241733334, 0.6627788861670041, 0.7522738670703528, 0.6254403116355132, 0.9954480256218727, 0.7912353328654131, 0.5165094868243046, 0.8933411084830939, 0.7663458407461039, 0.7894704545155808, 0.7085418422982902, 0.9494093984891057, 0.6146936796343276, 0.9036636546664242, 0.7307808417442754, 0.6277116627470154, 0.9700855051811194, 0.7185671897496738, 0.547789688843485, 0.9616808478840433, 0.872567879936194, 0.6948735965722321, 0.5320436298404432, 0.8097670855666254, 0.7557927896779557, 0.5289376566162238, 0.6482306730106798, 0.9564901196276678, 0.8761190802322487, 0.5346384668958861, 
0.8919965416292511, 0.623916971504094, 0.6360462387523821, 0.8283805772547486, 0.8521120343871866, 0.5902353383669849, 0.7164561987841103, 0.8923595343903112, 0.9611911571860436, 0.6288047337842293, 0.6028928120150476, 0.5879999874391185, 0.9580857767264522, 0.5998747356405761, 0.5610170362031999, 0.6702473232353281, 0.5175902269144954, 0.5874000145370389, 0.9070844712938807, 0.7301247717593108, 0.5436316828825247, 0.6240740137831222, 0.6063617315487606, 0.9364081056955892, 0.6595484066953254, 0.6803528312982925, 0.6377927885828183, 0.7158677930286338, 0.8484162022218453, 0.6130740762030165, 0.7330230619154061, 0.9225657425706255, 0.9471189802963923, 0.8098907551971939, 0.5142356771085276, 0.8407298981745199, 0.660765612745165, 0.8153219486070691, 0.5980205681598147, 0.7975760887710962, 0.6115201172382305, 0.8204201946426055, 0.966699574837776, 0.8624843179974305, 0.636675301039552, 0.7923415680901023, 0.5371838483510898, 0.9714030636257345, 0.6564389433765491, 0.6918293040793935, 0.5804877837938685, 0.6495292326026679, 0.841528152110926, 0.581910046515228, 0.9748019365815921, 0.9792938951574468, 0.5103526058196024, 0.5180364674142408, 0.5206198386866664, 0.8370184016062335, 0.7235184511310173, 0.6406294197447667, 0.8883829138066919, 0.5228496618828617, 0.5599625197545003, 0.8995153141191241, 0.9459989916042502, 0.6736533050037716, 0.5018366823930801, 0.7428839556474796, 0.9521208489070663, 0.689263224619918, 0.5637783366285749, 0.556724679953464, 0.7913779822675193, 0.8794779552902567, 0.78222777276939, 0.8368371406882462, 0.7146510065906551, 0.7223221883146498, 0.7620213280636259, 0.5874154895920893, 0.8645401455906134, 0.88902791897316, 0.7975590731933635, 0.8704035705543989, 0.6335536301132205, 0.946831308885151, 0.6369016131161434, 0.5058287526683423, 0.5247413319005931, 0.6468784482618033, 0.6959897299370564, 0.5910156292595761, 0.9352740753557651, 0.9475929679829304, 0.9603556166762732, 0.5382658036766805, 0.755224464656749, 0.597621459212843, 0.8624913320934032, 0.9332160778900838, 0.7944315150080581, 0.6319417428154329, 0.6884878296238199, 0.8247443087116646, 0.5331194183825446, 0.9964981916751118, 0.7616316186258048, 0.7164229260297518, 0.915902913503972, 0.7535111993766261, 0.888424892029793, 0.6103586057005062, 0.5646677611792807, 0.9128370432169816, 0.9493448005135315, 0.7423175556816761, 0.7326753466011232, 0.5722584876669231, 0.9343589918628212, 0.812626152105616, 0.6712529812475354, 0.725593006887213, 0.8754261469292633, 0.8465528695410676, 0.9884321141206938, 0.8604895176705588, 0.8328210746064184, 0.8116941256957738, 0.8697082508990124, 0.8715218571562302, 0.9772462473431681, 0.7530534307551483, 0.9164396213298943, 0.546318859741125, 0.7079294517978144, 0.779104209371171, 0.6174245073248683, 0.6347400287540844, 0.6432941558408505, 0.734504031267535, 0.7484417438737337, 0.7386556190218361, 0.5547935693827457, 0.5560996056463322, 0.8591042331316334, 0.8588369341325341, 0.7900983525744665, 0.9778085249688033, 0.7242821058876339, 0.7518234321322614, 0.6398053679148747, 0.7055149853807676, 0.7159079335945835, 0.7133771985101964, 0.7524565466092881, 0.9136527124133766, 0.5702378120272291, 0.5177765732912135, 0.8884841721150794, 0.5321264010547221, 0.5721715161966341, 0.8887831890708056, 0.7475884805642521, 0.5433089387030123, 0.5474861982392579, 0.9009720647342847, 0.58107451032276, 0.7214400361783191, 0.842036583124816, 0.6861102826477937, 0.7849843433674293, 0.7781247191515106, 0.645315076755161, 0.5173411758774483, 0.5424615283145791, 0.7112368722736455, 0.9638767978398111, 
0.6801691934372915, 0.6668988330991603, 0.83483545308073, 0.7476640647598576, 0.9229365271365151, 0.5131546387147847, 0.9761628947914731, 0.580997220048671, 0.9388296275827948, 0.9976020927147775, 0.6748400236294192, 0.971832245444031, 0.9667056318508997, 0.6075330258763375, 0.9226943942231801, 0.682511528080349, 0.9857881210802208, 0.9168438448187509, 0.9584065438896643, 0.620465366371892, 0.7813126568648288, 0.6945732346048412, 0.7998195172380794, 0.5671880986997778, 0.8496023794938576, 0.6213886385464458, 0.9365692455761303, 0.7827971188392562, 0.6558473671045175, 0.7147528269630758, 0.9605887691578998, 0.9651401594710198, 0.8904216833280082, 0.6408728302858866, 0.9355913060317791, 0.6239331203759768, 0.6239265967371393, 0.8815066063302409, 0.9271750971506783, 0.5872070720171185, 0.6541837361799661, 0.6648025912135113, 0.8800897232256225, 0.9646972216867352, 0.8564496166733047, 0.7954612389477489, 0.8018572536136546, 0.7368077062126361, 0.5615441647931354, 0.9067951782486163, 0.7307328754428681, 0.5721968026974031, 0.8858483670683425, 0.692162814934307, 0.6556809288740023, 0.8457015586750469, 0.9683434767594734, 0.8935338696526679, 0.5601641480311508, 0.5303437265826243, 0.7287401572548897, 0.5099937766594732, 0.6544272499539991, 0.5833156537844908, 0.9391406210766067, 0.993744676130049, 0.972243812995608, 0.8810321792615954, 0.7614210763464557, 0.9106878541826937, 0.7786169844678759, 0.9542014733636626, 0.7621711894638106, 0.6778063899278132, 0.8949694093692783, 0.6842431740108286, 0.9207639998082516, 0.5403966371084841, 0.9002480105296271, 0.8855656817085555, 0.9479983383560329, 0.639197648946185, 0.5364799329983814, 0.9211958689334698, 0.7726115697371396, 0.6663333652514676, 0.9840721712983641, 0.8479144332353785, 0.6244537119702945, 0.8618394032787833, 0.548959256757342, 0.7852261316981382, 0.843158932121135, 0.8070307147585145, 0.7840267252788236, 0.7311940896022665, 0.8478102997330577, 0.8704709479359671, 0.5088815192543511, 0.8993688568604927, 0.5404512344587518, 0.7491943759957727, 0.9419977154980564, 0.7100423885388494, 0.8894836777343098, 0.5009271214948339, 0.818007186908571, 0.7502383476500147, 0.7336121376598952, 0.982264502655048, 0.767396823405647, 0.8672528713041836, 0.7451599329198744, 0.6790825545089858, 0.7921740579202579, 0.90113199168579, 0.621457037218418, 0.9469328986695984, 0.7193322807093752, 0.8061324136965343, 0.7114953532408704, 0.7915820920787524, 0.5592361854457446, 0.9455618396191587, 0.967374651392527, 0.7846586606657, 0.5564994828088583, 0.8164792593124451, 0.624052332030862, 0.9115194599670198, 0.6055557627451109, 0.8004681406837666, 0.5360724504142322, 0.7742520437884532, 0.8333236731361652, 0.7238981395301625, 0.6522076722909161, 0.7901493750088351, 0.987656128222595, 0.6313950590723031, 0.5111337511351441, 0.9925443969974723, 0.807769898352081, 0.8451862513937655, 0.8861406838916412, 0.7555662921683335, 0.800366178042524, 0.9363632640638551, 0.547570727541185, 0.684393141513131, 0.5593176179485029, 0.9446053606401024, 0.946299033858322, 0.759812182676486, 0.8365028267896049, 0.9917960906042824, 0.7254030484933631, 0.6848874799046828, 0.5181812754446133, 0.6888889290663069, 0.5233318523853832, 0.6677237283448025, 0.5402498902682621, 0.6423044105385645, 0.9685205531820618, 0.8954069176570909, 0.9536888127158618, 0.6579723972084346, 0.5616157635109977, 0.7133668574592944, 0.7747775554327208, 0.9718814128375408, 0.9004149899921183, 0.9037229026829712, 0.8945312264959524, 0.8166717364397114, 0.796442270770692, 0.8638789218042029, 0.7192143298847276, 
0.6032359909995749, 0.8199498913534986, 0.9750033502480067, 0.6880753980731347, 0.8863445417911063, 0.9174458798837946, 0.583509003432199, 0.6837577044083423, 0.8603904137565367, 0.8674965836969752, 0.8973069146667755, 0.9823894364250612, 0.5195574345092677, 0.9019047304255278, 0.6668468407918526, 0.9960896001258335, 0.57461679551327, 0.7847427205067534, 0.8518483991613927, 0.9994132673389152, 0.7851984325674142, 0.805504827576133, 0.7859231810998053, 0.8334603113443356, 0.9188401485326088, 0.8481918777190194, 0.8291281583762748, 0.74761552727818, 0.8190321512784611, 0.6545285595299648, 0.8596298465771894, 0.9492035530026637, 0.9449913352845507, 0.524232295834198, 0.7204987328608028, 0.9730108239173486, 0.7896988032427688, 0.7331000891487568, 0.722164777691116, 0.7201104458308536, 0.7540055561202527, 0.5999175479395931, 0.8896919913601615, 0.7023386633963506, 0.8823415325687671, 0.7736733571272534, 0.8130869144941575, 0.5581532041861805, 0.59267791783405, 0.7477404327249555, 0.8364353292038835, 0.5112494385462871, 0.9415032612385543, 0.6030619078936028, 0.8282231706941614, 0.9219814951982956, 0.6042815686901672, 0.8649832213522914, 0.8704331137716113, 0.8515664101846716, 0.5312791587883494, 0.5132780907810911, 0.8543802284606137, 0.5400377463092558, 0.8914013072739788, 0.7322265749576984, 0.7288916086875139, 0.8993089893714744, 0.8318245189829434, 0.5494148015796778, 0.5268340970106551, 0.5162368807104829, 0.634657970498449, 0.7391855333993445, 0.5394717278099426, 0.7247290066435037, 0.915014942249861, 0.8614278273933464, 0.6212000670487117, 0.7618334207417254, 0.6901159810595486, 0.6372838384502196, 0.9614375126796443, 0.9084286005216535, 0.7683267896104367, 0.979739528041361, 0.5322087214097759, 0.677389838966787, 0.6099913948916957, 0.518685515366273, 0.5258558586733868, 0.8404624184124547, 0.783107200834799, 0.8168877687804317, 0.9440906117180341, 0.5915871412789702, 0.7155083195774129, 0.8494719616594506, 0.8206840848573154, 0.8840366746841697, 0.5940864892989604, 0.9625202359433089, 0.7170013959181833, 0.9812977389374758, 0.6492350560336124, 0.6363597247504806, 0.7160241797785473, 0.6440470597847234, 0.6796498258682144, 0.8575227254509543, 0.66547600149669, 0.7413663675068235, 0.6210378637199936, 0.688891959703017, 0.9518665986662196, 0.8980156391873513, 0.8591566833204757, 0.9827072624650137, 0.6257353053849599, 0.9236015640509191, 0.7609215573253497, 0.6268519264908718, 0.5048596310752997, 0.7969502122663539, 0.7954097652063203, 0.7558359287304273, 0.5722208780626803, 0.9096929453262113, 0.7989427237937796, 0.7018487706484159, 0.7218093113962288, 0.8091745562908543, 0.7404061541095757, 0.6836815871203421, 0.9013612479872923, 0.9038521647214823, 0.6370600439093486, 0.7632436673887038, 0.6452138690432688, 0.89883944092138, 0.7197515705990098, 0.7646078654996592, 0.8392947868387901, 0.6337658670591697, 0.963903717921917, 0.740669744829889, 0.9275738296356356, 0.920901227832003, 0.529347134818861, 0.5220730091606502, 0.6718771017095652, 0.5102994518998817, 0.5110095322378143, 0.8385947277588246, 0.550046788229519, 0.5563381519906799, 0.9263598839208965, 0.7284474735851905, 0.6338088706610153, 0.7945179677044245, 0.9030968262292847, 0.6735441900022652, 0.9310233864522878, 0.7902625368934596, 0.5400281508502177, 0.6271743584542803, 0.8135211602051378, 0.7100520171153991, 0.8483297250931388, 0.8355270300712945, 0.8411142759057504, 0.5323546941155959, 0.5601873712253603, 0.6469869134804409, 0.9103098387033637, 0.8251884074766609, 0.8111856607195499, 0.8934272642341184, 0.5145363461049262, 
0.7071295131917874, 0.6425587568663564, 0.7771088039191361, 0.7086382740596433, 0.7382621368711736, 0.9654912099412221, 0.9816897696612976, 0.8625219525900318, 0.5569088611287505, 0.5722312078852959, 0.6313694636345413, 0.8005805248253577, 0.7737108147889447, 0.7262379752294681, 0.6208750233113259, 0.9017410931637637, 0.9955474759286791, 0.9923750256185633, 0.9861585542797995, 0.6262395936733746, 0.6582975932864659, 0.8982619158477658, 0.9752667047556878, 0.7758505112097835, 0.622802208289711, 0.6836093736150988, 0.5386040910492049, 0.5423871484571003, 0.7025425981385482, 0.643079829780306, 0.726235308930603, 0.5371456326601523, 0.5850356818954697, 0.6197297945352759, 0.6102769919102071, 0.8140625525701708, 0.7859419745876225, 0.8397341030604777, 0.8822588518917414, 0.9149957529004557, 0.6004479392874982, 0.8646318130499591, 0.5375682054326609, 0.5817248662572607, 0.610239582381995, 0.8245201836520089, 0.5717396521450391, 0.6055253583817947, 0.8070814570216864, 0.8003542316979448, 0.7377516278675103, 0.6348961643523008, 0.7575621941108874, 0.6417639815874614, 0.8542244337818206, 0.8989031852778737, 0.6769232432267304, 0.7292331021157007, 0.9035932751276445, 0.5941638773899262, 0.5732649856403749, 0.5823933621407975, 0.9734759254144262, 0.7322731376155186, 0.6777984841453153, 0.7214369056875807, 0.9391785010015795, 0.8187401575791504, 0.8997368632453318, 0.9666474838479255, 0.9302481530244513, 0.5895740947998367, 0.8750873593864277, 0.846156651092459, 0.8497892826007127, 0.9594291101972122, 0.5309264006466587, 0.6461132923198729, 0.6058485900104501, 0.8410961395125862, 0.9614917890441848, 0.715427417641967, 0.6380395098663345, 0.6490140480455839, 0.9868685504316494, 0.503150282964443, 0.7715149769733514, 0.7910972058170889, 0.5671795848487021, 0.5511240314886784, 0.6724075478029479, 0.647197013735372, 0.5990136052784172, 0.9817557167428403, 0.8096864787344082, 0.6910410683753112, 0.5018895136212136, 0.9442494463633817, 0.6643861426183688, 0.7197001933319646, 0.7339071537282209, 0.5238791887581662, 0.9353611374339593, 0.8195803678140776, 0.7747842452678699, 0.7381248578258269, 0.5598860020442309, 0.6514652799448352, 0.6863479934180822, 0.5373712414862809, 0.5249975464837005, 0.5280936934069054, 0.5269383755916286, 0.5198611894558367, 0.5216729947055688, 0.540273086426588, 0.6648499223279636, 0.927747465936461, 0.7358517712969235, 0.5136869035550389, 0.5607993894301244, 0.5572455341563796, 0.7623357822718201, 0.9755718838076354, 0.747455677472561, 0.5460416946375806, 0.8506326622366782, 0.5123799034975105, 0.6700437736537596, 0.7524404498641188, 0.7936726498399898, 0.6482858102903355, 0.6918307790692904, 0.5362925567445394, 0.5227375249350111, 0.6731497614230084, 0.9528180530735932, 0.9898968395838428, 0.8748675161539647, 0.6574304122320023, 0.6684991542141377, 0.7437968907342514, 0.9137147842303848, 0.7414686778769922, 0.9540124217165116, 0.6292874748908375, 0.9817046743846204, 0.5144340416443955, 0.9872243933997009, 0.9095638401115518, 0.6754596023703134, 0.8542911127354094, 0.6685248672396963, 0.7833829332432648, 0.9812366020665229, 0.8048663515666151, 0.9157618239453229, 0.7880451118918126, 0.7500863161909559, 0.5880415795964637, 0.6351158321368573, 0.5855121680516236, 0.6954985700629188, 0.8053285012978684, 0.9244368705994056, 0.8006315823409067, 0.8095217491974301, 0.7598360394451066, 0.9688500063451155, 0.7092294023846166, 0.6314973361963954, 0.7342038453438753, 0.8859111588836623, 0.8011873789849867, 0.9334176131360783, 0.7339730719586757, 0.5588616462417627, 0.737657732565123, 
0.868585027442178, 0.5445109613002692, 0.6673433059881293, 0.6988024495869134, 0.5157178263156391, 0.5395271352586505, 0.8481628891866954, 0.9324592516593919, 0.6246178568549781, 0.8260325374237583, 0.7212938412920943, 0.5726576101255787, 0.8209346514293777, 0.5108058168086897, 0.9856498903190667, 0.9544501082474894, 0.9349293364849063, 0.692585836817279, 0.7808302354091972, 0.9838113630456338, 0.5389838255416257, 0.6034725901006793, 0.8316497714150399, 0.8749053566065914, 0.648945498783579, 0.9737886100041577, 0.7723563883323128, 0.5008279330967019, 0.787116639419347, 0.9479234514401975, 0.6782480451711745, 0.7374267486721663, 0.6458074906278434, 0.7460585167199334, 0.7832054420183322, 0.9217116984268392, 0.9389206824934463, 0.8507691314472945, 0.5034380103753959, 0.893039252237585, 0.9583771358039115, 0.639377666830192, 0.8233615763949378, 0.9693943880994937, 0.6082512123458352, 0.6911415669063239, 0.5847438334646642, 0.5818164344012322, 0.7294725716934021, 0.8752470656655612, 0.6976800893467131, 0.814310049650173, 0.6179091227267974, 0.5475779656088522, 0.6994115010447791, 0.8583420433348233, 0.9524677467375537, 0.5149527282879423, 0.570171307532666, 0.7588268885513052, 0.6601404055428983, 0.5674875439619949, 0.5712938937748859, 0.578742923344147, 0.6575377498733981, 0.5514797911624192, 0.8728833916508638, 0.7221500407093423, 0.7239129296576878, 0.9293894011817154, 0.8873234564933248, 0.9455476215546941, 0.5348533139228975, 0.9977865317251147, 0.7230765946126323, 0.5602168110321393, 0.7808961612048668, 0.9778516840391309, 0.578791228007137, 0.9744969100263318, 0.6444785482875955, 0.6666819952087444, 0.8588688918559978, 0.6798100966616158, 0.8504518021547537, 0.9629777329123743, 0.7948792471595791, 0.8325949091380869, 0.7664527861457241, 0.5119401134353643, 0.9885687865236161, 0.6935632038151153, 0.5552979426143005, 0.5215979186003623, 0.502809387178616, 0.8101895506879346, 0.8725985312986207, 0.642542130850923, 0.9717920269546558, 0.9981476844261328, 0.8266951097609672, 0.8606713345356837, 0.859006638420805, 0.9444419996959761, 0.852544612256288, 0.645045896708733, 0.673732752741481, 0.9028938833228648, 0.9022490629049897, 0.573276660805133, 0.8201335269807379, 0.8062246536445227, 0.9425494999736497, 0.7698234237028811, 0.7812627803707208, 0.6601270281625344, 0.6833211044094266, 0.9005189293119379, 0.741905035103029, 0.9708722901361506, 0.9325434198380227, 0.7326945968566655, 0.5076657090574668, 0.6107934609634899, 0.9509734913599894, 0.9082229102525441, 0.7447839562866976, 0.9356071289269521, 0.6628083125418147, 0.8449091337043921, 0.8872363884723455, 0.608359764772767, 0.7311550484317273, 0.77246101831335, 0.7760237255612348, 0.6270887423493159, 0.9226031382487441, 0.5510781038912547, 0.9376045600543428, 0.6047265543338174, 0.5361037785907956, 0.7417506395783122, 0.739019189977881, 0.7409128581034403, 0.9110898085509713, 0.9578754190903733, 0.9993470945636754, 0.6760689894765377, 0.7636267493353869, 0.7770520816106123, 0.8345795677178554, 0.9869821144203226, 0.9767631886355026, 0.5303008866140309, 0.7890424214710354, 0.8000845401377714, 0.7914186366956131, 0.8989338078068876, 0.7574630677174261, 0.8631027136699847, 0.6577004229639347, 0.708742269093042, 0.566354619763062, 0.5718628019150936, 0.503071762393992, 0.9936069991169546, 0.962458279288257, 0.8009830708807595, 0.516296680166372, 0.5982880086104149, 0.7919350790157077, 0.7914604345452405, 0.8096570387403117, 0.8916886167050608, 0.5378980926893404, 0.7785838600191306, 0.9325184492015317, 0.7065347828019137, 0.8638726854745045, 
0.599868821401625, 0.9576394188010167, 0.8444778973519227, 0.8242972062767895, 0.7017233579654094, 0.8579343122443654, 0.6111085786914451, 0.8191434332908873, 0.5118465635570333, 0.6951236647285647, 0.6218925094353663, 0.9260611398122116, 0.5946048582097629, 0.5219800930521022, 0.8549608692035228, 0.9221636727215834, 0.8394696494802536, 0.9026817674300412, 0.6589401326909363, 0.6667786782201075, 0.6630666177241462, 0.5691756323747791, 0.7751022762494933, 0.8025802575433575, 0.9752508495090317, 0.5872715917137229, 0.9367445940265996, 0.6829744575984515, 0.5063431323254117, 0.8099951671770089, 0.8256876270513587, 0.9842076596584544, 0.6055571012264312, 0.9414607054006818, 0.8892877027164785, 0.9766506907532224, 0.7929005927890584, 0.6105548731023314, 0.9435103349347012, 0.6364340208533934, 0.9637043939286849, 0.553922585275437, 0.9889048167578277, 0.6109328610509381, 0.866716290336103, 0.5776389328213496, 0.6041192546632266, 0.6989293207183811, 0.8572626312900489, 0.9951525019350285, 0.9790668392256985, 0.6490245022332725, 0.9656396978663327, 0.726755341428553, 0.6377000816634117, 0.9706060824884167, 0.7616071133075443, 0.5034711847133995, 0.6757382439512098, 0.6333253522326063, 0.655177853130088, 0.9770602363010077, 0.7509882178363865, 0.9207294120615884, 0.7241774205574782, 0.5990542853450753, 0.9406030861703343, 0.5279575365022711, 0.9728210792856089, 0.6366383981185642, 0.5513830863720265, 0.7150131965955506, 0.9210092678557928, 0.6974403023539286, 0.7117871903969986, 0.6118048766721813, 0.6786294320228123, 0.9717589272386185, 0.6839306113777397, 0.6222545678732214, 0.5843225706364517, 0.816516113924897, 0.9240337965740428, 0.6940046964376436, 0.9484094727002607, 0.7096442763585953, 0.5968897388443893, 0.5656724383781112, 0.5051217990688686, 0.9230175150202338, 0.9282755869883049, 0.687631412102969, 0.5398725192761846, 0.750843475982037, 0.7032362243484255, 0.8445449295503269, 0.7702341349400381, 0.6781213474066514, 0.9131490546689942, 0.675732278517527, 0.7157672360319696, 0.516016852396953, 0.6953534614156713, 0.8734654922526535, 0.7804881329640294, 0.5833350948287985, 0.5594148997068207, 0.9637860164679073, 0.54738006867765, 0.6020572898814258, 0.7462015263928137, 0.9273891464340291, 0.5833379172995456, 0.6873189619998237, 0.5105666829168423, 0.9136397716431675, 0.7583470261022474, 0.9344039229863914, 0.5412404719952592, 0.6943692173877771, 0.8178552463182654, 0.9863349636090745, 0.812619476124713, 0.9756985635794557, 0.5009468515129654, 0.5124187348819722, 0.5416450834510933, 0.6251388704310689, 0.749794645584661, 0.7956471867393897, 0.6309024893310049, 0.7824916748792973, 0.7249351319256507, 0.9356907222525706, 0.6836747343377789, 0.6345942338519615, 0.6210794467088041, 0.7285615696251786, 0.8115343717340306, 0.5359882097558171, 0.9064085878492965, 0.9572387603576407, 0.8384985077148235, 0.609957434753001, 0.5418231463127732, 0.5951368927136766, 0.7333430622614748, 0.5348068518160805, 0.5846475427357793, 0.8026477405284358, 0.5514632923977401, 0.75104217037273, 0.7528090568224972, 0.5134841941827111, 0.6590530618916643, 0.6190456246579895, 0.9034647023539315, 0.8382733410452762, 0.6360240818779539, 0.7176371458686219, 0.5059167571292171, 0.9328627294121602, 0.7073815919089477, 0.8074106623910939, 0.5697177093608167, 0.6136214129423891, 0.8140261727601713, 0.8402597825237004, 0.5442825053249002, 0.6405244187761032, 0.5502522712467122, 0.6385015547358888, 0.8463338156891069, 0.6345946566548436, 0.5766744956885794, 0.8875220259826198, 0.5537175930472589, 0.9493754882139969, 
0.5839936004502078, 0.5150660491593223, 0.5000392666591574, 0.9349992433613223, 0.5012125441484563, 0.8070979910942205, 0.6851246383004808, 0.726591770731496, 0.7784760528240624, 0.8523733379207701, 0.762766136536754, 0.8873176156917695, 0.6926804091070734, 0.5778813708802364, 0.9556020485303524, 0.8612694661480146, 0.6669779209256248, 0.995865549871345, 0.8447402019599726, 0.6560031084553939, 0.9896496746136852, 0.6214555651419851, 0.9894336531445542, 0.9032378350111951, 0.8941567086943745, 0.9492337164574749, 0.5838736350433884, 0.5491771663993514, 0.532951682378328, 0.8273922207629032, 0.7907663082339706, 0.609920160817432, 0.960317325147463, 0.7541672038644947, 0.7762204052370741, 0.9636236898544819, 0.8748422594149271, 0.7043211027472938, 0.8086957609981333, 0.5553759788182938, 0.7785748555325445, 0.7755426511621469, 0.5839985593224928, 0.8313267914639748, 0.8309740057379031, 0.9598253050949141, 0.5914592751999057, 0.5937644147683505, 0.7974635721162611, 0.6573531853598971, 0.9378782694634245, 0.8651310751102228, 0.5522065172995136, 0.5296476000042134, 0.9043383571738728, 0.7972368064139712, 0.569351099300613, 0.9110433944250793, 0.905727643318029, 0.6264608890572052, 0.7117474707315747, 0.512657542583697, 0.7564182637506036, 0.8310815744909419, 0.7653473022162028, 0.5188725791153027, 0.9102537176948475, 0.567464225725248, 0.7384189980150471, 0.7269944798638353, 0.9743701277021364, 0.518723120735142, 0.7189650224598585, 0.8773490836361324, 0.810602448879796, 0.6688554418442395, 0.9679116313712568, 0.9620843701706048, 0.9629227870166467, 0.8936724485090056, 0.6277642139745165, 0.963551265999379, 0.6340997666338266, 0.9292695610913646, 0.5966338687509076, 0.5310442316611228, 0.9318355804785958, 0.8248986769713833, 0.6079451312207413, 0.9391224711062762, 0.6936850888136467, 0.7713377879573884, 0.534463660555052, 0.8591469749341554, 0.8027566274254397, 0.9129955371352383, 0.9971274573044289, 0.747615817057609, 0.6841980607771536, 0.7866421020249239, 0.6104261487274811, 0.8944436005690404, 0.9985118722376358, 0.5270420036704774, 0.6699825766949941, 0.674784453599302, 0.8360193827264525, 0.9372533124523315, 0.7677635444946864, 0.9492489735625587, 0.5200769571519746, 0.997196905939027, 0.5825643019333262, 0.7546581795434796, 0.9102851210730737, 0.5714184872055691, 0.8962717649297255, 0.6931020705826774, 0.6845143622088485, 0.8409423380598593, 0.7734679475339662, 0.9066184629886563, 0.8624307854320123, 0.7494768248713953, 0.6245030994015324, 0.8255850177365647, 0.7122906633492101, 0.8901182468479936, 0.8766228332646482, 0.5850932963474207, 0.8452258949036013, 0.6007362697264276, 0.770431344531564, 0.804587090234633, 0.6654636909215492, 0.8322027554457749, 0.8621105081784558, 0.9287222468910685, 0.8315631551845997, 0.6750700791938962, 0.9236967412563286, 0.921182726057747, 0.8659835462599514, 0.8416422713057718, 0.5025418342726374, 0.6261161991687716, 0.5022091917007929, 0.5798928080065433, 0.5363576071107952, 0.669321760789553, 0.6058564889216264, 0.949921351216715, 0.769697909051791, 0.8072330848745706, 0.9229931489233538, 0.9405511861531048, 0.9054548979093746, 0.5432350603691514, 0.5037711072100077, 0.9567080160878203, 0.7602639315118973, 0.7362844826923686, 0.8125483577018, 0.5070662239247713, 0.8731969325018999, 0.7832588623898722, 0.6639170365721706, 0.7374720286951952, 0.9959382560561849, 0.8964147119829369, 0.9666666444231455, 0.5472655302784217, 0.5261671368780566, 0.6979253195861839, 0.8123692988777219, 0.6275802582996411, 0.815054061122898, 0.985602039718307, 0.5370218840351981, 
0.7126101340392066, 0.9522998385310161, 0.61974962869716, 0.5982490497504899, 0.845078745932625, 0.8932673766938211, 0.5295815801762733, 0.5999261155208725, 0.6702417264681098, 0.8594253683141393, 0.8052683353229192, 0.7278016429209253, 0.7409097465099967, 0.8197406973287888, 0.513652548227453, 0.6420963097134954, 0.5603168349048113, 0.8172826001923763, 0.5393178754445933, 0.5332718323313894, 0.8519098401370075, 0.8262042935470987, 0.9922044126418307, 0.7845126330467393, 0.6663749832298982, 0.5987134484306595, 0.9672149199694884, 0.6248393052635071, 0.6062108300189502, 0.9248148008476116, 0.899555141689558, 0.6773681537039467, 0.7200500977450486, 0.9687138689305688, 0.9767348427100875, 0.6054688359882712, 0.8351133322729526, 0.6062457966071007, 0.6816282021822521, 0.7364293734327925, 0.802672011819912, 0.7865980760794906, 0.7609240959704748, 0.8285098509945197, 0.8603402414436888, 0.6962186221989842, 0.7452711046221938, 0.9722666122281686, 0.557468984823842, 0.9309111487841875, 0.62284183383019, 0.832422201295617, 0.7232267588097433, 0.5741903520926116, 0.5981352179523063, 0.8256760856588591, 0.9584928740300414, 0.5768374678332068, 0.990903917743431, 0.7084666723715222, 0.6634823747826015, 0.5355127449845863, 0.7822253527509735, 0.6701070755386045, 0.5428637926882567, 0.647256917232664, 0.7568676124509219, 0.9692849764605767, 0.8194730992579946, 0.5651589214688639, 0.8040566954573232, 0.5429332779042513, 0.5406937004180606, 0.8918178778327361, 0.6086653711738561, 0.8800308104626838, 0.8561798024991795, 0.6923889677769046, 0.8853399901343189, 0.6836649855626138, 0.7748478042444298, 0.6002161848701351, 0.8326188015968141, 0.6972354923184344, 0.5708936291386966, 0.6818619011004947, 0.8545730400544295, 0.8723305220762754, 0.77861089974283, 0.8162508324650718, 0.915349636702316, 0.7008135438572292, 0.6176489834168915, 0.9281367934767483, 0.7312299744609534, 0.675819504029632, 0.8504152248216135, 0.585173016608992, 0.8565292819132899, 0.6859066648085864, 0.7774916802034492, 0.5735319485523755, 0.5519996099143231, 0.7085236585315975, 0.8982749853422598, 0.9685455356492862, 0.6014647954908803, 0.6206324004634931, 0.7854355539837583, 0.6381949377370446, 0.8655253007187989, 0.6579097957014209, 0.5056257281065363, 0.9769617407348852, 0.8873224954015593, 0.7379857994601929, 0.6555354101494208, 0.9367706567962744, 0.7713150968578296, 0.8012475976613829, 0.6818826851552302, 0.8606527132326749, 0.6693918815980305, 0.6729963239339816, 0.8428500348309795, 0.5711291304962418, 0.7284553951449911, 0.7768262605668825, 0.7692036384502099, 0.5324369442341351, 0.7031816447735146, 0.603544063665086, 0.6777282594173946, 0.6871375876584906, 0.5140556195493159, 0.7995058520068807, 0.9308008149349221, 0.9490582773495729, 0.7567373813716625, 0.9633017597458, 0.8656737762784562, 0.9809215064277793, 0.5430884668806616, 0.5177917403728367, 0.9233513662226196, 0.566957789763743, 0.7414991893280478, 0.7060478587016069, 0.6183922960718176, 0.7806391659076204, 0.5220521483235272, 0.8206707497429648, 0.8481840151042581, 0.9326198370282011, 0.9567209200769395, 0.5535557431531415, 0.9015954238080286, 0.8453266986902508, 0.6287591138160658, 0.5969346763611441, 0.9862292826146752, 0.7907527131485528, 0.6656703071531956, 0.8143377910309539, 0.6148848234114292, 0.5926609099983556, 0.7100064730808691, 0.5528261875323367, 0.6760433499110663, 0.8215723629471162, 0.9374890526573204, 0.7989220983160481, 0.6368835098332224, 0.8594838047802251, 0.55843655410501, 0.7926585646827202, 0.9516104540133019, 0.5244915958587759, 
0.8230033880398911, 0.5785343978563517, 0.8222217713576303, 0.9891385054389079, 0.9321858413096425, 0.6842417511462657, 0.8592767825017105, 0.7216250589186606, 0.6679067025776937, 0.5149734392296137, 0.7854998503013882, 0.7194843652305638, 0.5550793739900475, 0.5094463340462309, 0.5574047608223625, 0.8781257195307903, 0.6144873080869593, 0.9258277492135638, 0.8550150564617599, 0.707909569425702, 0.7490012819575291, 0.6372643462974394, 0.5755319022613901, 0.503807667417667, 0.6377858109157388, 0.9298150396821497, 0.771855666429687, 0.8565524376548259, 0.9808462939926073, 0.5523820756047375, 0.9708199593172722, 0.646060749874654, 0.6024841461499694, 0.6917268429973857, 0.638251976212411, 0.8247329308148112, 0.5865322504512117, 0.8034131211541642, 0.934850579075209, 0.7967218350757975, 0.7874952116540579, 0.702788669445252, 0.9978518803610521, 0.7479427455909662, 0.526642576391495, 0.9680078167862922, 0.5804404867948234, 0.536513272701844, 0.8762332924148399, 0.8660601084184769, 0.7885296205392007, 0.7485077227276464, 0.6950979054406289, 0.8161555105497491, 0.8570419693748859, 0.6517274704564842, 0.8096830674332285, 0.7797278537414566, 0.6719570838214521, 0.5855643707270016, 0.7202525214439972, 0.7128944462841299, 0.5032168910487345, 0.8483360807989035, 0.5305664264088521, 0.5960449829584391, 0.8760866103877585, 0.6243886472601813, 0.8724252180544008, 0.7376963486731695, 0.5128264779613079, 0.8266142032311891, 0.7673323504193978, 0.7880394331114096, 0.7703183230975812, 0.8467711099713169, 0.5186892186473948, 0.579920900077552, 0.8009244961523495, 0.7018692244453142, 0.8091844622434212, 0.7021683393458953, 0.9525860187510808, 0.5698237785800344, 0.8221941385521242, 0.5486883171544652, 0.7616908932652768, 0.7619927864566292, 0.6848339498216883, 0.8772894856565959, 0.8370267836770462, 0.9209079547661503, 0.5511514033067986, 0.9417911581079392, 0.7516009214892393, 0.6879175015935123, 0.8100776072822975, 0.763770529322895, 0.5837267170805998, 0.6751739585802554, 0.5843774021107886, 0.7165163178002438, 0.5587828743475284, 0.5102648694027399, 0.9130333496628259, 0.9832545774308239, 0.8652513972648648, 0.5111868178387056, 0.6154350246221627, 0.6650289079608547, 0.6161531234279186, 0.6225879218703785, 0.6533713299581343, 0.5773677627894813, 0.8526461700780015, 0.6022619144225032, 0.8121915139448492, 0.6397230374910176, 0.5640585979851966, 0.8675641916559036, 0.9466789204985864, 0.825700022873288, 0.5150135556969488, 0.7652916191912205, 0.56484361351571, 0.9272722653227244, 0.8810471404359373, 0.6553568011458208, 0.5322075606959329, 0.6868193596958916, 0.9491385245546873, 0.8732521314860817, 0.5449105421217533, 0.6237895182596438, 0.5650161805532531, 0.8046464942966671, 0.9452969883062525, 0.7198464966770992, 0.5012028579126862, 0.5948177239377797, 0.6233581823435448, 0.6663018783848174, 0.7541752681250806, 0.838374939363798, 0.9924583097743709, 0.605748225204902, 0.8170937868179384, 0.8819405606592039, 0.8953872285386024, 0.7613906403253463, 0.7812062899159056, 0.5725299242138986, 0.9643585686525795, 0.5344800892498531, 0.8058346677883022, 0.7771379080984946, 0.7784539780524589, 0.7950225590151399, 0.8276210608236265, 0.653928195683174, 0.8595364885520331, 0.7003700952034859, 0.9981396877202174, 0.9996501690593609, 0.573378133637297, 0.9605807208379833, 0.6241563118326878, 0.7928369706497539, 0.9309647156418577, 0.9410788129240647, 0.7456271381594973, 0.887818083298638, 0.5802576070361323, 0.9758646034725722, 0.9234179480448766, 0.9832749381126099, 0.595007318948005, 0.6403120016167165, 
0.8418190728745928, 0.8072384314181336, 0.8970178913230814, 0.7812211949771313, 0.7557673687312643, 0.6769557663536361, 0.8125369225199659, 0.5420957939699604, 0.7056401648355234, 0.8890480934526065, 0.7630432540646406, 0.5322592833007415, 0.6779637478442955, 0.9981088032006484, 0.6304615782685883, 0.7446285776127196, 0.5192774897373966, 0.7381138544593628, 0.5059611728553004, 0.8451946951185523, 0.7370921637981896, 0.9996046060958176, 0.7261103662005324, 0.6057989663561532, 0.631927031855222, 0.5114025234597249, 0.8537261685977682, 0.873137271353745, 0.5613686690478964, 0.8400462089982969, 0.6990063615854591, 0.7416878255454596, 0.9387128705012002, 0.5791656394244713, 0.766589176481664, 0.9530631562807699, 0.8981956357503869, 0.5768977531653822, 0.5540756058333226, 0.935530782628852, 0.710640065597345, 0.6639700640683901, 0.9438310534283578, 0.5371892171381056, 0.5883978837424024, 0.9799509684807955, 0.830958047471148, 0.9715501735350318, 0.7560333557267717, 0.9341641263645439, 0.7412203921336978, 0.7184953362250885, 0.8193383030804043, 0.6014477608185932, 0.6026604573146075, 0.6556123718556368, 0.7921871769354515, 0.6076851804874278, 0.8265145805999781, 0.9063959479608973, 0.5173099284036233, 0.6046398504114269, 0.9450842041979363, 0.9094484447072101, 0.9674251916662269, 0.8161687345195737, 0.9706654843241075, 0.9306505714038966, 0.9220869941041601, 0.7252603383294509, 0.6808287275153282, 0.6826610721565358, 0.8494710323254485, 0.9914212196898031, 0.9322939425300365, 0.8481801372754522, 0.6344850866009024, 0.7159535844712148, 0.7081174411278928, 0.8198387642945947, 0.9040286197708592, 0.8106059198358909, 0.5527656405913508, 0.6754093839681057, 0.6160041415677786, 0.6136693649804644, 0.5211803898054612, 0.6645899071104351, 0.8658221672241138, 0.6753733665161313, 0.7862538611074619, 0.8229926222721555, 0.7386421621022523, 0.7550635349693529, 0.9098915511710705, 0.8117838998148323, 0.7426325420127402, 0.7055193734424141, 0.7813964838007532, 0.7179894268290834, 0.7504676397363348, 0.9514921601071437, 0.6749008866164863, 0.7957278192922661, 0.8083671407256653, 0.6022080851247461, 0.7427267927268458, 0.5294860234783685, 0.9572543945757501, 0.9954989018745688, 0.7504201420412218, 0.5360251784017425, 0.7710030712368793, 0.5616859422332268, 0.5086151578121703, 0.7740299253791894, 0.8592174216733688, 0.5465401717942419, 0.7698072602658657, 0.7812132971085022, 0.7833414480342056, 0.5893674813914631, 0.8400220133483248, 0.7153814896645617, 0.6139974328873021, 0.7355318050099, 0.8252520616541414, 0.516153713377169, 0.9112762420546237, 0.8273880889083909, 0.8960675853248702, 0.6246029803014972, 0.5171056691781694, 0.7026885674925218, 0.730914464180012, 0.5353783160469554, 0.9835110679424796, 0.812060169157127, 0.7608372849372362, 0.7600838850778715, 0.7868480409544684, 0.9276579940183655, 0.672812212958253, 0.6349359075508048, 0.8685360472532447, 0.6973482314352476, 0.6018735017130887, 0.551646919139382, 0.9927386882656222, 0.7542969065757662, 0.5219236626752427, 0.563214257826583, 0.7710208302536774, 0.7174366505273577, 0.8053102066592004, 0.9201283538389093, 0.8189119521327839, 0.580417799714827, 0.9993140680446964, 0.7910567791112213, 0.9795690793277205, 0.7777571023578369, 0.9642306002863801, 0.697651496670199, 0.5228120051330659, 0.9817809051246545, 0.5411420641193934, 0.8011028030423378, 0.8699857842396463, 0.5658847615147735, 0.9187263437218715, 0.6523408144045028, 0.5415344519944065, 0.5873372051488833, 0.8342222734440151, 0.8136171463655082, 0.8089579804050668, 0.8712479642562247, 
0.9497625328575128, 0.8640891990629378, 0.8463934591658376, 0.9042836327233638, 0.5791611691212255, 0.7404214414570007, 0.6482317398689432, 0.569655216416533, 0.5181859056553474, 0.5347056684523148, 0.8076436270974472, 0.7422187745222979, 0.8866453008107739, 0.5764582161570628, 0.9991279393200214, 0.9826123734969399, 0.6961100614094907, 0.8414972889343706, 0.8469956921190072, 0.5571840852248895, 0.5809871681081333, 0.9662670686298709, 0.5328882023134897, 0.568303539274598, 0.8569564428863419, 0.9758817341191537, 0.5528955445274155, 0.9994449193398074, 0.7779723270731372, 0.6124047863833864, 0.6837289032227729, 0.618410752232444, 0.913549304101059, 0.5238631516828832, 0.8809274340642403, 0.5463906969333729, 0.6594406294937891, 0.7496274260014804, 0.8735044268317445, 0.8317260558723785, 0.8610834739139013, 0.7302698667756238, 0.8981771051879945, 0.7280133206637629, 0.9508585023127107, 0.5976172997995461, 0.6913770452491872, 0.9393130136462782, 0.9920115443845849, 0.9901918518758611, 0.9488811979785263, 0.8958337379169983, 0.75730264537726, 0.7577382656921781, 0.9256232646128403, 0.8741445927115739, 0.7347821818818028, 0.7583108749961396, 0.549002224755328, 0.7988931395888709, 0.8902454169755363, 0.9815542250788536, 0.9235614124702167, 0.6336502970120924, 0.5104724229030451, 0.5952880950619368, 0.832363356568834, 0.5359927123717212, 0.5409450990629462, 0.8093219577475967, 0.5919258381331547, 0.6284042922358494, 0.9707567610291936, 0.7261382208061528, 0.880427108794196, 0.8266737307103462, 0.727443628838056, 0.5122572019955757, 0.8072149324889621, 0.8113687262415736, 0.7800388043299118, 0.7785408196719712, 0.6485795192344325, 0.9454582849248951, 0.9667598864516254, 0.8602928484839529, 0.6542051107441502, 0.8680049211439669, 0.8294527478277032, 0.51890009318374, 0.8695953845630969, 0.9073419618222895, 0.9933007030966636, 0.7982437567053842, 0.8698026787795, 0.8246868605373437, 0.891707755905778, 0.5624269807000131, 0.941936502853179, 0.9329179333330886, 0.6606774995926497, 0.5312688482828851, 0.6268988048542531, 0.5467469157456258, 0.5430192220180976, 0.7647340574870074, 0.735536418074586, 0.7809708954356858, 0.6962186135097255, 0.6525709291773635, 0.6294350151107467, 0.7695415317667507, 0.9182795015290149, 0.53298619876591, 0.9313645067852616, 0.7917577884869551, 0.8055858063348955, 0.5159849128857303, 0.5896337129008007, 0.7680313645889143, 0.9874934494868973, 0.8774448480518509, 0.6444770492868335, 0.801773331034852, 0.9988383184814792, 0.8748315393593838, 0.8862377682807767, 0.9746967653623132, 0.585156561564778, 0.5240116502963319, 0.5933469819131072, 0.927062478256679, 0.5216566007055866, 0.6323303703001171, 0.8349254498467276, 0.7773073554490674, 0.6367973866128076, 0.7024473146897228, 0.9877992772852564, 0.70724434764404, 0.86587748648681, 0.7814001051649704, 0.587414674563862, 0.9151369529856144, 0.5959135027446922, 0.8170002198799203, 0.720844527618669, 0.6869647830679353, 0.7121067773822027, 0.5217726728793535, 0.7719938448631494, 0.8307845939111851, 0.8803478882575779, 0.7525127299456861, 0.9330172734710497, 0.7032977224698904, 0.577670800337559, 0.782330791737595, 0.5879765848657468, 0.6440119102731225, 0.9516875808177099, 0.848138410814524, 0.5445782737291484, 0.6316663233773114, 0.5683860113095582, 0.60550641370436, 0.7591000327876483, 0.9687076605528813, 0.8354458657240298, 0.6826171669737039, 0.5119143994749202, 0.9342990755468417, 0.6854511602315989, 0.9902567062844091, 0.9773098026629743, 0.8621053330886026, 0.9685587600450085, 0.6226665640987814, 0.6641612747633737, 
0.8207959557593397, 0.6046667023636073, 0.8988426101652707, 0.6319251744258916, 0.5882514376417067, 0.7435739608870376, 0.7772982336605014, 0.5697424560169416, 0.850402708332054, 0.6847506343958345, 0.8747166916604145, 0.6899622379934945, 0.7541140570649647, 0.7308865313372781, 0.5442564183482173, 0.5543311545464, 0.9674452215824211, 0.9520764657241781, 0.745175155099594, 0.7384725609975424, 0.7470134168857155, 0.660031137225078, 0.7567556148353243, 0.8725634543308297, 0.5307212413150989, 0.6743237146741068, 0.95090792334755, 0.6623854731857932, 0.8823613556290397, 0.9625277363323828, 0.9536567255057538, 0.6235644939221178, 0.5618939745305918, 0.9313179308049618, 0.675732950945956, 0.5283416621834669, 0.7560188097562397, 0.6557098721825398, 0.5226687714261122, 0.60655057140534, 0.7973552102256841, 0.7983329100394764, 0.9562838472762163, 0.534019243757723, 0.7464493297684847, 0.9472096255383258, 0.5161886808275007, 0.710539895093891, 0.8052642091990014, 0.9541885459322319, 0.8368923163380528, 0.9606419136615804, 0.6243579461282991, 0.6828937837820167, 0.8061943168073233, 0.5719693341071056, 0.8404130891631711, 0.6080008582453049, 0.8475777540556471, 0.8695987065875079, 0.6407210917709378, 0.8172152833308074, 0.9036758426479015, 0.8956437327448861, 0.5028049914791978, 0.7401206809164466, 0.814505590510454, 0.9148767731355076, 0.7313634930176462, 0.8202366001989045, 0.8823238750114626, 0.5182015331505399, 0.8268749760853529, 0.6721148638549626, 0.5253445657924735, 0.7238568379086875, 0.7608532836215502, 0.6225893383261671, 0.778202201778178, 0.7039953661375438, 0.5045636742504812, 0.7480352909939568, 0.6071268976119856, 0.9413334395989285, 0.5089504314997424, 0.811207707923008, 0.715854562630545, 0.9160716466730712, 0.5385451047185765, 0.504040396294351, 0.9481798897870797, 0.677638501426507, 0.5815815712951585, 0.9048799034689534, 0.8981687657309205, 0.9053230354013788, 0.8406507026794051, 0.9562443063348363, 0.9435701144444191, 0.8467056714584167, 0.6822460387172442, 0.589380166979103, 0.6203744317737567, 0.6674612639188056, 0.752169762348985, 0.7401828590433716, 0.8224722644550835, 0.7654022830126418, 0.9229654136107421, 0.9611181005277536, 0.6371013952618079, 0.8767623466419003, 0.8665289484095975, 0.8276617659108596, 0.5193813803747143, 0.624025118652983, 0.8501806840200858, 0.9216713336308479, 0.8570671318286112, 0.6441202564758277, 0.7560360735616678, 0.5179075613740156, 0.5317977816042376, 0.9767726728142343, 0.9390786193531149, 0.8768223349148478, 0.6076111164936312, 0.8280801597829788, 0.8672505793956599, 0.8569146206436604, 0.6719133041277869, 0.8853042899368039, 0.9350565196626751, 0.8071822133622781, 0.9737557965475013, 0.6499224634360703, 0.6827260479608264, 0.7872058803624107, 0.8232865419400963, 0.6763747617417706, 0.7182632477966253, 0.7458785281077954, 0.7189288959784279, 0.9260947931474894, 0.5493234002192391, 0.7312765179363415, 0.8751010249079618, 0.685440719319419, 0.9931721127781864, 0.7284455588057588, 0.6411748738536229, 0.9014479374630088, 0.9133902080826984, 0.6686973301114045, 0.9792904227011552, 0.5695799310847466, 0.9405098560258136, 0.5510273331092063, 0.8600482914066792, 0.6065165192060725, 0.7303440465063289, 0.5617597357582442, 0.9529178669858909, 0.511450475827993, 0.9521657913514864, 0.5524743603777087, 0.5771395779162529, 0.6610515626956674, 0.8237912318387126, 0.736583678075398, 0.8500564382502812, 0.5868996231761716, 0.6193784736545376, 0.702605359245227, 0.6854709442594149, 0.5297487641213559, 0.999754911049881, 0.7625763370398482, 0.7280521779730853, 
0.5247330243865274, 0.8558161251416838, 0.6155078076723661, 0.865672733566386, 0.5461326839424219, 0.9305628991457955, 0.6476638114553697, 0.5078371532810659, 0.7558847597713207, 0.9818354239793489, 0.8481970689723692, 0.9520440433707861, 0.542523584553827, 0.8287308764080221, 0.8883306734422571, 0.5504482496646828, 0.9197647350081668, 0.8123541095613596, 0.6784812341195744, 0.6107195984618692, 0.773951801316857, 0.7277809672514264, 0.6925532081240201, 0.8284796490165613, 0.6576770324847594, 0.9394809085467613, 0.649322037784573, 0.8014712259082606, 0.7333293230718677, 0.9120183545904892, 0.790866584944153, 0.9883020280954045, 0.5492292639066756, 0.5709948027735099, 0.958965402568621, 0.5294289709997171, 0.5221205815985168, 0.8445831226242944, 0.65926449128619, 0.7297909400359269, 0.7730700232115171, 0.7203320706685907, 0.8501777128350786, 0.6546285936770815, 0.9445437645878028, 0.5243568389771871, 0.9330404004436124, 0.7347433409704183, 0.5024915703510742, 0.8442714842701309, 0.6702277184112371, 0.699231875645732, 0.5500472036803161, 0.6862821673862489, 0.5904428627923752, 0.5955246396441576, 0.8868226354213642, 0.7947850645561984, 0.8661948982061654, 0.5904692444241713, 0.9067443811019166, 0.8130567326980924, 0.9407416083806781, 0.6013413716082177, 0.9700826538614653, 0.5924058035236179, 0.8718424332684643, 0.510728700957544, 0.6008520824769992, 0.9591055831161781, 0.8387836227037209, 0.5482677469837536, 0.7819005539800881, 0.554369470730879, 0.5853428675606951, 0.5537469747976009, 0.8138307492526865, 0.698305826598488, 0.8230895980048585, 0.8572358279237797, 0.9504807712096133, 0.8241065540226931, 0.9116089864279272, 0.8997344491365745, 0.7505267690902451, 0.7841605989255629, 0.6537343174547577, 0.8890977806878855, 0.9102489793221631, 0.5074337899251586, 0.8698120454118636, 0.5017861857321719, 0.7369207449848436, 0.7780399756559748, 0.9248497790168444, 0.9366697725437323, 0.6644396133558534, 0.8676893336659257, 0.8651649314252576, 0.7132791420601436, 0.8851121692953942, 0.9629442668770863, 0.8317048508172631, 0.6746711816804156, 0.630719542778569, 0.6101065320722306, 0.8361338343803342, 0.555517022993887, 0.6878554864475088, 0.5896858837479098, 0.6138929765683876, 0.6769413272514417, 0.9679335467347959, 0.6751040391912554, 0.9198951736855223, 0.6849724868500267, 0.9882476839577147, 0.514733614447415, 0.778445067535768, 0.922972843003414, 0.9322620719508474, 0.8967176188033279, 0.8975596751327655, 0.5854767061449817, 0.6243071783660414, 0.6452647924464454, 0.7891741387657406, 0.7447401491530364, 0.7695436937521316, 0.8517643018039832, 0.9060876259865407, 0.8220993820593753, 0.752461119273908, 0.7078811110705496, 0.760064067782549, 0.8479382539932564, 0.5436727782334454, 0.9148610066831129, 0.7494527167051457, 0.6136107123472052, 0.8831674441866009, 0.6202734064800857, 0.9514779316849922, 0.9064499710814171, 0.9889176098858412, 0.930773923892693, 0.9355869659808265, 0.6527479221499977, 0.9387738010499496, 0.7654824379081191, 0.8032476967679795, 0.7718609521865656, 0.9965215047182499, 0.717065047373623, 0.6388070730486002, 0.6505799026878576, 0.7101386114060204, 0.7997288698073706, 0.8508013044939149, 0.7194494844593613, 0.773632802269969, 0.8806663909697086, 0.8083364754478386, 0.7139682155000515, 0.6784885536926091, 0.9444811149405924, 0.7590976038694095, 0.5724926301337734, 0.9934353919941411, 0.8281886247458795, 0.5091347705637892, 0.501748070219536, 0.683394674858629, 0.5982237821934275, 0.9189081518675422, 0.7566779679715803, 0.624289315789818, 0.9936731743149791, 0.7869810103967283, 
0.6683838747698069, 0.6544663185944204, 0.9000234930720306, 0.8740768581001676, 0.9608237117145026, 0.8567581896857528, 0.5735594812078052, 0.8831795794216069, 0.5888828897085976, 0.9825298680056086, 0.8909307442710556, 0.7476033953986694, 0.7105227399889753, 0.886052552971862, 0.763074144448429, 0.5727800766085863, 0.8108340868094173, 0.6159431771885893, 0.970347885395811, 0.929230384246003, 0.8461412475067169, 0.7674744459923637, 0.8197066713881626, 0.5108155649687821, 0.8801583995272122, 0.9810702857024985, 0.7614052053774699, 0.8867638723918347, 0.9970317893270931, 0.777121840903461, 0.9632058055847028, 0.7279998121233532, 0.8310380188221359, 0.7011224136037146, 0.7475642835370704, 0.9486803198570758, 0.6235650630529357, 0.850281756775513, 0.6620706633319766, 0.5267415894512016, 0.7353257646558352, 0.6640536100063643, 0.8625074445251911, 0.8147351203196846, 0.6782690078627983, 0.510340536095562, 0.9675743657140268, 0.8156991463315322, 0.9467191351861592, 0.660935070630384, 0.9159151289663251, 0.884576624201673, 0.5863958632793651, 0.6809285910090923, 0.9137955007502451, 0.6532152072121166, 0.7664609380078853, 0.6923774576877005, 0.846727616705374, 0.7017631799038455, 0.5573308420760317, 0.8230179437472747, 0.682491394156699, 0.7308202125520533, 0.6651955833732, 0.6100876822653598, 0.9655653940104474, 0.5595298866321099, 0.5826577147861662, 0.9604532083901766, 0.5160802098013906, 0.8966207410330207, 0.5355271002483042, 0.858326636329299, 0.9940564436098952, 0.9126125476493909, 0.8617468753881017, 0.5728665676541093, 0.8195987287846496, 0.6298422652140451, 0.9007967813275553, 0.6408076882227933, 0.5702908917760947, 0.9998355367740908, 0.8700879704034847, 0.5894705032943002, 0.5070534235386972, 0.8539650380685524, 0.903988798054447, 0.6720624165266293, 0.942774356260544, 0.6994921368149427, 0.7655464630953568, 0.7994379384120709, 0.9402525437370264, 0.6799293551854271, 0.5184386587951626, 0.5923098402416104, 0.6994804204340063, 0.6833675905127194, 0.8736255310853414, 0.8361467363475303, 0.9328578525131336, 0.5413985105452254, 0.603442368849302, 0.9509453152502207, 0.8640858310790651, 0.6101082078113969, 0.6089611216254606, 0.5913308596837631, 0.9088459409472089, 0.6093042584123562, 0.6961273904782643, 0.7964965670147843, 0.7142052995180183, 0.5160344375737145, 0.5061227141852715, 0.547352116778061, 0.9008386953223235, 0.8954438357677224, 0.684820830052157, 0.7504891140523864, 0.8319316618569503, 0.5630332067567176, 0.8477807779478816, 0.6573940721997518, 0.5859410263672951, 0.6159904606269719, 0.6328693713735745, 0.7927285957898806, 0.8356193910866577, 0.7384734535540372, 0.6831626807336029, 0.670439897434248, 0.5784506323327947, 0.8044073910660896, 0.9012091010160727, 0.9948203668987314, 0.9222857303941645, 0.5551420578487756, 0.6779852919185088, 0.5980726277730533, 0.8369095153061428, 0.7795116284677148, 0.6199348434129323, 0.6075885109743344, 0.758565139606844, 0.6324669907328062, 0.8829152758397707, 0.9110807147789013, 0.506683082278309, 0.6212380748320134, 0.73140233766797, 0.8244905712042646, 0.9589011802486122, 0.8134652172277164, 0.7155950017391886, 0.9991334366566638, 0.5637184146778567, 0.9715878694241007, 0.8908320611688832, 0.8119924086437931, 0.7145135099286344, 0.7842268156957815, 0.8431670153436817, 0.519447940854487, 0.6515709489593756, 0.6622782519247057, 0.8801506909310948, 0.9922833082994893, 0.6362808748607693, 0.910250843955189, 0.9808359648674215, 0.8799594633745513, 0.7254660290582452, 0.5998062834839086, 0.7876878703739287, 0.5923711890736538, 0.602410794307294, 
0.7736658664694482, 0.6517171584366296, 0.711604910040186, 0.754760337217821, 0.7827163359392575, 0.8036883201600789, 0.983003119034372, 0.9781377851096126, 0.9367774644007265, 0.7785183170558536, 0.763233884415861, 0.6067376095600505, 0.8521762003658775, 0.5648899439410049, 0.895585986830917, 0.9547320383628244, 0.8145006822364994, 0.9175809763121462, 0.8852567220170398, 0.7309076819688112, 0.8370561604450821, 0.6380005547968222, 0.998636287896666, 0.7713352907410059, 0.6110369952256214, 0.7681059530715695, 0.9945611209633708, 0.9212101250989504, 0.9499480197946983, 0.7534247886869332, 0.5408540495693528, 0.8083832630466754, 0.6095985733413304, 0.6167405126479042, 0.9822317133911908, 0.5716911068850092, 0.9602098800013934, 0.5720548969488883, 0.6273790621906594, 0.9669208970683251, 0.9672154304699498, 0.5037718650638476, 0.829563753606287, 0.912534219684742, 0.8660570077028834, 0.8523312768980491, 0.7453402698583502, 0.5935630853625881, 0.9748054873359935, 0.8146679918100346, 0.8133819321312785, 0.7827486485907489, 0.7228607292431852, 0.847375794618203, 0.6751974000196865, 0.9330145714453553, 0.7289001854987538, 0.8562755685808772, 0.7756170235484552, 0.7701265639271444, 0.7326353375039303, 0.7014217406770307, 0.5373894792155403, 0.7161099622169644, 0.7824407495325022, 0.6784964535983276, 0.6380688580302389, 0.8184519397899578, 0.8649438510851306, 0.6522803874280041, 0.916011381947067, 0.8049716583044273, 0.7506807034845665, 0.9298151775015899, 0.9648164570778847, 0.837738446813125, 0.8509026421891364, 0.6551261553988229, 0.8134447732337132, 0.6152723865652475, 0.9968904077219006, 0.8880158685568038, 0.6360611319659781, 0.9480636079748209, 0.5555797167172476, 0.7020678409355605, 0.9810645423215072, 0.5825438107518457, 0.5245459927193878, 0.9056366457296867, 0.6596524310042033, 0.596913378012448, 0.6856315909470803, 0.7333340216013883, 0.7901462554934706, 0.6284601639075303, 0.5517030460645098, 0.940066309975484, 0.9617411931129642, 0.649737397638569, 0.6107512712462073, 0.550098083983085, 0.6394412423468291, 0.5641542416539054, 0.8300776351806305, 0.574291434898409, 0.8013470816454749, 0.7447960595836374, 0.5461741525376078, 0.9784174367058537, 0.5579520596637332, 0.5194639247496613, 0.6338751056730325, 0.5103429827312435, 0.8744429256858238, 0.500188573892516, 0.9073925552554353, 0.8155508529181127, 0.8210963632118948, 0.6951834695018257, 0.9886424289315761, 0.546623305385149, 0.8147535002494668, 0.659741082938428, 0.9935696035060255, 0.6605426258591813, 0.7576128783858771, 0.8261166461707296, 0.8522373770117421, 0.9712725476462905, 0.624723505250198, 0.7840370324952851, 0.6169236372992564, 0.8147469552834192, 0.8071675556514655, 0.5267642611863341, 0.8733547116039272, 0.7136879019743576, 0.6648965379907477, 0.9972816022924897, 0.9503980066368906, 0.5095679501582486, 0.7143862724456447, 0.9995149868780073, 0.592878259873282, 0.6862235918994057, 0.6883005820283419, 0.6098368335083854, 0.9218082034667245, 0.7372816752265119, 0.6808777083948144, 0.6413267323626756, 0.7949958333808322, 0.822850573089041, 0.9923470253531088, 0.8298281001883037, 0.6077307529210346, 0.9211035892043268, 0.5275587477463141, 0.5143046052922184, 0.9096803107971679, 0.5634239395323501, 0.9476605184971905, 0.5101940533732008, 0.9600893842859056, 0.9383961238800308, 0.9288994615094953, 0.6390953026818942, 0.7794733949878664, 0.5843381929876235, 0.7552445125516651, 0.8085897974654835, 0.5848863419528489, 0.6618357662665559, 0.6179481806846203, 0.5348447602710794, 0.9071482434771707, 0.9473669421168538, 
0.9835783835494402, 0.5260180097146059, 0.8761233996869192, 0.7448654368341181, 0.8419260002340896, 0.8357218829736082, 0.8760042863998199, 0.8246806320964339, 0.8393005921918328, 0.7228622438186857, 0.5666925744100482, 0.7986574560448427, 0.8780191569598346, 0.9750629546825127, 0.9277245870875527, 0.7818989797677448, 0.9839901689939494, 0.9438848582730706, 0.8658498457170971, 0.6809422755536136, 0.6409494534031361, 0.545161256210537, 0.7542772625994436, 0.5021694916593846, 0.8343362978087849, 0.657113888222146, 0.67309436716041, 0.8212216542170903, 0.6280726892685036, 0.5458401555704143, 0.6801697253259935, 0.6141083383250927, 0.6896667694021501, 0.742919441560878, 0.852769620912089, 0.9122556914499608, 0.9384675581646909, 0.5403309192524726, 0.8765297245382173, 0.8500262367446296, 0.9202311875331177, 0.8327083997368464, 0.618085963267361, 0.683092120339022, 0.8984616499586959, 0.6543689262323231, 0.8519702225638408, 0.9986460591818016, 0.5518661651819229, 0.9041350030032134, 0.6883297371749124, 0.8668679917721618, 0.5905661547713158, 0.7257414095771682, 0.5246134102892263, 0.744288295450833, 0.7703030470829513, 0.5920146182294646, 0.8629397186815838, 0.5779152802427421, 0.9020734738548793, 0.8166329143283566, 0.9544794463273394, 0.9516737210723756, 0.7836499379580381, 0.9970378514509504, 0.7872349391613461, 0.5281121136240805, 0.7714451162459341, 0.6296399790578044, 0.785534527482715, 0.5739727379118205, 0.8023907152622461, 0.5991678845981494, 0.6651431248899364, 0.8586834798786077, 0.7291665862126413, 0.9724886654815155, 0.813654624433375, 0.9798693096143178, 0.7694999865027616, 0.8197978867631046, 0.660117961745389, 0.8875032489608204, 0.9757072059574721, 0.8096876588993627, 0.5650954098931456, 0.8131450254150199, 0.6061841629992131, 0.7789241187574154, 0.5789634528542494, 0.7675030102704066, 0.7129377691214563, 0.8995295126322564, 0.7190182125898387, 0.731555584956731, 0.6960493913549843, 0.673566678920858, 0.9286973464219704, 0.8323535101784991, 0.5411837283678966, 0.7836146537852562, 0.9205360198736645, 0.6725288679613612, 0.9237418206334851, 0.6698610834154359, 0.7483023659323281, 0.5140294649727772, 0.7868803882788784, 0.8579601268242265, 0.8560919271655394, 0.8036622651004433, 0.5475242385218235, 0.5789327299186764, 0.512820368388478, 0.6542282659415765, 0.6744601442110775, 0.7992373322742004, 0.5721742204282843, 0.7638213428115135, 0.8440855293390057, 0.7243937897292188, 0.6907357672188539, 0.8326733776757614, 0.6861868354627771, 0.5824526563772976, 0.5814678797134158, 0.8764208875855838, 0.596450739022365, 0.8390876270000394, 0.6856800779885206, 0.760917008117951, 0.6360503407863325, 0.8768238977159428, 0.6021479175459195, 0.5708106338824109, 0.9783003390609035, 0.696825349889991, 0.8225803245465064, 0.6240657143725483, 0.7766248249352607, 0.6304412820101573, 0.5696429999486147, 0.8532995279826151, 0.5415231009122043, 0.9396004934231243, 0.7888713411234576, 0.5462742089349256, 0.8344401079243511, 0.5634330400108436, 0.9972076880010479, 0.6245168607492158, 0.9097255748773032, 0.9651050487782602, 0.961897884471947, 0.7500809315408049, 0.7706289645252606, 0.8675085668265599, 0.7013869370676389, 0.7528979774618325, 0.9780866229550942, 0.6214090967570617, 0.7597825038502304, 0.9067249241852073, 0.6613838413027765, 0.9567557541728684, 0.6093113001805601, 0.852500560719218, 0.5971305473971338, 0.7875274802158391, 0.5224626485684396, 0.6943581140577737, 0.8954486767471892, 0.7653895014019932, 0.7030133297414334, 0.6654046276083185, 0.8731427704624624, 0.7174614704416942, 
0.7336774428855994, 0.6002958085751513, 0.7159976467170795, 0.7364508429438865, 0.7635116956011195, 0.7451417556050928, 0.7437524819526318, 0.7927586278972161, 0.8519044653469269, 0.7215103979736837, 0.7235587528422986, 0.9065994888468206, 0.9077551965610507, 0.6819072790320417, 0.9657235669641431, 0.9886697884074086, 0.9790414804360077, 0.7659107757686867, 0.5787223466139608, 0.8845893473391067, 0.8289988776773491, 0.8067589221952908, 0.8201146327961706, 0.9148184291934568, 0.5022749924524906, 0.6528635074900242, 0.760827181018221, 0.5529647941039892, 0.9064391934147552, 0.8191856241480587, 0.8461468188400734, 0.5607670953438225, 0.6461924635905991, 0.7807106056382447, 0.7808726100404251, 0.920926868181247, 0.6892106711862711, 0.5560912497373993, 0.5893359766076371, 0.6043581879750932, 0.7881320142339356, 0.7706587259547919, 0.8408883519053117, 0.9376127803481064, 0.6730290139825181, 0.6760501298757429, 0.5675817628041047, 0.6246537142034645, 0.7148606667856597, 0.5598190873625526, 0.7344904726598696, 0.5485077824076413, 0.8295164934842905, 0.7511920178054985, 0.9423155301679107, 0.6847611229324859, 0.9954086018954436, 0.8992263896790046, 0.6521646924851472, 0.9291891185117342, 0.5431542716251372, 0.9792041337275421, 0.8956470226634837, 0.6774650650306053, 0.9055766008702579, 0.6534247234193359, 0.9532940200188923, 0.9488782949766871, 0.712148777871597, 0.5133565848720965, 0.7163583300700533, 0.9277434181748565, 0.8680569925129406, 0.5801919632857433, 0.8889392522921833, 0.7190291736524852, 0.5004989894762134, 0.7102014114431465, 0.5233502726471708, 0.8248007389801283, 0.8151292568935571, 0.771734474204854, 0.7495306512534168, 0.6797373520170316, 0.8256297682249284, 0.8804487850087307, 0.6957086058890457, 0.7447714352754835, 0.5466049347999726, 0.8877038641354618, 0.9603646113706321, 0.5598438965196888, 0.9210547235268227, 0.6505840174032351, 0.9371426319233502, 0.5562372028336671, 0.8001826822225258, 0.5757692194833961, 0.9127888693884674, 0.8781572733443077, 0.6615201765932874, 0.7913316271250745, 0.7459094551471541, 0.681413387523824, 0.8607663887165129, 0.620982679041975, 0.5241950249049512, 0.8897355327803325, 0.938472564850015, 0.6343203067725497, 0.9882399841037806, 0.9852904899952979, 0.6889761504453129, 0.7668146949409265, 0.9768740604999956, 0.7104549401863569, 0.8182061977822424, 0.7761090041796221, 0.5941482068293412, 0.8913195075894687, 0.8184381276863701, 0.7166479154335426, 0.7914942007445563, 0.9398238413281128, 0.9993303371239881, 0.5402879667759716, 0.7945420022552192, 0.6521542625977528, 0.7093943600761164, 0.6186021745753839, 0.8863484592185322, 0.9821983423695025, 0.6789504063820817, 0.7542359646505659, 0.8124988465026911, 0.7384770116835173, 0.7752787769297782, 0.6541348261392224, 0.6754108022786154, 0.9179134557910025, 0.8476426566014577, 0.5324461800419635, 0.7364070371237954, 0.7531896034479602, 0.6730142633512233, 0.8099148193603343, 0.9389098986998499, 0.6558146278186625, 0.5392914247781913, 0.7256406178031058, 0.8849246202741872, 0.8259618830733488, 0.612955252834892, 0.6361946084192427, 0.9769135794433688, 0.8281242180906234, 0.9949183724024712, 0.9084079414984888, 0.680643380638968, 0.9924720273580592, 0.5575065738807208, 0.8122851830919711, 0.5070851626758354, 0.939655138673563, 0.8150942201946132, 0.5577082551032106, 0.7784175487683436, 0.643817868414388, 0.702334868314733, 0.5192200568893675, 0.5626600949787814, 0.6748358167127647, 0.7011605181555589, 0.929220735786296, 0.9854941709767707, 0.9262733128502385, 0.5791411178045012, 0.7350756487719379, 
0.6572250993788271, 0.864258947216735, 0.9394398744864334, 0.8804505810238927, 0.8241417619864462, 0.6653769433259338, 0.5234011984078738, 0.6049070609389899, 0.676844554768262, 0.5233239502038525, 0.7076508507972578, 0.7447525202088798, 0.7237172743635789, 0.642751384297521, 0.9681356541055558, 0.5212863239944373, 0.7595372461782821, 0.7696360366063448, 0.9867880260090558, 0.5727442121596665, 0.7843374609149906, 0.8854609317646296, 0.6360380689903056, 0.6412993507046978, 0.7467393657603677, 0.9247273667432951, 0.7545103840012011, 0.9147384758664314, 0.8861308670766289, 0.722834520498348, 0.7046301749197721, 0.6953980684541219, 0.6909476484405574, 0.686553463317206, 0.7312205108619313, 0.649108897822364, 0.9018591050220326, 0.944151730293589, 0.7218801049944765, 0.7892825610426222, 0.7709887550120834, 0.8973663887625161, 0.9286752534660818, 0.8891735621657747, 0.7557802175370882, 0.6920987466015629, 0.971544820258293, 0.5605156711923918, 0.5892446303145493, 0.7684926361041038, 0.8396845496525206, 0.7134362016348405, 0.9873923159079618, 0.8808748868517806, 0.796639345203365, 0.9582093337078781, 0.506189337628411, 0.664635378233213, 0.7363175929991932, 0.8255876404312921, 0.5412769705210531, 0.6125700480263334, 0.9204331042036445, 0.7977273902014078, 0.7139229743102473, 0.7338192377750862, 0.7477610256242675, 0.7755499766146718, 0.8738083772567063, 0.698871770088341, 0.6575998225524126, 0.5275281734316957, 0.743717655116983, 0.8238297575823657, 0.929177588957151, 0.8542743527845541, 0.6535308273519187, 0.6387951106277374, 0.8867246176371337, 0.5759484347193301, 0.9298134620158445, 0.6500796470049863, 0.8961111923459095, 0.6787906318810061, 0.5629809770806683, 0.5554963099715822, 0.6095815630949619, 0.7533826495235556, 0.6856247745803674, 0.9485391220470988, 0.8885128637260677, 0.54699735367101, 0.8959913873167338, 0.6285217546902655, 0.7833273893382476, 0.8729871168834478, 0.5389539214264035, 0.5192959529718892, 0.5523334933853361, 0.5552944208066675, 0.624531472525784, 0.6233217934252675, 0.8671205665507198, 0.6995952514358769, 0.8655321415211321, 0.9543147584017113, 0.7776470182050212, 0.6906572898125991, 0.9013793177018605, 0.5297483334189514, 0.7681686380793935, 0.8468448676699423, 0.7192682071851061, 0.9884320605054531, 0.8362891013487423, 0.9244325833921927, 0.6974504586903001, 0.913326463672816, 0.8180860718059628, 0.5783991246964437, 0.803298097308369, 0.9820821805278677, 0.6045816123035446, 0.6470106217428961, 0.6572029334204916, 0.7541284618269695, 0.6984487473238793, 0.5457376101092011, 0.5166467144993733, 0.7797735392133995, 0.8120792924817061, 0.9475948536092011, 0.8878328958805155, 0.6023536773968243, 0.564677978190723, 0.7930340916359505, 0.8614154828454601, 0.7905750927217792, 0.9031661095648271, 0.9799240650813658, 0.7378961717899822, 0.5632703214365105, 0.5961253054981531, 0.5332931424090059, 0.9066879701542925, 0.8325939745771346, 0.9174743310965481, 0.5737037901788016, 0.8944056819428408, 0.5997231601595876, 0.87262354856193, 0.9558282388981603, 0.7296069513691675, 0.9228231439329118, 0.710375575557028, 0.9520266163240549, 0.6354200018833955, 0.7724045812962841, 0.906371565852309, 0.5735904941505041, 0.9423991648397103, 0.7005080280082974, 0.8629157822059614, 0.8012778704110843, 0.6584795690800309, 0.6601556565534542, 0.5010961965617253, 0.5883016542267638, 0.7819013604812033, 0.6376891861606908, 0.7255111816913702, 0.7899614642156993, 0.9636050609352336, 0.6600912121809464, 0.5567289667286796, 0.908912501361012, 0.791775562805588, 0.6100840014966082, 0.930600430781582, 
0.5647581273480611, 0.7872567142411537, 0.5742372125161561, 0.8458998065481185, 0.6508078806371768, 0.8112101497813092, 0.824303779393728, 0.7045806411491954, 0.5319971410273543, 0.7328600371354517, 0.5859136063497179, 0.7092440217966849, 0.9036199464050274, 0.8490097915983121, 0.878404810386694, 0.9217059328840217, 0.889125963308439, 0.7956885794592163, 0.5189175476987468, 0.7926699597003404, 0.5441513661499233, 0.9752177571863616, 0.6394386857251889, 0.7484092130059635, 0.7003575472251199, 0.6971732088594004, 0.8918594863243581, 0.9475109540408819, 0.7976991284354068, 0.6584539694626395, 0.7596543498673187, 0.834093496157776, 0.6315071044323084, 0.7257800755150537, 0.7613013867458259, 0.685106766294596, 0.7034601629776389, 0.8114772915168782, 0.6335824056016182, 0.9995896915456262, 0.8186110597310162, 0.9981494616156834, 0.502297056582431, 0.5793325977632442, 0.6535287877967197, 0.6393337933207417, 0.9032521469768862, 0.529637778291616, 0.6779847303884604, 0.5771971939163543, 0.7332407682877353, 0.8850591817180783, 0.5900082577653902, 0.9743134781127331, 0.7757431220551423, 0.6487183009597273, 0.722436902482217, 0.6986118470803124, 0.5311849638235153, 0.9367749481492245, 0.519708961029563, 0.561504790821346, 0.9338801298307611, 0.7881247322846188, 0.6540668053651815, 0.9126924414060251, 0.6703433670805382, 0.9215140795737846, 0.8559454441663517, 0.7710920417714936, 0.9242707649712256, 0.9034261310957409, 0.6991309986339969, 0.7687687466085857, 0.9599632285471492, 0.5212375395216666, 0.6987264227151251, 0.9268599674931516, 0.9372821391286983, 0.6996666741765527, 0.7845179166546925, 0.5145775230721541, 0.831603891370587, 0.802620942808469, 0.6559998501817275, 0.6292357160542208, 0.7388438834931446, 0.6273055957208579, 0.7303933241537535, 0.957978165957504, 0.5885328298630496, 0.9599956915361791, 0.5950492666388517, 0.6384638799077916, 0.750096843935993, 0.5569618059315617, 0.8192345290587713, 0.6900727034019176, 0.8932590782676365, 0.8377553330629615, 0.983828096143334, 0.8305988333491152, 0.9289090351097508, 0.9345416033239451, 0.7412764719544395, 0.6397853565945386, 0.9236378425002041, 0.554308393581685, 0.7515816585660685, 0.9222411363709248, 0.5691893709992575, 0.761709167386953, 0.8272151586622497, 0.7361215847213503, 0.8851051504693108, 0.7611555657549418, 0.7666314394621985, 0.6603529208718204, 0.9334051341880529, 0.6356429329686704, 0.5206495773202935, 0.973617523950227, 0.7816733282766819, 0.5187899600500379, 0.9085905872269147, 0.8380108782623885, 0.5393395888557552, 0.8561450550074381, 0.7042492496339697, 0.62659538592638, 0.5017509987400555, 0.6017945248435624, 0.7550925905811001, 0.6569147105647009, 0.6647589566114078, 0.8949785747822034, 0.7981038869452706, 0.8078481319689448, 0.5190733591705379, 0.5966040342875998, 0.7473842254497933, 0.9653565132225694, 0.7466904917877952, 0.8136192427659394, 0.6896916358526248, 0.6689589756692935, 0.8465143575720012, 0.5798120812537224, 0.5136370882109454, 0.5337675968618212, 0.5537471006581711, 0.9035534546411088, 0.759948615076788, 0.5155841890989501, 0.5321566151613681, 0.7434781342532575, 0.8106265138509909, 0.7445369564766827, 0.9488801046148768, 0.9123847153909774, 0.693394268276001, 0.9445157762638249, 0.7802943056104577, 0.5679785449639886, 0.5342035388360291, 0.6869595235856183, 0.9652342025596492, 0.516837290751162, 0.8841727730843741, 0.5330631805783026, 0.5541539503259652, 0.9699248057491848, 0.7697540083233966, 0.6947690359219851, 0.5103287586362033, 0.6445427659518552, 0.5366822867775145, 0.7239649645796169, 
0.6450165314316411, 0.5605263671996396, 0.9606769693016073, 0.6983313524737516, 0.6784533537429207, 0.5576189858200257, 0.9044235920788786, 0.8618002249824743, 0.9864145291851316, 0.6828566188719255, 0.5499063168799968, 0.5607724038299435, 0.7570549217545142, 0.7445144584202521, 0.7068611020766534, 0.675603018742714, 0.884373725598283, 0.8779055433416527, 0.6508828482119289, 0.7228396262885124, 0.6582973183516396, 0.7154085975905422, 0.7778083350199249, 0.6179544863076294, 0.8112350415198861, 0.8428937501292155, 0.9250532213426288, 0.7932253308264771, 0.6221682119801941, 0.6353684920070097, 0.8030036780916574, 0.6352256226537237, 0.7827020062600356, 0.5376374336121499, 0.9299288998034045, 0.9667740120365718, 0.7572522264681572, 0.9916203057788281, 0.6941180002472918, 0.7937905671668881, 0.8684126567608168, 0.8275674333361687, 0.6272264103722187, 0.8771120110307709, 0.6494022726183125, 0.5867961873528513, 0.9071497512391153, 0.6140553287625421, 0.9951983730136194, 0.7391192163782392, 0.7769483123583993, 0.6413208415629827, 0.9140648854656985, 0.7303544384817418, 0.5887657820054462, 0.5283916472655535, 0.7701495712824791, 0.8307535542460243, 0.9887288289788697, 0.9775057900180614, 0.6173460311493959, 0.5075896069829997, 0.9876347727254942, 0.6171263127252626, 0.9125707858067098, 0.8398490565884678, 0.78837019862587, 0.683015894186576, 0.5802191675902054, 0.6528529103265974, 0.7560529097734369, 0.5024269848515954, 0.5392414727364346, 0.7124317298647678, 0.9188492044968617, 0.8123808624228366, 0.6012304602635786, 0.7484575352876188, 0.8536725487429473, 0.7173760143736572, 0.8110569675364422, 0.8479892425484614, 0.8846534477849222, 0.7000693908711461, 0.7336568254757716, 0.6635290277301378, 0.6223554639335576, 0.5362640137847742, 0.8882870054519589, 0.7163268670750973, 0.7104214509515776, 0.9963848255559498, 0.9868025113062244, 0.8326162466275249, 0.9677138853048244, 0.6491409044525878, 0.819186268723904, 0.8632433886295151, 0.8423557342894004, 0.6781766727049546, 0.9116453766964716, 0.9209050478601077, 0.8612186911214432, 0.9448855197415272, 0.6599628275966338, 0.8267004279287236, 0.8841363201278161, 0.8476611064417752, 0.5327673918647406, 0.942741814641038, 0.722607327974946, 0.634911849368301, 0.706046448719406, 0.5261906949754455, 0.6048554123326247, 0.5121306519641968, 0.5394756069668671, 0.6270451940492923, 0.9252588527363774, 0.7610523425477483, 0.5305463745103293, 0.6336972055293804, 0.5373885936022622, 0.6780688785222311, 0.6726564264409338, 0.6460064002425365, 0.8548520570470997, 0.518032418759254, 0.6887105589779026, 0.6759836945271931, 0.550945952798556, 0.6360331461657965, 0.7491652460836116, 0.5201922210859875, 0.7877450925847825, 0.8353421551947229, 0.5517670792418792, 0.625882952550467, 0.6994487442148252, 0.9894585315939254, 0.7536683927179098, 0.796424668244397, 0.8263079899579068, 0.8356376797431189, 0.5745436144360014, 0.688705684127715, 0.8936058307177115, 0.8840732354526252, 0.6805323136142203, 0.6180666136608088, 0.5285903541692274, 0.554652888770693, 0.530236468786847, 0.6957752695641517, 0.8540708242744803, 0.5846091678160662, 0.6561598298574719, 0.5509624571779996, 0.5101030062611733, 0.517266192921281, 0.7830015686813927, 0.9489102315771012, 0.9007403744352733, 0.6448692720067073, 0.9523058625080271, 0.8409467334734527, 0.6952282318901238, 0.8787155943743392, 0.8675547154824033, 0.8560032607886652, 0.6723086352876358, 0.7191614181766182, 0.869902466094742, 0.987606925793334, 0.7623075402771582, 0.9981542667918712, 0.5719310168341947, 0.5483193220396165, 
0.5715725831457623, 0.7012803193401596, 0.7173265444122627, 0.5370810735067721, 0.5874527934868933, 0.959794795411199, 0.6795708076377649, 0.7580956101371952, 0.7790475497171765, 0.7764623137119675, 0.7657394994027746, 0.752175216040681, 0.7135390498338658, 0.5423861898227704, 0.7954937647919256, 0.6464374930663215, 0.9744319519790623, 0.538116347870107, 0.5576642297474788, 0.5192931615202776, 0.5419315730167737, 0.588997282108403, 0.9350742041607036, 0.5504107684744056, 0.5647171547235883, 0.6086499240961714, 0.7086206166539698, 0.5217482433352668, 0.9171601133125864, 0.6903298032502844, 0.9353564128950083, 0.8665402535942437, 0.7466134546798942, 0.7893529589777042, 0.581875540051596, 0.5139738336440804, 0.5507141264064859, 0.9540510302389148, 0.9401678300841473, 0.5718434421498102, 0.8053531767499522, 0.7008584982899335, 0.5372663095362579, 0.5986888838876328, 0.7624424537480867, 0.9140397252906535, 0.9004147878086695, 0.7576435194641811, 0.8749574445715758, 0.6444599562987262, 0.8554354263427, 0.8271631641262133, 0.7425073574672616, 0.8157177739357947, 0.6423471665447793, 0.8048614926877139, 0.8059872376327417, 0.7924127954314113, 0.9397545270160947, 0.7818469978786318, 0.6872208119942189, 0.546494246043427, 0.7444950582743656, 0.518386453739781, 0.648410049054347, 0.8188233736348773, 0.9456346964246918, 0.7991125554063075, 0.8663133719692693, 0.6551189432182372, 0.8180530335022058, 0.9465332147271952, 0.6819205267408491, 0.8751542082454447, 0.8400783596983494, 0.5259252468208211, 0.8843310313417023, 0.7793344597220375, 0.9593082023770796, 0.8000894951997879, 0.5679849642765422, 0.9971450026806838, 0.5954063110370049, 0.5452591747469295, 0.8062644656437361, 0.512187644380953, 0.7724621615696772, 0.6886087532108373, 0.7705024363851649, 0.7728633958862638, 0.5325382424035647, 0.9065084602665593, 0.6166867772087186, 0.5868539975500966, 0.7583324941245229, 0.8300074532239149, 0.825192145484216, 0.6529312303374695, 0.9240236847362238, 0.8966743571013744, 0.8306068318576115, 0.58861626137656, 0.5657927567740306, 0.9734161638845027, 0.9713763991706443, 0.7721911079373711, 0.5894708691201935, 0.9876412972594538, 0.6649913109963417, 0.5621455435752004, 0.6097976309844703, 0.722579133778726, 0.5884576141566555, 0.5768443458379353, 0.7487897678342967, 0.5226803025138909, 0.8704854818081101, 0.6098823475893297, 0.9436308585999544, 0.8780329352881144, 0.9126854540472125, 0.5310064661121673, 0.8868396984447398, 0.8043075121254326, 0.6355325461227537, 0.5929871914689948, 0.6940485871801825, 0.5245288896514653, 0.9726357944403783, 0.7717946876557142, 0.5223753700631169, 0.7757529149907503, 0.9492369984432912, 0.7982144912308776, 0.7278144263944242, 0.6118778717047424, 0.6710178319446812, 0.6299272517798922, 0.5243891735113534, 0.8979710815821145, 0.715981453007359, 0.8216073089733498, 0.9237053859476205, 0.9561646337058041, 0.8179422109744766, 0.6127553260584536, 0.5031320941546762, 0.8508548748514682, 0.6904671264388458, 0.5070620944156995, 0.517222960654731, 0.7308215319757037, 0.510903548716789, 0.9885921926105263, 0.9689818096346756, 0.8307165543915493, 0.62434905452682, 0.9861729284714387, 0.6997147874060936, 0.601541625193811, 0.6522377500073782, 0.9055414175906151, 0.7597540237173013, 0.8392040765436448, 0.7933729649484653, 0.534873157581687, 0.6454714415720297, 0.5784498714387567, 0.6176451728014773, 0.6746258539274073, 0.5551864633326207, 0.8600152074025468, 0.5774005420071131, 0.7340964785725448, 0.8290398892139985, 0.7157688569698777, 0.9407324607866355, 0.8826857682548104, 
0.7785177310705763, 0.9958315524927559, 0.9642953244477621, 0.6732250020681008, 0.9099448930691374, 0.7935662867346335, 0.8851116906803674, 0.9536514811729383, 0.7474301440658393, 0.8761705528928306, 0.9758014162182136, 0.7469421991992371, 0.6044891281399603, 0.6478989768758665, 0.5135792525923362, 0.8621639614415636, 0.9199047609699088, 0.5154644517157755, 0.7690225029870702, 0.5724999403023663, 0.902078634490041, 0.5191888101751181, 0.6261542457556428, 0.970679562110067, 0.8060703628971796, 0.9163211032163197, 0.9417978316034252, 0.8072657440303623, 0.9162593466348575, 0.8726495597522537, 0.6019129321783365, 0.9765366344231267, 0.6023280584533477, 0.8593133127925613, 0.9600738850035861, 0.6484953003836834, 0.7979222910305838, 0.9351079578469352, 0.7748805318090811, 0.6983528634608742, 0.6592965152450494, 0.8145861510161136, 0.577037522140796, 0.9294164865346759, 0.6871865082220328, 0.6306701588957033, 0.7485306714658273, 0.9609588604431102, 0.9636085268740594, 0.5011854055529477, 0.638176624178109, 0.8053712920675489, 0.7358274348229998, 0.5675745290928029, 0.6889175178565792, 0.5121884201528151, 0.8557647782624446, 0.7449251495823985, 0.616944721757762, 0.7393801668192937, 0.9442132920305208, 0.88134613230323, 0.6161605263330489, 0.6481389838150236, 0.9844528022457633, 0.5677721606976881, 0.8872466302192161, 0.5596338057800146, 0.8281117139743494, 0.762209322397509, 0.5618711618964776, 0.7874505567338994, 0.8857828314255343, 0.8155096352462072, 0.7500879531433734, 0.8949110795962513, 0.9794375154793528, 0.634376829794359, 0.9086139442710777, 0.8074212439699625, 0.9424600841958293, 0.5963550142087078, 0.8769465612554581, 0.7483730626524907, 0.559973833571417, 0.8741698403933583, 0.7891546177753148, 0.652562385268817, 0.6582265603274937, 0.9486170282813609, 0.5310736577545747, 0.6436942137541406, 0.9718518644164476, 0.7524196560618659, 0.8541347956565268, 0.8397747206480717, 0.7819302042310893, 0.9981736731969868, 0.5350626203569935, 0.7637504362219365, 0.7725597895203562, 0.6403522216514618, 0.7427040003698917, 0.9661319108260311, 0.6008954345131949, 0.9592641381010371, 0.6252251535360791, 0.8039435603064737, 0.9579984623536575, 0.8954303554114333, 0.8185660028343427, 0.9146891379455744, 0.7285421733423345, 0.9984000688051282, 0.8958921600094314, 0.6055386498385424, 0.6075164158436276, 0.6964896267407578, 0.7653071854145757, 0.5889163073839212, 0.8921720980614745, 0.6257046318174697, 0.9421881944484283, 0.8846001178058516, 0.8773994010586506, 0.728020077551095, 0.6416236294094402, 0.9576022117458347, 0.6389742151635547, 0.6094385008594292, 0.5134900160788387, 0.9575893783415486, 0.8402047433417306, 0.6568647348292327, 0.5840864035387434, 0.5547299464955966, 0.8618908835553073, 0.9195051412249546, 0.7338218439126829, 0.8987380516819468, 0.979785060418783, 0.824873712581002, 0.7241033362763034, 0.9148042081725467, 0.9281697255926152, 0.5110968093728785, 0.8907632947396393, 0.7505714932831478, 0.969804811750447, 0.9820844570358498, 0.5157505949181551, 0.646121112319032, 0.5255221921434381, 0.891224362512566, 0.5723060007596348, 0.523307607969367, 0.8874068399064243, 0.7254000005038836, 0.8484803110220693, 0.6212856078042616, 0.7063510295699968, 0.6306520590257277, 0.999291193595526, 0.7794717406822863, 0.723897578674028, 0.8069923824067018, 0.6555782993164965, 0.7052191665415767, 0.7223498839363773, 0.7339894292226148, 0.749135565893455, 0.5663436888367801, 0.8994859161634003, 0.7084887802593675, 0.6294217991739482, 0.8135878157895287, 0.6528759449811772, 0.6434741371628244, 
0.9864972952140686, 0.6707996691579247, 0.6353432603471088, 0.6863009930228949, 0.6551483616918086, 0.8899960731808676, 0.6242853027213233, 0.994880074319453, 0.7734483119218949, 0.5003405569236599, 0.5864212344577578, 0.9718619397683518, 0.7638129665038689, 0.6480611042548127, 0.6339671751834932, 0.9334234186443982, 0.9630664370324571, 0.8231484156975931, 0.917257632521306, 0.9266932710423375, 0.9702125013501407, 0.7288839439943366, 0.5790714977950748, 0.9257216001312374, 0.5931882661900935, 0.589132071064218, 0.7970387268994846, 0.792432465635999, 0.7021158139285857, 0.8398261775920941, 0.9178677509904491, 0.8567757275808666, 0.8326757533115012, 0.6039204138384744, 0.5383255129550508, 0.8136465833597559, 0.5965405029605422, 0.6382100400201687, 0.5175602606137304, 0.7043810283568386, 0.5363179581662572, 0.7669058497257845, 0.8306303255114605, 0.531820616770075, 0.716167131507899, 0.9557054527015394, 0.8159446738714675, 0.6269857521773877, 0.7932221480651338, 0.550758178361823, 0.730160112862537, 0.6545656313479393, 0.6191519543773264, 0.5820513120615316, 0.9263740687485538, 0.9060671704412716, 0.8866137672177157, 0.6760093585784646, 0.6166500895200604, 0.9256585744991555, 0.9678758475643292, 0.5212648619439639, 0.5415143192832994, 0.9361310466169672, 0.9438071993377584, 0.6967494790436619, 0.6177041031411237, 0.9731701129667334, 0.8983476174339682, 0.7565527028469816, 0.8605917226445873, 0.9005873749658703, 0.5767378155367053, 0.7644224214090648, 0.7454254426860116, 0.899706025048755, 0.6004628138522554, 0.6982556449400006, 0.5569587942315634, 0.6702475909779898, 0.7533950735754009, 0.6817708830683449, 0.9032004838770193, 0.9038093825119919, 0.7468104367742727, 0.9730642784393817, 0.9149331693482966, 0.8935239041484648, 0.9472143574074054, 0.7748880378665225, 0.6373753882153954, 0.9899187911085254, 0.8439929632800807, 0.6754300214971112, 0.9488433628546744, 0.758664375966714, 0.8114656241687404, 0.7361633874643486, 0.6615169364347009, 0.7897124078040141, 0.9465627173603538, 0.585395238528531, 0.8820932347940567, 0.6873983082642632, 0.5601208319882636, 0.8865556794919918, 0.9075949423496578, 0.945552786452684, 0.8898443302645584, 0.6338222294404284, 0.6933626796281214, 0.5643205301927142, 0.7839033833688844, 0.93052018953846, 0.9350155484001486, 0.8850956829555422, 0.549140180590469, 0.7566140788169946, 0.5752440317660192, 0.7824643206362906, 0.994138178655896, 0.6830340943558177, 0.5255414496843999, 0.853599031171212, 0.677254677121377, 0.9969034648122267, 0.8463349572431947, 0.8703081560077534, 0.8731320587817958, 0.6686109838507304, 0.731476113035961, 0.6108833662340049, 0.9752296960764153, 0.5189777105755327, 0.8359745133630496, 0.6485795945816286, 0.9922455391235666, 0.5615683820916314, 0.6161948707766061, 0.7723202483108247, 0.5666641481909863, 0.5953836902031768, 0.8589434260567097, 0.6783458514854459, 0.6339002753803176, 0.5658329198168699, 0.6007394197187375, 0.8334571976041041, 0.7769523852995388, 0.8360679891766758, 0.970584619551702, 0.5427370268842955, 0.834914978336332, 0.8932849218760224, 0.7054517086717116, 0.950368123700973, 0.6651558670798301, 0.6588800726442492, 0.5403318205147403, 0.5307823827490856, 0.6506866396186739, 0.538170985764667, 0.9872351017709378, 0.6181379044547393, 0.7248893248550057, 0.823953765331932, 0.8755056865770019, 0.6103501304489556, 0.9372192420960461, 0.7258040671135173, 0.9903888815861992, 0.7507894527651784, 0.9920563075898063, 0.698053285884948, 0.5562027523569001, 0.5837363554201375, 0.769591048753881, 0.5735092521590779, 0.6697813436742901, 
0.8601310985767885, 0.8712938533038019, 0.7091419526169358, 0.5730302609545167, 0.6989937194832307, 0.585275667745835, 0.9633542672692492, 0.7597287151648342, 0.6009474894969723, 0.6049700753175997, 0.5578481977924106, 0.5183247420137536, 0.9016050642241582, 0.9782253389842697, 0.9293306907337724, 0.8588791680730208, 0.7689541654214487, 0.7432667225401792, 0.7147351780063295, 0.9107399741378257, 0.8831875994947476, 0.8293082291824923, 0.9772197985075703, 0.5641543169483442, 0.9398843505970345, 0.8452086650229345, 0.8286848659831503, 0.7246141628294916, 0.7038272750998714, 0.6866513656287525, 0.6712757202088746, 0.8043978394167357, 0.7048689208663501, 0.9375770460995199, 0.7758147236048574, 0.8190061858467073, 0.8487249145494267, 0.9453716181870384, 0.517459066699724, 0.9129495962530119, 0.5412540839653273, 0.6395706795527489, 0.7142923468067505, 0.9731663872685078, 0.8536143860268721, 0.7618534059118254, 0.9958952265171641, 0.9085421606060577, 0.9055496798097873, 0.5549015383811016, 0.9897323306136083, 0.8108941861216805, 0.9500408844632662, 0.5993028126285679, 0.9114100446023349, 0.768199163499814, 0.5114433675588209, 0.8719680686402629, 0.9509353004234862, 0.7413872173560676, 0.5040880337125957, 0.7247521972724581, 0.8376648514879727, 0.8636635521412357, 0.9333603275806632, 0.5197406566212965, 0.9918308410835529, 0.6833347214027592, 0.9614466200916423, 0.7542778011626943, 0.6002190790773654, 0.631492606880381, 0.8109914326424259, 0.5161812830143655, 0.6252151264092796, 0.5563295105531301, 0.6722476291825616, 0.9814941780907311, 0.5415566663569251, 0.8141559753477177, 0.7225184916194556, 0.7342069780499053, 0.809701599173068, 0.653955532012059, 0.5378903518651146, 0.5906632508094507, 0.7919183337599558, 0.9007377798522733, 0.7901304849493768, 0.7389298436463119, 0.9574143041517182, 0.5039903338943303, 0.8954577104347112, 0.5745928799246139, 0.9087785886363235, 0.7934889862562722, 0.6710060447533774, 0.550212005836778, 0.835008281162029, 0.8510286052442837, 0.7804049977096068, 0.6036689122652494, 0.7727626014601158, 0.8315500693412192, 0.8983751203419754, 0.5868933569767257, 0.8875288440556156, 0.7356579295941361, 0.5300579664684208, 0.787141961850735, 0.5731032258421873, 0.7771625629714027, 0.7706856675586471, 0.8432312145445374, 0.8350891540543319, 0.8430168417911172, 0.9707365543526527, 0.6769370194019667, 0.6972949161890372, 0.9919861764494173, 0.6605388422680165, 0.7937865386973404, 0.5347256437044872, 0.9978082836318315, 0.9212164951890227, 0.5939148266323726, 0.7539552020948936, 0.6254125044579439, 0.738138979438131, 0.6889566544749289, 0.9562179302013395, 0.7780706515777125, 0.7945490752682269, 0.5866935736709497, 0.5117052085665055, 0.5960383271076996, 0.8373891797367077, 0.9616561649098803, 0.704562289394896, 0.5921527883761083, 0.5565223995964991, 0.8680148031076484, 0.9891890031508208, 0.9991977806031309, 0.5020739651876478, 0.9033441281936851, 0.9085178608407365, 0.9460405196994227, 0.8968718038461636, 0.6480858695390368, 0.9356590496329726, 0.6561949230565862, 0.8383891003945961, 0.7621981204086448, 0.8392817709409162, 0.875384403157071, 0.8208388980329449, 0.7950198911602151, 0.821174436665876, 0.9422948167138974, 0.871667302541906, 0.8957949072385882, 0.5643544288798994, 0.5265437361518454, 0.7838464086710242, 0.7002119090602839, 0.9128247112277962, 0.6656861323509509, 0.5574090315707385, 0.5117149606644859, 0.7049319164807151, 0.655712709926491, 0.552250002418659, 0.8403682646704249, 0.7603953613149395, 0.6656980810965404, 0.5571750944513156, 0.7478608907435305, 
0.7859537277016483, 0.8868613983239813, 0.6587943618875944, 0.7282173572695367, 0.8536416356550576, 0.5508967131394626, 0.9676547541240563, 0.7729936463718674, 0.8050784661109416, 0.7107792610773778, 0.6782692683361127, 0.686688778855292, 0.523475193874605, 0.9120515697347464, 0.7635565951594016, 0.5414979214434901, 0.9662511292749141, 0.9563552695332933, 0.9713461503863521, 0.8693382048714071, 0.5455291876155046, 0.9437475837323048, 0.8404310369367367, 0.6711412371898577, 0.5806684521639653, 0.76185107230379, 0.6863842692031963, 0.7799265581697847, 0.8132095569475222, 0.8645071373159684, 0.7257135762681602, 0.6594308030988855, 0.7415823050238428, 0.9018363219340275, 0.8989081152312284, 0.5100819025457073, 0.8204543526569082, 0.6612421794471938, 0.6278729041295942, 0.6008067677625952, 0.7453462793914194, 0.5177574625042592, 0.634116943236831, 0.9385787960274532, 0.7377472102867573, 0.786072709668902, 0.6823833836872213, 0.7236629573919562, 0.518798613967759, 0.734058665508672, 0.800608834813055, 0.8215045921202445, 0.8061461396196147, 0.9918149627758686, 0.7682512755896524, 0.816603621163309, 0.6906261024725174, 0.7641341010072509, 0.8288477401313115, 0.8731977584594863, 0.8696896046339879, 0.6932755295599362, 0.7962713996411181, 0.5637197663897254, 0.8378282116054132, 0.6385708201813594, 0.6513482345493173, 0.9982930702377553, 0.6349865583573022, 0.87973311063735, 0.9967636970561813, 0.9011873825395244, 0.5486470153024088, 0.7225052775139813, 0.8328362625183899, 0.709546650905453, 0.8727825736817809, 0.6248504185411998, 0.6966875497532397, 0.7679289191465868, 0.6876044001029729, 0.6907512542725858, 0.9716130831838297, 0.5465233966987704, 0.5079160084451937, 0.7798734394056803, 0.5245168375275437, 0.7288014080811629, 0.7109099641915859, 0.5974897204490134, 0.861213328321542, 0.5926732059280315, 0.6861911946077153, 0.8216087912701582, 0.9956681309861797, 0.7308050842518341, 0.5726382720974044, 0.7572630885155367, 0.6761216492210128, 0.7395922583340873, 0.5873800772193867, 0.9042536714899516, 0.6243434020895847, 0.5593910760203269, 0.660504988568668, 0.5777780134231019, 0.6360489698801602, 0.5882089162234283, 0.7997124786634061, 0.7545236858736835, 0.8948540450708029, 0.7920617127847283, 0.5324935798154609, 0.6177964558602171, 0.5191109927551804, 0.7801813110423599, 0.5174654865458037, 0.5252849350697887, 0.8002040479606283, 0.7255384668593685, 0.7207847829158686, 0.7528565012837907, 0.6720739983937307, 0.8810990894397748, 0.8094635533772907, 0.5834873392821667, 0.6435269369817114, 0.9976347099468805, 0.8205345989147516, 0.8876194577687934, 0.9820978956573037, 0.8213378259076773, 0.5501929795512703, 0.6153757370909215, 0.5819127577177274, 0.8274687020761464, 0.5628522311250969, 0.6244698912825695, 0.5102769715237401, 0.9244868046053919, 0.6428282398739493, 0.6679077463851406, 0.569902041614619, 0.5483353386191343, 0.5471812825182429, 0.6228096036106108, 0.9995139525017558, 0.7664963668787226, 0.7125306354120882, 0.7695007874725337, 0.8681501372794491, 0.5847211285373533, 0.5032180240889214, 0.514369376208951, 0.8749158951636755, 0.5897675029919548, 0.538661933576478, 0.9553961073609187, 0.8533740150939677, 0.944158877980643, 0.7471764564721031, 0.865738330054729, 0.8436635058195981, 0.5403139624162261, 0.864552185841324, 0.9204111526295999, 0.9682253921493723, 0.9584307749308025, 0.6079934139216234, 0.5885791445680008, 0.5638256635934655, 0.9551568188892479, 0.6768067246682317, 0.5363390318904455, 0.7936301673754824, 0.5597112345369075, 0.6467751745435537, 0.6845232534759939, 
0.9995549414814826, 0.6597165314784545, 0.9883719978448388, 0.9063676017960303, 0.8709980223948988, 0.6013645401833374, 0.5755227088786643, 0.7177985790408538, 0.7362076121694701, 0.9783651176665795, 0.5259617099586735, 0.9276381091109698, 0.8796794837364181, 0.5109806046848253, 0.6710685512321988, 0.7734803917584137, 0.8786212076322106, 0.6038412500319168, 0.5989793368936327, 0.7632612025277121, 0.5472310011511524, 0.6551400523821337, 0.7149185689709059, 0.7499743847572446, 0.7577846992057407, 0.5806655036882333, 0.9714750234471559, 0.7454340985767132, 0.9830194649966267, 0.7790028838759937, 0.5016892642915557, 0.8166443536329637, 0.6476579752374458, 0.5833138031482902, 0.8942782016467313, 0.9695709128909993, 0.8412091481593496, 0.8172544078480183, 0.8015403888371355, 0.7508187699150942, 0.7373149030342488, 0.9069932468809174, 0.6595519185065096, 0.8146617107804667, 0.5929650259615724, 0.8056849426069934, 0.5263081581129534, 0.8045248762395376, 0.7979861708929247, 0.7984660650792001, 0.6021124958959202, 0.6521342500909952, 0.5771664400778067, 0.6762920272655364, 0.6353887512609796, 0.6260037926216111, 0.8740929136387398, 0.5018285085731558, 0.8240583787897326, 0.5342401161670398, 0.8725115480630575, 0.7390891371605438, 0.7896765514835025, 0.8895650651109446, 0.9349441625717856, 0.5966941148916289, 0.5219869322753499, 0.5836495809845825, 0.7993804871038517, 0.6836784519384127, 0.9963138607622459, 0.9496154832894257, 0.5597897776223891, 0.8814716323580363, 0.955535451686141, 0.7634198523657836, 0.8437172930526917, 0.7283684214564021, 0.5374041179908119, 0.7224996026916783, 0.9806685830720767, 0.9091699005339076, 0.5415597892153154, 0.7613535687508881, 0.7136139637204437, 0.8698290879454517, 0.8620129281628524, 0.8490485541927337, 0.6528201389460334, 0.7005521888085702, 0.9650403836049155, 0.9611203197210232, 0.6127502737440849, 0.9498261052791144, 0.6372848657502415, 0.9628626942358385, 0.607463036432202, 0.9562602587903004, 0.8009887060337448, 0.9121399812024468, 0.6763252134727143, 0.9657060114706498, 0.7862657931266726, 0.9177865243861368, 0.823771471986032, 0.5637864332752306, 0.5941740211248552, 0.5379137309674176, 0.9161503002155971, 0.6956195926804702, 0.8941726520798509, 0.696230618792935, 0.9178126386374205, 0.8387215204779213, 0.750203393865648, 0.7804911858817962, 0.9057478744742908, 0.7797753765884784, 0.9526615549337787, 0.7051619925951889, 0.7438589400808755, 0.569172315622775, 0.9434286301021557, 0.5062007423373796, 0.9872105289228383, 0.5100965874105416, 0.5608942291930203, 0.966468295008235, 0.9206103132974156, 0.9434108091168484, 0.9777306818419872, 0.9399314709071163, 0.9040075104077301, 0.5998064140790018, 0.903500890703905, 0.5343086824375558, 0.627277360898244, 0.8090297504096741, 0.7303719174039627, 0.6677611291714706, 0.7202136199365592, 0.8178668023743709, 0.8664232542083196, 0.8131800587410267, 0.942644033696461, 0.6489749771973732, 0.6607030300129861, 0.5556753305799906, 0.5467172898642518, 0.761513941934358, 0.7617192694467498, 0.9715931474210282, 0.6943451023893396, 0.5138427210210446, 0.8777983259677875, 0.5050938950931333, 0.5186706687603886, 0.5316041949725829, 0.8237926384291601, 0.5476973813865056, 0.7163753012563157, 0.6845108225507595, 0.7766070341230242, 0.7883077794455602, 0.7407924088125332, 0.7570392371795018, 0.8503570415734574, 0.7190612298502584, 0.9022092358672618, 0.7554558226310812, 0.7180431991654309, 0.9705770833799818, 0.8642031607978518, 0.7984411848582298, 0.9834381313547491, 0.7704947946775054, 0.5914614129287259, 0.7361347507628071, 
0.8526747817759072, 0.6921828740640213, 0.8045622400653267, 0.8289924779531153, 0.8636265882318837, 0.8661320709762448, 0.9729472442171625, 0.6305384308615505, 0.8719931199324702, 0.6466183414429763, 0.5350884644197395, 0.8027194311936477, 0.6317380478915222, 0.9400569039968141, 0.7586522813622297, 0.8408747448726566, 0.7203515425172643, 0.7630560673230458, 0.5783459995963741, 0.777814524498915, 0.5424307089620881, 0.5310720024546842, 0.6378356942937053, 0.6565937765118874, 0.9851674411564668, 0.9136669006929409, 0.9645253446626939, 0.6890649709374835, 0.9409422499803337, 0.7479636814985337, 0.9903503544659735, 0.5927755986838261, 0.899672906452762, 0.5914037576696334, 0.9821928808790297, 0.65108651481081, 0.7968445649218723, 0.6363740617299674, 0.6732072404786531, 0.9761352766570479, 0.6686909690667922, 0.8084246652231619, 0.6147058850396075, 0.6586473381339263, 0.6874095747960682, 0.9355545065747353, 0.6548617312204594, 0.9098129832965676, 0.7826180569760881, 0.5129253885366412, 0.8268949335772147, 0.5400740221070177, 0.7895500924975816, 0.9655398648305034, 0.6646032842738232, 0.7119240636269042, 0.5776792020981381, 0.9726123785746221, 0.7964609020224402, 0.9691024040373649, 0.9378813696173313, 0.7227410688442742, 0.7465770975561838, 0.8638463572235302, 0.790339572251427, 0.8523870892122026, 0.9695437488342918, 0.8544977589885372, 0.6410760360892473, 0.9483695007210202, 0.9766892604721211, 0.9041437789469893, 0.6576697820376012, 0.6179163105790378, 0.790849540630882, 0.5658020217077739, 0.5726909921881618, 0.6152615733854174, 0.5219502091525692, 0.8077924438188018, 0.9831554101917974, 0.8862911160765423, 0.7739090345221962, 0.5314536872746016, 0.9916106578325709, 0.5852265760431694, 0.654458832638862, 0.7999348086674061, 0.6628840413475516, 0.9262576133605082, 0.7601672399370694, 0.7416353410771388, 0.6207668618337987, 0.9403352239586148, 0.8615699140703286, 0.9348439356499569, 0.7144607032362407, 0.9570440778714445, 0.6487828050361331, 0.989192098747559, 0.9762521467485317, 0.8417081902157003, 0.8110179189321116, 0.789549609756276, 0.7203395611654507, 0.8517153558770418, 0.6625384184569401, 0.856652189026136, 0.5151793597042622, 0.8799305871522366, 0.6458980481720256, 0.6311556889707571, 0.8811202190514058, 0.9117658277856879, 0.7557627032867491, 0.539139036024693, 0.512435739980718, 0.7062902061025393, 0.6235539804832755, 0.6372926653166835, 0.5586010438024055, 0.8559367933304853, 0.7303433196856852, 0.5192699132669873, 0.7947815978104387, 0.510661067374014, 0.9041096021703686, 0.8770313570937261, 0.7268250006969299, 0.9796502731574195, 0.6548556403888672, 0.5084762513188588, 0.5585135781498733, 0.8200062809560259, 0.9202698907293998, 0.9748151929919912, 0.745140573706847, 0.8014436729376119, 0.9677078427508046, 0.7073261379770268, 0.739640991400933, 0.7598985826037528, 0.7371714397588682, 0.7722016185305758, 0.6059809957983857, 0.6449684588718088, 0.7914931930418876, 0.897961067170529, 0.7508590725358286, 0.9880698793610241, 0.6414529448393895, 0.8550088159941225, 0.5191452143378337, 0.5573287782894297, 0.8116567456750672, 0.8950016349561414, 0.9092775956896271, 0.6639136581754055, 0.6050731729086327, 0.5328686944379568, 0.9976161586823712, 0.8784816000231497, 0.6466020163331188, 0.5806568950558442, 0.5327811232411733, 0.627028757952206, 0.867765215220156, 0.9778874529187038, 0.8333856077959961, 0.5906710198300975, 0.5416540765791633, 0.8135515445633956, 0.8282572493818172, 0.5987480427191667, 0.9449597084374622, 0.7371426189228444, 0.8356001195700531, 0.8037766128496334, 
0.6482718421317616, 0.5216574127490037, 0.8182884831759679, 0.8287797488807751, 0.6399486349708507, 0.870776889785081, 0.7538607767887806, 0.5922045928716269, 0.6776968545176028, 0.5834351905586155, 0.5249241523042817, 0.609453253136648, 0.83980719393308, 0.6471335437672803, 0.8083062830932849, 0.6845334216410978, 0.7692111652238531, 0.5795032340068536, 0.7299840584721793, 0.8491370832162819, 0.5700786881177323, 0.9107909083559388, 0.8746297855328784, 0.8001767161334985, 0.5475462908581064, 0.86651903541155, 0.6825944047059339, 0.5177210640691665, 0.9567626213299281, 0.5303166257935457, 0.9100248241288362, 0.6056721695294562, 0.7896083184420181, 0.5223615725018624, 0.7368112747783151, 0.7875708598971636, 0.7281276586196301, 0.7042967187786757, 0.5248267901579124, 0.5846304345702154, 0.5811199697098608, 0.9291989963478394, 0.6704313677888942, 0.8479008392537299, 0.7764989158487352, 0.8848031625649313, 0.7149377817591864, 0.8793944525855528, 0.68084854486983, 0.712050483474927, 0.8008444623083933, 0.7129583260791823, 0.7562592421181258, 0.5139257712532712, 0.8689183084070378, 0.6672684808700344, 0.6605734341938123, 0.5505104801563829, 0.7134576388457617, 0.823442822691848, 0.9913801880420365, 0.6165047347258314, 0.7996886757345278, 0.8481388880308587, 0.8522080136790522, 0.5897932836606995, 0.771156876152552, 0.5510356612540113, 0.6578324440964098, 0.6457138202492807, 0.9911367913707538, 0.5248811047957239, 0.6141484301425899, 0.9087399914079772, 0.790581953505647, 0.5888556253131594, 0.6195057719503702, 0.6595147340843535, 0.6524919172456565, 0.9400642849312304, 0.6125769444587961, 0.750352918874551, 0.8033214913996014, 0.8211196056581588, 0.9914752680275767, 0.7550504511934852, 0.5235483772123678, 0.9361564286699136, 0.5799278905029444, 0.9590849081330709, 0.9224334821202063, 0.7081943836957807, 0.9094613031482347, 0.904380703328397, 0.84359745671216, 0.9339611355589148, 0.8756442058219829, 0.9188485381353444, 0.9763378455794811, 0.9161671052713245, 0.9473115908848838, 0.5729879195610434, 0.9834166263118729, 0.5619527069236225, 0.7921176205804213, 0.8322783047982937, 0.6299154413689124, 0.802573466582972, 0.6898784515230232, 0.7466445705593787, 0.7843538009738735, 0.9696185378554787, 0.6982025888495218, 0.9196579888636527, 0.9636214400265193, 0.8627298631295017, 0.5580785644376118, 0.5489317866743106, 0.871509612990486, 0.6539803616303441, 0.8747565393180291, 0.5396286177074465, 0.782779613067146, 0.7833069703721437, 0.6726498402206711, 0.7327802731735245, 0.8517853148340808, 0.6612182153214113, 0.8187890561935812, 0.5704385012399777, 0.7123175762933001, 0.7729127808290588, 0.6830285973022691, 0.7476596999875179, 0.7441981493343945, 0.9007783674611813, 0.5581802164023413, 0.7531744912124689, 0.8495750579806974, 0.5257322072732205, 0.8379961164905294, 0.8855152651167507, 0.9631141864569008, 0.8782456045347471, 0.8744236851152649, 0.8833684824291834, 0.8067273989525323, 0.8569416113659729, 0.9636328797574103, 0.6993166409398601, 0.921029759218223, 0.7331705402531216, 0.8375902243012158, 0.8676724326080212, 0.952872812438859, 0.9607407009491611, 0.8690281798213235, 0.7464999323915353, 0.635340708838696, 0.527435575502132, 0.5020376260243427, 0.8352045476537078, 0.5236437871113133, 0.740187321066831, 0.6511889416298872, 0.7057602881180496, 0.7816998830405436, 0.7081787221046907, 0.9524588697073446, 0.5135214193156884, 0.819107555527264, 0.8394616410846986, 0.6983418659474152, 0.9300815213261634, 0.765379220770962, 0.8832927540831483, 0.6397968559896783, 0.6307885562633466, 0.6243960435977307, 
0.8983567875378664, 0.7394741210700552, 0.6139383538357537, 0.9342263664895698, 0.6083713019891525, 0.9695106893503138, 0.9042516383406076, 0.6948760804982662, 0.6125178444551923, 0.8624571500738387, 0.7612194100896719, 0.9026997798741612, 0.5846281208250365, 0.5907880897973495, 0.5470987128376599, 0.6913947590334706, 0.5557666711635025, 0.749186339660893, 0.9469789142222573, 0.6704085657444505, 0.931252814491027, 0.6834717195126212, 0.9775332668457983, 0.5421832108704444, 0.8799751955542345, 0.6313757538058999, 0.900565057055833, 0.7646676040552842, 0.6895233665102731, 0.9675131263006869, 0.5960027219997419, 0.7101149428280267, 0.6579295333164019, 0.6397824595997621, 0.6927334168044533, 0.9388940229755562, 0.6066445277715575, 0.6841064619291677, 0.9592365284537095, 0.5090230084565892, 0.9069938068823447, 0.5355540106569048, 0.5018026272303946, 0.6414543070734959, 0.5373889134105951, 0.8927295866976588, 0.7161590302729053, 0.8708221800568803, 0.5972919627664417, 0.9691132990283955, 0.695485429463514, 0.7640371125167134, 0.9072009780844209, 0.5718691359262729, 0.7867451976664088, 0.721095040313823, 0.8369326333072787, 0.6380336990196198, 0.7399092576875842, 0.6736624729894246, 0.7924547270226392, 0.55868469076503, 0.9591190933591147, 0.6714667829676866, 0.9842869874592254, 0.5411637199805095, 0.6568275721223935, 0.7474251946105642, 0.7572459610394608, 0.9817734897169774, 0.810208117235583, 0.8408891288269975, 0.8249510961398214, 0.7396111830302783, 0.680272212771687, 0.7442733965418862, 0.5788584482993259, 0.7273229060598572, 0.8161104312641383, 0.7903584006399587, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
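// NOTE (hedged annotation): h_B below is a large hardcoded integer array that is
// not documented at its point of definition. Its shape -- a long, mostly
// monotonically increasing prefix followed by non-monotonic entries -- suggests
// embedded sparse-index data (e.g. offsets followed by indices), but its exact
// role is an assumption here; see where h_B is consumed later in this file.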
int h_B[] = {
0, 2, 4, 6, 8, 10, 12, 14, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 47, 49, 51, 53, 56, 58, 60, 62, 64, 66, 68, 70, 73, 75, 77, 79, 81, 83, 85, 87, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 486, 488, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 512, 514, 516, 518, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 682, 684, 686, 688, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 947, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1045, 1047, 1050, 1052, 1054, 1056, 1059, 1061, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1132, 1134, 1137, 1139, 1141, 1143, 1145, 1147, 1149, 1151, 1153, 1155, 1158, 1160, 1163, 1165, 1167, 1169, 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1205, 1207, 1210, 1212, 1214, 1216, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1235, 1237, 1239, 1241, 1244, 1246, 1248, 1250, 1253, 1255, 1258, 1260, 1263, 1265, 1268, 1270, 1273, 1275, 1277, 1279, 1281, 1283, 1286, 1288, 1291, 1293, 1296, 1298, 1301, 1303, 1306, 1308, 1311, 1313, 1316, 1318, 1321, 1323, 1326, 1328, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1348, 1350, 1353, 1355, 1361, 1363, 1365, 1367, 1369, 1371, 1374, 1376, 1378, 1380, 1382, 1384, 1387, 1389, 1391, 1393, 1396, 1398, 1401, 1403, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1430, 1432, 1435, 1437, 1439, 1441, 
1443, 1445, 1448, 1450, 1453, 1455, 1457, 1459, 1461, 1463, 1466, 1468, 1471, 1473, 1476, 1478, 1481, 1483, 1486, 1488, 1491, 1493, 1496, 1498, 1501, 1503, 1506, 1508, 1511, 1513, 1516, 1518, 1520, 1522, 1525, 1527, 1530, 1532, 1538, 1540, 1542, 1544, 1546, 1548, 1551, 1553, 1556, 1558, 1561, 1563, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1580, 1582, 1584, 1586, 1588, 1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1639, 1641, 1643, 1645, 1648, 1650, 1652, 1654, 1656, 1658, 1660, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1694, 1696, 1698, 1700, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1728, 1730, 1732, 1734, 1736, 1738, 1740, 1742, 1744, 1746, 1748, 1750, 1752, 1754, 1756, 1758, 1760, 1762, 1764, 1766, 1768, 1770, 1772, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1790, 1792, 1794, 1796, 1798, 1800, 1802, 1804, 1806, 1809, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1880, 1882, 1884, 1886, 1888, 1890, 1893, 1895, 1898, 1900, 1903, 1905, 1908, 1910, 1912, 1914, 1917, 1919, 1921, 1923, 1926, 1928, 1932, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1950, 1952, 1954, 1956, 1958, 1960, 1962, 1964, 1966, 1968, 1970, 1973, 1975, 1978, 1980, 1983, 1985, 1988, 1990, 1993, 1995, 1997, 1999, 2002, 2004, 2007, 2009, 2014, 2016, 2019, 2021, 2024, 2026, 2029, 2031, 2034, 2036, 2039, 2041, 2044, 2046, 2048, 2050, 2052, 2054, 2057, 2059, 2062, 2064, 2066, 2068, 2071, 2073, 2075, 2077, 2079, 2081, 2083, 2085, 2087, 2089, 2092, 2094, 2099, 2101, 2104, 2106, 2109, 2111, 2113, 2115, 2117, 2119, 2121, 2123, 2125, 2127, 2129, 2131, 2133, 2135, 2137, 2139, 2141, 2143, 2145, 2147, 2149, 2151, 2153, 2155, 2157, 2159, 2161, 2163, 2165, 2167, 2170, 2172, 2174, 2176, 2179, 2181, 2184, 2186, 2192, 2194, 2196, 2198, 2200, 2202, 2205, 2207, 2210, 2212, 2215, 2217, 2220, 2222, 2224, 2226, 2228, 2230, 2233, 2235, 2238, 2240, 2243, 2245, 2248, 2250, 2253, 2255, 2262, 2264, 2266, 2268, 2270, 2272, 2274, 2276, 2279, 2281, 2284, 2286, 2292, 2294, 2296, 2298, 2301, 2303, 2306, 2308, 2317, 2319, 2322, 2324, 2327, 2329, 2332, 2334, 2337, 2339, 2343, 2345, 2347, 2349, 2352, 2354, 2356, 2358, 2360, 2362, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 2396, 2398, 2400, 2402, 2404, 2406, 2408, 2410, 2412, 2414, 2416, 2418, 2420, 2422, 2424, 2426, 2428, 2430, 2432, 2434, 2436, 2438, 2441, 2443, 2445, 2447, 2450, 2452, 2454, 2456, 2458, 2460, 2462, 2464, 2466, 2468, 2470, 2472, 2474, 2476, 2479, 2481, 2483, 2485, 2487, 2489, 2491, 2493, 2495, 2497, 2499, 2501, 2503, 2505, 2507, 2509, 2512, 2514, 2516, 2518, 2520, 2522, 2524, 2526, 2528, 2530, 2533, 2535, 2537, 2539, 2541, 2543, 2545, 2547, 2549, 2551, 2553, 2555, 2557, 2559, 2561, 2563, 2566, 2568, 2570, 2572, 2574, 2576, 2578, 2580, 2582, 2584, 2586, 2588, 2590, 2592, 2594, 2596, 2598, 2600, 2602, 2604, 2606, 2608, 2610, 2612, 2614, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 2638, 2640, 2642, 2644, 2646, 2648, 2650, 2652, 2654, 2656, 2658, 2660, 2662, 2664, 2666, 2668, 2670, 2672, 2674, 2676, 2678, 2680, 2682, 2684, 2686, 2688, 2690, 2692, 2694, 2696, 2699, 2701, 2703, 2705, 2708, 2710, 2712, 2714, 2716, 2718, 2720, 2722, 2724, 2726, 2729, 2731, 2734, 2736, 2738, 2740, 
2742, 2744, 2746, 2748, 2750, 2752, 2754, 2756, 2759, 2761, 2763, 2765, 2767, 2769, 2771, 2773, 2776, 2778, 2780, 2782, 2785, 2787, 2789, 2791, 2793, 2795, 2797, 2799, 2801, 2803, 2805, 2807, 2810, 2812, 2814, 2816, 2819, 2821, 2823, 2825, 2827, 2829, 2831, 2833, 2835, 2837, 2840, 2842, 2844, 2846, 2848, 2850, 2852, 2854, 2856, 2858, 2861, 2863, 2865, 2867, 2869, 2871, 2873, 2875, 2877, 2879, 2881, 2883, 2885, 2887, 2889, 2891, 2893, 2895, 2898, 2900, 2903, 2905, 2908, 2910, 2913, 2915, 2917, 2919, 2921, 2923, 2926, 2928, 2931, 2933, 2935, 2937, 2940, 2942, 2945, 2947, 2950, 2952, 2954, 2956, 2958, 2960, 2963, 2965, 2967, 2969, 2971, 2973, 2976, 2978, 2980, 2982, 2984, 2986, 2989, 2991, 2994, 2996, 2999, 3001, 3005, 3007, 3009, 3011, 3015, 3017, 3019, 3021, 3023, 3025, 3028, 3030, 3033, 3035, 3038, 3040, 3043, 3045, 3048, 3050, 3053, 3055, 3061, 3063, 3065, 3067, 3069, 3071, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3139, 3141, 3145, 3147, 3150, 3152, 3155, 3157, 3160, 3162, 3165, 3167, 3170, 3172, 3175, 3177, 3180, 3182, 3184, 3186, 3188, 3190, 3193, 3195, 3198, 3200, 3203, 3205, 3208, 3210, 3212, 3214, 3216, 3218, 3221, 3223, 3226, 3228, 3231, 3233, 3236, 3238, 3241, 3243, 3246, 3248, 3251, 3253, 3256, 3258, 3261, 3263, 3269, 3271, 3274, 3276, 3279, 3281, 3284, 3286, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3316, 3318, 3320, 3322, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3350, 3352, 3355, 3357, 3359, 3361, 3364, 3366, 3369, 3371, 3374, 3376, 3379, 3381, 3384, 3386, 3389, 3391, 3394, 3396, 3399, 3401, 3404, 3406, 3409, 3411, 3414, 3416, 3419, 3421, 3424, 3426, 3428, 3430, 3433, 3435, 3437, 3439, 3442, 3444, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3469, 3471, 3473, 3475, 3480, 3482, 3484, 3486, 3488, 3490, 3493, 3495, 3498, 3500, 3503, 3505, 3508, 3510, 3512, 3514, 3516, 3518, 3521, 3523, 3525, 3527, 3530, 3532, 3535, 3537, 3542, 3544, 3546, 3548, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3596, 3598, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3623, 3625, 3628, 3630, 3632, 3634, 3637, 3639, 3641, 3643, 3647, 3649, 3651, 3653, 3656, 3658, 3660, 3662, 3665, 3667, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3752, 3754, 3757, 3759, 3762, 3764, 3767, 3769, 3772, 3774, 3777, 3779, 3782, 3784, 3787, 3789, 3792, 3794, 3797, 3799, 3802, 3804, 3808, 3810, 3812, 3814, 3817, 3819, 3822, 3824, 3827, 3829, 3832, 3834, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3854, 3856, 3858, 3860, 3863, 3865, 3869, 3871, 3873, 3875, 3878, 3880, 3883, 3885, 3888, 3890, 3893, 3895, 3897, 3899, 3901, 3903, 3905, 3907, 3909, 3911, 3913, 3915, 3917, 3919, 3921, 3923, 3925, 3927, 3929, 3931, 3933, 3935, 3937, 3939, 3941, 3943, 3945, 3947, 3949, 3951, 3953, 3955, 3958, 3960, 3963, 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979, 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3995, 3997, 3999, 4001, 4003, 4005, 4007, 4009, 4011, 4013, 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046, 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 
4076, 4078, 4080, 4082, 4084, 4086, 4088, 4090, 4092, 4094, 4096, 4098, 4100, 4102, 4104, 4106, 4108, 4110, 4112, 4114, 4116, 4118, 4120, 4122, 4124, 4126, 4128, 4130, 4132, 4134, 4136, 4138, 4140, 4142, 4144, 4146, 4148, 4150, 4152, 4154, 4156, 4158, 4160, 4162, 4164, 4166, 4168, 4170, 4172, 4174, 4176, 4178, 4181, 4183, 4185, 4187, 4189, 4191, 4193, 4195, 4197, 4199, 4201, 4203, 4205, 4207, 4209, 4211, 4213, 4215, 4217, 4219, 4221, 4223, 4225, 4227, 4229, 4231, 4233, 4235, 4237, 4239, 4241, 4243, 4246, 4248, 4250, 4252, 4254, 4256, 4258, 4260, 4262, 4264, 4266, 4268, 4270, 4272, 4274, 4276, 4278, 4280, 4282, 4284, 4286, 4288, 4290, 4292, 4294, 4296, 4298, 4300, 4302, 4304, 4306, 4308, 4310, 4312, 4314, 4316, 4318, 4320, 4322, 4324, 4326, 4328, 4330, 4332, 4335, 4337, 4339, 4341, 4344, 4346, 4348, 4350, 4353, 4355, 4357, 4359, 4361, 4363, 4366, 4368, 4371, 4373, 4376, 4378, 4380, 4382, 4384, 4386, 4389, 4391, 4393, 4395, 4397, 4399, 4402, 4404, 4407, 4409, 4412, 4414, 4417, 4419, 4421, 4423, 4433, 4435, 4438, 4440, 4446, 4448, 4450, 4452, 4454, 4456, 4459, 4461, 4464, 4466, 4469, 4471, 4474, 4476, 4479, 4481, 4484, 4486, 4489, 4491, 4494, 4496, 4499, 4501, 4504, 4506, 4509, 4511, 4514, 4516, 4518, 4520, 4522, 4524, 4527, 4529, 4532, 4534, 4537, 4539, 4542, 4544, 4546, 4548, 4550, 4552, 4554, 4556, 4558, 4560, 4562, 4564, 4566, 4568, 4570, 4572, 4574, 4576, 4578, 4580, 4582, 4584, 4586, 4588, 4590, 4592, 4594, 4596, 4598, 4600, 4602, 4604, 4607, 4609, 4611, 4613, 4615, 4617, 4619, 4621, 4624, 4626, 4628, 4630, 4632, 4634, 4636, 4638, 4642, 4644, 4646, 4648, 4651, 4653, 4656, 4658, 4664, 4666, 4669, 4671, 4677, 4679, 4682, 4684, 4690, 4692, 4694, 4696, 4699, 4701, 4703, 4705, 4709, 4711, 4713, 4715, 4717, 4719, 4721, 4723, 4725, 4727, 4729, 4731, 4734, 4736, 4738, 4740, 4742, 4744, 4746, 4748, 4752, 4754, 4756, 4758, 4760, 4762, 4764, 4766, 4768, 4770, 4772, 4774, 4778, 4780, 4782, 4784, 4787, 4789, 4792, 4794, 4800, 4802, 4805, 4807, 4810, 4812, 4815, 4817, 4819, 4821, 4823, 4825, 4828, 4830, 4832, 4834, 4836, 4838, 4841, 4843, 4846, 4848, 4851, 4853, 4856, 4858, 4860, 4862, 4864, 4866, 4869, 4871, 4873, 4875, 4877, 4879, 4881, 4883, 4885, 4887, 4890, 4892, 4895, 4897, 4900, 4902, 4904, 4906, 4908, 4910, 4912, 4914, 4916, 4918, 4920, 4922, 4924, 4926, 4928, 4930, 4932, 4934, 4936, 4938, 4940, 4942, 4944, 4946, 4948, 4950, 4952, 4954, 4956, 4958, 4961, 4963, 4965, 4967, 4969, 4971, 4973, 4975, 4977, 4979, 4981, 4983, 4986, 4988, 4990, 4992, 4994, 4996, 4999, 5001, 5004, 5006, 5009, 5011, 5017, 5019, 5022, 5024, 5027, 5029, 5031, 5033, 5035, 5037, 5040, 5042, 5048, 5050, 5053, 5055, 5057, 5059, 5061, 5063, 5065, 5067, 5069, 5071, 5073, 5075, 5077, 5079, 5081, 5083, 5085, 5087, 5089, 5091, 5093, 5095, 5097, 5099, 5101, 5103, 5105, 5107, 5109, 5111, 5114, 5116, 5120, 5122, 5124, 5126, 5128, 5130, 5133, 5135, 5138, 5140, 5143, 5145, 5148, 5150, 5152, 5154, 5156, 5158, 5161, 5163, 5165, 5167, 5169, 5171, 5174, 5176, 5179, 5181, 5184, 5186, 5189, 5191, 5193, 5195, 5198, 5200, 5202, 5204, 5208, 5210, 5212, 5214, 5217, 5219, 5221, 5223, 5226, 5228, 5230, 5232, 5235, 5237, 5240, 5242, 5244, 5246, 5248, 5250, 5252, 5254, 5256, 5258, 5260, 5262, 5264, 5266, 5268, 5270, 5272, 5274, 5276, 5278, 5280, 5282, 5285, 5287, 5289, 5291, 5294, 5296, 5298, 5300, 5302, 5304, 5306, 5308, 5311, 5313, 5315, 5317, 5320, 5322, 5324, 5326, 5328, 5330, 5332, 5334, 5336, 5338, 5340, 5342, 5345, 5347, 5349, 5351, 5353, 5355, 5357, 5359, 5361, 5363, 5365, 5367, 5369, 5371, 5374, 5376, 5378, 5380, 5382, 5384, 5387, 5389, 
5392, 5394, 5397, 5399, 5402, 5404, 5407, 5409, 5412, 5414, 5417, 5419, 5422, 5424, 5427, 5429, 5432, 5434, 5437, 5439, 5442, 5444, 5447, 5449, 5452, 5454, 5457, 5459, 5461, 5463, 5465, 5467, 5469, 5471, 5473, 5475, 5477, 5479, 5482, 5484, 5486, 5488, 5490, 5492, 5495, 5497, 5500, 5502, 5506, 5508, 5511, 5513, 5516, 5518, 5520, 5522, 5524, 5526, 5529, 5531, 5534, 5536, 5539, 5541, 5544, 5546, 5549, 5551, 5553, 5555, 5557, 5559, 5562, 5564, 5567, 5569, 5571, 5573, 5575, 5577, 5579, 5581, 5584, 5586, 5588, 5590, 5593, 5595, 5597, 5599, 5601, 5603, 5605, 5607, 5609, 5611, 5613, 5615, 5617, 5619, 5621, 5623, 5625, 5627, 5629, 5631, 5633, 5635, 5637, 5639, 5641, 5643, 5645, 5647, 5649, 5651, 5653, 5655, 5657, 5659, 5661, 5663, 5665, 5667, 5669, 5671, 5673, 5675, 5677, 5679, 5681, 5683, 5685, 5687, 5689, 5691, 5693, 5695, 5697, 5699, 5701, 5703, 5705, 5707, 5709, 5711, 5713, 5715, 5717, 5719, 5721, 5723, 5725, 5727, 5729, 5731, 5733, 5735, 5737, 5739, 5741, 5743, 5745, 5747, 5749, 5751, 5753, 5755, 5757, 5759, 5761, 5763, 5765, 5767, 5769, 5771, 5773, 5775, 5777, 5779, 5781, 5783, 5785, 5787, 5789, 5791, 5793, 5795, 5797, 5799, 5801, 5803, 5805, 5807, 5809, 5811, 5813, 5815, 5817, 5819, 5821, 5823, 5826, 5828, 5830, 5832, 5835, 5837, 5839, 5841, 5843, 5845, 5847, 5849, 5851, 5853, 5855, 5857, 5859, 5861, 5863, 5865, 5867, 5869, 5871, 5873, 5875, 5877, 5879, 5881, 5884, 5886, 5889, 5891, 5893, 5895, 5898, 5900, 5902, 5904, 5907, 5909, 5911, 5913, 5915, 5917, 5919, 5921, 5923, 5925, 5927, 5929, 5932, 5934, 5937, 5939, 5941, 5943, 5945, 5947, 5949, 5951, 5953, 5955, 5957, 5959, 5961, 5963, 5965, 5967, 5969, 5971, 5973, 5975, 5977, 5979, 5981, 5983, 5986, 5988, 5990, 5992, 5994, 5996, 5998, 6000, 6002, 6004, 6007, 6009, 6011, 6013, 6015, 6017, 6019, 6021, 6023, 6025, 6027, 6029, 6031, 6033, 6036, 6038, 6041, 6043, 6045, 6047, 6049, 6051, 6053, 6055, 6058, 6060, 6064, 6066, 6068, 6070, 6074, 6076, 6079, 6081, 6084, 6086, 6089, 6091, 6094, 6096, 6098, 6100, 6102, 6104, 6107, 6109, 6111, 6113, 6115, 6117, 6120, 6122, 6124, 6126, 6129, 6131, 6134, 6136, 6142, 6144, 6146, 6148, 6150, 6152, 6155, 6157, 6159, 6161, 6163, 6165, 6167, 6169, 6171, 6173, 6175, 6177, 6179, 6181, 6183, 6185, 6187, 6189, 6191, 6193, 6196, 6198, 6200, 6202, 6204, 6206, 6208, 6210, 6213, 6215, 6218, 6220, 6223, 6225, 6228, 6230, 6233, 6235, 6238, 6240, 6243, 6245, 6248, 6250, 6256, 6258, 6261, 6263, 6266, 6268, 6271, 6273, 6276, 6278, 6281, 6283, 6286, 6288, 6290, 6292, 6294, 6296, 6299, 6301, 6303, 6305, 6307, 6309, 6311, 6313, 6316, 6318, 6324, 6326, 6329, 6331, 6334, 6336, 6338, 6340, 6343, 6345, 6347, 6349, 6353, 6355, 6358, 6360, 6363, 6365, 6368, 6370, 6373, 6375, 6378, 6380, 6383, 6385, 6388, 6390, 6393, 6395, 6398, 6400, 6402, 6404, 6406, 6408, 6410, 6412, 6414, 6416, 6419, 6421, 6424, 6426, 6429, 6431, 6434, 6436, 6438, 6440, 6442, 6444, 6447, 6449, 6452, 6454, 6456, 6458, 6460, 6462, 6465, 6467, 6469, 6471, 6473, 6475, 6478, 6480, 6482, 6484, 6486, 6488, 6490, 6492, 6494, 6496, 6498, 6500, 6502, 6504, 6506, 6508, 6510, 6512, 6514, 6516, 6518, 6520, 6522, 6524, 6526, 6528, 6530, 6532, 6534, 6536, 6538, 6540, 6542, 6544, 6546, 6548, 6550, 6552, 6554, 6556, 6558, 6560, 6562, 6564, 6566, 6568, 6570, 6572, 6574, 6576, 6578, 6580, 6582, 6584, 6586, 6588, 6590, 6592, 6594, 6596, 6599, 6601, 6603, 6605, 6608, 6610, 6612, 6614, 6616, 6618, 6620, 6622, 6625, 6627, 6630, 6632, 6634, 6636, 6638, 6640, 6642, 6644, 6647, 6649, 6651, 6653, 6655, 6657, 6659, 6661, 6664, 6666, 6668, 6670, 6672, 6674, 6677, 6679, 6682, 6684, 6687, 6689, 
6692, 6694, 6697, 6699, 6702, 6704, 6707, 6709, 6712, 6714, 6717, 6719, 6722, 6724, 6727, 6729, 6732, 6734, 6736, 6738, 6740, 6742, 6744, 6746, 6748, 6750, 6752, 6754, 6756, 6758, 6760, 6762, 6765, 6767, 6769, 6771, 6773, 6775, 6777, 6779, 6781, 6783, 6786, 6788, 6791, 6793, 6796, 6798, 6801, 6803, 6805, 6807, 6810, 6812, 6814, 6816, 6819, 6821, 6825, 6827, 6829, 6831, 6834, 6836, 6839, 6841, 6844, 6846, 6849, 6851, 6854, 6856, 6859, 6861, 6863, 6865, 6867, 6869, 1537, 1535, 1136, 1408, 1406, 1136, 5391, 5386, 5391, 5386, 5391, 5386, 5411, 5494, 5416, 5494, 6701, 6706, 6255, 6253, 6387, 6382, 6387, 6382, 6701, 6706, 6706, 6701, 5416, 5411, 5426, 5421, 5416, 5411, 5426, 5421, 5416, 5411, 3060, 3058, 3268, 3268, 5416, 5411, 6800, 6141, 6139, 6141, 6139, 6255, 6253, 7064, 7066, 7068, 7070, 7072, 7074, 7076, 7078, 6800, 4429, 4427, 4442, 4437, 4660, 4655, 4660, 4655, 4673, 4668, 4676, 4674, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4689, 4687, 4689, 4687, 4660, 4655, 4809, 4804, 4429, 4427, 4442, 4437, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4660, 4655, 4429, 4427, 4442, 4437, 4443, 4445, 4429, 4427, 4442, 4437, 4443, 4445, 4416, 4416, 4442, 4437, 4388, 4388, 4429, 4427, 4432, 4430, 4429, 4427, 4432, 4430, 4797, 4799, 4708, 4698, 4799, 4797, 5456, 5451, 5456, 5451, 5416, 5411, 5047, 5045, 5047, 5045, 5391, 5386, 5391, 5386, 5416, 5411, 5416, 5411, 3220, 2478, 2511, 2478, 2511, 3220, 3477, 3479, 815, 3831, 3826, 3831, 3826, 816, 3831, 3826, 3796, 3791, 3060, 3058, 3060, 3058, 2758, 2511, 2511, 2478, 2478, 3290, 1300, 1295, 1310, 1305, 1320, 1315, 1300, 1295, 1310, 1305, 1204, 1310, 1305, 1204, 1320, 1315, 1320, 1315, 1360, 1358, 1360, 1358, 1405, 1400, 1408, 1406, 1360, 1358, 1360, 1358, 1405, 1400, 1408, 1406, 1360, 1358, 1360, 1358, 1405, 1400, 1408, 1406, 1360, 1358, 1360, 1358, 1408, 1406, 1272, 1267, 1272, 1267, 1272, 1267, 1272, 1267, 1320, 1315, 1320, 1315, 1360, 1358, 1360, 1358, 1408, 1406, 1408, 1406, 1537, 1535, 1537, 1535, 1987, 1987, 1838, 1838, 2070, 2098, 2001, 2013, 2001, 2013, 2070, 2098, 2260, 2258, 2191, 2189, 2191, 2189, 2260, 2258, 2291, 2289, 2313, 2311, 2313, 2311, 2314, 2291, 2289, 2313, 2311, 2313, 2311, 2316, 2289, 2291, 2291, 2289, 2313, 2311, 2313, 2311, 2316, 2314, 3060, 3058, 3278, 3273, 3278, 3273, 3268, 3266, 2930, 2930, 2925, 3060, 3058, 3164, 3159, 3164, 3159, 3235, 3235, 2758, 2925, 2925, 2988, 2988, 3060, 3058, 3014, 3014, 3060, 3058, 3144, 3144, 3268, 3266, 3268, 3266, 3290, 3403, 3398, 3403, 3398, 3550, 3552, 3477, 3479, 3479, 3477, 3541, 3541, 3552, 3550, 3887, 3892, 3831, 3826, 3831, 3826, 3579, 3578, 3887, 3892, 3796, 3791, 3796, 3791, 3796, 3791, 3821, 3816, 3821, 3816, 3831, 3826, 3796, 3791, 3796, 3791, 3821, 3816, 3831, 3826, 3831, 3826, 5406, 5406, 6093, 6088, 6093, 6088, 6255, 6253, 5016, 5014, 5391, 5386, 5391, 5386, 4445, 4443, 4687, 4689, 4689, 4687, 4429, 4427, 4442, 4437, 4429, 4427, 4442, 4437, 4429, 4427, 4432, 4430, 4429, 4427, 4432, 4430, 4445, 4443, 4429, 4427, 4432, 4430, 4429, 4427, 4432, 4430, 4445, 4443, 4689, 4687, 4698, 4708, 4663, 4661, 4663, 4661, 4673, 4668, 4676, 4674, 4663, 4661, 4663, 4661, 4673, 4668, 4676, 4674, 4663, 4661, 4663, 4661, 4676, 4674, 4689, 4687, 4698, 4708, 4797, 4799, 4799, 4797, 4809, 4804, 4799, 4797, 4799, 4797, 4809, 4804, 4777, 4799, 4797, 4799, 4797, 4777, 4799, 4797, 4799, 4797, 5016, 5014, 5008, 5008, 5045, 5047, 5016, 5014, 5016, 5014, 5047, 5045, 5047, 5045, 5119, 5119, 5207, 5207, 5391, 5386, 5391, 5386, 5373, 5373, 5456, 5451, 5456, 5451, 5481, 5481, 5505, 5505, 6320, 
6315, 6320, 6315, 6323, 6321, 6320, 6315, 6320, 6315, 6323, 6321, 6731, 6726, 6731, 6726, 9144, 9146, 9148, 9150, 6255, 6253, 9166, 9168, 9170, 9172, 9174, 9176, 9178, 9180, 9182, 9184, 9186, 9188, 6141, 6139, 6141, 6139, 6255, 6253, 6320, 6315, 6320, 6315, 6323, 6321, 6315, 6320, 6320, 6315, 6320, 6315, 6320, 6315, 6323, 6321, 6320, 6315, 6320, 6315, 6323, 6321, 6451, 6446, 6451, 6446, 6711, 6711, 9355, 9357, 9359, 9361, 9363, 9365, 9367, 9369, 9371, 9373, 9375, 9377, 9379, 9381, 9383, 9385, 9387, 9389, 9391, 9393, 9396, 9398, 9400, 9402, 6141, 6139, 6141, 6139, 6073, 6073, 6141, 6139, 6141, 6139, 6255, 6253, 6255, 6253, 6255, 6253, 6323, 6321, 6323, 6321, 6323, 6321, 6352, 6352, 6858, 6858, 9573, 9575, 9578, 9580, 9582, 9584, 9586, 9588, 9590, 9592, 9594, 9596, 9665, 9667, 9670, 9672, 9678, 9680, 9682, 9684, 9687, 9689, 9692, 9694, 9700, 9702, 9705, 9707, 9710, 9712, 9715, 9717, 9577, 9674, 9577, 9675, 9677, 9674, 9697, 9697, 9726, 9724, 9721, 9726, 9724, 9723, 9726, 9724, 9699, 9699, 9723, 9721, 9697, 9699, 9697, 9699, 9723, 9721, 9723, 9721, 9726, 9724, 9677, 9675, 9677, 9675, 9699, 9697, 9699, 9697, 9723, 9721, 9726, 9724, 9723, 9721, 9726, 9724, 13, 14, 15, 13648, 13650, 13652, 13654, 13656, 13658, 13660, 13662, 13664, 13666, 13668, 13670, 13672, 13674, 13676, 13678, 13680, 13682, 13684, 13686, 13688, 13690, 13692, 13694, 13696, 13698, 13700, 13702, 13704, 13706, 13708, 13710, 13712, 13714, 13716, 13718, 13720, 13722, 13724, 13726, 13728, 13730, 13732, 13734, 13736, 13738, 13740, 13742, 13744, 13746, 13748, 13750, 13752, 13754, 13756, 13758, 13760, 13762, 13764, 13766, 13768, 13770, 13772, 13774, 13776, 13778, 13780, 13782, 13784, 13786, 13788, 13790, 13792, 13794, 13796, 13798, 13800, 13802, 13804, 13806, 13808, 13810, 13812, 13814, 13816, 13818, 13820, 13822, 13824, 13826, 13828, 13830, 13832, 13834, 13836, 13838, 13840, 13842, 13844, 13846, 13848, 13850, 13852, 13854, 13856, 13858, 13860, 13862, 13864, 13866, 13868, 13870, 13872, 13874, 13876, 13878, 13880, 13882, 13884, 13886, 13888, 13890, 13892, 13894, 13896, 13898, 13900, 13902, 13904, 13906, 13908, 13910, 13912, 13914, 13916, 13918, 13920, 13922, 13924, 13926, 13928, 13930, 13932, 13934, 13936, 13938, 13940, 13942, 13944, 13946, 13948, 13950, 13952, 13954, 13956, 13958, 13960, 13962, 13964, 13966, 13968, 13970, 13972, 13974, 13976, 13978, 13980, 13982, 13984, 13986, 13988, 13990, 13992, 13994, 13996, 13998, 14000, 14002, 14004, 14006, 14008, 14010, 14012, 14014, 14016, 14018, 14020, 14022, 14024, 14026, 14028, 14030, 14032, 14034, 14036, 14038, 14040, 14042, 14044, 14046, 14048, 14050, 14052, 14054, 14056, 14058, 14060, 14062, 14064, 14066, 14068, 14070, 14072, 14074, 14076, 14078, 14080, 14082, 14084, 14086, 14088, 14090, 14092, 14094, 14096, 14098, 14100, 14102, 14104, 14106, 14108, 14110, 14112, 14114, 14116, 14118, 14120, 14122, 14124, 14126, 14128, 14130, 14132, 14134, 14136, 14138, 14140, 14142, 14144, 14146, 14148, 14150, 14152, 14154, 14156, 14158, 14160, 14162, 14164, 14166, 14168, 14170, 14172, 14174, 14176, 14178, 14180, 14182, 14184, 14186, 14188, 14190, 14192, 14194, 14196, 14198, 14200, 14202, 14204, 14206, 14208, 14210, 14212, 14214, 14216, 14218, 14220, 14222, 14224, 14226, 14228, 14230, 14232, 14234, 14236, 14238, 14240, 14242, 14244, 14246, 14248, 14250, 14252, 14254, 14256, 14258, 14260, 14262, 14264, 14266, 14268, 14270, 14272, 14274, 14276, 14278, 14280, 14282, 14284, 14286, 14288, 14290, 14292, 14294, 14296, 14298, 14300, 14302, 14304, 14306, 14308, 14310, 14312, 14314, 14316, 14318, 14320, 14322, 14324, 
14326, 14328, 14330, 14332, 14334, 14336, 14338, 14340, 14342, 14344, 14346, 14348, 14350, 14352, 14354, 14356, 14358, 14360, 14362, 14364, 14366, 14368, 14370, 14372, 14374, 14376, 14378, 14380, 14382, 14384, 14386, 14388, 14390, 14392, 14394, 14396, 14398, 14400, 14402, 14404, 14406, 14408, 14410, 14412, 14414, 14416, 14418, 14420, 14422, 14424, 14426, 14428, 14430, 14432, 14434, 14436, 14438, 14440, 14442, 14444, 14446, 14448, 14450, 14452, 14454, 14456, 14458, 14460, 14462, 14464, 14466, 14468, 14470, 14472, 14474, 14476, 14478, 14480, 14482, 14484, 14486, 14488, 14490, 14492, 14494, 14496, 14498, 14500, 14502, 14504, 14506, 14508, 14510, 14512, 14514, 14516, 14518, 14520, 14522, 14524, 14526, 14528, 14530, 14532, 14534, 14536, 14538, 14540, 14542, 14544, 14546, 14548, 14550, 14552, 14554, 14556, 14558, 14560, 14562, 14564, 14566, 14568, 14570, 14572, 14574, 14576, 14578, 14580, 14582, 14584, 14586, 14588, 14590, 14592, 14594, 14596, 14598, 14600, 14602, 14604, 14606, 14608, 14610, 14612, 14614, 14616, 14618, 14620, 14622, 14624, 14626, 14628, 14630, 14632, 14634, 14636, 14638, 14640, 14642, 14644, 14646, 14648, 14650, 14652, 14654, 14656, 14658, 14660, 14662, 14664, 14666, 14668, 14670, 14672, 14674, 14676, 14678, 14680, 14682, 14684, 14686, 14688, 14690, 14692, 14694, 14696, 14698, 14700, 14702, 14704, 14706, 14708, 14710, 14712, 14714, 14716, 14718, 14720, 14722, 14724, 14726, 14728, 14730, 14732, 14734, 14736, 14738, 14740, 14742, 14744, 14746, 14748, 14750, 14752, 14754, 14756, 14758, 14760, 14762, 14764, 14766, 14768, 14770, 14772, 14774, 14776, 14778, 14780, 14782, 14784, 14786, 14788, 14790, 14792, 14794, 14796, 14798, 14800, 14802, 14804, 14806, 14808, 14810, 14812, 14814, 14816, 14818, 14820, 14822, 14824, 14826, 14828, 14830, 14832, 14834, 14836, 14838, 14840, 14842, 14844, 14846, 14848, 14850, 14852, 14854, 14856, 14858, 14860, 14862, 14864, 14866, 14868, 14870, 14872, 14874, 14876, 14878, 14880, 14882, 14884, 14886, 14888, 14890, 14892, 14894, 14896, 14898, 14900, 14902, 14904, 14906, 14908, 14910, 14912, 14914, 14916, 14918, 14920, 14922, 14924, 14926, 14928, 14930, 14932, 14934, 14936, 14938, 14940, 14942, 14944, 14946, 14948, 14950, 14952, 14954, 14956, 14958, 14960, 14962, 14964, 14966, 14968, 14970, 14972, 14974, 14976, 14978, 14980, 14982, 14984, 14986, 14988, 14990, 14992, 14994, 14996, 14998, 15000, 15002, 15004, 15006, 15008, 15010, 15012, 15014, 15016, 15018, 15020, 15022, 15024, 15026, 15028, 15030, 15032, 15034, 15036, 15038, 15040, 15042, 15044, 15046, 15048, 15050, 15052, 15054, 15056, 15058, 15060, 15062, 15064, 15066, 15068, 15070, 15072, 15074, 15076, 15078, 15080, 15082, 15084, 15086, 15088, 15090, 15092, 15094, 15096, 15098, 15100, 15102, 15104, 15106, 15108, 15110, 15112, 15114, 15116, 15118, 15120, 15122, 15124, 15126, 15128, 15130, 15132, 15134, 15136, 15138, 15140, 15142, 15144, 15146, 15148, 15150, 15152, 15154, 15156, 15158, 15160, 15162, 15164, 15166, 15168, 15170, 15172, 15174, 15176, 15178, 15180, 15182, 15184, 15186, 15188, 15190, 15192, 15194, 15196, 15198, 15200, 15202, 15204, 15206, 15208, 15210, 15212, 15214, 15216, 15218, 15220, 15222, 15224, 15226, 15228, 15230, 15232, 15234, 15236, 15238, 15240, 15242, 15244, 15246, 15248, 15250, 15252, 15254, 15256, 15258, 15260, 15262, 15264, 15266, 15268, 15270, 15272, 15274, 15276, 15278, 15280, 15282, 15284, 15286, 15288, 15290, 15292, 15294, 15296, 15298, 15300, 15302, 15304, 15306, 15308, 15310, 15312, 15314, 15316, 15318, 15320, 15322, 15324, 15326, 15328, 15330, 15332, 15334, 15336, 15338, 
15340, 15342, 15344, 15346, 15348, 15350, 15352, 15354, 15356, 15358, 15360, 15362, 15364, 15366, 15368, 15370, 15372, 15374, 15376, 15378, 15380, 15382, 15384, 15386, 15388, 15390, 15392, 15394, 15396, 15398, 15400, 15402, 15404, 15406, 15408, 15410, 15412, 15414, 15416, 15418, 15420, 15422, 15424, 15426, 15428, 15430, 15432, 15434, 15436, 15438, 15440, 15442, 15444, 15446, 15448, 15450, 15452, 15454, 15456, 15458, 15460, 15462, 15464, 15466, 15468, 15470, 15472, 15474, 15476, 15478, 15480, 15482, 15484, 15486, 15488, 15490, 15492, 15494, 15496, 15498, 15500, 15502, 15504, 15506, 15508, 15510, 15512, 15514, 15516, 15518, 15520, 15522, 15524, 15526, 15528, 15530, 15532, 15534, 15536, 15538, 15540, 15542, 15544, 15546, 15548, 15550, 15552, 15554, 15556, 15558, 15560, 15562, 15564, 15566, 15568, 15570, 15572, 15574, 15576, 15578, 15580, 15582, 15584, 15586, 15588, 15590, 15592, 15594, 15596, 15598, 15600, 15602, 15604, 15606, 15608, 15610, 15612, 15614, 15616, 15618, 15620, 15622, 15624, 15626, 15628, 15630, 15632, 15634, 15636, 15638, 15640, 15642, 15644, 15646, 15648, 15650, 15652, 15654, 15656, 15658, 15660, 15662, 15664, 15666, 15668, 15670, 15672, 15674, 15676, 15678, 15680, 15682, 15684, 15686, 15688, 15690, 15692, 15694, 15696, 15698, 15700, 15702, 15704, 15706, 15708, 15710, 15712, 15714, 15716, 15718, 15720, 15722, 15724, 15726, 15728, 15730, 15732, 15734, 15736, 15738, 15740, 15742, 15744, 15746, 15748, 15750, 15752, 15754, 15756, 15758, 15760, 15762, 15764, 15766, 15768, 15770, 15772, 15774, 15776, 15778, 15780, 15782, 15784, 15786, 15788, 15790, 15792, 15794, 15796, 15798, 15800, 15802, 15804, 15806, 15808, 15810, 15812, 15814, 15816, 15818, 15820, 15822, 15824, 15826, 15828, 15830, 15832, 15834, 15836, 15838, 15840, 15842, 15844, 15846, 15848, 15850, 15852, 15854, 15856, 15858, 15860, 15862, 15864, 15866, 15868, 15870, 15872, 15874, 15876, 15878, 15880, 15882, 15884, 15886, 15888, 15890, 15892, 15894, 15896, 15898, 15900, 15902, 15904, 15906, 15908, 15910, 15912, 15914, 15916, 15918, 15920, 15922, 15924, 15926, 15928, 15930, 15932, 15934, 15936, 15938, 15940, 15942, 15944, 15946, 15948, 15950, 15952, 15954, 15956, 15958, 15960, 15962, 15964, 15966, 15968, 15970, 15972, 15974, 15976, 15978, 15980, 15982, 15984, 15986, 15988, 15990, 15992, 15994, 15996, 15998, 16000, 16002, 16004, 16006, 16008, 16010, 16012, 16014, 16016, 16018, 16020, 16022, 16024, 16026, 16028, 16030, 16032, 16034, 16036, 16038, 16040, 16042, 16044, 16046, 16048, 16050, 16052, 16054, 16056, 16058, 16060, 16062, 16064, 16066, 16068, 16070, 16072, 16074, 16076, 16078, 16080, 16082, 16084, 16086, 16088, 16090, 16092, 16094, 16096, 16098, 16100, 16102, 16104, 16106, 16108, 16110, 16112, 16114, 16116, 16118, 16120, 16122, 16124, 16126, 16128, 16130, 16132, 16134, 16136, 16138, 16140, 16142, 16144, 16146, 16148, 16150, 16152, 16154, 16156, 16158, 16160, 16162, 16164, 16166, 16168, 16170, 16172, 16174, 16176, 16178, 16180, 16182, 16184, 16186, 16188, 16190, 16192, 16194, 16196, 16198, 16200, 16202, 16204, 16206, 16208, 16210, 16212, 16214, 16216, 16218, 16220, 16222, 16224, 16226, 16228, 16230, 16232, 16234, 16236, 16238, 16240, 16242, 16244, 16246, 16248, 16250, 16252, 16254, 16256, 16258, 16260, 16262, 16264, 16266, 16268, 16270, 16272, 16274, 16276, 16278, 16280, 16282, 16284, 16286, 16288, 16290, 16292, 16294, 16296, 16298, 16300, 16302, 16304, 16306, 16308, 16310, 16312, 16314, 16316, 16318, 16320, 16322, 16324, 16326, 16328, 16330, 16332, 16334, 16336, 16338, 16340, 16342, 16344, 16346, 16348, 16350, 16352, 
28879, 28784, 29228, 26167, 28180, 29229, 29001, 29230, 28987, 29231, 27475, 28788, 29232, 28790, 29233, 28791, 29234, 28792, 28180, 29235, 28997, 29236, 29237, 28794, 27498, 28797, 29238, 29239, 28799, 27510, 28802, 29240, 29241, 29242, 29243, 29246, 29247, 29114, 29116, 29114, 29248, 29249, 29114, 29116, 28803, 29250, 28804, 29251, 29252, 29253, 29114, 29254, 29255, 29114, 29116, 27586, 27588, 29258, 28820, 29259, 28822, 29260, 29261, 28823, 27606, 28826, 29262, 29263, 28830, 29264, 29265, 28831, 29266, 27555, 29269, 27571, 27568, 25177, 28813, 27586, 27588, 29270, 28820, 29271, 28822, 29272, 29273, 28823, 28826, 27606, 29274, 29275, 28830, 29276, 29277, 28831, 29278, 28485, 29281, 28496, 28493, 25507, 29078, 27588, 27586, 29282, 29283, 28820, 29284, 28822, 29285, 29286, 28823, 28826, 27606, 29287, 29288, 28830, 29289, 29290, 28831, 27631, 25217, 25216, 27634, 29291, 27649, 27646, 25233, 25221, 28841, 29292, 27688, 27685, 25233, 25232, 28855, 27669, 25228, 25227, 27672, 29293, 27688, 27685, 25233, 28855, 29294, 29295, 27696, 29296, 29297, 29298, 29299, 29300, 29301, 29302, 29303, 29304, 29305, 28862, 29306, 28864, 28867, 29307, 29308, 28869, 28943, 29309, 29310, 28873, 28876, 29311, 29312, 28983, 29313, 28997, 29314, 28189, 29315, 29000, 29316, 28907, 27866, 29317, 29318, 28879, 28877, 29319, 26167, 29320, 28884, 28877, 29321, 27798, 28180, 29322, 28997, 29323, 28189, 29324, 29000, 29325, 28907, 27866, 29326, 29327, 28879, 28881, 29328, 26167, 29329, 28884, 28886, 29330, 27798, 28888, 28889, 28890, 28893, 28894, 28895, 29331, 29332, 29333, 28898, 29334, 28899, 28900, 29335, 28902, 28903, 29336, 29337, 29338, 28983, 29339, 28997, 29340, 27859, 29341, 28189, 29342, 28907, 27866, 29343, 29344, 28909, 28910, 29345, 27875, 29346, 25262, 29347, 29348, 29349, 28913, 28914, 28916, 29350, 28919, 28921, 29351, 29352, 27914, 29353, 29354, 29355, 29356, 29357, 29358, 29359, 29360, 29361, 29362, 29363, 28924, 29364, 28925, 29365, 28926, 29366, 28927, 29367, 29368, 29369, 29370, 28933, 29371, 26360, 26357, 29372, 29373, 29374, 29375, 29376, 29377, 29378, 29379, 28930, 28931, 29380, 29381, 29382, 28933, 28935, 28938, 29383, 29384, 28940, 28943, 29385, 29386, 28945, 28948, 28947, 29387, 29388, 28950, 28953, 28952, 29389, 29390, 28034, 29391, 28968, 28089, 29392, 26461, 29393, 29394, 26468, 29395, 28957, 28960, 28959, 29396, 29397, 29398, 28962, 28965, 28964, 29399, 29400, 29401, 28081, 29402, 28968, 28089, 29403, 29404, 29405, 28131, 28977, 29406, 28118, 29407, 28123, 29408, 28980, 29409, 28131, 29410, 28983, 29411, 28997, 29412, 29413, 28984, 28144, 28986, 29414, 29415, 28987, 28172, 28989, 29416, 29417, 28990, 28165, 28992, 29418, 29419, 28993, 28172, 28995, 29420, 28180, 29421, 28997, 29422, 29016, 29423, 28189, 29424, 29000, 29425, 29001, 28200, 29003, 29426, 29427, 29005, 29007, 29009, 29428, 29011, 29429, 29013, 29430, 28229, 29431, 28234, 29432, 29016, 29433, 29434, 29017, 29019, 29435, 29436, 29020, 29022, 29437, 29438, 29023, 29024, 29026, 29439, 29029, 29031, 28290, 29441, 29034, 29035, 29442, 29443, 29444, 29445, 29446, 29447, 28308, 29041, 29037, 29449, 29038, 29451, 29452, 29039, 29453, 28325, 29454, 29455, 29456, 29457, 29458, 29459, 29460, 29041, 29461, 29462, 29042, 28346, 29463, 29464, 29465, 29466, 29467, 29468, 29469, 29470, 29471, 29472, 29473, 29044, 29474, 29475, 29476, 29477, 29478, 29045, 29479, 29480, 29481, 29046, 29482, 29047, 29048, 29049, 28385, 29483, 29484, 29485, 29486, 29487, 29488, 28401, 28406, 28453, 28451, 29489, 29065, 29490, 29067, 29491, 29492, 28445, 29054, 
28483, 25503, 25502, 28485, 29493, 28496, 28493, 25507, 29078, 29494, 29495, 29496, 29497, 29498, 29499, 29058, 28435, 29500, 29501, 29502, 29503, 28453, 28451, 29504, 29065, 29505, 29067, 29506, 29507, 28445, 29069, 29508, 28476, 28483, 25503, 25502, 29509, 28496, 28493, 25507, 29078, 28453, 28451, 29510, 29065, 29511, 29067, 29512, 29513, 28468, 29069, 29514, 28476, 28483, 25503, 25502, 29515, 28496, 28493, 25507, 29078, 29516, 29517, 29518, 29519, 29520, 29082, 28513, 29521, 29522, 29523, 29524, 29525, 29526, 29527, 29082, 28534, 29528, 29529, 28545, 29530, 29531, 29532, 29533, 29534, 29535, 28561, 29536, 29537, 29538, 28567, 28572, 28574, 29091, 28579, 29539, 29540, 28582, 29541, 28587, 29542, 28589, 29094, 27180, 29095, 29543, 29544, 29545, 28606, 28604, 28609, 29546, 29547, 29548, 28616, 28621, 29549, 29550, 28627, 29551, 29552, 29553, 28684, 29554, 29114, 29555, 29556, 29116, 29117, 29103, 29104, 29105, 29106, 29557, 27254, 29558, 29559, 29560, 29561, 29562, 29563, 29564, 29109, 29565, 28684, 29566, 29567, 29568, 29114, 29569, 28679, 28676, 28684, 29571, 29572, 29573, 29114, 29574, 29116, 29117, 29570, 29575, 29244, 29245, 29210, 29576, 29213, 29577, 29244, 29245, 29570, 29578, 29575, 29579, 29570, 29580, 29575, 29581, 29256, 29582, 29583, 29257, 29584, 29585, 29586, 29587, 29588, 29589, 29590, 29570, 29591, 29575, 29592, 14, 15, 29640, 29641, 29643, 29646, 29649, 29651, 29653, 29658, 29659, 29661, 29664, 29667, 29669, 29671, 29676, 28420, 29679, 29680, 29607, 29685, 29686, 29689, 29607, 28861, 28420, 29693, 29697, 29698, 29700, 29704, 29707, 28420, 29710, 29714, 29715, 29717, 29721, 29607, 29725, 29726, 29731, 29737, 27393, 29739, 29741, 29742, 29743, 29749, 27395, 29751, 29755, 29756, 29757, 29758, 29759, 29765, 29767, 29770, 29771, 29772, 29773, 29775, 29777, 29778, 29780, 29781, 29784, 29785, 29787, 29789, 29790, 29792, 29794, 29795, 29797, 29798, 29800, 29802, 29804, 29805, 29807, 29809, 29811, 29812, 29814, 29817, 29818, 29819, 29822, 29823, 29824, 29826, 29829, 29831, 29832, 29833, 29834, 29836, 29837, 29838, 29840, 29844, 29845, 29847, 29848, 29849, 29850, 29852, 29854, 29857, 29858, 29859, 28828, 29862, 29865, 29866, 29867, 25901, 29869, 29870, 29871, 25176, 29872, 29873, 29874, 29876, 29878, 29881, 29882, 29883, 28828, 29886, 29889, 29890, 29891, 25929, 29893, 29894, 29895, 25506, 29896, 29897, 29898, 29899, 29901, 29903, 29906, 29907, 29908, 28828, 29911, 29914, 29915, 29916, 29917, 29918, 26007, 29920, 29921, 29922, 29923, 29924, 29926, 29927, 29928, 29929, 29930, 29931, 29932, 29933, 29934, 26050, 29936, 29937, 29938, 25232, 29939, 29607, 29942, 28859, 29945, 29609, 28861, 29953, 29955, 29956, 28866, 29959, 29960, 28871, 29963, 29964, 28875, 29967, 29969, 29971, 29973, 29975, 29976, 29979, 29980, 29982, 29984, 29985, 29987, 29988, 29990, 29992, 29994, 29996, 29997, 30000, 30001, 30003, 30005, 30006, 30008, 30009, 30010, 30011, 28892, 30012, 30013, 30014, 28897, 30018, 30020, 30021, 30023, 28905, 30024, 30025, 30028, 30030, 30032, 30034, 30036, 30037, 30040, 30041, 30043, 30045, 30049, 30050, 30051, 28918, 30053, 30054, 26308, 30057, 30059, 30063, 30066, 30069, 30071, 30073, 30075, 30080, 30082, 30083, 30085, 30088, 30092, 30093, 30097, 30098, 30099, 28937, 30102, 30103, 28942, 30106, 30107, 30108, 30111, 30112, 30113, 30116, 30118, 30119, 30121, 30124, 30126, 30127, 30128, 30130, 30132, 30133, 30134, 30136, 30138, 30140, 30141, 28971, 28973, 28975, 30145, 30146, 30148, 30150, 30152, 30154, 30156, 30158, 30161, 30162, 30163, 30166, 30167, 30168, 30171, 30172, 30173, 
30176, 30177, 30178, 30180, 30182, 30184, 30186, 30188, 30190, 30191, 30192, 30195, 30196, 30197, 30199, 30201, 30203, 30205, 30207, 30210, 30211, 30214, 30215, 30218, 30219, 30220, 29028, 30222, 30223, 26780, 30224, 30226, 30227, 30228, 30234, 30235, 30236, 30238, 30241, 30243, 30251, 30254, 30255, 30267, 30269, 30273, 28371, 30275, 30277, 30279, 30280, 30281, 30282, 29625, 30289, 28405, 30290, 30291, 30292, 30294, 30296, 30297, 30299, 30300, 30301, 30302, 30303, 30304, 28476, 30306, 30307, 30308, 25506, 30309, 28420, 30310, 30312, 30316, 30317, 29629, 30322, 30323, 30325, 30327, 30328, 30330, 30331, 30333, 30334, 30335, 30336, 30338, 30339, 30340, 25506, 30341, 30342, 30343, 30345, 30347, 30348, 30350, 30351, 30353, 30354, 30355, 30356, 30358, 30359, 30360, 25506, 30361, 28502, 30362, 30364, 30367, 30368, 28519, 30370, 30372, 30376, 30377, 29633, 30380, 28549, 30383, 30387, 30391, 30392, 30393, 30394, 30395, 30398, 30400, 30402, 30403, 30404, 30405, 30409, 30410, 30411, 30415, 30416, 30419, 30423, 30425, 30426, 30428, 30429, 30430, 30431, 30432, 30433, 30435, 30441, 28667, 30443, 30445, 30449, 30451, 30452, 29636, 30453, 30457, 30459, 30460, 29638, 29656, 29674, 29684, 29690, 29951, 29706, 29708, 29723, 29727, 30461, 30462, 29730, 29730, 29736, 29734, 29754, 30463, 29762, 30464, 29736, 29734, 29748, 29746, 29754, 30465, 29762, 30467, 30469, 30470, 30440, 30438, 30437, 30448, 30471, 30456, 30473, 30440, 30438, 30437, 30448, 30475, 30456, 30477, 30440, 30438, 29843, 30448, 30479, 30480, 30456, 30482, 29951, 29944, 29954, 29951, 29954, 29958, 29962, 29966, 30016, 30250, 30048, 30248, 30246, 30056, 30246, 30240, 30246, 30078, 30095, 30101, 30105, 30110, 30115, 30217, 30217, 30248, 30246, 30233, 30231, 30246, 30240, 30246, 30250, 30248, 30246, 30257, 30263, 30262, 30259, 30263, 30263, 30262, 30266, 30266, 30265, 30272, 30272, 30271, 30320, 30321, 30399, 30408, 30399, 30408, 30399, 30408, 30408, 30285, 30320, 30321, 30382, 30389, 30408, 30407, 30418, 30413, 30422, 30399, 30408, 30413, 30418, 30422, 30408, 30407, 30413, 30418, 30422, 30440, 30438, 30437, 30448, 30490, 30456, 30492, 30489, 30485, 30485, 30489, 30485, 30487, 30489, 14, 15, 30511, 30514, 30518, 30519, 30520, 30527, 30533, 30538, 30544, 30546, 30549, 29769, 30556, 30559, 30561, 29783, 29788, 29793, 30573, 29816, 29821, 30601, 30606, 30608, 30611, 30613, 30617, 30614, 30619, 30624, 30626, 30629, 30631, 30635, 30632, 30637, 30643, 30645, 30648, 30652, 30653, 30655, 30658, 30660, 30663, 30667, 30671, 30668, 30673, 30675, 30677, 30678, 30682, 30685, 30688, 30694, 29978, 29983, 30706, 29999, 30004, 30716, 30720, 30725, 30733, 30039, 30737, 30741, 30744, 30081, 30757, 30763, 30766, 30768, 30771, 30776, 30777, 30779, 30783, 30789, 30790, 30791, 30160, 30165, 30170, 30175, 30194, 30209, 30213, 30835, 30838, 30855, 30278, 30862, 30864, 30866, 30873, 30877, 30881, 30878, 30883, 30888, 30889, 30897, 30903, 30900, 30905, 30913, 30919, 30916, 30921, 30926, 30931, 30933, 30390, 30945, 30947, 30414, 30964, 30968, 30970, 30975, 30496, 30061, 28701, 28723, 29645, 29648, 30062, 29654, 29652, 30072, 29650, 30976, 30503, 28723, 30062, 30061, 28702, 29666, 29663, 29672, 29670, 30072, 29668, 30977, 30510, 29712, 29716, 30512, 29682, 30978, 30517, 29716, 30515, 29688, 30979, 30517, 30980, 29695, 29699, 30522, 29702, 30981, 30525, 30982, 30526, 29712, 29716, 30529, 29719, 30983, 30532, 30984, 30535, 30987, 29728, 30450, 30988, 29729, 29732, 30541, 30989, 30990, 29740, 30991, 29753, 30450, 30993, 29761, 30974, 30458, 29732, 30541, 30995, 30996, 29740, 
29744, 30541, 30997, 30998, 29752, 30999, 29753, 30450, 31001, 29761, 30974, 30458, 29764, 29764, 29766, 30204, 29768, 29776, 30204, 30564, 30567, 30570, 29968, 30204, 29799, 30193, 29801, 29806, 29808, 30204, 29810, 30155, 30204, 29813, 30204, 29815, 29820, 29825, 29828, 30450, 30974, 30458, 30962, 30961, 30959, 31005, 31006, 31007, 30444, 31008, 30446, 30450, 31010, 30454, 30974, 30458, 30962, 30961, 30959, 31012, 31013, 31014, 30444, 31015, 30446, 30450, 31017, 30454, 30974, 30458, 30962, 30961, 29841, 29839, 31019, 31020, 31021, 30444, 31022, 30446, 30450, 31025, 30454, 30974, 30458, 29855, 29853, 30605, 29863, 30610, 29879, 29877, 30623, 29887, 30628, 31027, 29952, 29904, 29902, 30642, 29912, 30647, 29941, 30256, 30850, 31028, 29947, 31029, 30679, 29949, 30256, 30850, 31030, 29952, 31031, 30679, 31032, 31033, 31034, 29968, 30204, 29970, 29974, 29972, 30697, 30700, 29989, 30204, 29991, 29995, 29993, 30709, 30712, 30714, 30718, 31035, 30722, 30721, 31036, 30022, 30027, 30029, 30204, 30031, 30035, 30033, 30736, 31037, 30739, 30742, 31038, 31039, 31040, 31041, 30058, 31042, 31043, 30061, 30062, 28723, 30065, 30068, 28722, 30076, 30074, 30072, 30070, 31044, 30753, 30087, 30759, 31045, 30760, 31046, 31047, 31048, 31049, 30775, 30117, 30781, 30785, 30788, 30139, 30155, 30204, 30147, 30151, 30149, 30153, 30155, 30204, 30193, 30157, 30204, 30159, 30164, 30169, 30174, 30179, 30181, 30204, 30183, 30185, 30189, 30187, 30193, 30818, 30198, 30200, 30204, 30202, 30206, 30204, 30208, 31050, 31051, 30833, 30836, 29440, 30840, 31052, 31053, 30842, 31054, 31055, 31056, 29448, 30252, 29450, 31057, 30242, 31058, 30237, 30244, 31059, 31060, 31061, 30252, 30870, 30295, 30872, 30314, 30256, 30850, 30870, 30295, 30872, 30314, 30258, 31062, 30385, 31063, 31064, 30260, 31065, 31066, 30261, 31067, 31068, 31069, 30264, 31070, 31071, 30274, 31072, 30268, 30385, 31073, 31074, 30274, 31075, 30385, 31076, 30865, 31077, 31078, 31079, 31080, 31081, 31082, 30283, 30860, 31083, 31084, 30870, 30295, 30872, 30314, 30318, 30886, 30385, 30865, 30870, 30295, 30872, 30314, 30318, 30886, 31085, 30385, 31086, 30893, 30326, 30895, 30896, 30909, 30346, 30911, 30912, 30366, 30369, 30924, 30374, 30378, 30929, 31087, 30385, 31088, 30935, 31089, 31090, 31091, 31092, 30424, 31093, 30396, 30939, 30957, 30955, 31094, 31095, 31096, 31097, 30424, 31098, 30420, 30957, 30955, 31099, 31100, 31101, 31102, 30424, 31103, 30420, 30957, 30955, 30962, 30961, 30959, 31104, 31105, 31106, 30444, 31107, 30446, 30450, 31109, 30454, 30974, 30458, 30472, 30474, 30472, 30474, 30466, 30468, 30472, 30474, 30472, 30474, 30476, 30478, 31024, 30483, 31111, 31112, 31113, 31114, 30491, 30493, 31115, 31116, 31117, 30491, 30493, 7, 8, 9, 10, 11, 12, 13, 14, 15, 30616, 30634, 31158, 31160, 31162, 31164, 30670, 30681, 30684, 30687, 31183, 31189, 30762, 30765, 31216, 30880, 31223, 30902, 31227, 30918, 31242, 31243, 31244, 31245, 31246, 31247, 31248, 31249, 31250, 31251, 31252, 31254, 31255, 31256, 31257, 31258, 31259, 31260, 31261, 31262, 31263, 31264, 31266, 31267, 29677, 31268, 31269, 31270, 31272, 29691, 31273, 31274, 31275, 31277, 29691, 31123, 31279, 29692, 31280, 31281, 31282, 31284, 31286, 31287, 29709, 31288, 31289, 31290, 31292, 29724, 31294, 31296, 31240, 31297, 31299, 31300, 31301, 31302, 31127, 31304, 31306, 31307, 29760, 30548, 31309, 31310, 31311, 31312, 31313, 31314, 31127, 31316, 31317, 31318, 31319, 31128, 31321, 31323, 31324, 29760, 30548, 31326, 31327, 31328, 29763, 31329, 29763, 31330, 31331, 31332, 31333, 30554, 31132, 31334, 31335, 31133, 31134, 
30563, 31336, 30566, 31337, 30569, 31338, 31339, 31340, 31341, 31342, 31343, 30574, 31344, 31345, 31346, 31347, 31348, 31349, 31350, 31351, 31352, 30582, 31353, 30585, 31354, 31355, 31240, 31356, 31357, 31358, 31359, 31360, 31361, 31362, 31238, 31365, 31367, 31240, 31368, 31370, 31371, 31372, 31373, 31374, 31375, 31376, 31238, 31379, 31381, 31240, 31382, 31384, 31385, 31386, 31387, 31388, 31389, 31390, 31391, 31238, 31394, 31396, 31240, 31397, 31399, 31400, 31401, 31402, 31403, 29851, 31404, 31405, 29861, 29860, 31406, 31145, 30612, 31407, 31408, 29875, 31409, 31410, 29885, 29884, 31411, 31152, 30630, 31171, 31413, 31414, 31415, 30639, 31416, 31417, 29910, 29909, 31418, 31159, 31165, 31165, 31419, 29940, 31420, 31421, 29950, 31169, 31423, 31425, 31426, 29948, 31427, 31428, 29950, 31171, 31430, 31432, 31436, 31437, 31438, 31439, 31440, 31175, 30696, 31441, 30699, 31442, 31443, 31444, 31445, 31446, 31447, 31178, 30708, 31448, 30711, 31449, 31450, 31181, 31451, 31182, 31453, 31454, 31456, 31457, 31458, 31459, 31460, 31461, 31462, 31184, 30735, 31463, 31186, 31465, 31187, 31466, 31188, 31467, 31469, 31471, 31472, 31474, 31475, 31476, 31477, 31478, 31479, 31480, 31481, 31482, 31483, 31485, 30091, 31486, 31487, 31489, 31193, 31194, 31494, 31495, 31196, 31195, 31197, 31496, 31198, 31497, 31498, 31499, 30144, 30143, 30142, 31500, 31501, 31502, 31503, 31504, 31505, 31506, 31507, 31508, 31509, 31510, 31511, 30801, 31512, 30804, 31513, 30807, 31514, 30810, 31515, 31516, 31517, 31518, 31519, 31520, 31521, 31522, 31523, 30821, 31524, 31525, 31526, 31527, 31528, 31529, 31530, 30829, 30831, 31533, 31209, 31534, 31210, 31535, 31536, 31539, 31540, 31543, 31544, 31545, 31547, 31549, 31550, 31551, 31554, 31555, 31556, 30293, 31557, 31217, 31558, 30884, 31559, 31560, 31561, 31562, 30293, 31563, 31217, 31564, 30884, 31565, 31567, 31568, 31570, 31573, 31574, 31577, 31578, 31580, 31582, 31583, 31584, 31217, 31586, 31211, 31588, 31590, 31212, 31591, 30951, 31593, 31595, 31597, 31598, 31599, 31601, 31602, 30293, 31603, 31217, 31604, 30884, 31605, 31606, 30288, 31214, 31607, 31608, 31609, 31610, 30293, 31611, 31217, 31612, 30884, 31613, 31614, 30319, 31616, 31618, 31619, 30324, 31620, 31621, 31622, 31623, 30344, 31624, 31625, 31626, 30922, 31627, 31628, 31629, 30927, 31630, 31631, 30379, 31233, 31633, 31635, 31235, 31636, 30937, 30942, 31640, 31642, 31643, 31644, 31645, 31235, 31646, 30942, 30951, 31650, 31652, 31653, 31654, 31235, 31655, 30949, 30951, 31659, 31661, 31662, 31663, 31664, 31665, 31666, 31667, 31238, 31670, 31672, 31240, 31673, 31675, 31676, 31677, 31678, 31679, 31680, 31681, 31682, 31683, 31684, 31685, 31686, 31687, 31688, 31689, 31690, 31691, 31696, 31697, 31701, 31702, 8, 9, 10, 11, 12, 13, 14, 15, 31147, 31154, 31167, 31219, 31225, 31229, 31733, 31735, 31737, 31739, 31741, 31744, 31746, 31748, 31750, 31752, 31756, 31757, 31271, 31761, 31762, 31276, 31766, 31767, 31769, 31770, 31283, 31285, 31776, 31777, 31291, 31781, 31293, 31295, 31784, 31298, 31787, 31790, 31305, 31794, 31795, 31308, 31797, 31799, 31802, 31804, 31807, 31322, 31811, 31812, 31325, 31814, 31816, 31818, 31820, 31823, 31824, 31825, 31827, 31828, 31829, 31831, 31833, 31835, 31838, 31840, 31842, 31845, 31847, 31850, 31852, 31855, 31857, 31859, 31862, 31863, 31366, 31866, 31369, 31869, 31871, 31874, 31875, 31380, 31878, 31383, 31881, 31883, 31885, 31887, 31888, 31395, 31891, 31398, 31894, 31898, 31896, 31901, 31902, 31904, 31905, 31908, 31906, 31911, 31912, 31914, 31915, 31916, 31920, 31918, 31923, 31924, 31926, 30651, 30657, 31927, 
30666, 30662, 31928, 30666, 31930, 31931, 31933, 31934, 31424, 31938, 31939, 31941, 31942, 31431, 31719, 31720, 31721, 31945, 31948, 31950, 31951, 31953, 31955, 31958, 31960, 31961, 31963, 31966, 31968, 31969, 31722, 31973, 31976, 31978, 31979, 31981, 31983, 31985, 31990, 31992, 31994, 31996, 31998, 32001, 30084, 31724, 31725, 32005, 32006, 32007, 32009, 32010, 32011, 32013, 32015, 32017, 32018, 32019, 32020, 32023, 32026, 32029, 32032, 32034, 32036, 32038, 32040, 32044, 32046, 32048, 32050, 32053, 32056, 32057, 32059, 32061, 32063, 32065, 32068, 32069, 32072, 32076, 32074, 30876, 32078, 32080, 32081, 32085, 32083, 30876, 32087, 32089, 32090, 32093, 31572, 31576, 31581, 30876, 32102, 32104, 31589, 32107, 32109, 32112, 32117, 32115, 30876, 32119, 32121, 32122, 32124, 32125, 32130, 32128, 32132, 30876, 32134, 32135, 32137, 32141, 32139, 30337, 32146, 32144, 30357, 32150, 32151, 32154, 32155, 32157, 32158, 31634, 32161, 32163, 32164, 32165, 32166, 32168, 32170, 32172, 32173, 32174, 32176, 32178, 32180, 32181, 32182, 32184, 32186, 32189, 32190, 31671, 32193, 31674, 32196, 31732, 31743, 31754, 32000, 32004, 32071, 31988, 32067, 32073, 32000, 32004, 32067, 32073, 32071, 31988, 32064, 32000, 32004, 32092, 32095, 32097, 32101, 32127, 31617, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32230, 32233, 32235, 32238, 31755, 31278, 31768, 31775, 32258, 31793, 31810, 32295, 32297, 32301, 32304, 32308, 32311, 32316, 32320, 31900, 32323, 30618, 32326, 31910, 32329, 30636, 31412, 32333, 31922, 32337, 32338, 32340, 32341, 32343, 30672, 31929, 31422, 31937, 31429, 32354, 32355, 32356, 32370, 32378, 32381, 32384, 32383, 32385, 32386, 32390, 32395, 32416, 32418, 32422, 32423, 30882, 32079, 32428, 32429, 30882, 32088, 32437, 31587, 32445, 32446, 30882, 32120, 32453, 32455, 30882, 32133, 32460, 32461, 30904, 32463, 32464, 30920, 32149, 32153, 31632, 32475, 32481, 32486, 32488, 32492, 32495, 32496, 32497, 32242, 32245, 32251, 32250, 32256, 32254, 32266, 31789, 31791, 32266, 31801, 31803, 31806, 31808, 32275, 32348, 32353, 32055, 31822, 31464, 32280, 32043, 31975, 32283, 32282, 31834, 31832, 31830, 32288, 32043, 31837, 31841, 32043, 31849, 32028, 31844, 31853, 31851, 32348, 32296, 31864, 32303, 31876, 32310, 31889, 32318, 32348, 32179, 31658, 31657, 32162, 31639, 31638, 32348, 32353, 32498, 32499, 31493, 31492, 32014, 32012, 32359, 32043, 31947, 31954, 31952, 32364, 32043, 31957, 31964, 31962, 32055, 32052, 31464, 32375, 32377, 32368, 32367, 32500, 32501, 32502, 32503, 32064, 32504, 32505, 31493, 31492, 32014, 32012, 32373, 32043, 31975, 31980, 31464, 32375, 32377, 32376, 32506, 32507, 32508, 32509, 32510, 32511, 32512, 31493, 31492, 32014, 32012, 32043, 32022, 32025, 32028, 32031, 32039, 32037, 32035, 32033, 32408, 32043, 32042, 32049, 32055, 32052, 31532, 31531, 32415, 32414, 32066, 32067, 32062, 32073, 32471, 32513, 32434, 32514, 32434, 32515, 32435, 32516, 32436, 32440, 32108, 31638, 31639, 32110, 32111, 31658, 31657, 32114, 32191, 32494, 32517, 32451, 32518, 31615, 32471, 32162, 31639, 31638, 32171, 31649, 31648, 32179, 31658, 31657, 32191, 32494, 15, 32528, 32530, 32537, 32538, 32547, 32549, 32551, 32553, 32556, 32336, 32339, 32342, 32562, 32571, 32574, 32578, 32580, 32582, 32583, 32586, 32587, 32589, 32592, 32593, 32454, 32597, 32600, 32601, 32603, 32604, 32241, 32533, 32616, 32244, 32533, 32617, 32249, 32618, 32619, 32253, 32620, 32621, 32536, 32536, 32622, 32623, 32624, 32625, 32626, 32627, 32628, 32629, 32630, 31899, 31909, 32345, 32631, 32564, 32632, 32566, 32633, 32634, 32635, 32636, 32637, 32638, 32639, 32640, 
32641, 32642, 32643, 32644, 32645, 32646, 32647, 32648, 32649, 32650, 32651, 32652, 32653, 31899, 31909, 32345, 32554, 32654, 32539, 32655, 32298, 32656, 32541, 32657, 32305, 32658, 32543, 32659, 32313, 32660, 32545, 32661, 31899, 31909, 32345, 32662, 32554, 32663, 32664, 32665, 32487, 32666, 32667, 32668, 32477, 31921, 32345, 32669, 32564, 32350, 32670, 32566, 31433, 31434, 32673, 32674, 31435, 32577, 32675, 32676, 32677, 32678, 32679, 32680, 32681, 32682, 32683, 32684, 32685, 32686, 32687, 32688, 32689, 32690, 32691, 32692, 32693, 32698, 32694, 32696, 31972, 32701, 32702, 32577, 32703, 32704, 32705, 32706, 32707, 32708, 32709, 32710, 32711, 32712, 32713, 32715, 32720, 32721, 31491, 31490, 32577, 32722, 32723, 32724, 32725, 32726, 32727, 32728, 32729, 32730, 32731, 32732, 32733, 32734, 32735, 32736, 32737, 32738, 32739, 32740, 32741, 32742, 32743, 32744, 32745, 32064, 32746, 32077, 32426, 32747, 32607, 32086, 32432, 32749, 32433, 32751, 32753, 32755, 32756, 32590, 32757, 32758, 32759, 32477, 32760, 32761, 32762, 32763, 32487, 32764, 32765, 32612, 32766, 32118, 32449, 32768, 32131, 32457, 32770, 32142, 32147, 32468, 32466, 32771, 32607, 32772, 32773, 32774, 32477, 32775, 32776, 32777, 32482, 32778, 32779, 32780, 32487, 32489, 32781, 32612, 32782, 9, 10, 11, 12, 13, 14, 15, 32529, 32531, 32814, 32815, 32817, 32818, 32820, 32821, 32823, 32824, 32826, 32827, 32786, 32787, 31903, 32837, 32789, 31913, 32838, 32791, 32839, 32841, 32843, 32844, 32846, 32848, 32850, 32852, 32855, 32859, 32861, 32863, 31903, 32865, 32789, 31913, 32866, 32791, 32867, 32868, 32870, 32872, 32874, 32876, 32878, 32880, 32882, 31903, 32884, 32789, 31913, 32885, 32791, 32886, 32888, 32890, 32892, 32894, 32896, 31925, 32897, 32796, 32560, 32558, 32898, 32900, 32901, 32903, 32572, 32003, 32904, 32905, 32908, 32906, 32909, 32910, 32799, 32912, 32915, 32917, 32920, 32922, 32924, 32926, 32930, 32572, 32003, 32932, 32935, 32936, 32799, 32938, 32942, 32944, 32946, 32572, 32003, 32950, 32951, 32948, 32952, 32953, 32799, 32955, 32957, 32960, 32962, 32964, 32968, 32970, 32972, 32071, 32977, 32975, 32979, 32802, 32980, 32982, 32983, 32804, 32984, 32748, 32986, 32750, 32752, 32754, 32809, 32991, 32993, 32995, 32998, 33000, 33003, 33005, 32807, 33006, 32767, 33008, 32809, 33009, 32769, 33011, 32811, 33012, 32813, 33013, 33014, 33016, 33018, 33020, 33022, 33024, 33026, 33028, 33029, 33031, 32199, 32207, 32835, 32830, 32201, 32835, 32833, 32203, 32215, 32205, 32207, 32209, 32211, 33002, 32213, 33002, 32213, 32215, 14, 15, 33040, 33041, 33043, 33045, 33052, 33053, 33054, 33056, 33057, 33059, 32840, 32842, 33065, 33067, 33068, 33069, 33072, 33074, 33075, 33077, 33079, 33087, 33089, 33090, 33092, 32887, 33099, 33101, 33102, 33103, 32899, 32902, 33108, 33109, 33110, 33116, 33117, 33119, 33123, 33124, 33125, 33126, 33127, 33130, 33131, 33134, 33135, 33136, 33137, 33142, 33143, 33145, 33147, 33151, 33152, 33155, 32981, 33159, 33166, 32990, 33174, 33178, 33182, 33184, 33185, 33015, 33064, 33149, 33049, 33047, 33168, 31694, 33196, 32198, 33197, 32206, 33170, 31695, 33198, 33199, 33200, 33201, 33202, 33203, 33188, 31698, 33192, 31700, 33030, 33204, 32214, 33064, 33149, 33205, 32204, 32873, 33206, 32206, 32877, 33207, 32208, 32881, 33208, 32210, 33095, 31692, 33097, 31693, 33168, 31694, 33170, 31695, 33209, 33210, 32212, 33122, 33132, 33149, 33161, 33163, 33164, 33165, 33168, 31694, 33190, 33170, 31695, 33211, 33212, 32212, 33176, 33180, 33188, 31698, 33190, 31699, 33192, 31700, 33030, 33213, 32214, 13, 14, 15, 33222, 33224, 33232, 33234, 33237, 
33239, 33242, 33243, 33250, 33258, 33139, 33266, 33269, 32672, 32613, 33251, 33229, 33282, 33255, 32719, 32615, 32614, 33265, 33283, 33071, 32858, 33284, 33285, 33219, 33218, 33286, 33287, 33289, 33291, 33292, 33293, 33294, 32200, 33297, 32202, 33226, 33227, 33300, 33301, 33302, 33303, 33304, 33306, 32672, 32671, 33251, 33229, 33307, 33255, 32719, 32718, 33265, 33308, 33071, 32858, 33236, 33310, 33311, 33313, 33314, 33316, 33317, 33319, 33241, 33320, 33321, 33322, 33323, 33246, 33247, 33324, 33325, 33326, 33327, 33330, 32672, 32671, 33251, 33331, 33120, 33118, 33255, 32700, 32699, 33259, 33332, 32941, 33261, 32719, 32718, 33265, 33333, 32967, 33271, 33272, 33273, 33334, 33335, 33336, 33337, 33274, 33275, 33338, 33339, 33340, 33341, 33342, 33345, 33276, 33346, 33277, 33347, 33279, 33278, 33281, 33348, 33349, 33350, 33351, 33352, 33353, 33354, 33356, 33367, 33368, 33372, 33373, 33374, 33375, 33376, 33378, 33379, 33380, 33381, 33382, 33140, 33267, 33384, 33385, 33388, 33389, 33386, 33288, 33290, 33397, 33399, 33225, 33223, 33400, 33401, 33305, 33408, 33409, 33410, 33411, 33413, 33414, 33415, 33416, 33140, 33267, 33418, 33419, 33235, 33233, 33420, 33309, 33312, 33315, 33318, 33240, 33238, 33428, 33433, 33434, 33329, 33440, 33441, 33442, 33444, 33445, 33446, 33447, 33448, 33449, 33128, 33451, 33452, 33453, 33454, 33455, 33140, 33457, 33267, 33458, 33459, 33460, 33465, 33466, 33344, 33473, 33475, 33477, 33478, 33479, 33355, 33483, 33391, 33395, 33405, 33483, 33403, 33483, 33432, 33430, 33438, 33483, 33436, 33471, 33483, 33468, 33485, 33483, 33481, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33490, 33491, 33114, 33494, 33496, 33500, 33501, 33502, 33488, 33504, 33296, 33299, 33511, 33512, 33516, 33114, 33519, 33521, 33524, 33525, 33526, 33528, 33529, 33535, 33536, 33488, 33541, 33114, 33443, 33547, 33550, 33450, 33553, 33556, 33558, 33456, 33567, 33514, 33571, 33572, 33508, 33507, 33573, 33514, 33515, 33574, 33575, 33576, 33539, 33534, 33533, 33532, 33531, 33539, 33577, 33578, 33579, 33539, 33540, 33580, 33581, 33582, 33462, 33461, 33560, 33463, 33563, 33464, 33564, 33583, 33584, 33585, 33476, 33474, 33570, 33586, 33587, 33588, 14, 15, 33602, 33604, 33499, 33383, 33600, 33608, 33506, 33612, 33615, 33523, 33417, 33600, 33621, 33623, 33625, 33627, 33628, 33549, 33555, 33635, 33600, 33495, 33637, 33610, 33640, 33641, 33611, 33643, 33644, 33646, 33520, 33648, 33649, 33650, 33651, 33652, 33653, 33654, 33657, 33658, 33660, 33552, 33662, 33663, 33664, 33665, 33666, 33667, 33668, 33670, 33569, 33672, 33673, 33674, 33676, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33493, 33683, 33684, 33518, 33690, 33691, 33543, 33700, 33701, 33682, 33686, 33703, 33706, 33639, 33513, 33708, 33710, 33689, 33530, 33712, 33714, 33537, 33717, 33538, 33719, 33546, 33697, 33721, 33698, 33722, 33724, 33726, 33728, 33730, 33733, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33744, 33753, 33746, 33754, 33755, 33705, 33758, 33759, 33747, 33761, 33749, 33762, 33763, 33765, 33767, 33768, 33750, 33769, 33770, 33772, 33751, 33773, 33776, 33777, 33778, 9, 10, 11, 12, 13, 14, 15, 33792, 33794, 33702, 33796, 33797, 33707, 33800, 33802, 33711, 33716, 33718, 33808, 33812, 33813, 33815, 33771, 33771, 33771, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33827, 33825, 33839, 33752, 33799, 33831, 33840, 33760, 33807, 33766, 33804, 33836, 33841, 33809, 33816, 33814, 33857, 33859, 33860, 33856, 33861, 33863, 33864, 33865, 33866, 33867, 33869, 33870, 33871, 13, 14, 15, 33872, 33875, 33876, 33878, 33881, 33883, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33888, 33874, 
33890, 33891, 33892, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33893, 33907, 33905, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33920, 33921, 33922, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33936, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33952, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
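// NOTE: h_C below is a large block of hardcoded host-side integer data,
// apparently machine-generated like the array above. Its exact role is
// not documented in this file; it is presumably index or reference data
// used for testing/verification (an assumption, not confirmed by the source).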
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 48, 50, 52, 54, 57, 59, 61, 63, 65, 67, 69, 71, 74, 76, 78, 80, 82, 84, 86, 88, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 487, 489, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 513, 515, 517, 519, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 683, 685, 687, 689, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852, 854, 856, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 948, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1039, 1041, 1046, 1048, 1051, 1053, 1055, 1057, 1060, 1062, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1089, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, 1135, 1138, 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1154, 1156, 1159, 1161, 1164, 1166, 1168, 1170, 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1206, 1208, 1211, 1213, 1215, 1217, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1236, 1238, 1240, 1242, 1245, 1247, 1249, 1251, 1254, 1256, 1259, 1261, 1264, 1266, 1269, 1271, 1274, 1276, 1278, 1280, 1282, 1284, 1287, 1289, 1292, 1294, 1297, 1299, 1302, 1304, 1307, 1309, 1312, 1314, 1317, 1319, 1322, 1324, 1327, 1329, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1349, 1351, 1354, 1356, 1362, 1364, 1366, 1368, 1370, 1372, 1375, 1377, 1379, 1381, 1383, 1385, 1388, 1390, 1392, 1394, 1397, 1399, 1402, 1404, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1431, 1433, 1436, 1438, 1440, 
1442, 1444, 1446, 1449, 1451, 1454, 1456, 1458, 1460, 1462, 1464, 1467, 1469, 1472, 1474, 1477, 1479, 1482, 1484, 1487, 1489, 1492, 1494, 1497, 1499, 1502, 1504, 1507, 1509, 1512, 1514, 1517, 1519, 1521, 1523, 1526, 1528, 1531, 1533, 1539, 1541, 1543, 1545, 1547, 1549, 1552, 1554, 1557, 1559, 1562, 1564, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1640, 1642, 1644, 1646, 1649, 1651, 1653, 1655, 1657, 1659, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1693, 1695, 1697, 1699, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1727, 1729, 1731, 1733, 1735, 1737, 1739, 1741, 1743, 1745, 1747, 1749, 1751, 1753, 1755, 1757, 1759, 1761, 1763, 1765, 1767, 1769, 1771, 1773, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1791, 1793, 1795, 1797, 1799, 1801, 1803, 1805, 1807, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868, 1870, 1872, 1874, 1876, 1878, 1881, 1883, 1885, 1887, 1889, 1891, 1894, 1896, 1899, 1901, 1904, 1906, 1909, 1911, 1913, 1915, 1918, 1920, 1922, 1924, 1927, 1929, 1933, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1951, 1953, 1955, 1957, 1959, 1961, 1963, 1965, 1967, 1969, 1971, 1974, 1976, 1979, 1981, 1984, 1986, 1989, 1991, 1994, 1996, 1998, 2000, 2003, 2005, 2008, 2010, 2015, 2017, 2020, 2022, 2025, 2027, 2030, 2032, 2035, 2037, 2040, 2042, 2045, 2047, 2049, 2051, 2053, 2055, 2058, 2060, 2063, 2065, 2067, 2069, 2072, 2074, 2076, 2078, 2080, 2082, 2084, 2086, 2088, 2090, 2093, 2095, 2100, 2102, 2105, 2107, 2110, 2112, 2114, 2116, 2118, 2120, 2122, 2124, 2126, 2128, 2130, 2132, 2134, 2136, 2138, 2140, 2142, 2144, 2146, 2148, 2150, 2152, 2154, 2156, 2158, 2160, 2162, 2164, 2166, 2168, 2171, 2173, 2175, 2177, 2180, 2182, 2185, 2187, 2193, 2195, 2197, 2199, 2201, 2203, 2206, 2208, 2211, 2213, 2216, 2218, 2221, 2223, 2225, 2227, 2229, 2231, 2234, 2236, 2239, 2241, 2244, 2246, 2249, 2251, 2254, 2256, 2263, 2265, 2267, 2269, 2271, 2273, 2275, 2277, 2280, 2282, 2285, 2287, 2293, 2295, 2297, 2299, 2302, 2304, 2307, 2309, 2318, 2320, 2323, 2325, 2328, 2330, 2333, 2335, 2338, 2340, 2344, 2346, 2348, 2350, 2353, 2355, 2357, 2359, 2361, 2363, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 2397, 2399, 2401, 2403, 2405, 2407, 2409, 2411, 2413, 2415, 2417, 2419, 2421, 2423, 2425, 2427, 2429, 2431, 2433, 2435, 2437, 2439, 2442, 2444, 2446, 2448, 2451, 2453, 2455, 2457, 2459, 2461, 2463, 2465, 2467, 2469, 2471, 2473, 2475, 2477, 2480, 2482, 2484, 2486, 2488, 2490, 2492, 2494, 2496, 2498, 2500, 2502, 2504, 2506, 2508, 2510, 2513, 2515, 2517, 2519, 2521, 2523, 2525, 2527, 2529, 2531, 2534, 2536, 2538, 2540, 2542, 2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558, 2560, 2562, 2564, 2567, 2569, 2571, 2573, 2575, 2577, 2579, 2581, 2583, 2585, 2587, 2589, 2591, 2593, 2595, 2597, 2599, 2601, 2603, 2605, 2607, 2609, 2611, 2613, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2643, 2645, 2647, 2649, 2651, 2653, 2655, 2657, 2659, 2661, 2663, 2665, 2667, 2669, 2671, 2673, 2675, 2677, 2679, 2681, 2683, 2685, 2687, 2689, 2691, 2693, 2695, 2697, 2700, 2702, 2704, 2706, 2709, 2711, 2713, 2715, 2717, 2719, 2721, 2723, 2725, 2727, 2730, 2732, 2735, 2737, 2739, 
2741, 2743, 2745, 2747, 2749, 2751, 2753, 2755, 2757, 2760, 2762, 2764, 2766, 2768, 2770, 2772, 2774, 2777, 2779, 2781, 2783, 2786, 2788, 2790, 2792, 2794, 2796, 2798, 2800, 2802, 2804, 2806, 2808, 2811, 2813, 2815, 2817, 2820, 2822, 2824, 2826, 2828, 2830, 2832, 2834, 2836, 2838, 2841, 2843, 2845, 2847, 2849, 2851, 2853, 2855, 2857, 2859, 2862, 2864, 2866, 2868, 2870, 2872, 2874, 2876, 2878, 2880, 2882, 2884, 2886, 2888, 2890, 2892, 2894, 2896, 2899, 2901, 2904, 2906, 2909, 2911, 2914, 2916, 2918, 2920, 2922, 2924, 2927, 2929, 2932, 2934, 2936, 2938, 2941, 2943, 2946, 2948, 2951, 2953, 2955, 2957, 2959, 2961, 2964, 2966, 2968, 2970, 2972, 2974, 2977, 2979, 2981, 2983, 2985, 2987, 2990, 2992, 2995, 2997, 3000, 3002, 3006, 3008, 3010, 3012, 3016, 3018, 3020, 3022, 3024, 3026, 3029, 3031, 3034, 3036, 3039, 3041, 3044, 3046, 3049, 3051, 3054, 3056, 3062, 3064, 3066, 3068, 3070, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3140, 3142, 3146, 3148, 3151, 3153, 3156, 3158, 3161, 3163, 3166, 3168, 3171, 3173, 3176, 3178, 3181, 3183, 3185, 3187, 3189, 3191, 3194, 3196, 3199, 3201, 3204, 3206, 3209, 3211, 3213, 3215, 3217, 3219, 3222, 3224, 3227, 3229, 3232, 3234, 3237, 3239, 3242, 3244, 3247, 3249, 3252, 3254, 3257, 3259, 3262, 3264, 3270, 3272, 3275, 3277, 3280, 3282, 3285, 3287, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3317, 3319, 3321, 3323, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3351, 3353, 3356, 3358, 3360, 3362, 3365, 3367, 3370, 3372, 3375, 3377, 3380, 3382, 3385, 3387, 3390, 3392, 3395, 3397, 3400, 3402, 3405, 3407, 3410, 3412, 3415, 3417, 3420, 3422, 3425, 3427, 3429, 3431, 3434, 3436, 3438, 3440, 3443, 3445, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3470, 3472, 3474, 3476, 3481, 3483, 3485, 3487, 3489, 3491, 3494, 3496, 3499, 3501, 3504, 3506, 3509, 3511, 3513, 3515, 3517, 3519, 3522, 3524, 3526, 3528, 3531, 3533, 3536, 3538, 3543, 3545, 3547, 3549, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3581, 3583, 3585, 3587, 3589, 3591, 3593, 3595, 3597, 3599, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3624, 3626, 3629, 3631, 3633, 3635, 3638, 3640, 3642, 3644, 3648, 3650, 3652, 3654, 3657, 3659, 3661, 3663, 3666, 3668, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3753, 3755, 3758, 3760, 3763, 3765, 3768, 3770, 3773, 3775, 3778, 3780, 3783, 3785, 3788, 3790, 3793, 3795, 3798, 3800, 3803, 3805, 3809, 3811, 3813, 3815, 3818, 3820, 3823, 3825, 3828, 3830, 3833, 3835, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3855, 3857, 3859, 3861, 3864, 3866, 3870, 3872, 3874, 3876, 3879, 3881, 3884, 3886, 3889, 3891, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918, 3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950, 3952, 3954, 3956, 3959, 3961, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982, 3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014, 4017, 4019, 4021, 4023, 4025, 4027, 4029, 4031, 4033, 4035, 4037, 4039, 4041, 4043, 4045, 4047, 4049, 4051, 4053, 4055, 4057, 4059, 4061, 4063, 4065, 4067, 4069, 4071, 4073, 
4075, 4077, 4079, 4081, 4083, 4085, 4087, 4089, 4091, 4093, 4095, 4097, 4099, 4101, 4103, 4105, 4107, 4109, 4111, 4113, 4115, 4117, 4119, 4121, 4123, 4125, 4127, 4129, 4131, 4133, 4135, 4137, 4139, 4141, 4143, 4145, 4147, 4149, 4151, 4153, 4155, 4157, 4159, 4161, 4163, 4165, 4167, 4169, 4171, 4173, 4175, 4177, 4179, 4182, 4184, 4186, 4188, 4190, 4192, 4194, 4196, 4198, 4200, 4202, 4204, 4206, 4208, 4210, 4212, 4214, 4216, 4218, 4220, 4222, 4224, 4226, 4228, 4230, 4232, 4234, 4236, 4238, 4240, 4242, 4244, 4247, 4249, 4251, 4253, 4255, 4257, 4259, 4261, 4263, 4265, 4267, 4269, 4271, 4273, 4275, 4277, 4279, 4281, 4283, 4285, 4287, 4289, 4291, 4293, 4295, 4297, 4299, 4301, 4303, 4305, 4307, 4309, 4311, 4313, 4315, 4317, 4319, 4321, 4323, 4325, 4327, 4329, 4331, 4333, 4336, 4338, 4340, 4342, 4345, 4347, 4349, 4351, 4354, 4356, 4358, 4360, 4362, 4364, 4367, 4369, 4372, 4374, 4377, 4379, 4381, 4383, 4385, 4387, 4390, 4392, 4394, 4396, 4398, 4400, 4403, 4405, 4408, 4410, 4413, 4415, 4418, 4420, 4422, 4424, 4434, 4436, 4439, 4441, 4447, 4449, 4451, 4453, 4455, 4457, 4460, 4462, 4465, 4467, 4470, 4472, 4475, 4477, 4480, 4482, 4485, 4487, 4490, 4492, 4495, 4497, 4500, 4502, 4505, 4507, 4510, 4512, 4515, 4517, 4519, 4521, 4523, 4525, 4528, 4530, 4533, 4535, 4538, 4540, 4543, 4545, 4547, 4549, 4551, 4553, 4555, 4557, 4559, 4561, 4563, 4565, 4567, 4569, 4571, 4573, 4575, 4577, 4579, 4581, 4583, 4585, 4587, 4589, 4591, 4593, 4595, 4597, 4599, 4601, 4603, 4605, 4608, 4610, 4612, 4614, 4616, 4618, 4620, 4622, 4625, 4627, 4629, 4631, 4633, 4635, 4637, 4639, 4643, 4645, 4647, 4649, 4652, 4654, 4657, 4659, 4665, 4667, 4670, 4672, 4678, 4680, 4683, 4685, 4691, 4693, 4695, 4697, 4700, 4702, 4704, 4706, 4710, 4712, 4714, 4716, 4718, 4720, 4722, 4724, 4726, 4728, 4730, 4732, 4735, 4737, 4739, 4741, 4743, 4745, 4747, 4749, 4753, 4755, 4757, 4759, 4761, 4763, 4765, 4767, 4769, 4771, 4773, 4775, 4779, 4781, 4783, 4785, 4788, 4790, 4793, 4795, 4801, 4803, 4806, 4808, 4811, 4813, 4816, 4818, 4820, 4822, 4824, 4826, 4829, 4831, 4833, 4835, 4837, 4839, 4842, 4844, 4847, 4849, 4852, 4854, 4857, 4859, 4861, 4863, 4865, 4867, 4870, 4872, 4874, 4876, 4878, 4880, 4882, 4884, 4886, 4888, 4891, 4893, 4896, 4898, 4901, 4903, 4905, 4907, 4909, 4911, 4913, 4915, 4917, 4919, 4921, 4923, 4925, 4927, 4929, 4931, 4933, 4935, 4937, 4939, 4941, 4943, 4945, 4947, 4949, 4951, 4953, 4955, 4957, 4959, 4962, 4964, 4966, 4968, 4970, 4972, 4974, 4976, 4978, 4980, 4982, 4984, 4987, 4989, 4991, 4993, 4995, 4997, 5000, 5002, 5005, 5007, 5010, 5012, 5018, 5020, 5023, 5025, 5028, 5030, 5032, 5034, 5036, 5038, 5041, 5043, 5049, 5051, 5054, 5056, 5058, 5060, 5062, 5064, 5066, 5068, 5070, 5072, 5074, 5076, 5078, 5080, 5082, 5084, 5086, 5088, 5090, 5092, 5094, 5096, 5098, 5100, 5102, 5104, 5106, 5108, 5110, 5112, 5115, 5117, 5121, 5123, 5125, 5127, 5129, 5131, 5134, 5136, 5139, 5141, 5144, 5146, 5149, 5151, 5153, 5155, 5157, 5159, 5162, 5164, 5166, 5168, 5170, 5172, 5175, 5177, 5180, 5182, 5185, 5187, 5190, 5192, 5194, 5196, 5199, 5201, 5203, 5205, 5209, 5211, 5213, 5215, 5218, 5220, 5222, 5224, 5227, 5229, 5231, 5233, 5236, 5238, 5241, 5243, 5245, 5247, 5249, 5251, 5253, 5255, 5257, 5259, 5261, 5263, 5265, 5267, 5269, 5271, 5273, 5275, 5277, 5279, 5281, 5283, 5286, 5288, 5290, 5292, 5295, 5297, 5299, 5301, 5303, 5305, 5307, 5309, 5312, 5314, 5316, 5318, 5321, 5323, 5325, 5327, 5329, 5331, 5333, 5335, 5337, 5339, 5341, 5343, 5346, 5348, 5350, 5352, 5354, 5356, 5358, 5360, 5362, 5364, 5366, 5368, 5370, 5372, 5375, 5377, 5379, 5381, 5383, 5385, 5388, 
9779, 9780, 9781, 9782, 9783, 9784, 9785, 9786, 9796, 9797, 9798, 25714, 25717, 25719, 25721, 25724, 9812, 9813, 9814, 9815, 9816, 9817, 9818, 9819, 9820, 9821, 9822, 9823, 9824, 9825, 9826, 9829, 9830, 9833, 9834, 9835, 25747, 25749, 9843, 25752, 9845, 9846, 9850, 9851, 9853, 9854, 9855, 9856, 9857, 9858, 9859, 9860, 9861, 9862, 9863, 9864, 9865, 9866, 9867, 9868, 9869, 9870, 9871, 9872, 9873, 9874, 9875, 9877, 9878, 9879, 9880, 9881, 9882, 9883, 25795, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 25804, 25807, 25809, 25812, 9904, 9905, 25817, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 25828, 9919, 9920, 9921, 9922, 9923, 9926, 9927, 9928, 9929, 9930, 9931, 25841, 9934, 9935, 9936, 9937, 9938, 9941, 9942, 9943, 9944, 9945, 9946, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 9956, 9957, 9958, 9960, 9961, 9962, 9963, 9965, 9967, 9968, 9969, 9970, 9971, 9972, 9989, 9990, 9994, 9995, 9996, 9997, 9998, 10000, 10001, 10010, 10011, 10012, 25895, 10015, 10016, 10017, 10018, 10019, 10021, 10022, 10023, 10025, 10026, 10028, 10029, 10030, 10031, 10032, 10033, 25914, 10037, 10039, 10040, 10041, 10043, 10044, 10045, 10052, 10054, 10055, 10056, 25934, 10060, 25938, 10064, 25942, 10067, 10069, 10072, 10073, 10074, 10075, 10076, 10078, 10079, 10080, 10083, 10084, 10085, 10086, 10087, 10088, 10090, 25965, 10094, 25969, 10098, 10099, 10100, 10103, 10104, 10105, 10106, 10108, 10109, 10110, 10113, 10114, 10115, 10116, 10117, 10118, 10119, 10120, 10122, 10123, 10125, 10126, 10127, 10128, 10129, 26001, 10132, 10133, 10134, 10135, 10136, 10138, 10139, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 10148, 26019, 10152, 10153, 10154, 10155, 10156, 10157, 10158, 10159, 10160, 10161, 10162, 26033, 10165, 10166, 10168, 10169, 10171, 10172, 10173, 10174, 10175, 26044, 10178, 10179, 10180, 10181, 10182, 10184, 10185, 10186, 10188, 10189, 10191, 10192, 10193, 10194, 10195, 10196, 26063, 10200, 10203, 10204, 10205, 10206, 10207, 10208, 10209, 10210, 10211, 10212, 10213, 10214, 10215, 10216, 10217, 10218, 10219, 10220, 10221, 10222, 10224, 10226, 26093, 10229, 10230, 10231, 10232, 10233, 10234, 10235, 10236, 10237, 10238, 10239, 10240, 10241, 10242, 10243, 10244, 10245, 10246, 10247, 10248, 10249, 10250, 10251, 10252, 10253, 10255, 10256, 10257, 10258, 10259, 10260, 10261, 10262, 10263, 10264, 10265, 10266, 10268, 10269, 10270, 10271, 10272, 10273, 10274, 10275, 10276, 10277, 10278, 10279, 10280, 10281, 10282, 10284, 10285, 10286, 10287, 10288, 10289, 10290, 10291, 10292, 10293, 10294, 10295, 10296, 10297, 10298, 10299, 10300, 10301, 10304, 10305, 10306, 10307, 10308, 10309, 10310, 10311, 10312, 10313, 10314, 10315, 10316, 10317, 10318, 10319, 10320, 10321, 10322, 10323, 10324, 26190, 10327, 10328, 10329, 10330, 10331, 10333, 10334, 10335, 10336, 10337, 10338, 10339, 26205, 10342, 10343, 10344, 10345, 10346, 10348, 10349, 10350, 10351, 10352, 10353, 10354, 10356, 10357, 10359, 10360, 10361, 10362, 10363, 10364, 10365, 10366, 10367, 10368, 10369, 10370, 10371, 10372, 10373, 10374, 10375, 10376, 10377, 10378, 10380, 10382, 10383, 10384, 10385, 10386, 10387, 10391, 26254, 10392, 10393, 10394, 10395, 10396, 10397, 10398, 10399, 10400, 10401, 10402, 10403, 10404, 10405, 10406, 10407, 10408, 10411, 10412, 10415, 10416, 10417, 10418, 10419, 10420, 10421, 10422, 10423, 10424, 26286, 10428, 10429, 10430, 10431, 10432, 10434, 10435, 10436, 10437, 10438, 10439, 26301, 10443, 10444, 10445, 10446, 10447, 10449, 10450, 10452, 10453, 10454, 10455, 10456, 10458, 10464, 10466, 10467, 10468, 10470, 10471, 10473, 10474, 10475, 10476, 
10477, 26334, 10481, 10482, 10483, 26339, 10487, 10488, 10489, 26344, 10493, 10494, 10495, 26349, 10498, 10500, 10501, 10502, 10503, 10504, 10506, 10507, 10509, 10510, 10511, 10513, 10514, 10515, 10517, 10519, 10521, 10522, 10523, 10524, 10525, 26379, 10528, 10529, 10531, 10532, 10533, 10534, 10535, 10536, 10537, 10538, 10539, 10540, 10541, 10542, 10543, 10544, 10545, 10546, 10547, 10548, 10549, 10550, 10551, 10552, 10554, 10555, 10556, 10557, 10558, 10559, 10560, 10561, 10562, 10563, 10564, 10565, 10566, 10567, 10568, 10570, 10571, 10572, 10573, 10574, 10575, 10576, 10577, 10578, 10579, 10580, 10581, 10582, 10583, 10584, 10585, 10586, 10587, 10588, 10589, 10590, 10591, 10592, 10593, 10594, 10595, 10596, 10597, 10598, 10599, 10600, 10601, 10602, 10603, 10604, 10605, 10607, 10608, 10610, 10611, 10612, 10614, 10615, 10617, 10618, 10619, 10620, 10621, 10622, 10623, 10624, 10625, 10626, 10627, 10628, 10629, 10630, 10631, 10632, 10633, 10634, 10635, 10636, 10637, 10638, 10639, 10640, 10641, 10642, 10643, 10644, 10645, 10646, 10647, 10648, 10649, 10650, 10651, 10652, 10653, 26507, 10656, 10657, 10658, 10659, 10660, 10661, 10663, 10664, 10665, 10666, 10667, 10668, 10669, 10670, 10671, 10672, 10673, 10674, 10675, 10676, 10677, 10678, 10679, 10680, 10681, 10682, 10683, 10684, 10685, 10686, 26541, 10691, 10692, 10693, 10694, 10695, 26549, 26551, 10700, 10701, 10702, 26556, 26558, 26561, 10711, 10712, 10713, 10714, 10715, 26569, 10718, 10719, 26573, 26576, 10725, 10726, 10727, 10728, 10729, 10730, 10731, 26585, 26587, 10736, 10737, 10738, 10740, 10741, 10742, 10743, 10744, 26598, 10747, 10748, 10749, 10751, 10752, 10753, 10754, 10755, 10756, 10757, 26611, 26613, 10762, 10763, 10764, 10766, 10767, 26621, 10770, 10771, 10772, 10773, 10774, 10776, 10777, 10778, 26632, 26634, 26637, 10787, 10788, 10790, 10791, 10792, 10793, 26647, 26649, 10798, 10799, 26653, 26656, 26658, 26661, 10811, 10812, 10813, 26668, 10818, 10819, 10820, 10821, 10822, 10823, 10824, 10825, 10826, 10827, 26681, 10831, 10832, 10833, 10834, 10835, 10837, 10838, 26692, 26695, 26697, 26699, 26702, 10852, 10853, 10854, 10855, 10856, 26710, 26712, 10861, 10862, 10863, 26717, 26719, 10868, 10871, 26722, 10872, 10873, 10874, 10875, 10876, 10877, 10878, 10879, 10880, 10881, 10882, 26736, 10885, 10886, 10887, 10888, 10889, 10890, 10891, 10892, 10893, 10894, 26748, 10897, 10899, 10900, 10901, 10902, 10903, 10904, 26758, 10908, 10909, 10910, 10911, 10912, 10914, 10915, 10916, 10917, 10918, 10919, 26773, 10923, 10924, 10925, 10926, 10927, 10929, 10930, 10931, 10934, 10935, 10937, 10938, 10940, 10941, 10942, 10944, 10946, 10947, 10949, 10950, 10951, 10952, 10953, 10954, 10955, 10956, 10960, 10961, 10962, 10964, 10965, 10966, 10967, 10971, 10972, 10974, 10975, 10976, 10977, 10979, 10980, 10981, 10982, 10983, 10986, 10988, 10989, 10990, 10992, 10993, 10994, 10995, 10996, 10998, 10999, 11000, 11002, 26855, 11005, 11006, 11007, 11008, 11009, 26862, 11012, 26865, 26867, 26869, 26871, 26873, 11023, 11024, 11025, 11026, 11027, 11028, 11029, 11030, 11031, 11032, 11033, 11034, 11035, 26888, 11038, 11039, 11040, 11041, 26894, 11044, 11045, 11046, 11048, 11049, 11051, 11052, 11054, 11055, 11057, 11058, 11060, 11061, 26914, 11064, 11066, 11067, 11068, 11069, 11070, 11071, 26924, 11074, 11075, 11076, 11077, 11078, 11079, 11080, 11081, 11082, 11083, 11084, 26937, 26939, 11089, 11090, 11091, 11092, 11093, 11094, 11095, 11096, 11097, 26950, 11100, 11101, 11102, 11103, 11105, 11107, 11108, 11109, 11110, 11111, 26965, 11115, 26968, 11118, 11119, 11120, 11121, 
11122, 26975, 11125, 26978, 26980, 11130, 11131, 11132, 11133, 11134, 11136, 11137, 11138, 11139, 11140, 11141, 11143, 26996, 11147, 27000, 11150, 11151, 11153, 11154, 11155, 11156, 11157, 11159, 11160, 11161, 11164, 11167, 11168, 11169, 11170, 11171, 11172, 11173, 11174, 11175, 11176, 11177, 27028, 11180, 11181, 11183, 11184, 11186, 11187, 11188, 11189, 27038, 11192, 11193, 11194, 11196, 11197, 11199, 11200, 11201, 11202, 11203, 11204, 27051, 11208, 11209, 11210, 11211, 11212, 11214, 11216, 11217, 11218, 11219, 11220, 27068, 11224, 11225, 11226, 27073, 11229, 27076, 11232, 11233, 11234, 11235, 11237, 11239, 11240, 11241, 11242, 11243, 27091, 11247, 27094, 11250, 11251, 11252, 11253, 11254, 27101, 11257, 27104, 27106, 11262, 11263, 11264, 11265, 11266, 11267, 11268, 11269, 11270, 11271, 11272, 27119, 11275, 11277, 11278, 11279, 11280, 11281, 11282, 11283, 11284, 11286, 11287, 27134, 11290, 11291, 11292, 11294, 11295, 11296, 27143, 11299, 11300, 11301, 11302, 11303, 11305, 11307, 11308, 11309, 11310, 11311, 11313, 27160, 27162, 11322, 11323, 11324, 11325, 11326, 27169, 27171, 11342, 11343, 11345, 11347, 11348, 11349, 11351, 11352, 11353, 11354, 11355, 11356, 11357, 11358, 11359, 27191, 11363, 11364, 11365, 11366, 11367, 27199, 11370, 27202, 11373, 11374, 11375, 27207, 11378, 11379, 11380, 11381, 11382, 27214, 27216, 27218, 11389, 11391, 11392, 11393, 27225, 11396, 11397, 11398, 11399, 11400, 27233, 27236, 11422, 11423, 11425, 11427, 11428, 11429, 11431, 11433, 11434, 27251, 11437, 11438, 11440, 11441, 11442, 11443, 11444, 11445, 11446, 11447, 11448, 11449, 11450, 27267, 11453, 11454, 11455, 11456, 11457, 11458, 11459, 11460, 11461, 11462, 27279, 11465, 11467, 11468, 11469, 11470, 11471, 11472, 11473, 11475, 11476, 11490, 11491, 11492, 11493, 27298, 11496, 11497, 11498, 11499, 11500, 11501, 27307, 11505, 11506, 11507, 11508, 11509, 11511, 11544, 11563, 11679, 11680, 24347, 11686, 11687, 11754, 11755, 11756, 11757, 11776, 11777, 11778, 11781, 11786, 11787, 25889, 25888, 25926, 25925, 12026, 12027, 26784, 26809, 26820, 12389, 12394, 12395, 12396, 12424, 12425, 12426, 12427, 27348, 12455, 12456, 12457, 27354, 12470, 12471, 12472, 7, 8, 9, 10, 11, 12, 13, 14, 15, 27366, 27368, 27374, 27377, 27379, 27382, 27386, 27390, 27397, 27401, 27403, 27406, 27411, 25715, 25722, 25725, 27421, 27425, 27427, 27429, 27433, 27435, 27439, 27442, 27444, 27446, 27448, 27452, 27454, 27458, 27462, 27464, 27467, 27472, 27474, 27477, 27479, 27482, 25805, 25813, 25818, 27491, 27493, 27497, 27500, 27502, 27505, 27507, 27509, 27512, 27514, 25863, 25868, 27545, 27550, 27554, 27559, 27562, 27565, 27567, 27570, 27573, 27574, 27577, 27582, 27585, 27587, 27591, 27593, 27596, 27598, 27604, 27605, 27607, 25970, 25974, 27613, 27616, 27618, 27624, 27626, 27628, 27630, 27633, 27638, 27641, 27643, 27645, 27648, 27651, 27655, 27657, 27659, 27661, 27664, 27666, 27668, 27671, 27676, 27679, 27682, 27684, 27687, 27690, 27692, 27695, 27698, 27701, 27715, 27719, 27724, 27727, 27729, 27731, 27734, 27737, 27742, 27744, 27746, 27749, 27754, 27756, 27758, 27761, 27764, 27769, 27771, 27773, 27777, 27779, 27783, 27785, 27787, 27791, 27793, 27797, 27801, 27804, 27807, 27809, 27811, 27814, 27817, 27820, 27822, 27824, 27832, 27834, 27837, 27841, 27843, 27846, 27848, 27851, 27857, 27860, 27865, 27868, 27870, 27874, 27880, 27887, 27890, 26287, 27893, 27895, 27897, 27902, 26302, 27905, 27907, 27913, 27927, 27931, 27935, 27939, 27946, 27948, 27961, 27964, 27971, 27973, 27975, 27977, 27979, 27982, 27985, 27990, 27992, 27994, 27997, 28000, 28005, 28007, 
28009, 28011, 28014, 28020, 28022, 28024, 28026, 28029, 28039, 28044, 28046, 28048, 28050, 28052, 28055, 28063, 28065, 28067, 28069, 28072, 28080, 28085, 28087, 26514, 28091, 28094, 28098, 28101, 28105, 28108, 28112, 26542, 28117, 28122, 26562, 28130, 28134, 26574, 28139, 28143, 28147, 28150, 28153, 28156, 28160, 28164, 28168, 28171, 28174, 28176, 28179, 26638, 28188, 28193, 26654, 26662, 28199, 26669, 28205, 28207, 28211, 26682, 28214, 28216, 28219, 26693, 26700, 26703, 28228, 28233, 28238, 28244, 28248, 28250, 28254, 28258, 28260, 28266, 28269, 26759, 28272, 28274, 28276, 28281, 26774, 28284, 28286, 28289, 28294, 28296, 28307, 28311, 28315, 28321, 28324, 28337, 28343, 28345, 28360, 28368, 28375, 28377, 26907, 26910, 28384, 28395, 28400, 28403, 28408, 28410, 28414, 28418, 28432, 28434, 28440, 28448, 28450, 28452, 28456, 28458, 28461, 28463, 28467, 28471, 28475, 28478, 28480, 28482, 28487, 28490, 28492, 28495, 28498, 28500, 28512, 28517, 28531, 28533, 28539, 28544, 28547, 28560, 28566, 28571, 27149, 28576, 28578, 28586, 27174, 28593, 28595, 28603, 27192, 28608, 28615, 28620, 27220, 27239, 28641, 27245, 28645, 28649, 28665, 28670, 28675, 28678, 28681, 28683, 28693, 28696, 28698, 28700, 25648, 27361, 27363, 27916, 27951, 27956, 27920, 27957, 27956, 27922, 27923, 27928, 27936, 27940, 26382, 27966, 27943, 27916, 27951, 27956, 27920, 27957, 27956, 27922, 27923, 27928, 27936, 27940, 26382, 27966, 27968, 26066, 28339, 25662, 25661, 27372, 27387, 28564, 25665, 25664, 27372, 28564, 27375, 26066, 28425, 28423, 26067, 28339, 28347, 27707, 27705, 27384, 28558, 27387, 27380, 27383, 26066, 28425, 28423, 26067, 28339, 28347, 27707, 27705, 27384, 28558, 27387, 27391, 27388, 27391, 25687, 25687, 25688, 28386, 28387, 27392, 28391, 28600, 28662, 28660, 28671, 28386, 28387, 28389, 28391, 28600, 28662, 28660, 28671, 25707, 25708, 27408, 28704, 25707, 25708, 28707, 25709, 27409, 27413, 27417, 27422, 27431, 27437, 27862, 27862, 27449, 27455, 27460, 27881, 27774, 27780, 28184, 27468, 27469, 27480, 27484, 27487, 28181, 28184, 27494, 27503, 28255, 27515, 27518, 27516, 27520, 28709, 28711, 27523, 27522, 27527, 27526, 27532, 27536, 27537, 27539, 27542, 27541, 28713, 28717, 27546, 27594, 25955, 27548, 27609, 27614, 25984, 27620, 25890, 11807, 11808, 27556, 27575, 27594, 25955, 27600, 27609, 27614, 25984, 27620, 25927, 11836, 11837, 27580, 27590, 27589, 27594, 25955, 27600, 27609, 27614, 25984, 27620, 27635, 27652, 27673, 26066, 26067, 27702, 27703, 27707, 27705, 27709, 26088, 26090, 27717, 28551, 27721, 28564, 27738, 27739, 27750, 27751, 27765, 27766, 28181, 28184, 28190, 28195, 27862, 27774, 27780, 27788, 27794, 28181, 28184, 28190, 28195, 27862, 27774, 27780, 27788, 27794, 27825, 27827, 27829, 28319, 27838, 27853, 27852, 27854, 28181, 28184, 28195, 28190, 27862, 28208, 27871, 27876, 27878, 27881, 27883, 27898, 27909, 26311, 27915, 27916, 27951, 27917, 27918, 27956, 27920, 27957, 27956, 27922, 27923, 27928, 27932, 27936, 27940, 26351, 27942, 27943, 27956, 27949, 27956, 27951, 27952, 27956, 27955, 27954, 27957, 26382, 27966, 27968, 27986, 27987, 28001, 28002, 28015, 28017, 28030, 28032, 28082, 28035, 28037, 28040, 28042, 28056, 28060, 28058, 28073, 28077, 28075, 28082, 28095, 28102, 28109, 28114, 28119, 28124, 28127, 28132, 28181, 28184, 28140, 28148, 28255, 28157, 28161, 28169, 28255, 28177, 28181, 28184, 28240, 28190, 28195, 28202, 28208, 28217, 28221, 28225, 28230, 28235, 28240, 28245, 28261, 28255, 28261, 28263, 28277, 12199, 28291, 28298, 28297, 28299, 26800, 28302, 28304, 12210, 26815, 12215, 28316, 26824, 
26829, 28326, 26836, 26838, 28329, 26842, 28332, 28334, 28338, 28339, 28347, 28430, 28436, 28430, 28436, 28353, 28354, 28355, 28356, 28357, 28358, 28361, 28554, 28552, 28364, 28365, 28369, 28554, 28552, 28378, 28386, 28387, 28389, 28391, 28472, 28397, 28454, 28459, 27013, 27015, 28411, 26957, 26955, 28425, 28423, 26963, 28428, 28436, 28442, 28551, 28564, 28454, 28459, 27013, 27015, 28472, 28449, 28454, 28459, 27015, 27013, 28472, 28484, 27060, 27058, 28507, 28505, 27066, 28514, 27083, 27081, 28524, 28522, 27089, 28527, 28535, 28541, 28550, 28551, 28554, 28552, 28556, 28558, 28562, 28564, 28568, 27157, 28635, 28583, 28691, 28596, 28598, 28600, 28610, 28612, 28617, 28622, 28624, 28628, 28630, 28632, 28634, 27234, 27231, 28646, 28650, 28652, 28654, 28656, 28658, 28662, 28660, 28671, 28685, 28687, 28689, 27281, 28737, 28685, 28687, 28689, 27305, 28741, 28738, 28742, 28738, 28742, 28738, 28742, 28715, 28714, 28742, 28728, 28730, 28728, 28730, 28731, 28733, 28738, 28742, 9, 10, 11, 12, 13, 14, 15, 27560, 27563, 27583, 25975, 27639, 27677, 27680, 27693, 27699, 27716, 27732, 27747, 27759, 27810, 27823, 27849, 27896, 27908, 27980, 27995, 28092, 28099, 28106, 28275, 28287, 28396, 28404, 28415, 28419, 28441, 28488, 28501, 28518, 28540, 28548, 28666, 28682, 11538, 11539, 11540, 28752, 11542, 11543, 11545, 11546, 11547, 11548, 11549, 11550, 27925, 11552, 27933, 11554, 27937, 11556, 11557, 11558, 11559, 28932, 11561, 11562, 11564, 11565, 11566, 11567, 11568, 11569, 27925, 11571, 27933, 11573, 27937, 11575, 11576, 11577, 11578, 28932, 11581, 11582, 28341, 11584, 11585, 11586, 11587, 11588, 28341, 11591, 11592, 11593, 28754, 11595, 11597, 11600, 11601, 11602, 11603, 11604, 28341, 29043, 11607, 11608, 11609, 11610, 11611, 28756, 11613, 11614, 28757, 11616, 11618, 11619, 11620, 11621, 11622, 28341, 29043, 11625, 11626, 11627, 11628, 11629, 28758, 11631, 11632, 11634, 29085, 28759, 11637, 11642, 11643, 11644, 29050, 11646, 11647, 11648, 11649, 11650, 11651, 11652, 27394, 11655, 28379, 28381, 29050, 11659, 11660, 11661, 11662, 11663, 11664, 11665, 28668, 11668, 11669, 11670, 28763, 28760, 27399, 28763, 28762, 11676, 11681, 11682, 11688, 11689, 28764, 11691, 28766, 11693, 11694, 27419, 28769, 28771, 28772, 11699, 28773, 11701, 25750, 28775, 11704, 25758, 28776, 11707, 11708, 28777, 28779, 11711, 28781, 11713, 28785, 28782, 11716, 28911, 11718, 28878, 28880, 11721, 28882, 28982, 11724, 25786, 11726, 28785, 11728, 28786, 28787, 11731, 28789, 11733, 25810, 11735, 28981, 28996, 11738, 26635, 11740, 11741, 28793, 28795, 28796, 11745, 11746, 28798, 28800, 28801, 11750, 11751, 11752, 11753, 11758, 11759, 27524, 27525, 27528, 11763, 11764, 27528, 27529, 27530, 11768, 27534, 11770, 11771, 11772, 27543, 11774, 11775, 27543, 27544, 28805, 28814, 11790, 28815, 11792, 28821, 11794, 11795, 27549, 28824, 28806, 11799, 11801, 28829, 11803, 11804, 27551, 11806, 28807, 11810, 28812, 28811, 28810, 27572, 28817, 28814, 11819, 28815, 11821, 28821, 11823, 11824, 27602, 28825, 28824, 11828, 11830, 28829, 11832, 11833, 27622, 11835, 29055, 11839, 29077, 29076, 29075, 27584, 28818, 28817, 11848, 11849, 28819, 11851, 28821, 11853, 11854, 27602, 28825, 28824, 11858, 11860, 28829, 11862, 11863, 27622, 28834, 28833, 28832, 28835, 11869, 28840, 28839, 28838, 28837, 27650, 11876, 28845, 28844, 28843, 28842, 27662, 28848, 28847, 28846, 28849, 11886, 28854, 28853, 28852, 27689, 11893, 11894, 28857, 11898, 11899, 11900, 11901, 11902, 11903, 11904, 11906, 11908, 11909, 29087, 11911, 28863, 27735, 11915, 11916, 28868, 27998, 11920, 11921, 28872, 
27762, 11925, 11926, 28906, 11928, 26635, 11930, 28998, 11932, 28999, 11934, 26252, 28908, 11937, 11938, 28878, 28880, 11941, 28882, 11943, 28883, 28885, 11946, 28887, 28906, 11949, 26635, 11951, 28998, 11953, 28999, 11955, 26252, 28908, 11958, 11959, 28878, 28880, 11962, 28882, 11964, 28883, 28885, 11967, 28887, 27799, 27802, 27805, 27812, 27815, 27818, 11977, 11978, 11979, 26219, 11981, 26222, 27835, 11984, 28901, 27844, 11988, 11989, 11990, 28906, 11992, 26635, 11994, 28999, 11996, 28998, 11998, 26252, 28908, 12001, 12002, 29004, 29006, 12005, 28911, 12007, 28912, 12009, 12010, 12011, 27885, 27888, 28915, 12016, 27900, 28920, 12020, 12021, 28923, 12023, 12024, 12025, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035, 27925, 12037, 27929, 12039, 27933, 12041, 27937, 12043, 12044, 12045, 12046, 28932, 12048, 28929, 28928, 12051, 12052, 12053, 12054, 12055, 12056, 12057, 12058, 27959, 27962, 12061, 12062, 12063, 28932, 28934, 27983, 12068, 12069, 28939, 27998, 12073, 12074, 28944, 28012, 28946, 12078, 12079, 28949, 28027, 28951, 12083, 12084, 28966, 12086, 28967, 28969, 12089, 28954, 12091, 12092, 28955, 12094, 28956, 28053, 28958, 12098, 12099, 12100, 28961, 28070, 28963, 12104, 12105, 12106, 28966, 12108, 28967, 28969, 12112, 12114, 12116, 28976, 26539, 12119, 28978, 12121, 28979, 12123, 26559, 12125, 28981, 12127, 28982, 12129, 26635, 12131, 12132, 28137, 28985, 28145, 12136, 12137, 28252, 28988, 28154, 12141, 12142, 28158, 28991, 28166, 12146, 12147, 28252, 29021, 28994, 12151, 28996, 12153, 26635, 12155, 26641, 12157, 28998, 12159, 28999, 12161, 26659, 29002, 26666, 12165, 12166, 29004, 29006, 29008, 12170, 29010, 12172, 29012, 12174, 29014, 12176, 29015, 12178, 28237, 12180, 12181, 28242, 29018, 12184, 12185, 28252, 29021, 12188, 12189, 28264, 28267, 29025, 12194, 28279, 29030, 29033, 12200, 26788, 26791, 12203, 12204, 12205, 12206, 12207, 12208, 29036, 26811, 28309, 12213, 28313, 12216, 12217, 28319, 12219, 29040, 12221, 12222, 12223, 12224, 12225, 12226, 12227, 26848, 12229, 12230, 28341, 29043, 12233, 12234, 12235, 12236, 12237, 12238, 12239, 12240, 12241, 12242, 12243, 28366, 12245, 12246, 12247, 12248, 12249, 28366, 12251, 12253, 12254, 28374, 12256, 28376, 28379, 28381, 29050, 12261, 12262, 12263, 12264, 12265, 12267, 29052, 29087, 29063, 29062, 12273, 29064, 12275, 29066, 12277, 12278, 29068, 28446, 29073, 29072, 29071, 29055, 12285, 29077, 29076, 29075, 28416, 12293, 12294, 12295, 12296, 12297, 12298, 28430, 29059, 12301, 12303, 12304, 12305, 29063, 29062, 12308, 29064, 12310, 29066, 12312, 12313, 29068, 28446, 12316, 29061, 29073, 29072, 29071, 12321, 29077, 29076, 29075, 28497, 29063, 29062, 12329, 29064, 12331, 29066, 12333, 12334, 29068, 28469, 12337, 29070, 29073, 29072, 29071, 12342, 29077, 29076, 29075, 28497, 12349, 12350, 12351, 12352, 12353, 28510, 29080, 12356, 12358, 12359, 12360, 12361, 12362, 12363, 28529, 29083, 12366, 12368, 29085, 12371, 12372, 12373, 12374, 12375, 12376, 29087, 12378, 12379, 12380, 29088, 29089, 29090, 27151, 29092, 12386, 12387, 29115, 12390, 29093, 12392, 29115, 28590, 29096, 28592, 12400, 12401, 12402, 29098, 29097, 29099, 12406, 12407, 12408, 29100, 29101, 12411, 12412, 29102, 12414, 12415, 12416, 29113, 12418, 28635, 12420, 12421, 29115, 28699, 28638, 28640, 28642, 28644, 12432, 29107, 12434, 12435, 12436, 12437, 12438, 12439, 12440, 28668, 12443, 29113, 12445, 12446, 12447, 28691, 12449, 29111, 29110, 29113, 12461, 12462, 12463, 28691, 12465, 29115, 28699, 27317, 27318, 27319, 28705, 27319, 12579, 28705, 12585, 28735, 28739, 
28735, 12644, 28739, 12650, 28735, 12664, 28739, 12670, 28735, 12685, 12686, 28739, 12692, 13048, 13049, 13062, 13063, 13076, 13077, 28735, 13091, 28739, 13097, 14, 15, 11541, 29642, 29644, 29647, 11551, 11553, 11555, 11560, 29660, 29662, 29665, 11570, 11572, 11574, 11579, 29628, 11583, 29681, 25663, 11590, 29687, 11594, 25669, 29634, 29628, 29694, 11605, 11606, 29701, 11612, 11615, 29628, 29711, 11623, 11624, 29718, 11630, 28366, 11635, 11636, 11645, 29738, 29635, 11654, 11656, 11657, 11658, 29750, 29635, 11667, 11671, 11672, 11673, 11674, 11675, 11690, 11692, 11695, 11696, 11697, 11698, 11700, 11702, 11703, 11705, 11706, 11709, 11710, 11712, 11714, 11715, 11717, 11719, 11720, 11722, 11723, 11725, 11727, 11729, 11730, 11732, 11734, 11736, 11737, 11739, 11742, 11743, 11744, 11747, 11748, 11749, 29827, 29830, 11760, 11761, 11762, 29835, 11765, 11766, 11767, 11769, 11773, 29846, 11784, 11785, 11788, 11789, 11791, 11793, 11796, 11797, 11798, 29603, 11802, 11805, 29267, 11809, 29600, 11812, 11813, 11814, 29601, 11816, 11817, 11818, 11820, 11822, 11825, 11826, 11827, 29603, 11831, 11834, 29279, 11838, 29627, 11841, 11842, 11843, 29602, 11845, 11846, 11847, 29900, 11850, 11852, 11855, 11856, 11857, 29603, 11861, 11864, 11865, 11866, 11867, 11868, 29604, 11871, 11872, 11873, 11874, 11875, 11877, 11878, 11879, 11880, 11881, 11882, 11883, 11884, 11885, 29605, 11888, 11889, 11890, 29606, 11892, 28366, 11896, 29608, 29946, 27713, 29634, 11910, 11912, 11913, 29610, 11917, 11918, 29611, 11922, 11923, 29612, 11927, 11929, 11931, 11933, 11935, 11936, 11939, 11940, 11942, 11944, 11945, 11947, 11948, 11950, 11952, 11954, 11956, 11957, 11960, 11961, 11963, 11965, 11966, 11968, 11969, 11970, 11971, 29613, 11973, 11974, 11975, 29614, 11980, 11982, 11983, 11985, 29615, 11987, 30026, 11991, 11993, 11995, 11997, 11999, 12000, 12003, 12004, 12006, 12008, 12012, 12013, 12014, 29616, 12017, 12018, 29617, 12022, 30060, 30064, 30067, 12036, 12038, 12040, 12042, 12047, 12049, 12050, 30086, 30089, 12059, 12060, 12064, 12065, 12066, 29618, 12070, 12071, 29619, 12075, 12076, 12077, 12080, 12081, 12082, 12085, 12087, 12088, 12090, 12093, 12095, 12096, 12097, 30131, 12101, 12102, 12103, 30137, 12107, 12109, 12110, 29620, 29621, 29622, 12117, 12118, 12120, 12122, 12124, 12126, 12128, 12130, 12133, 12134, 12135, 12138, 12139, 12140, 12143, 12144, 12145, 12148, 12149, 12150, 12152, 12154, 12156, 12158, 12160, 12162, 12163, 12164, 12167, 12168, 12169, 12171, 12173, 12175, 12177, 12179, 12182, 12183, 12186, 12187, 12190, 12191, 12192, 29623, 12195, 12196, 29624, 12198, 12201, 12202, 30229, 12209, 12211, 12212, 12214, 12218, 12220, 12228, 12231, 12232, 12244, 30270, 12250, 29634, 30276, 12255, 12257, 12258, 12259, 12260, 28393, 12268, 29626, 12270, 12271, 12272, 12274, 12276, 30298, 12279, 12280, 12281, 12282, 12283, 12284, 29627, 12287, 12288, 12289, 29630, 12291, 29628, 30311, 30313, 12299, 12300, 28438, 12306, 12307, 12309, 12311, 30329, 12314, 12315, 12317, 12318, 12319, 12320, 12322, 12323, 12324, 29630, 12326, 12327, 12328, 12330, 12332, 30349, 12335, 12336, 12338, 12339, 12340, 12341, 12343, 12344, 12345, 29630, 12347, 29631, 30363, 30365, 12354, 12355, 29632, 30371, 30373, 12364, 12365, 28537, 12369, 29634, 30384, 12377, 12381, 12382, 12383, 12384, 12385, 12388, 12391, 12393, 12397, 12398, 12399, 12403, 12404, 12405, 12409, 12410, 12413, 12417, 12419, 30427, 12422, 12423, 12428, 12429, 12430, 12431, 12433, 30442, 29635, 12442, 12444, 12448, 12450, 12451, 28680, 12460, 12464, 12466, 12467, 29637, 29655, 29673, 29683, 
29722, 30381, 29705, 30381, 29722, 30381, 12536, 12537, 30447, 30455, 29735, 29733, 30447, 12555, 30455, 12560, 29735, 29733, 29747, 29745, 30447, 12578, 30455, 12584, 12627, 12630, 30439, 30439, 30436, 30447, 12643, 30455, 12649, 30439, 30439, 30436, 30447, 12663, 30455, 12669, 30439, 30439, 29842, 30447, 12684, 30481, 30455, 12691, 30381, 29943, 30388, 30381, 30388, 29957, 29961, 29965, 30015, 30249, 30047, 30247, 30245, 30055, 30245, 30239, 30245, 30077, 30094, 30100, 30104, 30109, 30114, 30212, 30216, 30247, 30245, 30232, 30230, 30245, 30239, 30245, 30249, 30247, 30245, 30315, 30388, 30386, 30315, 30381, 30388, 30386, 30381, 30388, 30386, 30381, 30388, 30386, 30381, 30388, 30284, 30286, 30284, 30286, 30284, 30286, 30286, 30284, 30381, 30388, 30381, 30388, 30439, 30406, 30417, 30412, 30421, 30406, 30439, 30412, 30417, 30421, 30439, 30406, 30412, 30417, 30421, 30439, 30439, 30436, 30447, 13090, 30455, 13096, 30488, 30484, 30484, 30488, 30484, 30486, 30488, 14, 15, 11580, 11589, 11596, 11598, 11599, 11617, 11633, 11653, 11666, 30547, 30550, 30553, 29774, 29779, 29782, 30562, 30565, 30568, 29803, 30581, 30584, 30602, 30607, 11800, 29268, 11811, 11815, 30615, 30620, 30625, 11829, 29280, 11840, 11844, 30633, 30638, 30644, 11859, 30649, 11870, 30654, 30656, 30659, 30661, 30664, 11887, 11891, 30669, 11895, 11897, 11905, 11907, 11914, 11919, 11924, 29977, 30695, 30698, 29998, 30707, 30710, 11972, 11976, 11986, 30038, 30734, 30046, 12015, 12019, 30754, 30090, 12067, 12072, 30769, 30772, 30122, 30125, 30780, 30784, 12111, 12113, 12115, 30800, 30803, 30806, 30809, 30820, 30828, 30830, 12193, 12197, 12252, 30858, 12266, 12269, 30867, 30874, 12286, 12290, 30879, 12292, 12302, 30890, 30898, 12325, 30901, 30906, 30914, 12346, 30917, 12348, 12357, 12367, 12370, 30936, 30946, 30948, 30950, 12441, 30969, 12452, 12475, 29639, 30756, 30497, 30756, 30498, 30499, 30756, 30502, 30501, 30750, 30500, 12487, 29657, 30756, 30756, 30756, 30504, 30506, 30505, 30509, 30508, 30750, 30507, 12499, 29675, 30528, 30530, 29678, 30513, 12506, 29720, 30530, 29713, 30516, 12512, 29720, 12515, 30521, 30523, 29696, 30524, 12522, 29703, 12524, 30534, 30528, 30530, 29713, 30531, 12531, 29720, 12534, 30534, 12538, 30966, 30967, 12542, 30971, 30536, 30540, 12546, 12547, 30539, 12550, 30966, 30967, 12556, 30971, 30973, 30972, 30536, 30540, 12563, 12564, 30539, 30542, 30540, 12569, 12570, 30545, 12573, 30966, 30967, 12580, 30971, 30973, 30972, 30934, 30934, 30551, 30825, 30552, 30557, 30825, 29786, 29791, 29796, 30571, 30825, 30690, 30819, 30572, 30575, 30576, 30825, 30577, 30578, 30825, 30579, 30825, 30580, 30583, 30586, 30587, 30597, 30600, 30599, 30434, 30960, 30958, 12634, 12635, 12636, 30965, 12639, 30966, 30589, 12645, 30971, 30590, 30589, 30434, 30960, 30958, 12654, 12655, 12656, 30965, 12659, 30966, 30591, 12665, 30971, 30594, 30593, 30434, 30960, 30596, 30595, 12675, 12676, 12677, 30965, 12680, 30966, 30597, 12687, 30971, 30600, 30599, 30604, 30603, 29856, 30609, 29864, 30622, 30621, 29880, 30627, 29888, 12715, 30934, 30641, 30640, 29905, 30646, 29913, 30885, 30851, 30253, 12740, 30676, 12743, 30386, 30885, 30851, 30253, 12750, 30934, 12753, 30386, 12756, 12758, 12760, 30689, 30825, 30690, 30692, 30691, 29981, 29986, 30701, 30825, 30702, 30704, 30703, 30002, 30007, 30713, 30717, 12785, 30019, 30017, 12788, 30723, 30727, 30728, 30825, 30729, 30731, 30730, 30042, 12801, 30738, 30052, 12806, 12807, 12808, 12809, 30745, 12811, 12812, 30756, 30756, 30756, 30747, 30748, 30746, 30752, 30751, 30750, 30749, 12823, 30079, 30756, 
30758, 12829, 30096, 12832, 12834, 12836, 12838, 30774, 30773, 30129, 30135, 30787, 30786, 30792, 30825, 30793, 30795, 30794, 30796, 30797, 30825, 30819, 30798, 30825, 30799, 30802, 30805, 30808, 30811, 30812, 30825, 30813, 30814, 30816, 30815, 30819, 30817, 30822, 30823, 30825, 30824, 30826, 30825, 30827, 12889, 12891, 30832, 30221, 30839, 30225, 12898, 12899, 30841, 12901, 12902, 12903, 30843, 30844, 30846, 12907, 30847, 12909, 30845, 30848, 12912, 12913, 12914, 30849, 30869, 30868, 30871, 30885, 30851, 30253, 30869, 30868, 30871, 30885, 30887, 12937, 30853, 12939, 12940, 30887, 12942, 12943, 30932, 12945, 12946, 12947, 30932, 12949, 12950, 30852, 12952, 30932, 30853, 12955, 12956, 30854, 12960, 30856, 12963, 30386, 12966, 12967, 12969, 12970, 12971, 12972, 30861, 30859, 12975, 12976, 30869, 30868, 30871, 30885, 30887, 30315, 30934, 30386, 30869, 30868, 30871, 30885, 30887, 30315, 13004, 30934, 13006, 30892, 30891, 30894, 30332, 30908, 30907, 30910, 30352, 30923, 30925, 30375, 30928, 30930, 30375, 13030, 30934, 13033, 30386, 13036, 13037, 13039, 13041, 30953, 13043, 30940, 30938, 30941, 30397, 13051, 13052, 13054, 13056, 30953, 13058, 30952, 30943, 30401, 13065, 13066, 13068, 13070, 30953, 13072, 30952, 30956, 30954, 30434, 30960, 30958, 13081, 13082, 13083, 30965, 13086, 30966, 30967, 13092, 30971, 30973, 30972, 30985, 30986, 30992, 30994, 31000, 31002, 31003, 31004, 31009, 31011, 31016, 31018, 31023, 31026, 13210, 13215, 13347, 13353, 31108, 31110, 13382, 13387, 13392, 31108, 31110, 7, 8, 9, 10, 11, 12, 13, 14, 15, 31146, 31153, 30650, 31161, 31163, 30665, 31166, 31172, 31173, 31174, 30726, 30755, 31191, 31192, 30875, 31218, 30899, 31224, 30915, 31228, 12476, 12477, 12478, 12479, 12480, 12481, 12482, 12483, 12484, 12485, 12486, 12488, 12489, 12490, 12491, 12492, 12493, 12494, 12495, 12496, 12497, 12498, 12500, 12501, 31120, 12503, 12504, 12505, 12507, 31121, 12509, 12510, 12511, 12513, 31122, 30534, 12517, 31124, 12519, 12520, 12521, 12523, 12525, 12526, 31125, 12528, 12529, 12530, 12532, 31126, 12535, 12539, 31239, 12541, 12543, 12544, 12545, 31303, 30537, 12549, 12551, 12552, 31130, 31129, 12557, 12558, 12559, 12561, 12562, 31315, 30537, 12566, 12567, 12568, 31320, 30543, 12572, 12574, 12575, 31130, 31129, 12581, 12582, 12583, 31168, 12587, 31168, 12589, 12590, 12591, 12592, 31131, 30555, 12595, 12596, 30558, 30560, 31135, 12600, 31136, 12602, 31137, 12604, 12605, 12606, 12607, 12608, 12609, 31138, 12611, 12612, 12613, 12614, 12615, 12616, 12617, 12618, 12619, 31139, 12621, 31140, 12623, 12624, 30598, 12626, 12628, 12629, 12631, 12632, 12633, 31363, 30963, 12638, 12640, 30588, 12642, 12646, 12647, 12648, 12651, 12652, 12653, 31377, 30963, 12658, 12660, 30592, 12662, 12666, 12667, 12668, 12671, 12672, 12673, 12674, 31392, 30963, 12679, 12681, 30598, 12683, 12688, 12689, 12690, 12693, 12694, 31141, 12696, 12697, 31143, 31142, 12700, 29868, 31144, 12704, 12705, 31148, 12707, 12708, 31150, 31149, 12711, 29892, 31151, 30932, 12717, 12718, 12719, 31155, 12721, 12722, 31157, 31156, 12725, 29919, 29925, 29935, 12735, 31220, 12737, 12738, 31168, 30674, 12742, 12744, 12745, 31220, 12747, 12748, 31170, 30932, 12752, 12754, 12761, 12762, 12763, 12764, 12765, 30693, 31176, 12768, 31177, 12770, 12771, 12772, 12773, 12774, 12775, 30705, 31179, 12778, 31180, 12780, 12781, 30715, 12783, 30719, 12786, 12787, 12789, 12791, 12792, 12793, 12794, 12795, 12796, 30732, 31185, 12799, 30044, 12802, 30740, 12804, 30743, 31468, 31470, 12810, 31473, 12813, 12814, 12815, 12816, 12817, 12818, 12819, 12820, 12821, 
12822, 12824, 31190, 12826, 12828, 12830, 30767, 30770, 12839, 12840, 30123, 30120, 30778, 12844, 30782, 12846, 12847, 12848, 31201, 31200, 31199, 12852, 12853, 12854, 12855, 12856, 12857, 12858, 12859, 12860, 12861, 12862, 12863, 31202, 12865, 31203, 12867, 31204, 12869, 31205, 12871, 12872, 12873, 12874, 12875, 12876, 12877, 12878, 12879, 31206, 12881, 12882, 12883, 12884, 12885, 12886, 12887, 31207, 31208, 12892, 30834, 12894, 30837, 12896, 12897, 12900, 31541, 12904, 12905, 12906, 12908, 12910, 12911, 31552, 12915, 12916, 12917, 31215, 12919, 30287, 12923, 31220, 12925, 12926, 12927, 12928, 31215, 12930, 30287, 12934, 31220, 12936, 12938, 31569, 12941, 12944, 31575, 12948, 31579, 12951, 12953, 12954, 31585, 30287, 12959, 30932, 12962, 12964, 30857, 31592, 31237, 31594, 31596, 12973, 12974, 31600, 12977, 12978, 31215, 12980, 30287, 12984, 31220, 12986, 12987, 31213, 30863, 12990, 12991, 12992, 12993, 31215, 12995, 30305, 12999, 31220, 13001, 13002, 31221, 13005, 13007, 13008, 31222, 13010, 13012, 13014, 13015, 31226, 13017, 13019, 13021, 31230, 13023, 13024, 13025, 31231, 13027, 13028, 31232, 30932, 13032, 13034, 30944, 31637, 31234, 31236, 13042, 13044, 13045, 13046, 13047, 30944, 31647, 31236, 31237, 13057, 13059, 13060, 13061, 30944, 31656, 31236, 31237, 13071, 13073, 13074, 13075, 13078, 13079, 13080, 31668, 30963, 13085, 13087, 31239, 13089, 13093, 13094, 13095, 13116, 13117, 13123, 13125, 13131, 13133, 13176, 13178, 13182, 13184, 13188, 13190, 13194, 13196, 13357, 13359, 13396, 13398, 8, 9, 10, 11, 12, 13, 14, 15, 31712, 31713, 31718, 31727, 31729, 31731, 31734, 31736, 31738, 31740, 31742, 31745, 31747, 31749, 31751, 31753, 12502, 31758, 31760, 12508, 31763, 31765, 12514, 12516, 12518, 31771, 31773, 31774, 12527, 31778, 31780, 12533, 31782, 31783, 12540, 31786, 31788, 12548, 31792, 12553, 12554, 31796, 31798, 31800, 12565, 31805, 12571, 31809, 12576, 12577, 31813, 31815, 12586, 12588, 31821, 12593, 12594, 31826, 12597, 12598, 12599, 12601, 12603, 31836, 31839, 12610, 31843, 31846, 31848, 12620, 12622, 12625, 31858, 31860, 31364, 12637, 31865, 12641, 31868, 31870, 31872, 31378, 12657, 31877, 12661, 31880, 31882, 31884, 31886, 31393, 12678, 31890, 12682, 31893, 31895, 12695, 31897, 12698, 12699, 12701, 12702, 12706, 31907, 12709, 12710, 12712, 12713, 12716, 12720, 31919, 12723, 12724, 12726, 31714, 31715, 12729, 31717, 31716, 12732, 31717, 12736, 31932, 12739, 12741, 31936, 12746, 31940, 12749, 12751, 31944, 30680, 30683, 30686, 31946, 31949, 12766, 12767, 12769, 31956, 31959, 12776, 12777, 12779, 12782, 12784, 31970, 30724, 31974, 31977, 12797, 12798, 12800, 12803, 12805, 31991, 31993, 31995, 31997, 31999, 12825, 31723, 30761, 30764, 12835, 12837, 32008, 12841, 12842, 12843, 12845, 32016, 12849, 12850, 12851, 32021, 32024, 32027, 32030, 12864, 12866, 12868, 12870, 32041, 32045, 32047, 12880, 32051, 32054, 12888, 12890, 12893, 12895, 31537, 31542, 31546, 31548, 31553, 12918, 32075, 31726, 12921, 12924, 32082, 12929, 32084, 31726, 12932, 12935, 31566, 31571, 32094, 32096, 32099, 31726, 12958, 12961, 32106, 12965, 12968, 32113, 12979, 32116, 31726, 12982, 12985, 32123, 12988, 12989, 12994, 32129, 12996, 31726, 13000, 32136, 13003, 13009, 32140, 31728, 13016, 32145, 31730, 13022, 32152, 13026, 32156, 13029, 13031, 32160, 13035, 13038, 13040, 31641, 32167, 32169, 13050, 13053, 13055, 31651, 32177, 13064, 13067, 13069, 31660, 32185, 32187, 31669, 13084, 32192, 13088, 32195, 32197, 31241, 31253, 31265, 31484, 31488, 31971, 31452, 31455, 31455, 31484, 31488, 31986, 31986, 31989, 31987, 
31986, 31484, 31488, 32091, 32100, 32100, 32100, 32126, 32138, 7, 8, 9, 10, 11, 12, 13, 14, 15, 32231, 32234, 32236, 32239, 32240, 32247, 32248, 32252, 31785, 32263, 32272, 31856, 31861, 31867, 31873, 31879, 32312, 31892, 32319, 32321, 32324, 32224, 32325, 32327, 32330, 32225, 32331, 32332, 32334, 12727, 12728, 12730, 12731, 12733, 32226, 32344, 32347, 32349, 32352, 12755, 12757, 12759, 12790, 32379, 32382, 12827, 32002, 12831, 12833, 32391, 32396, 31538, 32419, 32421, 12920, 32227, 32425, 32427, 12931, 32227, 32431, 12957, 32439, 32444, 12981, 32227, 32448, 32452, 12997, 32227, 32456, 32459, 13011, 32228, 32462, 13018, 32229, 32465, 32467, 32470, 32476, 32175, 32183, 32188, 32194, 13098, 13100, 13102, 31759, 31764, 32255, 31772, 32255, 31779, 32259, 32260, 32261, 32265, 32267, 32268, 32269, 32270, 32274, 31817, 31819, 32411, 32278, 32413, 32279, 32411, 32371, 32400, 32281, 32286, 32285, 32284, 32358, 32411, 32287, 32289, 32411, 32292, 32291, 32290, 32294, 32293, 31854, 32302, 32299, 32302, 32306, 32309, 32314, 32317, 31917, 32483, 32485, 32484, 32472, 32474, 32473, 31935, 31943, 13228, 13230, 32388, 32387, 32393, 32392, 32358, 32411, 32357, 32361, 32360, 32363, 32411, 32362, 32366, 32365, 32411, 32410, 32413, 32412, 31984, 31967, 31965, 13257, 13258, 13259, 13260, 32369, 13263, 13265, 32388, 32387, 32393, 32392, 32372, 32411, 32371, 32374, 32413, 32412, 31984, 31982, 13281, 13282, 13283, 13284, 13285, 13287, 13289, 32388, 32387, 32393, 32392, 32411, 32398, 32399, 32400, 32401, 32405, 32404, 32403, 32402, 32407, 32411, 32406, 32409, 32411, 32410, 32413, 32412, 32060, 32058, 32417, 32420, 32420, 32420, 32159, 13331, 32098, 13334, 32098, 13336, 32098, 13338, 32098, 32105, 32441, 32442, 32474, 32478, 32483, 32485, 32484, 32443, 32490, 32493, 13363, 32450, 13368, 32458, 32159, 32472, 32474, 32473, 32478, 32480, 32479, 32483, 32485, 32484, 32490, 32493, 15, 32232, 32237, 32264, 32273, 32322, 12703, 32328, 12714, 32335, 32557, 32559, 32561, 12734, 32380, 32573, 32397, 32070, 32424, 12922, 32430, 12933, 32438, 32447, 12983, 32596, 12998, 32143, 13013, 32148, 13020, 32532, 32243, 13105, 32535, 32246, 13108, 32534, 13110, 13111, 32535, 13113, 13114, 32262, 32257, 13119, 13120, 13121, 13124, 13126, 13127, 13128, 13129, 13132, 32546, 32550, 32563, 13141, 32276, 13143, 32277, 13145, 13146, 13147, 13148, 13149, 13150, 13151, 13152, 13153, 13154, 13155, 13156, 13157, 13158, 13159, 13160, 13161, 13162, 13163, 13164, 13165, 32546, 32550, 32563, 32346, 13174, 32300, 13177, 32540, 13180, 32300, 13183, 32542, 13186, 32307, 13189, 32544, 13192, 32315, 13195, 32546, 32550, 32563, 13204, 32346, 13206, 13207, 13208, 32610, 13211, 13212, 13213, 32608, 32555, 32563, 13222, 32346, 32565, 13225, 32351, 32567, 32568, 13233, 13234, 32569, 32389, 13237, 13238, 13240, 13241, 13242, 13243, 13244, 13245, 13246, 13247, 13248, 13249, 13250, 13251, 13252, 13253, 13254, 13255, 13256, 13261, 32695, 32697, 32570, 13267, 13268, 32389, 13270, 13271, 13273, 13274, 13275, 13276, 13277, 13278, 13279, 13280, 32714, 32716, 13290, 13291, 32576, 32575, 32389, 13295, 13296, 13298, 13299, 13300, 13301, 13302, 13303, 13304, 13305, 13306, 13307, 13308, 13309, 13310, 13311, 13312, 13313, 13314, 13315, 13316, 13318, 13319, 13320, 32579, 13322, 32581, 32584, 13326, 32469, 32585, 32588, 13332, 32598, 13335, 13337, 13339, 13341, 32103, 13343, 13344, 13345, 32608, 13348, 13349, 13350, 13351, 32610, 13354, 13355, 32491, 13358, 32591, 32594, 13364, 32595, 32598, 13369, 32599, 32602, 32606, 32605, 13376, 32469, 13378, 13379, 13380, 32608, 13383, 
13384, 13385, 32609, 13388, 13389, 13390, 32610, 32611, 13394, 32491, 13397, 9, 10, 11, 12, 13, 14, 15, 32784, 32785, 13103, 13104, 13106, 13107, 13109, 32822, 13112, 32825, 13115, 13118, 32262, 32271, 32788, 13135, 32548, 32790, 13138, 32552, 13140, 13142, 13144, 32845, 32847, 32849, 32851, 32853, 32856, 32860, 32862, 32864, 32788, 13167, 32548, 32790, 13170, 32552, 13172, 13173, 13175, 13179, 13181, 13185, 13187, 13191, 13193, 32788, 13198, 32548, 32790, 13201, 32552, 13203, 13205, 32891, 13209, 32895, 13214, 32792, 13217, 32795, 32794, 32793, 13221, 13223, 13224, 13226, 32797, 32798, 13231, 13232, 13235, 32907, 13236, 32911, 32394, 32913, 32916, 32918, 32921, 32923, 32925, 32927, 32931, 32797, 32798, 13266, 13269, 32937, 32394, 32939, 32943, 32945, 32947, 32797, 32798, 13292, 13293, 32949, 13294, 32954, 32394, 32956, 32958, 32961, 32963, 32965, 32969, 32971, 32973, 32800, 13321, 32976, 13323, 32801, 13325, 13327, 13328, 32803, 13330, 32985, 13333, 32987, 32988, 32989, 32805, 13342, 32994, 13346, 32999, 13352, 13356, 13360, 32806, 13362, 33007, 13365, 32808, 13367, 33010, 13370, 32810, 13372, 32812, 13374, 13375, 13377, 33019, 13381, 33023, 13386, 33027, 13391, 13393, 13395, 32831, 32828, 32834, 32829, 32831, 32834, 32832, 32836, 33032, 32871, 32875, 32879, 32883, 33001, 33004, 33001, 33004, 33032, 14, 15, 13099, 13101, 32816, 32819, 13122, 13130, 13134, 13136, 13137, 13139, 33061, 33062, 33066, 32854, 32857, 33070, 13166, 13168, 13169, 13171, 32869, 13197, 13199, 13200, 13202, 33094, 13216, 13218, 13219, 13220, 33105, 33107, 13227, 13229, 33111, 13239, 32914, 32919, 32928, 32929, 13262, 13264, 32933, 13272, 32940, 32717, 13286, 13288, 33138, 13297, 33144, 33146, 32966, 13317, 32978, 13324, 33157, 13329, 13340, 33167, 13361, 13366, 13371, 13373, 33186, 33187, 33121, 33148, 33048, 33046, 32992, 33169, 13423, 33050, 13425, 33051, 32997, 33171, 13429, 13430, 13431, 13433, 13434, 13435, 33017, 33189, 33025, 33193, 33194, 13446, 33195, 33063, 33148, 13467, 33080, 33081, 13470, 33082, 33083, 13473, 33084, 33085, 13476, 33086, 32889, 33096, 32893, 33098, 32992, 33169, 32997, 33171, 13492, 13493, 33172, 33121, 33148, 33148, 33160, 33162, 33179, 33179, 32992, 33169, 32996, 32997, 33171, 13532, 13533, 33172, 33175, 33179, 33017, 33189, 33021, 33191, 33025, 33193, 33194, 13549, 33195, 13, 14, 15, 33055, 33058, 33073, 33076, 33088, 33091, 33100, 33244, 33113, 32934, 33264, 32959, 32974, 33249, 33248, 33115, 33228, 13404, 33254, 33263, 33217, 33216, 33141, 13411, 33231, 33230, 13417, 13418, 33044, 33042, 13421, 13422, 13424, 13426, 13427, 13428, 33295, 33220, 33298, 33221, 33060, 33106, 13441, 13442, 13443, 13444, 13445, 13447, 33249, 33248, 33115, 33228, 13453, 33254, 33263, 33262, 33141, 13459, 33231, 33230, 33078, 13468, 13469, 13471, 13472, 13474, 13475, 13477, 33093, 13481, 13482, 13483, 13484, 33104, 33106, 13488, 13489, 13490, 13491, 13494, 33249, 33248, 33115, 13499, 33253, 33252, 33254, 33257, 33256, 33129, 13507, 33260, 33133, 33263, 33262, 33141, 13514, 33268, 33154, 33156, 33158, 13521, 13522, 13523, 13524, 33177, 33179, 13527, 13528, 13529, 13530, 13531, 13534, 33173, 13536, 33177, 13538, 33183, 33181, 33280, 13542, 13543, 13544, 13545, 13546, 13547, 13548, 13550, 33245, 33112, 33153, 13399, 13400, 13401, 13403, 13405, 13406, 13407, 13408, 13409, 33370, 33371, 13413, 13414, 13419, 13420, 33387, 33392, 33393, 13432, 13436, 33361, 33360, 13439, 13440, 33407, 13448, 13449, 13450, 13452, 13454, 13455, 13456, 13457, 33370, 33371, 13461, 13462, 33363, 33362, 13466, 33421, 33423, 33425, 33427, 
33365, 33364, 13480, 13486, 13487, 33439, 13495, 13496, 13497, 13500, 13501, 13502, 13503, 13504, 13505, 33369, 13508, 13509, 13510, 13511, 13512, 33370, 13515, 33371, 13518, 13519, 13520, 13525, 13526, 33472, 13535, 13537, 13539, 13540, 13541, 33487, 33469, 33390, 33394, 33404, 33482, 33402, 33482, 33431, 33429, 33437, 33469, 33435, 33470, 33469, 33467, 33484, 33482, 33480, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33270, 33492, 33489, 33377, 33497, 13410, 13412, 33503, 33366, 33505, 33509, 33510, 13437, 13438, 33517, 33489, 33412, 33522, 13458, 13460, 33527, 13464, 13465, 13478, 13479, 33366, 33542, 33489, 33544, 33548, 13506, 33551, 33554, 13513, 13516, 33557, 33568, 33566, 13558, 13559, 33396, 33396, 13563, 33566, 33406, 13567, 13568, 13569, 33566, 33426, 33424, 33422, 33422, 33566, 13582, 13583, 13584, 33566, 33328, 13588, 13589, 13590, 33562, 33561, 33559, 33562, 33562, 33562, 33343, 13604, 13605, 13606, 33566, 33565, 33486, 13611, 13612, 13613, 14, 15, 13402, 33498, 33605, 33606, 33150, 13416, 33609, 33613, 13451, 33618, 33619, 33150, 33622, 33624, 13485, 13498, 33545, 33630, 33633, 33634, 33150, 33603, 13555, 33396, 13560, 13561, 33398, 13564, 13566, 33647, 33616, 13574, 13576, 13577, 13578, 13579, 13580, 33655, 13585, 13587, 33661, 33631, 13597, 13598, 13599, 13600, 13601, 13602, 13603, 33671, 33636, 13608, 13609, 13610, 33677, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33680, 33607, 13415, 33688, 33620, 13463, 33695, 13517, 13552, 33681, 33685, 13557, 13562, 33704, 33687, 33645, 13571, 33617, 33692, 33713, 33715, 33693, 33656, 33694, 33659, 33696, 33629, 13594, 33632, 33723, 33725, 33727, 33669, 13607, 33675, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33601, 13553, 33745, 13556, 33638, 33756, 13565, 33709, 33614, 13572, 33748, 13575, 33764, 13581, 13586, 33720, 33626, 13592, 13593, 13595, 33699, 33774, 33729, 33731, 33734, 9, 10, 11, 12, 13, 14, 15, 13551, 13554, 33795, 33757, 33642, 33798, 13570, 13573, 33803, 33805, 33806, 13591, 13596, 33775, 33732, 33810, 33810, 33810, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33828, 33793, 13615, 33824, 33829, 33801, 13620, 33830, 33834, 33833, 33832, 33811, 13626, 33835, 33838, 33837, 13614, 13616, 13617, 33826, 13619, 13621, 13622, 13623, 13624, 13625, 13627, 13628, 13629, 13, 14, 15, 33858, 13618, 33862, 33879, 33868, 33884, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33873, 33889, 33877, 33880, 33882, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33908, 33906, 33904, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 13630, 13631, 13632, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33937, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 33938, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
bool h_Op[] = {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 
1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
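// (added note) End of the embedded 0/1 table. This generated file does not
// say what the table is for; judging from the signature of ac() below it is
// presumably the constant operand mask handed in as `Op` (an assumption,
// not something the source confirms). Kept verbatim from the generator output.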
#define THREADS_PER_BLOCK 16
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 13648
#define SIZE_OF_AC 20336
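
// (added usage sketch; not generator output) ac() below is a __device__
// helper, so some __global__ kernel has to call it. Under the assumption
// that the constants above describe the intended launch shape, a minimal
// wrapper would look like this (the names ac_kernel/dA/dB/dC/dOp are
// illustrative only):
//
//     __global__ void ac_kernel(float *A, const int *B, const int *C,
//                               const bool *Op, int n_iter) {
//         ac(A, B, C, Op, n_iter);
//     }
//     // host side:
//     // ac_kernel<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(dA, dB, dC, dOp, 1);
//
// Note: as flagged inside ac(), its static __shared__ array currently exceeds
// the 48 KB static limit, so any kernel calling it will fail to compile until
// R is shrunk or moved to dynamically sized shared memory.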
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
    // Global thread index; with BLOCKS_PER_GRID == 1 this reduces to threadIdx.x.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // NOTE: 2124 * THREADS_PER_BLOCK floats = 135,936 bytes of static shared
    // memory, which is above the 48 KB static limit nvcc enforces, so a kernel
    // calling ac() will not compile as-is; R would need to be shrunk or turned
    // into a dynamically sized (extern __shared__) allocation.
    __shared__ float R[2124*THREADS_PER_BLOCK];
    const int t = THREADS_PER_BLOCK;
    __shared__ float final;
    // Let one thread initialize the shared scalar, then publish it to the
    // whole block; the original had every thread store 0 with no barrier.
    if (threadIdx.x == 0) final = 0.0f;
    __syncthreads();
// Stage this thread's 853 circuit inputs into the shared value table.
#pragma unroll
for (int k = 0; k < 853; k++) {
R[i + k*t] = A[i + k*t];
}
__syncthreads();
// Evaluate the full gate list n_iter times over the staged inputs.
for (int iter = 0; iter < n_iter; iter++) {
// Gates 0..239: outputs land in slots 853..1092 of the value table.
#pragma unroll
for (int k = 0; k < 240; k++) {
R[i + (853 + k)*t] = Op[i + k*t] ? R[B[i + k*t]] * R[C[i + k*t]]
                                 : R[B[i + k*t]] + R[C[i + k*t]];
}
__syncthreads();
// Gates 240..379: outputs land in slots 1093..1232 of the value table.
#pragma unroll
for (int k = 240; k < 380; k++) {
R[i + (853 + k)*t] = Op[i + k*t] ? R[B[i + k*t]] * R[C[i + k*t]]
                                 : R[B[i + k*t]] + R[C[i + k*t]];
}
__syncthreads();
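// NOTE: this unrolled body appears to be machine-generated. Each statement
// evaluates one node of an expression DAG stored structure-of-arrays style:
// Op[n] selects the operation (multiply if nonzero, add otherwise), and
// B[n]/C[n] hold the absolute R-indices of the node's two operands; the
// result goes to a fresh slot of R. Here i is presumably the thread's lane
// within a batch of t independent instances, so slot n of instance i lives
// at [i + n*t]. The __syncthreads() barriers separate dependency levels:
// results written above (possibly by other threads) must be visible before
// the statements below read them.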
R[i + 1233*t] = Op[i + 380*t] ? R[B[i + 380*t]] * R[C[i + 380*t]] : R[B[i + 380*t]] + R[C[i + 380*t]];
R[i + 1234*t] = Op[i + 381*t] ? R[B[i + 381*t]] * R[C[i + 381*t]] : R[B[i + 381*t]] + R[C[i + 381*t]];
R[i + 1235*t] = Op[i + 382*t] ? R[B[i + 382*t]] * R[C[i + 382*t]] : R[B[i + 382*t]] + R[C[i + 382*t]];
R[i + 1236*t] = Op[i + 383*t] ? R[B[i + 383*t]] * R[C[i + 383*t]] : R[B[i + 383*t]] + R[C[i + 383*t]];
R[i + 1237*t] = Op[i + 384*t] ? R[B[i + 384*t]] * R[C[i + 384*t]] : R[B[i + 384*t]] + R[C[i + 384*t]];
R[i + 1238*t] = Op[i + 385*t] ? R[B[i + 385*t]] * R[C[i + 385*t]] : R[B[i + 385*t]] + R[C[i + 385*t]];
R[i + 1239*t] = Op[i + 386*t] ? R[B[i + 386*t]] * R[C[i + 386*t]] : R[B[i + 386*t]] + R[C[i + 386*t]];
R[i + 1240*t] = Op[i + 387*t] ? R[B[i + 387*t]] * R[C[i + 387*t]] : R[B[i + 387*t]] + R[C[i + 387*t]];
R[i + 1241*t] = Op[i + 388*t] ? R[B[i + 388*t]] * R[C[i + 388*t]] : R[B[i + 388*t]] + R[C[i + 388*t]];
R[i + 1242*t] = Op[i + 389*t] ? R[B[i + 389*t]] * R[C[i + 389*t]] : R[B[i + 389*t]] + R[C[i + 389*t]];
R[i + 1243*t] = Op[i + 390*t] ? R[B[i + 390*t]] * R[C[i + 390*t]] : R[B[i + 390*t]] + R[C[i + 390*t]];
R[i + 1244*t] = Op[i + 391*t] ? R[B[i + 391*t]] * R[C[i + 391*t]] : R[B[i + 391*t]] + R[C[i + 391*t]];
R[i + 1245*t] = Op[i + 392*t] ? R[B[i + 392*t]] * R[C[i + 392*t]] : R[B[i + 392*t]] + R[C[i + 392*t]];
R[i + 1246*t] = Op[i + 393*t] ? R[B[i + 393*t]] * R[C[i + 393*t]] : R[B[i + 393*t]] + R[C[i + 393*t]];
R[i + 1247*t] = Op[i + 394*t] ? R[B[i + 394*t]] * R[C[i + 394*t]] : R[B[i + 394*t]] + R[C[i + 394*t]];
R[i + 1248*t] = Op[i + 395*t] ? R[B[i + 395*t]] * R[C[i + 395*t]] : R[B[i + 395*t]] + R[C[i + 395*t]];
R[i + 1249*t] = Op[i + 396*t] ? R[B[i + 396*t]] * R[C[i + 396*t]] : R[B[i + 396*t]] + R[C[i + 396*t]];
R[i + 1250*t] = Op[i + 397*t] ? R[B[i + 397*t]] * R[C[i + 397*t]] : R[B[i + 397*t]] + R[C[i + 397*t]];
R[i + 1251*t] = Op[i + 398*t] ? R[B[i + 398*t]] * R[C[i + 398*t]] : R[B[i + 398*t]] + R[C[i + 398*t]];
R[i + 1252*t] = Op[i + 399*t] ? R[B[i + 399*t]] * R[C[i + 399*t]] : R[B[i + 399*t]] + R[C[i + 399*t]];
R[i + 1253*t] = Op[i + 400*t] ? R[B[i + 400*t]] * R[C[i + 400*t]] : R[B[i + 400*t]] + R[C[i + 400*t]];
R[i + 1254*t] = Op[i + 401*t] ? R[B[i + 401*t]] * R[C[i + 401*t]] : R[B[i + 401*t]] + R[C[i + 401*t]];
R[i + 1255*t] = Op[i + 402*t] ? R[B[i + 402*t]] * R[C[i + 402*t]] : R[B[i + 402*t]] + R[C[i + 402*t]];
R[i + 1256*t] = Op[i + 403*t] ? R[B[i + 403*t]] * R[C[i + 403*t]] : R[B[i + 403*t]] + R[C[i + 403*t]];
R[i + 1257*t] = Op[i + 404*t] ? R[B[i + 404*t]] * R[C[i + 404*t]] : R[B[i + 404*t]] + R[C[i + 404*t]];
R[i + 1258*t] = Op[i + 405*t] ? R[B[i + 405*t]] * R[C[i + 405*t]] : R[B[i + 405*t]] + R[C[i + 405*t]];
R[i + 1259*t] = Op[i + 406*t] ? R[B[i + 406*t]] * R[C[i + 406*t]] : R[B[i + 406*t]] + R[C[i + 406*t]];
R[i + 1260*t] = Op[i + 407*t] ? R[B[i + 407*t]] * R[C[i + 407*t]] : R[B[i + 407*t]] + R[C[i + 407*t]];
R[i + 1261*t] = Op[i + 408*t] ? R[B[i + 408*t]] * R[C[i + 408*t]] : R[B[i + 408*t]] + R[C[i + 408*t]];
R[i + 1262*t] = Op[i + 409*t] ? R[B[i + 409*t]] * R[C[i + 409*t]] : R[B[i + 409*t]] + R[C[i + 409*t]];
R[i + 1263*t] = Op[i + 410*t] ? R[B[i + 410*t]] * R[C[i + 410*t]] : R[B[i + 410*t]] + R[C[i + 410*t]];
R[i + 1264*t] = Op[i + 411*t] ? R[B[i + 411*t]] * R[C[i + 411*t]] : R[B[i + 411*t]] + R[C[i + 411*t]];
R[i + 1265*t] = Op[i + 412*t] ? R[B[i + 412*t]] * R[C[i + 412*t]] : R[B[i + 412*t]] + R[C[i + 412*t]];
R[i + 1266*t] = Op[i + 413*t] ? R[B[i + 413*t]] * R[C[i + 413*t]] : R[B[i + 413*t]] + R[C[i + 413*t]];
R[i + 1267*t] = Op[i + 414*t] ? R[B[i + 414*t]] * R[C[i + 414*t]] : R[B[i + 414*t]] + R[C[i + 414*t]];
R[i + 1268*t] = Op[i + 415*t] ? R[B[i + 415*t]] * R[C[i + 415*t]] : R[B[i + 415*t]] + R[C[i + 415*t]];
R[i + 1269*t] = Op[i + 416*t] ? R[B[i + 416*t]] * R[C[i + 416*t]] : R[B[i + 416*t]] + R[C[i + 416*t]];
R[i + 1270*t] = Op[i + 417*t] ? R[B[i + 417*t]] * R[C[i + 417*t]] : R[B[i + 417*t]] + R[C[i + 417*t]];
R[i + 1271*t] = Op[i + 418*t] ? R[B[i + 418*t]] * R[C[i + 418*t]] : R[B[i + 418*t]] + R[C[i + 418*t]];
R[i + 1272*t] = Op[i + 419*t] ? R[B[i + 419*t]] * R[C[i + 419*t]] : R[B[i + 419*t]] + R[C[i + 419*t]];
R[i + 1273*t] = Op[i + 420*t] ? R[B[i + 420*t]] * R[C[i + 420*t]] : R[B[i + 420*t]] + R[C[i + 420*t]];
R[i + 1274*t] = Op[i + 421*t] ? R[B[i + 421*t]] * R[C[i + 421*t]] : R[B[i + 421*t]] + R[C[i + 421*t]];
R[i + 1275*t] = Op[i + 422*t] ? R[B[i + 422*t]] * R[C[i + 422*t]] : R[B[i + 422*t]] + R[C[i + 422*t]];
R[i + 1276*t] = Op[i + 423*t] ? R[B[i + 423*t]] * R[C[i + 423*t]] : R[B[i + 423*t]] + R[C[i + 423*t]];
R[i + 1277*t] = Op[i + 424*t] ? R[B[i + 424*t]] * R[C[i + 424*t]] : R[B[i + 424*t]] + R[C[i + 424*t]];
R[i + 1278*t] = Op[i + 425*t] ? R[B[i + 425*t]] * R[C[i + 425*t]] : R[B[i + 425*t]] + R[C[i + 425*t]];
R[i + 1279*t] = Op[i + 426*t] ? R[B[i + 426*t]] * R[C[i + 426*t]] : R[B[i + 426*t]] + R[C[i + 426*t]];
R[i + 1280*t] = Op[i + 427*t] ? R[B[i + 427*t]] * R[C[i + 427*t]] : R[B[i + 427*t]] + R[C[i + 427*t]];
R[i + 1281*t] = Op[i + 428*t] ? R[B[i + 428*t]] * R[C[i + 428*t]] : R[B[i + 428*t]] + R[C[i + 428*t]];
R[i + 1282*t] = Op[i + 429*t] ? R[B[i + 429*t]] * R[C[i + 429*t]] : R[B[i + 429*t]] + R[C[i + 429*t]];
R[i + 1283*t] = Op[i + 430*t] ? R[B[i + 430*t]] * R[C[i + 430*t]] : R[B[i + 430*t]] + R[C[i + 430*t]];
R[i + 1284*t] = Op[i + 431*t] ? R[B[i + 431*t]] * R[C[i + 431*t]] : R[B[i + 431*t]] + R[C[i + 431*t]];
R[i + 1285*t] = Op[i + 432*t] ? R[B[i + 432*t]] * R[C[i + 432*t]] : R[B[i + 432*t]] + R[C[i + 432*t]];
R[i + 1286*t] = Op[i + 433*t] ? R[B[i + 433*t]] * R[C[i + 433*t]] : R[B[i + 433*t]] + R[C[i + 433*t]];
R[i + 1287*t] = Op[i + 434*t] ? R[B[i + 434*t]] * R[C[i + 434*t]] : R[B[i + 434*t]] + R[C[i + 434*t]];
R[i + 1288*t] = Op[i + 435*t] ? R[B[i + 435*t]] * R[C[i + 435*t]] : R[B[i + 435*t]] + R[C[i + 435*t]];
R[i + 1289*t] = Op[i + 436*t] ? R[B[i + 436*t]] * R[C[i + 436*t]] : R[B[i + 436*t]] + R[C[i + 436*t]];
R[i + 1290*t] = Op[i + 437*t] ? R[B[i + 437*t]] * R[C[i + 437*t]] : R[B[i + 437*t]] + R[C[i + 437*t]];
R[i + 1291*t] = Op[i + 438*t] ? R[B[i + 438*t]] * R[C[i + 438*t]] : R[B[i + 438*t]] + R[C[i + 438*t]];
R[i + 1292*t] = Op[i + 439*t] ? R[B[i + 439*t]] * R[C[i + 439*t]] : R[B[i + 439*t]] + R[C[i + 439*t]];
R[i + 1293*t] = Op[i + 440*t] ? R[B[i + 440*t]] * R[C[i + 440*t]] : R[B[i + 440*t]] + R[C[i + 440*t]];
R[i + 1294*t] = Op[i + 441*t] ? R[B[i + 441*t]] * R[C[i + 441*t]] : R[B[i + 441*t]] + R[C[i + 441*t]];
R[i + 1295*t] = Op[i + 442*t] ? R[B[i + 442*t]] * R[C[i + 442*t]] : R[B[i + 442*t]] + R[C[i + 442*t]];
R[i + 1296*t] = Op[i + 443*t] ? R[B[i + 443*t]] * R[C[i + 443*t]] : R[B[i + 443*t]] + R[C[i + 443*t]];
R[i + 1297*t] = Op[i + 444*t] ? R[B[i + 444*t]] * R[C[i + 444*t]] : R[B[i + 444*t]] + R[C[i + 444*t]];
R[i + 1298*t] = Op[i + 445*t] ? R[B[i + 445*t]] * R[C[i + 445*t]] : R[B[i + 445*t]] + R[C[i + 445*t]];
R[i + 1299*t] = Op[i + 446*t] ? R[B[i + 446*t]] * R[C[i + 446*t]] : R[B[i + 446*t]] + R[C[i + 446*t]];
R[i + 1300*t] = Op[i + 447*t] ? R[B[i + 447*t]] * R[C[i + 447*t]] : R[B[i + 447*t]] + R[C[i + 447*t]];
R[i + 1301*t] = Op[i + 448*t] ? R[B[i + 448*t]] * R[C[i + 448*t]] : R[B[i + 448*t]] + R[C[i + 448*t]];
R[i + 1302*t] = Op[i + 449*t] ? R[B[i + 449*t]] * R[C[i + 449*t]] : R[B[i + 449*t]] + R[C[i + 449*t]];
R[i + 1303*t] = Op[i + 450*t] ? R[B[i + 450*t]] * R[C[i + 450*t]] : R[B[i + 450*t]] + R[C[i + 450*t]];
R[i + 1304*t] = Op[i + 451*t] ? R[B[i + 451*t]] * R[C[i + 451*t]] : R[B[i + 451*t]] + R[C[i + 451*t]];
R[i + 1305*t] = Op[i + 452*t] ? R[B[i + 452*t]] * R[C[i + 452*t]] : R[B[i + 452*t]] + R[C[i + 452*t]];
R[i + 1306*t] = Op[i + 453*t] ? R[B[i + 453*t]] * R[C[i + 453*t]] : R[B[i + 453*t]] + R[C[i + 453*t]];
R[i + 1307*t] = Op[i + 454*t] ? R[B[i + 454*t]] * R[C[i + 454*t]] : R[B[i + 454*t]] + R[C[i + 454*t]];
R[i + 1308*t] = Op[i + 455*t] ? R[B[i + 455*t]] * R[C[i + 455*t]] : R[B[i + 455*t]] + R[C[i + 455*t]];
R[i + 1309*t] = Op[i + 456*t] ? R[B[i + 456*t]] * R[C[i + 456*t]] : R[B[i + 456*t]] + R[C[i + 456*t]];
R[i + 1310*t] = Op[i + 457*t] ? R[B[i + 457*t]] * R[C[i + 457*t]] : R[B[i + 457*t]] + R[C[i + 457*t]];
R[i + 1311*t] = Op[i + 458*t] ? R[B[i + 458*t]] * R[C[i + 458*t]] : R[B[i + 458*t]] + R[C[i + 458*t]];
R[i + 1312*t] = Op[i + 459*t] ? R[B[i + 459*t]] * R[C[i + 459*t]] : R[B[i + 459*t]] + R[C[i + 459*t]];
R[i + 1313*t] = Op[i + 460*t] ? R[B[i + 460*t]] * R[C[i + 460*t]] : R[B[i + 460*t]] + R[C[i + 460*t]];
R[i + 1314*t] = Op[i + 461*t] ? R[B[i + 461*t]] * R[C[i + 461*t]] : R[B[i + 461*t]] + R[C[i + 461*t]];
R[i + 1315*t] = Op[i + 462*t] ? R[B[i + 462*t]] * R[C[i + 462*t]] : R[B[i + 462*t]] + R[C[i + 462*t]];
R[i + 1316*t] = Op[i + 463*t] ? R[B[i + 463*t]] * R[C[i + 463*t]] : R[B[i + 463*t]] + R[C[i + 463*t]];
R[i + 1317*t] = Op[i + 464*t] ? R[B[i + 464*t]] * R[C[i + 464*t]] : R[B[i + 464*t]] + R[C[i + 464*t]];
R[i + 1318*t] = Op[i + 465*t] ? R[B[i + 465*t]] * R[C[i + 465*t]] : R[B[i + 465*t]] + R[C[i + 465*t]];
R[i + 1319*t] = Op[i + 466*t] ? R[B[i + 466*t]] * R[C[i + 466*t]] : R[B[i + 466*t]] + R[C[i + 466*t]];
R[i + 1320*t] = Op[i + 467*t] ? R[B[i + 467*t]] * R[C[i + 467*t]] : R[B[i + 467*t]] + R[C[i + 467*t]];
R[i + 1321*t] = Op[i + 468*t] ? R[B[i + 468*t]] * R[C[i + 468*t]] : R[B[i + 468*t]] + R[C[i + 468*t]];
R[i + 1322*t] = Op[i + 469*t] ? R[B[i + 469*t]] * R[C[i + 469*t]] : R[B[i + 469*t]] + R[C[i + 469*t]];
R[i + 1323*t] = Op[i + 470*t] ? R[B[i + 470*t]] * R[C[i + 470*t]] : R[B[i + 470*t]] + R[C[i + 470*t]];
R[i + 1324*t] = Op[i + 471*t] ? R[B[i + 471*t]] * R[C[i + 471*t]] : R[B[i + 471*t]] + R[C[i + 471*t]];
R[i + 1325*t] = Op[i + 472*t] ? R[B[i + 472*t]] * R[C[i + 472*t]] : R[B[i + 472*t]] + R[C[i + 472*t]];
R[i + 1326*t] = Op[i + 473*t] ? R[B[i + 473*t]] * R[C[i + 473*t]] : R[B[i + 473*t]] + R[C[i + 473*t]];
R[i + 1327*t] = Op[i + 474*t] ? R[B[i + 474*t]] * R[C[i + 474*t]] : R[B[i + 474*t]] + R[C[i + 474*t]];
R[i + 1328*t] = Op[i + 475*t] ? R[B[i + 475*t]] * R[C[i + 475*t]] : R[B[i + 475*t]] + R[C[i + 475*t]];
R[i + 1329*t] = Op[i + 476*t] ? R[B[i + 476*t]] * R[C[i + 476*t]] : R[B[i + 476*t]] + R[C[i + 476*t]];
R[i + 1330*t] = Op[i + 477*t] ? R[B[i + 477*t]] * R[C[i + 477*t]] : R[B[i + 477*t]] + R[C[i + 477*t]];
R[i + 1331*t] = Op[i + 478*t] ? R[B[i + 478*t]] * R[C[i + 478*t]] : R[B[i + 478*t]] + R[C[i + 478*t]];
R[i + 1332*t] = Op[i + 479*t] ? R[B[i + 479*t]] * R[C[i + 479*t]] : R[B[i + 479*t]] + R[C[i + 479*t]];
R[i + 1333*t] = Op[i + 480*t] ? R[B[i + 480*t]] * R[C[i + 480*t]] : R[B[i + 480*t]] + R[C[i + 480*t]];
R[i + 1334*t] = Op[i + 481*t] ? R[B[i + 481*t]] * R[C[i + 481*t]] : R[B[i + 481*t]] + R[C[i + 481*t]];
R[i + 1335*t] = Op[i + 482*t] ? R[B[i + 482*t]] * R[C[i + 482*t]] : R[B[i + 482*t]] + R[C[i + 482*t]];
R[i + 1336*t] = Op[i + 483*t] ? R[B[i + 483*t]] * R[C[i + 483*t]] : R[B[i + 483*t]] + R[C[i + 483*t]];
R[i + 1337*t] = Op[i + 484*t] ? R[B[i + 484*t]] * R[C[i + 484*t]] : R[B[i + 484*t]] + R[C[i + 484*t]];
R[i + 1338*t] = Op[i + 485*t] ? R[B[i + 485*t]] * R[C[i + 485*t]] : R[B[i + 485*t]] + R[C[i + 485*t]];
R[i + 1339*t] = Op[i + 486*t] ? R[B[i + 486*t]] * R[C[i + 486*t]] : R[B[i + 486*t]] + R[C[i + 486*t]];
R[i + 1340*t] = Op[i + 487*t] ? R[B[i + 487*t]] * R[C[i + 487*t]] : R[B[i + 487*t]] + R[C[i + 487*t]];
R[i + 1341*t] = Op[i + 488*t] ? R[B[i + 488*t]] * R[C[i + 488*t]] : R[B[i + 488*t]] + R[C[i + 488*t]];
R[i + 1342*t] = Op[i + 489*t] ? R[B[i + 489*t]] * R[C[i + 489*t]] : R[B[i + 489*t]] + R[C[i + 489*t]];
R[i + 1343*t] = Op[i + 490*t] ? R[B[i + 490*t]] * R[C[i + 490*t]] : R[B[i + 490*t]] + R[C[i + 490*t]];
R[i + 1344*t] = Op[i + 491*t] ? R[B[i + 491*t]] * R[C[i + 491*t]] : R[B[i + 491*t]] + R[C[i + 491*t]];
R[i + 1345*t] = Op[i + 492*t] ? R[B[i + 492*t]] * R[C[i + 492*t]] : R[B[i + 492*t]] + R[C[i + 492*t]];
R[i + 1346*t] = Op[i + 493*t] ? R[B[i + 493*t]] * R[C[i + 493*t]] : R[B[i + 493*t]] + R[C[i + 493*t]];
R[i + 1347*t] = Op[i + 494*t] ? R[B[i + 494*t]] * R[C[i + 494*t]] : R[B[i + 494*t]] + R[C[i + 494*t]];
R[i + 1348*t] = Op[i + 495*t] ? R[B[i + 495*t]] * R[C[i + 495*t]] : R[B[i + 495*t]] + R[C[i + 495*t]];
R[i + 1349*t] = Op[i + 496*t] ? R[B[i + 496*t]] * R[C[i + 496*t]] : R[B[i + 496*t]] + R[C[i + 496*t]];
R[i + 1350*t] = Op[i + 497*t] ? R[B[i + 497*t]] * R[C[i + 497*t]] : R[B[i + 497*t]] + R[C[i + 497*t]];
R[i + 1351*t] = Op[i + 498*t] ? R[B[i + 498*t]] * R[C[i + 498*t]] : R[B[i + 498*t]] + R[C[i + 498*t]];
R[i + 1352*t] = Op[i + 499*t] ? R[B[i + 499*t]] * R[C[i + 499*t]] : R[B[i + 499*t]] + R[C[i + 499*t]];
R[i + 1353*t] = Op[i + 500*t] ? R[B[i + 500*t]] * R[C[i + 500*t]] : R[B[i + 500*t]] + R[C[i + 500*t]];
R[i + 1354*t] = Op[i + 501*t] ? R[B[i + 501*t]] * R[C[i + 501*t]] : R[B[i + 501*t]] + R[C[i + 501*t]];
R[i + 1355*t] = Op[i + 502*t] ? R[B[i + 502*t]] * R[C[i + 502*t]] : R[B[i + 502*t]] + R[C[i + 502*t]];
R[i + 1356*t] = Op[i + 503*t] ? R[B[i + 503*t]] * R[C[i + 503*t]] : R[B[i + 503*t]] + R[C[i + 503*t]];
R[i + 1357*t] = Op[i + 504*t] ? R[B[i + 504*t]] * R[C[i + 504*t]] : R[B[i + 504*t]] + R[C[i + 504*t]];
R[i + 1358*t] = Op[i + 505*t] ? R[B[i + 505*t]] * R[C[i + 505*t]] : R[B[i + 505*t]] + R[C[i + 505*t]];
R[i + 1359*t] = Op[i + 506*t] ? R[B[i + 506*t]] * R[C[i + 506*t]] : R[B[i + 506*t]] + R[C[i + 506*t]];
R[i + 1360*t] = Op[i + 507*t] ? R[B[i + 507*t]] * R[C[i + 507*t]] : R[B[i + 507*t]] + R[C[i + 507*t]];
R[i + 1361*t] = Op[i + 508*t] ? R[B[i + 508*t]] * R[C[i + 508*t]] : R[B[i + 508*t]] + R[C[i + 508*t]];
R[i + 1362*t] = Op[i + 509*t] ? R[B[i + 509*t]] * R[C[i + 509*t]] : R[B[i + 509*t]] + R[C[i + 509*t]];
R[i + 1363*t] = Op[i + 510*t] ? R[B[i + 510*t]] * R[C[i + 510*t]] : R[B[i + 510*t]] + R[C[i + 510*t]];
R[i + 1364*t] = Op[i + 511*t] ? R[B[i + 511*t]] * R[C[i + 511*t]] : R[B[i + 511*t]] + R[C[i + 511*t]];
R[i + 1365*t] = Op[i + 512*t] ? R[B[i + 512*t]] * R[C[i + 512*t]] : R[B[i + 512*t]] + R[C[i + 512*t]];
R[i + 1366*t] = Op[i + 513*t] ? R[B[i + 513*t]] * R[C[i + 513*t]] : R[B[i + 513*t]] + R[C[i + 513*t]];
R[i + 1367*t] = Op[i + 514*t] ? R[B[i + 514*t]] * R[C[i + 514*t]] : R[B[i + 514*t]] + R[C[i + 514*t]];
R[i + 1368*t] = Op[i + 515*t] ? R[B[i + 515*t]] * R[C[i + 515*t]] : R[B[i + 515*t]] + R[C[i + 515*t]];
R[i + 1369*t] = Op[i + 516*t] ? R[B[i + 516*t]] * R[C[i + 516*t]] : R[B[i + 516*t]] + R[C[i + 516*t]];
R[i + 1370*t] = Op[i + 517*t] ? R[B[i + 517*t]] * R[C[i + 517*t]] : R[B[i + 517*t]] + R[C[i + 517*t]];
R[i + 1371*t] = Op[i + 518*t] ? R[B[i + 518*t]] * R[C[i + 518*t]] : R[B[i + 518*t]] + R[C[i + 518*t]];
R[i + 1372*t] = Op[i + 519*t] ? R[B[i + 519*t]] * R[C[i + 519*t]] : R[B[i + 519*t]] + R[C[i + 519*t]];
R[i + 1373*t] = Op[i + 520*t] ? R[B[i + 520*t]] * R[C[i + 520*t]] : R[B[i + 520*t]] + R[C[i + 520*t]];
R[i + 1374*t] = Op[i + 521*t] ? R[B[i + 521*t]] * R[C[i + 521*t]] : R[B[i + 521*t]] + R[C[i + 521*t]];
R[i + 1375*t] = Op[i + 522*t] ? R[B[i + 522*t]] * R[C[i + 522*t]] : R[B[i + 522*t]] + R[C[i + 522*t]];
R[i + 1376*t] = Op[i + 523*t] ? R[B[i + 523*t]] * R[C[i + 523*t]] : R[B[i + 523*t]] + R[C[i + 523*t]];
R[i + 1377*t] = Op[i + 524*t] ? R[B[i + 524*t]] * R[C[i + 524*t]] : R[B[i + 524*t]] + R[C[i + 524*t]];
R[i + 1378*t] = Op[i + 525*t] ? R[B[i + 525*t]] * R[C[i + 525*t]] : R[B[i + 525*t]] + R[C[i + 525*t]];
R[i + 1379*t] = Op[i + 526*t] ? R[B[i + 526*t]] * R[C[i + 526*t]] : R[B[i + 526*t]] + R[C[i + 526*t]];
R[i + 1380*t] = Op[i + 527*t] ? R[B[i + 527*t]] * R[C[i + 527*t]] : R[B[i + 527*t]] + R[C[i + 527*t]];
R[i + 1381*t] = Op[i + 528*t] ? R[B[i + 528*t]] * R[C[i + 528*t]] : R[B[i + 528*t]] + R[C[i + 528*t]];
R[i + 1382*t] = Op[i + 529*t] ? R[B[i + 529*t]] * R[C[i + 529*t]] : R[B[i + 529*t]] + R[C[i + 529*t]];
R[i + 1383*t] = Op[i + 530*t] ? R[B[i + 530*t]] * R[C[i + 530*t]] : R[B[i + 530*t]] + R[C[i + 530*t]];
__syncthreads(); // level barrier (see note above)
R[i + 1384*t] = Op[i + 531*t] ? R[B[i + 531*t]] * R[C[i + 531*t]] : R[B[i + 531*t]] + R[C[i + 531*t]];
R[i + 1385*t] = Op[i + 532*t] ? R[B[i + 532*t]] * R[C[i + 532*t]] : R[B[i + 532*t]] + R[C[i + 532*t]];
R[i + 1386*t] = Op[i + 533*t] ? R[B[i + 533*t]] * R[C[i + 533*t]] : R[B[i + 533*t]] + R[C[i + 533*t]];
R[i + 1387*t] = Op[i + 534*t] ? R[B[i + 534*t]] * R[C[i + 534*t]] : R[B[i + 534*t]] + R[C[i + 534*t]];
R[i + 1388*t] = Op[i + 535*t] ? R[B[i + 535*t]] * R[C[i + 535*t]] : R[B[i + 535*t]] + R[C[i + 535*t]];
R[i + 1389*t] = Op[i + 536*t] ? R[B[i + 536*t]] * R[C[i + 536*t]] : R[B[i + 536*t]] + R[C[i + 536*t]];
R[i + 1390*t] = Op[i + 537*t] ? R[B[i + 537*t]] * R[C[i + 537*t]] : R[B[i + 537*t]] + R[C[i + 537*t]];
R[i + 1391*t] = Op[i + 538*t] ? R[B[i + 538*t]] * R[C[i + 538*t]] : R[B[i + 538*t]] + R[C[i + 538*t]];
R[i + 1392*t] = Op[i + 539*t] ? R[B[i + 539*t]] * R[C[i + 539*t]] : R[B[i + 539*t]] + R[C[i + 539*t]];
R[i + 1393*t] = Op[i + 540*t] ? R[B[i + 540*t]] * R[C[i + 540*t]] : R[B[i + 540*t]] + R[C[i + 540*t]];
R[i + 1394*t] = Op[i + 541*t] ? R[B[i + 541*t]] * R[C[i + 541*t]] : R[B[i + 541*t]] + R[C[i + 541*t]];
R[i + 1395*t] = Op[i + 542*t] ? R[B[i + 542*t]] * R[C[i + 542*t]] : R[B[i + 542*t]] + R[C[i + 542*t]];
R[i + 1396*t] = Op[i + 543*t] ? R[B[i + 543*t]] * R[C[i + 543*t]] : R[B[i + 543*t]] + R[C[i + 543*t]];
R[i + 1397*t] = Op[i + 544*t] ? R[B[i + 544*t]] * R[C[i + 544*t]] : R[B[i + 544*t]] + R[C[i + 544*t]];
R[i + 1398*t] = Op[i + 545*t] ? R[B[i + 545*t]] * R[C[i + 545*t]] : R[B[i + 545*t]] + R[C[i + 545*t]];
R[i + 1399*t] = Op[i + 546*t] ? R[B[i + 546*t]] * R[C[i + 546*t]] : R[B[i + 546*t]] + R[C[i + 546*t]];
R[i + 1400*t] = Op[i + 547*t] ? R[B[i + 547*t]] * R[C[i + 547*t]] : R[B[i + 547*t]] + R[C[i + 547*t]];
R[i + 1401*t] = Op[i + 548*t] ? R[B[i + 548*t]] * R[C[i + 548*t]] : R[B[i + 548*t]] + R[C[i + 548*t]];
R[i + 1402*t] = Op[i + 549*t] ? R[B[i + 549*t]] * R[C[i + 549*t]] : R[B[i + 549*t]] + R[C[i + 549*t]];
R[i + 1403*t] = Op[i + 550*t] ? R[B[i + 550*t]] * R[C[i + 550*t]] : R[B[i + 550*t]] + R[C[i + 550*t]];
R[i + 1404*t] = Op[i + 551*t] ? R[B[i + 551*t]] * R[C[i + 551*t]] : R[B[i + 551*t]] + R[C[i + 551*t]];
R[i + 1405*t] = Op[i + 552*t] ? R[B[i + 552*t]] * R[C[i + 552*t]] : R[B[i + 552*t]] + R[C[i + 552*t]];
R[i + 1406*t] = Op[i + 553*t] ? R[B[i + 553*t]] * R[C[i + 553*t]] : R[B[i + 553*t]] + R[C[i + 553*t]];
R[i + 1407*t] = Op[i + 554*t] ? R[B[i + 554*t]] * R[C[i + 554*t]] : R[B[i + 554*t]] + R[C[i + 554*t]];
R[i + 1408*t] = Op[i + 555*t] ? R[B[i + 555*t]] * R[C[i + 555*t]] : R[B[i + 555*t]] + R[C[i + 555*t]];
R[i + 1409*t] = Op[i + 556*t] ? R[B[i + 556*t]] * R[C[i + 556*t]] : R[B[i + 556*t]] + R[C[i + 556*t]];
R[i + 1410*t] = Op[i + 557*t] ? R[B[i + 557*t]] * R[C[i + 557*t]] : R[B[i + 557*t]] + R[C[i + 557*t]];
R[i + 1411*t] = Op[i + 558*t] ? R[B[i + 558*t]] * R[C[i + 558*t]] : R[B[i + 558*t]] + R[C[i + 558*t]];
R[i + 1412*t] = Op[i + 559*t] ? R[B[i + 559*t]] * R[C[i + 559*t]] : R[B[i + 559*t]] + R[C[i + 559*t]];
R[i + 1413*t] = Op[i + 560*t] ? R[B[i + 560*t]] * R[C[i + 560*t]] : R[B[i + 560*t]] + R[C[i + 560*t]];
R[i + 1414*t] = Op[i + 561*t] ? R[B[i + 561*t]] * R[C[i + 561*t]] : R[B[i + 561*t]] + R[C[i + 561*t]];
R[i + 1415*t] = Op[i + 562*t] ? R[B[i + 562*t]] * R[C[i + 562*t]] : R[B[i + 562*t]] + R[C[i + 562*t]];
R[i + 1416*t] = Op[i + 563*t] ? R[B[i + 563*t]] * R[C[i + 563*t]] : R[B[i + 563*t]] + R[C[i + 563*t]];
R[i + 1417*t] = Op[i + 564*t] ? R[B[i + 564*t]] * R[C[i + 564*t]] : R[B[i + 564*t]] + R[C[i + 564*t]];
R[i + 1418*t] = Op[i + 565*t] ? R[B[i + 565*t]] * R[C[i + 565*t]] : R[B[i + 565*t]] + R[C[i + 565*t]];
R[i + 1419*t] = Op[i + 566*t] ? R[B[i + 566*t]] * R[C[i + 566*t]] : R[B[i + 566*t]] + R[C[i + 566*t]];
R[i + 1420*t] = Op[i + 567*t] ? R[B[i + 567*t]] * R[C[i + 567*t]] : R[B[i + 567*t]] + R[C[i + 567*t]];
R[i + 1421*t] = Op[i + 568*t] ? R[B[i + 568*t]] * R[C[i + 568*t]] : R[B[i + 568*t]] + R[C[i + 568*t]];
R[i + 1422*t] = Op[i + 569*t] ? R[B[i + 569*t]] * R[C[i + 569*t]] : R[B[i + 569*t]] + R[C[i + 569*t]];
R[i + 1423*t] = Op[i + 570*t] ? R[B[i + 570*t]] * R[C[i + 570*t]] : R[B[i + 570*t]] + R[C[i + 570*t]];
R[i + 1424*t] = Op[i + 571*t] ? R[B[i + 571*t]] * R[C[i + 571*t]] : R[B[i + 571*t]] + R[C[i + 571*t]];
R[i + 1425*t] = Op[i + 572*t] ? R[B[i + 572*t]] * R[C[i + 572*t]] : R[B[i + 572*t]] + R[C[i + 572*t]];
R[i + 1426*t] = Op[i + 573*t] ? R[B[i + 573*t]] * R[C[i + 573*t]] : R[B[i + 573*t]] + R[C[i + 573*t]];
R[i + 1427*t] = Op[i + 574*t] ? R[B[i + 574*t]] * R[C[i + 574*t]] : R[B[i + 574*t]] + R[C[i + 574*t]];
R[i + 1428*t] = Op[i + 575*t] ? R[B[i + 575*t]] * R[C[i + 575*t]] : R[B[i + 575*t]] + R[C[i + 575*t]];
R[i + 1429*t] = Op[i + 576*t] ? R[B[i + 576*t]] * R[C[i + 576*t]] : R[B[i + 576*t]] + R[C[i + 576*t]];
R[i + 1430*t] = Op[i + 577*t] ? R[B[i + 577*t]] * R[C[i + 577*t]] : R[B[i + 577*t]] + R[C[i + 577*t]];
R[i + 1431*t] = Op[i + 578*t] ? R[B[i + 578*t]] * R[C[i + 578*t]] : R[B[i + 578*t]] + R[C[i + 578*t]];
R[i + 1432*t] = Op[i + 579*t] ? R[B[i + 579*t]] * R[C[i + 579*t]] : R[B[i + 579*t]] + R[C[i + 579*t]];
R[i + 1433*t] = Op[i + 580*t] ? R[B[i + 580*t]] * R[C[i + 580*t]] : R[B[i + 580*t]] + R[C[i + 580*t]];
R[i + 1434*t] = Op[i + 581*t] ? R[B[i + 581*t]] * R[C[i + 581*t]] : R[B[i + 581*t]] + R[C[i + 581*t]];
R[i + 1435*t] = Op[i + 582*t] ? R[B[i + 582*t]] * R[C[i + 582*t]] : R[B[i + 582*t]] + R[C[i + 582*t]];
R[i + 1436*t] = Op[i + 583*t] ? R[B[i + 583*t]] * R[C[i + 583*t]] : R[B[i + 583*t]] + R[C[i + 583*t]];
R[i + 1437*t] = Op[i + 584*t] ? R[B[i + 584*t]] * R[C[i + 584*t]] : R[B[i + 584*t]] + R[C[i + 584*t]];
R[i + 1438*t] = Op[i + 585*t] ? R[B[i + 585*t]] * R[C[i + 585*t]] : R[B[i + 585*t]] + R[C[i + 585*t]];
R[i + 1439*t] = Op[i + 586*t] ? R[B[i + 586*t]] * R[C[i + 586*t]] : R[B[i + 586*t]] + R[C[i + 586*t]];
R[i + 1440*t] = Op[i + 587*t] ? R[B[i + 587*t]] * R[C[i + 587*t]] : R[B[i + 587*t]] + R[C[i + 587*t]];
R[i + 1441*t] = Op[i + 588*t] ? R[B[i + 588*t]] * R[C[i + 588*t]] : R[B[i + 588*t]] + R[C[i + 588*t]];
R[i + 1442*t] = Op[i + 589*t] ? R[B[i + 589*t]] * R[C[i + 589*t]] : R[B[i + 589*t]] + R[C[i + 589*t]];
R[i + 1443*t] = Op[i + 590*t] ? R[B[i + 590*t]] * R[C[i + 590*t]] : R[B[i + 590*t]] + R[C[i + 590*t]];
R[i + 1444*t] = Op[i + 591*t] ? R[B[i + 591*t]] * R[C[i + 591*t]] : R[B[i + 591*t]] + R[C[i + 591*t]];
R[i + 1445*t] = Op[i + 592*t] ? R[B[i + 592*t]] * R[C[i + 592*t]] : R[B[i + 592*t]] + R[C[i + 592*t]];
R[i + 1446*t] = Op[i + 593*t] ? R[B[i + 593*t]] * R[C[i + 593*t]] : R[B[i + 593*t]] + R[C[i + 593*t]];
R[i + 1447*t] = Op[i + 594*t] ? R[B[i + 594*t]] * R[C[i + 594*t]] : R[B[i + 594*t]] + R[C[i + 594*t]];
R[i + 1448*t] = Op[i + 595*t] ? R[B[i + 595*t]] * R[C[i + 595*t]] : R[B[i + 595*t]] + R[C[i + 595*t]];
R[i + 1449*t] = Op[i + 596*t] ? R[B[i + 596*t]] * R[C[i + 596*t]] : R[B[i + 596*t]] + R[C[i + 596*t]];
R[i + 1450*t] = Op[i + 597*t] ? R[B[i + 597*t]] * R[C[i + 597*t]] : R[B[i + 597*t]] + R[C[i + 597*t]];
R[i + 1451*t] = Op[i + 598*t] ? R[B[i + 598*t]] * R[C[i + 598*t]] : R[B[i + 598*t]] + R[C[i + 598*t]];
R[i + 1452*t] = Op[i + 599*t] ? R[B[i + 599*t]] * R[C[i + 599*t]] : R[B[i + 599*t]] + R[C[i + 599*t]];
R[i + 1453*t] = Op[i + 600*t] ? R[B[i + 600*t]] * R[C[i + 600*t]] : R[B[i + 600*t]] + R[C[i + 600*t]];
R[i + 1454*t] = Op[i + 601*t] ? R[B[i + 601*t]] * R[C[i + 601*t]] : R[B[i + 601*t]] + R[C[i + 601*t]];
R[i + 1455*t] = Op[i + 602*t] ? R[B[i + 602*t]] * R[C[i + 602*t]] : R[B[i + 602*t]] + R[C[i + 602*t]];
R[i + 1456*t] = Op[i + 603*t] ? R[B[i + 603*t]] * R[C[i + 603*t]] : R[B[i + 603*t]] + R[C[i + 603*t]];
R[i + 1457*t] = Op[i + 604*t] ? R[B[i + 604*t]] * R[C[i + 604*t]] : R[B[i + 604*t]] + R[C[i + 604*t]];
R[i + 1458*t] = Op[i + 605*t] ? R[B[i + 605*t]] * R[C[i + 605*t]] : R[B[i + 605*t]] + R[C[i + 605*t]];
R[i + 1459*t] = Op[i + 606*t] ? R[B[i + 606*t]] * R[C[i + 606*t]] : R[B[i + 606*t]] + R[C[i + 606*t]];
R[i + 1460*t] = Op[i + 607*t] ? R[B[i + 607*t]] * R[C[i + 607*t]] : R[B[i + 607*t]] + R[C[i + 607*t]];
R[i + 1461*t] = Op[i + 608*t] ? R[B[i + 608*t]] * R[C[i + 608*t]] : R[B[i + 608*t]] + R[C[i + 608*t]];
R[i + 1462*t] = Op[i + 609*t] ? R[B[i + 609*t]] * R[C[i + 609*t]] : R[B[i + 609*t]] + R[C[i + 609*t]];
R[i + 1463*t] = Op[i + 610*t] ? R[B[i + 610*t]] * R[C[i + 610*t]] : R[B[i + 610*t]] + R[C[i + 610*t]];
R[i + 1464*t] = Op[i + 611*t] ? R[B[i + 611*t]] * R[C[i + 611*t]] : R[B[i + 611*t]] + R[C[i + 611*t]];
R[i + 1465*t] = Op[i + 612*t] ? R[B[i + 612*t]] * R[C[i + 612*t]] : R[B[i + 612*t]] + R[C[i + 612*t]];
R[i + 1466*t] = Op[i + 613*t] ? R[B[i + 613*t]] * R[C[i + 613*t]] : R[B[i + 613*t]] + R[C[i + 613*t]];
R[i + 1467*t] = Op[i + 614*t] ? R[B[i + 614*t]] * R[C[i + 614*t]] : R[B[i + 614*t]] + R[C[i + 614*t]];
R[i + 1468*t] = Op[i + 615*t] ? R[B[i + 615*t]] * R[C[i + 615*t]] : R[B[i + 615*t]] + R[C[i + 615*t]];
R[i + 1469*t] = Op[i + 616*t] ? R[B[i + 616*t]] * R[C[i + 616*t]] : R[B[i + 616*t]] + R[C[i + 616*t]];
R[i + 1470*t] = Op[i + 617*t] ? R[B[i + 617*t]] * R[C[i + 617*t]] : R[B[i + 617*t]] + R[C[i + 617*t]];
R[i + 1471*t] = Op[i + 618*t] ? R[B[i + 618*t]] * R[C[i + 618*t]] : R[B[i + 618*t]] + R[C[i + 618*t]];
R[i + 1472*t] = Op[i + 619*t] ? R[B[i + 619*t]] * R[C[i + 619*t]] : R[B[i + 619*t]] + R[C[i + 619*t]];
R[i + 1473*t] = Op[i + 620*t] ? R[B[i + 620*t]] * R[C[i + 620*t]] : R[B[i + 620*t]] + R[C[i + 620*t]];
R[i + 1474*t] = Op[i + 621*t] ? R[B[i + 621*t]] * R[C[i + 621*t]] : R[B[i + 621*t]] + R[C[i + 621*t]];
R[i + 1475*t] = Op[i + 622*t] ? R[B[i + 622*t]] * R[C[i + 622*t]] : R[B[i + 622*t]] + R[C[i + 622*t]];
R[i + 1476*t] = Op[i + 623*t] ? R[B[i + 623*t]] * R[C[i + 623*t]] : R[B[i + 623*t]] + R[C[i + 623*t]];
R[i + 1477*t] = Op[i + 624*t] ? R[B[i + 624*t]] * R[C[i + 624*t]] : R[B[i + 624*t]] + R[C[i + 624*t]];
R[i + 1478*t] = Op[i + 625*t] ? R[B[i + 625*t]] * R[C[i + 625*t]] : R[B[i + 625*t]] + R[C[i + 625*t]];
R[i + 1479*t] = Op[i + 626*t] ? R[B[i + 626*t]] * R[C[i + 626*t]] : R[B[i + 626*t]] + R[C[i + 626*t]];
R[i + 1480*t] = Op[i + 627*t] ? R[B[i + 627*t]] * R[C[i + 627*t]] : R[B[i + 627*t]] + R[C[i + 627*t]];
R[i + 1481*t] = Op[i + 628*t] ? R[B[i + 628*t]] * R[C[i + 628*t]] : R[B[i + 628*t]] + R[C[i + 628*t]];
R[i + 1482*t] = Op[i + 629*t] ? R[B[i + 629*t]] * R[C[i + 629*t]] : R[B[i + 629*t]] + R[C[i + 629*t]];
R[i + 1483*t] = Op[i + 630*t] ? R[B[i + 630*t]] * R[C[i + 630*t]] : R[B[i + 630*t]] + R[C[i + 630*t]];
R[i + 1484*t] = Op[i + 631*t] ? R[B[i + 631*t]] * R[C[i + 631*t]] : R[B[i + 631*t]] + R[C[i + 631*t]];
R[i + 1485*t] = Op[i + 632*t] ? R[B[i + 632*t]] * R[C[i + 632*t]] : R[B[i + 632*t]] + R[C[i + 632*t]];
R[i + 1486*t] = Op[i + 633*t] ? R[B[i + 633*t]] * R[C[i + 633*t]] : R[B[i + 633*t]] + R[C[i + 633*t]];
R[i + 1487*t] = Op[i + 634*t] ? R[B[i + 634*t]] * R[C[i + 634*t]] : R[B[i + 634*t]] + R[C[i + 634*t]];
R[i + 1488*t] = Op[i + 635*t] ? R[B[i + 635*t]] * R[C[i + 635*t]] : R[B[i + 635*t]] + R[C[i + 635*t]];
R[i + 1489*t] = Op[i + 636*t] ? R[B[i + 636*t]] * R[C[i + 636*t]] : R[B[i + 636*t]] + R[C[i + 636*t]];
R[i + 1490*t] = Op[i + 637*t] ? R[B[i + 637*t]] * R[C[i + 637*t]] : R[B[i + 637*t]] + R[C[i + 637*t]];
R[i + 1491*t] = Op[i + 638*t] ? R[B[i + 638*t]] * R[C[i + 638*t]] : R[B[i + 638*t]] + R[C[i + 638*t]];
R[i + 1492*t] = Op[i + 639*t] ? R[B[i + 639*t]] * R[C[i + 639*t]] : R[B[i + 639*t]] + R[C[i + 639*t]];
R[i + 1493*t] = Op[i + 640*t] ? R[B[i + 640*t]] * R[C[i + 640*t]] : R[B[i + 640*t]] + R[C[i + 640*t]];
R[i + 1494*t] = Op[i + 641*t] ? R[B[i + 641*t]] * R[C[i + 641*t]] : R[B[i + 641*t]] + R[C[i + 641*t]];
R[i + 1495*t] = Op[i + 642*t] ? R[B[i + 642*t]] * R[C[i + 642*t]] : R[B[i + 642*t]] + R[C[i + 642*t]];
R[i + 1496*t] = Op[i + 643*t] ? R[B[i + 643*t]] * R[C[i + 643*t]] : R[B[i + 643*t]] + R[C[i + 643*t]];
R[i + 1497*t] = Op[i + 644*t] ? R[B[i + 644*t]] * R[C[i + 644*t]] : R[B[i + 644*t]] + R[C[i + 644*t]];
R[i + 1498*t] = Op[i + 645*t] ? R[B[i + 645*t]] * R[C[i + 645*t]] : R[B[i + 645*t]] + R[C[i + 645*t]];
R[i + 1499*t] = Op[i + 646*t] ? R[B[i + 646*t]] * R[C[i + 646*t]] : R[B[i + 646*t]] + R[C[i + 646*t]];
R[i + 1500*t] = Op[i + 647*t] ? R[B[i + 647*t]] * R[C[i + 647*t]] : R[B[i + 647*t]] + R[C[i + 647*t]];
R[i + 1501*t] = Op[i + 648*t] ? R[B[i + 648*t]] * R[C[i + 648*t]] : R[B[i + 648*t]] + R[C[i + 648*t]];
R[i + 1502*t] = Op[i + 649*t] ? R[B[i + 649*t]] * R[C[i + 649*t]] : R[B[i + 649*t]] + R[C[i + 649*t]];
R[i + 1503*t] = Op[i + 650*t] ? R[B[i + 650*t]] * R[C[i + 650*t]] : R[B[i + 650*t]] + R[C[i + 650*t]];
R[i + 1504*t] = Op[i + 651*t] ? R[B[i + 651*t]] * R[C[i + 651*t]] : R[B[i + 651*t]] + R[C[i + 651*t]];
R[i + 1505*t] = Op[i + 652*t] ? R[B[i + 652*t]] * R[C[i + 652*t]] : R[B[i + 652*t]] + R[C[i + 652*t]];
R[i + 1506*t] = Op[i + 653*t] ? R[B[i + 653*t]] * R[C[i + 653*t]] : R[B[i + 653*t]] + R[C[i + 653*t]];
R[i + 1507*t] = Op[i + 654*t] ? R[B[i + 654*t]] * R[C[i + 654*t]] : R[B[i + 654*t]] + R[C[i + 654*t]];
R[i + 1508*t] = Op[i + 655*t] ? R[B[i + 655*t]] * R[C[i + 655*t]] : R[B[i + 655*t]] + R[C[i + 655*t]];
R[i + 1509*t] = Op[i + 656*t] ? R[B[i + 656*t]] * R[C[i + 656*t]] : R[B[i + 656*t]] + R[C[i + 656*t]];
R[i + 1510*t] = Op[i + 657*t] ? R[B[i + 657*t]] * R[C[i + 657*t]] : R[B[i + 657*t]] + R[C[i + 657*t]];
R[i + 1511*t] = Op[i + 658*t] ? R[B[i + 658*t]] * R[C[i + 658*t]] : R[B[i + 658*t]] + R[C[i + 658*t]];
R[i + 1512*t] = Op[i + 659*t] ? R[B[i + 659*t]] * R[C[i + 659*t]] : R[B[i + 659*t]] + R[C[i + 659*t]];
R[i + 1513*t] = Op[i + 660*t] ? R[B[i + 660*t]] * R[C[i + 660*t]] : R[B[i + 660*t]] + R[C[i + 660*t]];
R[i + 1514*t] = Op[i + 661*t] ? R[B[i + 661*t]] * R[C[i + 661*t]] : R[B[i + 661*t]] + R[C[i + 661*t]];
R[i + 1515*t] = Op[i + 662*t] ? R[B[i + 662*t]] * R[C[i + 662*t]] : R[B[i + 662*t]] + R[C[i + 662*t]];
R[i + 1516*t] = Op[i + 663*t] ? R[B[i + 663*t]] * R[C[i + 663*t]] : R[B[i + 663*t]] + R[C[i + 663*t]];
R[i + 1517*t] = Op[i + 664*t] ? R[B[i + 664*t]] * R[C[i + 664*t]] : R[B[i + 664*t]] + R[C[i + 664*t]];
R[i + 1518*t] = Op[i + 665*t] ? R[B[i + 665*t]] * R[C[i + 665*t]] : R[B[i + 665*t]] + R[C[i + 665*t]];
R[i + 1519*t] = Op[i + 666*t] ? R[B[i + 666*t]] * R[C[i + 666*t]] : R[B[i + 666*t]] + R[C[i + 666*t]];
R[i + 1520*t] = Op[i + 667*t] ? R[B[i + 667*t]] * R[C[i + 667*t]] : R[B[i + 667*t]] + R[C[i + 667*t]];
R[i + 1521*t] = Op[i + 668*t] ? R[B[i + 668*t]] * R[C[i + 668*t]] : R[B[i + 668*t]] + R[C[i + 668*t]];
R[i + 1522*t] = Op[i + 669*t] ? R[B[i + 669*t]] * R[C[i + 669*t]] : R[B[i + 669*t]] + R[C[i + 669*t]];
__syncthreads(); // level barrier
R[i + 1523*t] = Op[i + 670*t] ? R[B[i + 670*t]] * R[C[i + 670*t]] : R[B[i + 670*t]] + R[C[i + 670*t]];
R[i + 1524*t] = Op[i + 671*t] ? R[B[i + 671*t]] * R[C[i + 671*t]] : R[B[i + 671*t]] + R[C[i + 671*t]];
R[i + 1525*t] = Op[i + 672*t] ? R[B[i + 672*t]] * R[C[i + 672*t]] : R[B[i + 672*t]] + R[C[i + 672*t]];
R[i + 1526*t] = Op[i + 673*t] ? R[B[i + 673*t]] * R[C[i + 673*t]] : R[B[i + 673*t]] + R[C[i + 673*t]];
R[i + 1527*t] = Op[i + 674*t] ? R[B[i + 674*t]] * R[C[i + 674*t]] : R[B[i + 674*t]] + R[C[i + 674*t]];
R[i + 1528*t] = Op[i + 675*t] ? R[B[i + 675*t]] * R[C[i + 675*t]] : R[B[i + 675*t]] + R[C[i + 675*t]];
R[i + 1529*t] = Op[i + 676*t] ? R[B[i + 676*t]] * R[C[i + 676*t]] : R[B[i + 676*t]] + R[C[i + 676*t]];
R[i + 1530*t] = Op[i + 677*t] ? R[B[i + 677*t]] * R[C[i + 677*t]] : R[B[i + 677*t]] + R[C[i + 677*t]];
R[i + 1531*t] = Op[i + 678*t] ? R[B[i + 678*t]] * R[C[i + 678*t]] : R[B[i + 678*t]] + R[C[i + 678*t]];
R[i + 1532*t] = Op[i + 679*t] ? R[B[i + 679*t]] * R[C[i + 679*t]] : R[B[i + 679*t]] + R[C[i + 679*t]];
R[i + 1533*t] = Op[i + 680*t] ? R[B[i + 680*t]] * R[C[i + 680*t]] : R[B[i + 680*t]] + R[C[i + 680*t]];
R[i + 1534*t] = Op[i + 681*t] ? R[B[i + 681*t]] * R[C[i + 681*t]] : R[B[i + 681*t]] + R[C[i + 681*t]];
R[i + 1535*t] = Op[i + 682*t] ? R[B[i + 682*t]] * R[C[i + 682*t]] : R[B[i + 682*t]] + R[C[i + 682*t]];
R[i + 1536*t] = Op[i + 683*t] ? R[B[i + 683*t]] * R[C[i + 683*t]] : R[B[i + 683*t]] + R[C[i + 683*t]];
R[i + 1537*t] = Op[i + 684*t] ? R[B[i + 684*t]] * R[C[i + 684*t]] : R[B[i + 684*t]] + R[C[i + 684*t]];
R[i + 1538*t] = Op[i + 685*t] ? R[B[i + 685*t]] * R[C[i + 685*t]] : R[B[i + 685*t]] + R[C[i + 685*t]];
R[i + 1539*t] = Op[i + 686*t] ? R[B[i + 686*t]] * R[C[i + 686*t]] : R[B[i + 686*t]] + R[C[i + 686*t]];
R[i + 1540*t] = Op[i + 687*t] ? R[B[i + 687*t]] * R[C[i + 687*t]] : R[B[i + 687*t]] + R[C[i + 687*t]];
R[i + 1541*t] = Op[i + 688*t] ? R[B[i + 688*t]] * R[C[i + 688*t]] : R[B[i + 688*t]] + R[C[i + 688*t]];
R[i + 1542*t] = Op[i + 689*t] ? R[B[i + 689*t]] * R[C[i + 689*t]] : R[B[i + 689*t]] + R[C[i + 689*t]];
R[i + 1543*t] = Op[i + 690*t] ? R[B[i + 690*t]] * R[C[i + 690*t]] : R[B[i + 690*t]] + R[C[i + 690*t]];
R[i + 1544*t] = Op[i + 691*t] ? R[B[i + 691*t]] * R[C[i + 691*t]] : R[B[i + 691*t]] + R[C[i + 691*t]];
R[i + 1545*t] = Op[i + 692*t] ? R[B[i + 692*t]] * R[C[i + 692*t]] : R[B[i + 692*t]] + R[C[i + 692*t]];
R[i + 1546*t] = Op[i + 693*t] ? R[B[i + 693*t]] * R[C[i + 693*t]] : R[B[i + 693*t]] + R[C[i + 693*t]];
R[i + 1547*t] = Op[i + 694*t] ? R[B[i + 694*t]] * R[C[i + 694*t]] : R[B[i + 694*t]] + R[C[i + 694*t]];
R[i + 1548*t] = Op[i + 695*t] ? R[B[i + 695*t]] * R[C[i + 695*t]] : R[B[i + 695*t]] + R[C[i + 695*t]];
R[i + 1549*t] = Op[i + 696*t] ? R[B[i + 696*t]] * R[C[i + 696*t]] : R[B[i + 696*t]] + R[C[i + 696*t]];
R[i + 1550*t] = Op[i + 697*t] ? R[B[i + 697*t]] * R[C[i + 697*t]] : R[B[i + 697*t]] + R[C[i + 697*t]];
R[i + 1551*t] = Op[i + 698*t] ? R[B[i + 698*t]] * R[C[i + 698*t]] : R[B[i + 698*t]] + R[C[i + 698*t]];
R[i + 1552*t] = Op[i + 699*t] ? R[B[i + 699*t]] * R[C[i + 699*t]] : R[B[i + 699*t]] + R[C[i + 699*t]];
R[i + 1553*t] = Op[i + 700*t] ? R[B[i + 700*t]] * R[C[i + 700*t]] : R[B[i + 700*t]] + R[C[i + 700*t]];
R[i + 1554*t] = Op[i + 701*t] ? R[B[i + 701*t]] * R[C[i + 701*t]] : R[B[i + 701*t]] + R[C[i + 701*t]];
R[i + 1555*t] = Op[i + 702*t] ? R[B[i + 702*t]] * R[C[i + 702*t]] : R[B[i + 702*t]] + R[C[i + 702*t]];
R[i + 1556*t] = Op[i + 703*t] ? R[B[i + 703*t]] * R[C[i + 703*t]] : R[B[i + 703*t]] + R[C[i + 703*t]];
R[i + 1557*t] = Op[i + 704*t] ? R[B[i + 704*t]] * R[C[i + 704*t]] : R[B[i + 704*t]] + R[C[i + 704*t]];
R[i + 1558*t] = Op[i + 705*t] ? R[B[i + 705*t]] * R[C[i + 705*t]] : R[B[i + 705*t]] + R[C[i + 705*t]];
R[i + 1559*t] = Op[i + 706*t] ? R[B[i + 706*t]] * R[C[i + 706*t]] : R[B[i + 706*t]] + R[C[i + 706*t]];
R[i + 1560*t] = Op[i + 707*t] ? R[B[i + 707*t]] * R[C[i + 707*t]] : R[B[i + 707*t]] + R[C[i + 707*t]];
R[i + 1561*t] = Op[i + 708*t] ? R[B[i + 708*t]] * R[C[i + 708*t]] : R[B[i + 708*t]] + R[C[i + 708*t]];
R[i + 1562*t] = Op[i + 709*t] ? R[B[i + 709*t]] * R[C[i + 709*t]] : R[B[i + 709*t]] + R[C[i + 709*t]];
R[i + 1563*t] = Op[i + 710*t] ? R[B[i + 710*t]] * R[C[i + 710*t]] : R[B[i + 710*t]] + R[C[i + 710*t]];
R[i + 1564*t] = Op[i + 711*t] ? R[B[i + 711*t]] * R[C[i + 711*t]] : R[B[i + 711*t]] + R[C[i + 711*t]];
R[i + 1565*t] = Op[i + 712*t] ? R[B[i + 712*t]] * R[C[i + 712*t]] : R[B[i + 712*t]] + R[C[i + 712*t]];
R[i + 1566*t] = Op[i + 713*t] ? R[B[i + 713*t]] * R[C[i + 713*t]] : R[B[i + 713*t]] + R[C[i + 713*t]];
R[i + 1567*t] = Op[i + 714*t] ? R[B[i + 714*t]] * R[C[i + 714*t]] : R[B[i + 714*t]] + R[C[i + 714*t]];
R[i + 1568*t] = Op[i + 715*t] ? R[B[i + 715*t]] * R[C[i + 715*t]] : R[B[i + 715*t]] + R[C[i + 715*t]];
R[i + 1569*t] = Op[i + 716*t] ? R[B[i + 716*t]] * R[C[i + 716*t]] : R[B[i + 716*t]] + R[C[i + 716*t]];
R[i + 1570*t] = Op[i + 717*t] ? R[B[i + 717*t]] * R[C[i + 717*t]] : R[B[i + 717*t]] + R[C[i + 717*t]];
R[i + 1571*t] = Op[i + 718*t] ? R[B[i + 718*t]] * R[C[i + 718*t]] : R[B[i + 718*t]] + R[C[i + 718*t]];
R[i + 1572*t] = Op[i + 719*t] ? R[B[i + 719*t]] * R[C[i + 719*t]] : R[B[i + 719*t]] + R[C[i + 719*t]];
R[i + 1573*t] = Op[i + 720*t] ? R[B[i + 720*t]] * R[C[i + 720*t]] : R[B[i + 720*t]] + R[C[i + 720*t]];
R[i + 1574*t] = Op[i + 721*t] ? R[B[i + 721*t]] * R[C[i + 721*t]] : R[B[i + 721*t]] + R[C[i + 721*t]];
R[i + 1575*t] = Op[i + 722*t] ? R[B[i + 722*t]] * R[C[i + 722*t]] : R[B[i + 722*t]] + R[C[i + 722*t]];
R[i + 1576*t] = Op[i + 723*t] ? R[B[i + 723*t]] * R[C[i + 723*t]] : R[B[i + 723*t]] + R[C[i + 723*t]];
R[i + 1577*t] = Op[i + 724*t] ? R[B[i + 724*t]] * R[C[i + 724*t]] : R[B[i + 724*t]] + R[C[i + 724*t]];
R[i + 1578*t] = Op[i + 725*t] ? R[B[i + 725*t]] * R[C[i + 725*t]] : R[B[i + 725*t]] + R[C[i + 725*t]];
R[i + 1579*t] = Op[i + 726*t] ? R[B[i + 726*t]] * R[C[i + 726*t]] : R[B[i + 726*t]] + R[C[i + 726*t]];
R[i + 1580*t] = Op[i + 727*t] ? R[B[i + 727*t]] * R[C[i + 727*t]] : R[B[i + 727*t]] + R[C[i + 727*t]];
R[i + 1581*t] = Op[i + 728*t] ? R[B[i + 728*t]] * R[C[i + 728*t]] : R[B[i + 728*t]] + R[C[i + 728*t]];
R[i + 1582*t] = Op[i + 729*t] ? R[B[i + 729*t]] * R[C[i + 729*t]] : R[B[i + 729*t]] + R[C[i + 729*t]];
R[i + 1583*t] = Op[i + 730*t] ? R[B[i + 730*t]] * R[C[i + 730*t]] : R[B[i + 730*t]] + R[C[i + 730*t]];
R[i + 1584*t] = Op[i + 731*t] ? R[B[i + 731*t]] * R[C[i + 731*t]] : R[B[i + 731*t]] + R[C[i + 731*t]];
R[i + 1585*t] = Op[i + 732*t] ? R[B[i + 732*t]] * R[C[i + 732*t]] : R[B[i + 732*t]] + R[C[i + 732*t]];
R[i + 1586*t] = Op[i + 733*t] ? R[B[i + 733*t]] * R[C[i + 733*t]] : R[B[i + 733*t]] + R[C[i + 733*t]];
R[i + 1587*t] = Op[i + 734*t] ? R[B[i + 734*t]] * R[C[i + 734*t]] : R[B[i + 734*t]] + R[C[i + 734*t]];
R[i + 1588*t] = Op[i + 735*t] ? R[B[i + 735*t]] * R[C[i + 735*t]] : R[B[i + 735*t]] + R[C[i + 735*t]];
R[i + 1589*t] = Op[i + 736*t] ? R[B[i + 736*t]] * R[C[i + 736*t]] : R[B[i + 736*t]] + R[C[i + 736*t]];
R[i + 1590*t] = Op[i + 737*t] ? R[B[i + 737*t]] * R[C[i + 737*t]] : R[B[i + 737*t]] + R[C[i + 737*t]];
R[i + 1591*t] = Op[i + 738*t] ? R[B[i + 738*t]] * R[C[i + 738*t]] : R[B[i + 738*t]] + R[C[i + 738*t]];
R[i + 1592*t] = Op[i + 739*t] ? R[B[i + 739*t]] * R[C[i + 739*t]] : R[B[i + 739*t]] + R[C[i + 739*t]];
R[i + 1593*t] = Op[i + 740*t] ? R[B[i + 740*t]] * R[C[i + 740*t]] : R[B[i + 740*t]] + R[C[i + 740*t]];
R[i + 1594*t] = Op[i + 741*t] ? R[B[i + 741*t]] * R[C[i + 741*t]] : R[B[i + 741*t]] + R[C[i + 741*t]];
R[i + 1595*t] = Op[i + 742*t] ? R[B[i + 742*t]] * R[C[i + 742*t]] : R[B[i + 742*t]] + R[C[i + 742*t]];
R[i + 1596*t] = Op[i + 743*t] ? R[B[i + 743*t]] * R[C[i + 743*t]] : R[B[i + 743*t]] + R[C[i + 743*t]];
R[i + 1597*t] = Op[i + 744*t] ? R[B[i + 744*t]] * R[C[i + 744*t]] : R[B[i + 744*t]] + R[C[i + 744*t]];
R[i + 1598*t] = Op[i + 745*t] ? R[B[i + 745*t]] * R[C[i + 745*t]] : R[B[i + 745*t]] + R[C[i + 745*t]];
R[i + 1599*t] = Op[i + 746*t] ? R[B[i + 746*t]] * R[C[i + 746*t]] : R[B[i + 746*t]] + R[C[i + 746*t]];
R[i + 1600*t] = Op[i + 747*t] ? R[B[i + 747*t]] * R[C[i + 747*t]] : R[B[i + 747*t]] + R[C[i + 747*t]];
R[i + 1601*t] = Op[i + 748*t] ? R[B[i + 748*t]] * R[C[i + 748*t]] : R[B[i + 748*t]] + R[C[i + 748*t]];
R[i + 1602*t] = Op[i + 749*t] ? R[B[i + 749*t]] * R[C[i + 749*t]] : R[B[i + 749*t]] + R[C[i + 749*t]];
__syncthreads(); // level barrier
R[i + 1603*t] = Op[i + 750*t] ? R[B[i + 750*t]] * R[C[i + 750*t]] : R[B[i + 750*t]] + R[C[i + 750*t]];
R[i + 1604*t] = Op[i + 751*t] ? R[B[i + 751*t]] * R[C[i + 751*t]] : R[B[i + 751*t]] + R[C[i + 751*t]];
R[i + 1605*t] = Op[i + 752*t] ? R[B[i + 752*t]] * R[C[i + 752*t]] : R[B[i + 752*t]] + R[C[i + 752*t]];
R[i + 1606*t] = Op[i + 753*t] ? R[B[i + 753*t]] * R[C[i + 753*t]] : R[B[i + 753*t]] + R[C[i + 753*t]];
R[i + 1607*t] = Op[i + 754*t] ? R[B[i + 754*t]] * R[C[i + 754*t]] : R[B[i + 754*t]] + R[C[i + 754*t]];
R[i + 1608*t] = Op[i + 755*t] ? R[B[i + 755*t]] * R[C[i + 755*t]] : R[B[i + 755*t]] + R[C[i + 755*t]];
R[i + 1609*t] = Op[i + 756*t] ? R[B[i + 756*t]] * R[C[i + 756*t]] : R[B[i + 756*t]] + R[C[i + 756*t]];
R[i + 1610*t] = Op[i + 757*t] ? R[B[i + 757*t]] * R[C[i + 757*t]] : R[B[i + 757*t]] + R[C[i + 757*t]];
R[i + 1611*t] = Op[i + 758*t] ? R[B[i + 758*t]] * R[C[i + 758*t]] : R[B[i + 758*t]] + R[C[i + 758*t]];
R[i + 1612*t] = Op[i + 759*t] ? R[B[i + 759*t]] * R[C[i + 759*t]] : R[B[i + 759*t]] + R[C[i + 759*t]];
R[i + 1613*t] = Op[i + 760*t] ? R[B[i + 760*t]] * R[C[i + 760*t]] : R[B[i + 760*t]] + R[C[i + 760*t]];
R[i + 1614*t] = Op[i + 761*t] ? R[B[i + 761*t]] * R[C[i + 761*t]] : R[B[i + 761*t]] + R[C[i + 761*t]];
R[i + 1615*t] = Op[i + 762*t] ? R[B[i + 762*t]] * R[C[i + 762*t]] : R[B[i + 762*t]] + R[C[i + 762*t]];
R[i + 1616*t] = Op[i + 763*t] ? R[B[i + 763*t]] * R[C[i + 763*t]] : R[B[i + 763*t]] + R[C[i + 763*t]];
R[i + 1617*t] = Op[i + 764*t] ? R[B[i + 764*t]] * R[C[i + 764*t]] : R[B[i + 764*t]] + R[C[i + 764*t]];
R[i + 1618*t] = Op[i + 765*t] ? R[B[i + 765*t]] * R[C[i + 765*t]] : R[B[i + 765*t]] + R[C[i + 765*t]];
R[i + 1619*t] = Op[i + 766*t] ? R[B[i + 766*t]] * R[C[i + 766*t]] : R[B[i + 766*t]] + R[C[i + 766*t]];
R[i + 1620*t] = Op[i + 767*t] ? R[B[i + 767*t]] * R[C[i + 767*t]] : R[B[i + 767*t]] + R[C[i + 767*t]];
R[i + 1621*t] = Op[i + 768*t] ? R[B[i + 768*t]] * R[C[i + 768*t]] : R[B[i + 768*t]] + R[C[i + 768*t]];
R[i + 1622*t] = Op[i + 769*t] ? R[B[i + 769*t]] * R[C[i + 769*t]] : R[B[i + 769*t]] + R[C[i + 769*t]];
R[i + 1623*t] = Op[i + 770*t] ? R[B[i + 770*t]] * R[C[i + 770*t]] : R[B[i + 770*t]] + R[C[i + 770*t]];
R[i + 1624*t] = Op[i + 771*t] ? R[B[i + 771*t]] * R[C[i + 771*t]] : R[B[i + 771*t]] + R[C[i + 771*t]];
R[i + 1625*t] = Op[i + 772*t] ? R[B[i + 772*t]] * R[C[i + 772*t]] : R[B[i + 772*t]] + R[C[i + 772*t]];
R[i + 1626*t] = Op[i + 773*t] ? R[B[i + 773*t]] * R[C[i + 773*t]] : R[B[i + 773*t]] + R[C[i + 773*t]];
R[i + 1627*t] = Op[i + 774*t] ? R[B[i + 774*t]] * R[C[i + 774*t]] : R[B[i + 774*t]] + R[C[i + 774*t]];
R[i + 1628*t] = Op[i + 775*t] ? R[B[i + 775*t]] * R[C[i + 775*t]] : R[B[i + 775*t]] + R[C[i + 775*t]];
R[i + 1629*t] = Op[i + 776*t] ? R[B[i + 776*t]] * R[C[i + 776*t]] : R[B[i + 776*t]] + R[C[i + 776*t]];
R[i + 1630*t] = Op[i + 777*t] ? R[B[i + 777*t]] * R[C[i + 777*t]] : R[B[i + 777*t]] + R[C[i + 777*t]];
R[i + 1631*t] = Op[i + 778*t] ? R[B[i + 778*t]] * R[C[i + 778*t]] : R[B[i + 778*t]] + R[C[i + 778*t]];
R[i + 1632*t] = Op[i + 779*t] ? R[B[i + 779*t]] * R[C[i + 779*t]] : R[B[i + 779*t]] + R[C[i + 779*t]];
R[i + 1633*t] = Op[i + 780*t] ? R[B[i + 780*t]] * R[C[i + 780*t]] : R[B[i + 780*t]] + R[C[i + 780*t]];
R[i + 1634*t] = Op[i + 781*t] ? R[B[i + 781*t]] * R[C[i + 781*t]] : R[B[i + 781*t]] + R[C[i + 781*t]];
R[i + 1635*t] = Op[i + 782*t] ? R[B[i + 782*t]] * R[C[i + 782*t]] : R[B[i + 782*t]] + R[C[i + 782*t]];
R[i + 1636*t] = Op[i + 783*t] ? R[B[i + 783*t]] * R[C[i + 783*t]] : R[B[i + 783*t]] + R[C[i + 783*t]];
R[i + 1637*t] = Op[i + 784*t] ? R[B[i + 784*t]] * R[C[i + 784*t]] : R[B[i + 784*t]] + R[C[i + 784*t]];
R[i + 1638*t] = Op[i + 785*t] ? R[B[i + 785*t]] * R[C[i + 785*t]] : R[B[i + 785*t]] + R[C[i + 785*t]];
R[i + 1639*t] = Op[i + 786*t] ? R[B[i + 786*t]] * R[C[i + 786*t]] : R[B[i + 786*t]] + R[C[i + 786*t]];
R[i + 1640*t] = Op[i + 787*t] ? R[B[i + 787*t]] * R[C[i + 787*t]] : R[B[i + 787*t]] + R[C[i + 787*t]];
R[i + 1641*t] = Op[i + 788*t] ? R[B[i + 788*t]] * R[C[i + 788*t]] : R[B[i + 788*t]] + R[C[i + 788*t]];
R[i + 1642*t] = Op[i + 789*t] ? R[B[i + 789*t]] * R[C[i + 789*t]] : R[B[i + 789*t]] + R[C[i + 789*t]];
R[i + 1643*t] = Op[i + 790*t] ? R[B[i + 790*t]] * R[C[i + 790*t]] : R[B[i + 790*t]] + R[C[i + 790*t]];
R[i + 1644*t] = Op[i + 791*t] ? R[B[i + 791*t]] * R[C[i + 791*t]] : R[B[i + 791*t]] + R[C[i + 791*t]];
R[i + 1645*t] = Op[i + 792*t] ? R[B[i + 792*t]] * R[C[i + 792*t]] : R[B[i + 792*t]] + R[C[i + 792*t]];
R[i + 1646*t] = Op[i + 793*t] ? R[B[i + 793*t]] * R[C[i + 793*t]] : R[B[i + 793*t]] + R[C[i + 793*t]];
R[i + 1647*t] = Op[i + 794*t] ? R[B[i + 794*t]] * R[C[i + 794*t]] : R[B[i + 794*t]] + R[C[i + 794*t]];
R[i + 1648*t] = Op[i + 795*t] ? R[B[i + 795*t]] * R[C[i + 795*t]] : R[B[i + 795*t]] + R[C[i + 795*t]];
R[i + 1649*t] = Op[i + 796*t] ? R[B[i + 796*t]] * R[C[i + 796*t]] : R[B[i + 796*t]] + R[C[i + 796*t]];
R[i + 1650*t] = Op[i + 797*t] ? R[B[i + 797*t]] * R[C[i + 797*t]] : R[B[i + 797*t]] + R[C[i + 797*t]];
R[i + 1651*t] = Op[i + 798*t] ? R[B[i + 798*t]] * R[C[i + 798*t]] : R[B[i + 798*t]] + R[C[i + 798*t]];
R[i + 1652*t] = Op[i + 799*t] ? R[B[i + 799*t]] * R[C[i + 799*t]] : R[B[i + 799*t]] + R[C[i + 799*t]];
R[i + 1653*t] = Op[i + 800*t] ? R[B[i + 800*t]] * R[C[i + 800*t]] : R[B[i + 800*t]] + R[C[i + 800*t]];
R[i + 1654*t] = Op[i + 801*t] ? R[B[i + 801*t]] * R[C[i + 801*t]] : R[B[i + 801*t]] + R[C[i + 801*t]];
R[i + 1655*t] = Op[i + 802*t] ? R[B[i + 802*t]] * R[C[i + 802*t]] : R[B[i + 802*t]] + R[C[i + 802*t]];
R[i + 1656*t] = Op[i + 803*t] ? R[B[i + 803*t]] * R[C[i + 803*t]] : R[B[i + 803*t]] + R[C[i + 803*t]];
R[i + 1657*t] = Op[i + 804*t] ? R[B[i + 804*t]] * R[C[i + 804*t]] : R[B[i + 804*t]] + R[C[i + 804*t]];
R[i + 1658*t] = Op[i + 805*t] ? R[B[i + 805*t]] * R[C[i + 805*t]] : R[B[i + 805*t]] + R[C[i + 805*t]];
R[i + 1659*t] = Op[i + 806*t] ? R[B[i + 806*t]] * R[C[i + 806*t]] : R[B[i + 806*t]] + R[C[i + 806*t]];
R[i + 1660*t] = Op[i + 807*t] ? R[B[i + 807*t]] * R[C[i + 807*t]] : R[B[i + 807*t]] + R[C[i + 807*t]];
R[i + 1661*t] = Op[i + 808*t] ? R[B[i + 808*t]] * R[C[i + 808*t]] : R[B[i + 808*t]] + R[C[i + 808*t]];
R[i + 1662*t] = Op[i + 809*t] ? R[B[i + 809*t]] * R[C[i + 809*t]] : R[B[i + 809*t]] + R[C[i + 809*t]];
R[i + 1663*t] = Op[i + 810*t] ? R[B[i + 810*t]] * R[C[i + 810*t]] : R[B[i + 810*t]] + R[C[i + 810*t]];
R[i + 1664*t] = Op[i + 811*t] ? R[B[i + 811*t]] * R[C[i + 811*t]] : R[B[i + 811*t]] + R[C[i + 811*t]];
R[i + 1665*t] = Op[i + 812*t] ? R[B[i + 812*t]] * R[C[i + 812*t]] : R[B[i + 812*t]] + R[C[i + 812*t]];
R[i + 1666*t] = Op[i + 813*t] ? R[B[i + 813*t]] * R[C[i + 813*t]] : R[B[i + 813*t]] + R[C[i + 813*t]];
R[i + 1667*t] = Op[i + 814*t] ? R[B[i + 814*t]] * R[C[i + 814*t]] : R[B[i + 814*t]] + R[C[i + 814*t]];
R[i + 1668*t] = Op[i + 815*t] ? R[B[i + 815*t]] * R[C[i + 815*t]] : R[B[i + 815*t]] + R[C[i + 815*t]];
R[i + 1669*t] = Op[i + 816*t] ? R[B[i + 816*t]] * R[C[i + 816*t]] : R[B[i + 816*t]] + R[C[i + 816*t]];
R[i + 1670*t] = Op[i + 817*t] ? R[B[i + 817*t]] * R[C[i + 817*t]] : R[B[i + 817*t]] + R[C[i + 817*t]];
R[i + 1671*t] = Op[i + 818*t] ? R[B[i + 818*t]] * R[C[i + 818*t]] : R[B[i + 818*t]] + R[C[i + 818*t]];
R[i + 1672*t] = Op[i + 819*t] ? R[B[i + 819*t]] * R[C[i + 819*t]] : R[B[i + 819*t]] + R[C[i + 819*t]];
R[i + 1673*t] = Op[i + 820*t] ? R[B[i + 820*t]] * R[C[i + 820*t]] : R[B[i + 820*t]] + R[C[i + 820*t]];
R[i + 1674*t] = Op[i + 821*t] ? R[B[i + 821*t]] * R[C[i + 821*t]] : R[B[i + 821*t]] + R[C[i + 821*t]];
R[i + 1675*t] = Op[i + 822*t] ? R[B[i + 822*t]] * R[C[i + 822*t]] : R[B[i + 822*t]] + R[C[i + 822*t]];
R[i + 1676*t] = Op[i + 823*t] ? R[B[i + 823*t]] * R[C[i + 823*t]] : R[B[i + 823*t]] + R[C[i + 823*t]];
R[i + 1677*t] = Op[i + 824*t] ? R[B[i + 824*t]] * R[C[i + 824*t]] : R[B[i + 824*t]] + R[C[i + 824*t]];
R[i + 1678*t] = Op[i + 825*t] ? R[B[i + 825*t]] * R[C[i + 825*t]] : R[B[i + 825*t]] + R[C[i + 825*t]];
R[i + 1679*t] = Op[i + 826*t] ? R[B[i + 826*t]] * R[C[i + 826*t]] : R[B[i + 826*t]] + R[C[i + 826*t]];
R[i + 1680*t] = Op[i + 827*t] ? R[B[i + 827*t]] * R[C[i + 827*t]] : R[B[i + 827*t]] + R[C[i + 827*t]];
R[i + 1681*t] = Op[i + 828*t] ? R[B[i + 828*t]] * R[C[i + 828*t]] : R[B[i + 828*t]] + R[C[i + 828*t]];
R[i + 1682*t] = Op[i + 829*t] ? R[B[i + 829*t]] * R[C[i + 829*t]] : R[B[i + 829*t]] + R[C[i + 829*t]];
R[i + 1683*t] = Op[i + 830*t] ? R[B[i + 830*t]] * R[C[i + 830*t]] : R[B[i + 830*t]] + R[C[i + 830*t]];
R[i + 1684*t] = Op[i + 831*t] ? R[B[i + 831*t]] * R[C[i + 831*t]] : R[B[i + 831*t]] + R[C[i + 831*t]];
R[i + 1685*t] = Op[i + 832*t] ? R[B[i + 832*t]] * R[C[i + 832*t]] : R[B[i + 832*t]] + R[C[i + 832*t]];
R[i + 1686*t] = Op[i + 833*t] ? R[B[i + 833*t]] * R[C[i + 833*t]] : R[B[i + 833*t]] + R[C[i + 833*t]];
R[i + 1687*t] = Op[i + 834*t] ? R[B[i + 834*t]] * R[C[i + 834*t]] : R[B[i + 834*t]] + R[C[i + 834*t]];
R[i + 1688*t] = Op[i + 835*t] ? R[B[i + 835*t]] * R[C[i + 835*t]] : R[B[i + 835*t]] + R[C[i + 835*t]];
R[i + 1689*t] = Op[i + 836*t] ? R[B[i + 836*t]] * R[C[i + 836*t]] : R[B[i + 836*t]] + R[C[i + 836*t]];
R[i + 1690*t] = Op[i + 837*t] ? R[B[i + 837*t]] * R[C[i + 837*t]] : R[B[i + 837*t]] + R[C[i + 837*t]];
R[i + 1691*t] = Op[i + 838*t] ? R[B[i + 838*t]] * R[C[i + 838*t]] : R[B[i + 838*t]] + R[C[i + 838*t]];
R[i + 1692*t] = Op[i + 839*t] ? R[B[i + 839*t]] * R[C[i + 839*t]] : R[B[i + 839*t]] + R[C[i + 839*t]];
R[i + 1693*t] = Op[i + 840*t] ? R[B[i + 840*t]] * R[C[i + 840*t]] : R[B[i + 840*t]] + R[C[i + 840*t]];
R[i + 1694*t] = Op[i + 841*t] ? R[B[i + 841*t]] * R[C[i + 841*t]] : R[B[i + 841*t]] + R[C[i + 841*t]];
R[i + 1695*t] = Op[i + 842*t] ? R[B[i + 842*t]] * R[C[i + 842*t]] : R[B[i + 842*t]] + R[C[i + 842*t]];
R[i + 1696*t] = Op[i + 843*t] ? R[B[i + 843*t]] * R[C[i + 843*t]] : R[B[i + 843*t]] + R[C[i + 843*t]];
R[i + 1697*t] = Op[i + 844*t] ? R[B[i + 844*t]] * R[C[i + 844*t]] : R[B[i + 844*t]] + R[C[i + 844*t]];
R[i + 1698*t] = Op[i + 845*t] ? R[B[i + 845*t]] * R[C[i + 845*t]] : R[B[i + 845*t]] + R[C[i + 845*t]];
R[i + 1699*t] = Op[i + 846*t] ? R[B[i + 846*t]] * R[C[i + 846*t]] : R[B[i + 846*t]] + R[C[i + 846*t]];
R[i + 1700*t] = Op[i + 847*t] ? R[B[i + 847*t]] * R[C[i + 847*t]] : R[B[i + 847*t]] + R[C[i + 847*t]];
R[i + 1701*t] = Op[i + 848*t] ? R[B[i + 848*t]] * R[C[i + 848*t]] : R[B[i + 848*t]] + R[C[i + 848*t]];
R[i + 1702*t] = Op[i + 849*t] ? R[B[i + 849*t]] * R[C[i + 849*t]] : R[B[i + 849*t]] + R[C[i + 849*t]];
R[i + 1703*t] = Op[i + 850*t] ? R[B[i + 850*t]] * R[C[i + 850*t]] : R[B[i + 850*t]] + R[C[i + 850*t]];
R[i + 1704*t] = Op[i + 851*t] ? R[B[i + 851*t]] * R[C[i + 851*t]] : R[B[i + 851*t]] + R[C[i + 851*t]];
R[i + 1705*t] = Op[i + 852*t] ? R[B[i + 852*t]] * R[C[i + 852*t]] : R[B[i + 852*t]] + R[C[i + 852*t]];
R[i + 1706*t] = Op[i + 853*t] ? R[B[i + 853*t]] * R[C[i + 853*t]] : R[B[i + 853*t]] + R[C[i + 853*t]];
R[i + 1707*t] = Op[i + 854*t] ? R[B[i + 854*t]] * R[C[i + 854*t]] : R[B[i + 854*t]] + R[C[i + 854*t]];
R[i + 1708*t] = Op[i + 855*t] ? R[B[i + 855*t]] * R[C[i + 855*t]] : R[B[i + 855*t]] + R[C[i + 855*t]];
R[i + 1709*t] = Op[i + 856*t] ? R[B[i + 856*t]] * R[C[i + 856*t]] : R[B[i + 856*t]] + R[C[i + 856*t]];
__syncthreads(); // level barrier
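// For reference, a minimal loop-form sketch of one dependency level under
// the same layout assumptions (LEVEL_BASE, DESC_BASE, and LEVEL_LEN are
// hypothetical names, not defined in this kernel):
//
//   for (int k = 0; k < LEVEL_LEN; ++k) {
//       int dst  = i + (LEVEL_BASE + k) * t;  // result slot for this node
//       int desc = i + (DESC_BASE  + k) * t;  // node descriptor (Op/B/C) slot
//       R[dst] = Op[desc] ? R[B[desc]] * R[C[desc]]
//                         : R[B[desc]] + R[C[desc]];
//   }
//   __syncthreads();
//
// The generator instead emits one statement per node, presumably trading
// code size for the removal of per-node loop and indexing overhead.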
R[i + 1710*t] = Op[i + 857*t] ? R[B[i + 857*t]] * R[C[i + 857*t]] : R[B[i + 857*t]] + R[C[i + 857*t]];
R[i + 1711*t] = Op[i + 858*t] ? R[B[i + 858*t]] * R[C[i + 858*t]] : R[B[i + 858*t]] + R[C[i + 858*t]];
R[i + 1712*t] = Op[i + 859*t] ? R[B[i + 859*t]] * R[C[i + 859*t]] : R[B[i + 859*t]] + R[C[i + 859*t]];
R[i + 1713*t] = Op[i + 860*t] ? R[B[i + 860*t]] * R[C[i + 860*t]] : R[B[i + 860*t]] + R[C[i + 860*t]];
R[i + 1714*t] = Op[i + 861*t] ? R[B[i + 861*t]] * R[C[i + 861*t]] : R[B[i + 861*t]] + R[C[i + 861*t]];
R[i + 1715*t] = Op[i + 862*t] ? R[B[i + 862*t]] * R[C[i + 862*t]] : R[B[i + 862*t]] + R[C[i + 862*t]];
R[i + 1716*t] = Op[i + 863*t] ? R[B[i + 863*t]] * R[C[i + 863*t]] : R[B[i + 863*t]] + R[C[i + 863*t]];
R[i + 1717*t] = Op[i + 864*t] ? R[B[i + 864*t]] * R[C[i + 864*t]] : R[B[i + 864*t]] + R[C[i + 864*t]];
R[i + 1718*t] = Op[i + 865*t] ? R[B[i + 865*t]] * R[C[i + 865*t]] : R[B[i + 865*t]] + R[C[i + 865*t]];
R[i + 1719*t] = Op[i + 866*t] ? R[B[i + 866*t]] * R[C[i + 866*t]] : R[B[i + 866*t]] + R[C[i + 866*t]];
R[i + 1720*t] = Op[i + 867*t] ? R[B[i + 867*t]] * R[C[i + 867*t]] : R[B[i + 867*t]] + R[C[i + 867*t]];
R[i + 1721*t] = Op[i + 868*t] ? R[B[i + 868*t]] * R[C[i + 868*t]] : R[B[i + 868*t]] + R[C[i + 868*t]];
R[i + 1722*t] = Op[i + 869*t] ? R[B[i + 869*t]] * R[C[i + 869*t]] : R[B[i + 869*t]] + R[C[i + 869*t]];
R[i + 1723*t] = Op[i + 870*t] ? R[B[i + 870*t]] * R[C[i + 870*t]] : R[B[i + 870*t]] + R[C[i + 870*t]];
R[i + 1724*t] = Op[i + 871*t] ? R[B[i + 871*t]] * R[C[i + 871*t]] : R[B[i + 871*t]] + R[C[i + 871*t]];
R[i + 1725*t] = Op[i + 872*t] ? R[B[i + 872*t]] * R[C[i + 872*t]] : R[B[i + 872*t]] + R[C[i + 872*t]];
R[i + 1726*t] = Op[i + 873*t] ? R[B[i + 873*t]] * R[C[i + 873*t]] : R[B[i + 873*t]] + R[C[i + 873*t]];
R[i + 1727*t] = Op[i + 874*t] ? R[B[i + 874*t]] * R[C[i + 874*t]] : R[B[i + 874*t]] + R[C[i + 874*t]];
R[i + 1728*t] = Op[i + 875*t] ? R[B[i + 875*t]] * R[C[i + 875*t]] : R[B[i + 875*t]] + R[C[i + 875*t]];
R[i + 1729*t] = Op[i + 876*t] ? R[B[i + 876*t]] * R[C[i + 876*t]] : R[B[i + 876*t]] + R[C[i + 876*t]];
R[i + 1730*t] = Op[i + 877*t] ? R[B[i + 877*t]] * R[C[i + 877*t]] : R[B[i + 877*t]] + R[C[i + 877*t]];
R[i + 1731*t] = Op[i + 878*t] ? R[B[i + 878*t]] * R[C[i + 878*t]] : R[B[i + 878*t]] + R[C[i + 878*t]];
R[i + 1732*t] = Op[i + 879*t] ? R[B[i + 879*t]] * R[C[i + 879*t]] : R[B[i + 879*t]] + R[C[i + 879*t]];
R[i + 1733*t] = Op[i + 880*t] ? R[B[i + 880*t]] * R[C[i + 880*t]] : R[B[i + 880*t]] + R[C[i + 880*t]];
R[i + 1734*t] = Op[i + 881*t] ? R[B[i + 881*t]] * R[C[i + 881*t]] : R[B[i + 881*t]] + R[C[i + 881*t]];
R[i + 1735*t] = Op[i + 882*t] ? R[B[i + 882*t]] * R[C[i + 882*t]] : R[B[i + 882*t]] + R[C[i + 882*t]];
R[i + 1736*t] = Op[i + 883*t] ? R[B[i + 883*t]] * R[C[i + 883*t]] : R[B[i + 883*t]] + R[C[i + 883*t]];
R[i + 1737*t] = Op[i + 884*t] ? R[B[i + 884*t]] * R[C[i + 884*t]] : R[B[i + 884*t]] + R[C[i + 884*t]];
R[i + 1738*t] = Op[i + 885*t] ? R[B[i + 885*t]] * R[C[i + 885*t]] : R[B[i + 885*t]] + R[C[i + 885*t]];
R[i + 1739*t] = Op[i + 886*t] ? R[B[i + 886*t]] * R[C[i + 886*t]] : R[B[i + 886*t]] + R[C[i + 886*t]];
R[i + 1740*t] = Op[i + 887*t] ? R[B[i + 887*t]] * R[C[i + 887*t]] : R[B[i + 887*t]] + R[C[i + 887*t]];
R[i + 1741*t] = Op[i + 888*t] ? R[B[i + 888*t]] * R[C[i + 888*t]] : R[B[i + 888*t]] + R[C[i + 888*t]];
R[i + 1742*t] = Op[i + 889*t] ? R[B[i + 889*t]] * R[C[i + 889*t]] : R[B[i + 889*t]] + R[C[i + 889*t]];
R[i + 1743*t] = Op[i + 890*t] ? R[B[i + 890*t]] * R[C[i + 890*t]] : R[B[i + 890*t]] + R[C[i + 890*t]];
R[i + 1744*t] = Op[i + 891*t] ? R[B[i + 891*t]] * R[C[i + 891*t]] : R[B[i + 891*t]] + R[C[i + 891*t]];
R[i + 1745*t] = Op[i + 892*t] ? R[B[i + 892*t]] * R[C[i + 892*t]] : R[B[i + 892*t]] + R[C[i + 892*t]];
R[i + 1746*t] = Op[i + 893*t] ? R[B[i + 893*t]] * R[C[i + 893*t]] : R[B[i + 893*t]] + R[C[i + 893*t]];
R[i + 1747*t] = Op[i + 894*t] ? R[B[i + 894*t]] * R[C[i + 894*t]] : R[B[i + 894*t]] + R[C[i + 894*t]];
R[i + 1748*t] = Op[i + 895*t] ? R[B[i + 895*t]] * R[C[i + 895*t]] : R[B[i + 895*t]] + R[C[i + 895*t]];
R[i + 1749*t] = Op[i + 896*t] ? R[B[i + 896*t]] * R[C[i + 896*t]] : R[B[i + 896*t]] + R[C[i + 896*t]];
R[i + 1750*t] = Op[i + 897*t] ? R[B[i + 897*t]] * R[C[i + 897*t]] : R[B[i + 897*t]] + R[C[i + 897*t]];
R[i + 1751*t] = Op[i + 898*t] ? R[B[i + 898*t]] * R[C[i + 898*t]] : R[B[i + 898*t]] + R[C[i + 898*t]];
R[i + 1752*t] = Op[i + 899*t] ? R[B[i + 899*t]] * R[C[i + 899*t]] : R[B[i + 899*t]] + R[C[i + 899*t]];
R[i + 1753*t] = Op[i + 900*t] ? R[B[i + 900*t]] * R[C[i + 900*t]] : R[B[i + 900*t]] + R[C[i + 900*t]];
R[i + 1754*t] = Op[i + 901*t] ? R[B[i + 901*t]] * R[C[i + 901*t]] : R[B[i + 901*t]] + R[C[i + 901*t]];
R[i + 1755*t] = Op[i + 902*t] ? R[B[i + 902*t]] * R[C[i + 902*t]] : R[B[i + 902*t]] + R[C[i + 902*t]];
R[i + 1756*t] = Op[i + 903*t] ? R[B[i + 903*t]] * R[C[i + 903*t]] : R[B[i + 903*t]] + R[C[i + 903*t]];
R[i + 1757*t] = Op[i + 904*t] ? R[B[i + 904*t]] * R[C[i + 904*t]] : R[B[i + 904*t]] + R[C[i + 904*t]];
R[i + 1758*t] = Op[i + 905*t] ? R[B[i + 905*t]] * R[C[i + 905*t]] : R[B[i + 905*t]] + R[C[i + 905*t]];
R[i + 1759*t] = Op[i + 906*t] ? R[B[i + 906*t]] * R[C[i + 906*t]] : R[B[i + 906*t]] + R[C[i + 906*t]];
R[i + 1760*t] = Op[i + 907*t] ? R[B[i + 907*t]] * R[C[i + 907*t]] : R[B[i + 907*t]] + R[C[i + 907*t]];
R[i + 1761*t] = Op[i + 908*t] ? R[B[i + 908*t]] * R[C[i + 908*t]] : R[B[i + 908*t]] + R[C[i + 908*t]];
R[i + 1762*t] = Op[i + 909*t] ? R[B[i + 909*t]] * R[C[i + 909*t]] : R[B[i + 909*t]] + R[C[i + 909*t]];
R[i + 1763*t] = Op[i + 910*t] ? R[B[i + 910*t]] * R[C[i + 910*t]] : R[B[i + 910*t]] + R[C[i + 910*t]];
R[i + 1764*t] = Op[i + 911*t] ? R[B[i + 911*t]] * R[C[i + 911*t]] : R[B[i + 911*t]] + R[C[i + 911*t]];
R[i + 1765*t] = Op[i + 912*t] ? R[B[i + 912*t]] * R[C[i + 912*t]] : R[B[i + 912*t]] + R[C[i + 912*t]];
R[i + 1766*t] = Op[i + 913*t] ? R[B[i + 913*t]] * R[C[i + 913*t]] : R[B[i + 913*t]] + R[C[i + 913*t]];
R[i + 1767*t] = Op[i + 914*t] ? R[B[i + 914*t]] * R[C[i + 914*t]] : R[B[i + 914*t]] + R[C[i + 914*t]];
R[i + 1768*t] = Op[i + 915*t] ? R[B[i + 915*t]] * R[C[i + 915*t]] : R[B[i + 915*t]] + R[C[i + 915*t]];
R[i + 1769*t] = Op[i + 916*t] ? R[B[i + 916*t]] * R[C[i + 916*t]] : R[B[i + 916*t]] + R[C[i + 916*t]];
R[i + 1770*t] = Op[i + 917*t] ? R[B[i + 917*t]] * R[C[i + 917*t]] : R[B[i + 917*t]] + R[C[i + 917*t]];
R[i + 1771*t] = Op[i + 918*t] ? R[B[i + 918*t]] * R[C[i + 918*t]] : R[B[i + 918*t]] + R[C[i + 918*t]];
R[i + 1772*t] = Op[i + 919*t] ? R[B[i + 919*t]] * R[C[i + 919*t]] : R[B[i + 919*t]] + R[C[i + 919*t]];
R[i + 1773*t] = Op[i + 920*t] ? R[B[i + 920*t]] * R[C[i + 920*t]] : R[B[i + 920*t]] + R[C[i + 920*t]];
R[i + 1774*t] = Op[i + 921*t] ? R[B[i + 921*t]] * R[C[i + 921*t]] : R[B[i + 921*t]] + R[C[i + 921*t]];
R[i + 1775*t] = Op[i + 922*t] ? R[B[i + 922*t]] * R[C[i + 922*t]] : R[B[i + 922*t]] + R[C[i + 922*t]];
R[i + 1776*t] = Op[i + 923*t] ? R[B[i + 923*t]] * R[C[i + 923*t]] : R[B[i + 923*t]] + R[C[i + 923*t]];
R[i + 1777*t] = Op[i + 924*t] ? R[B[i + 924*t]] * R[C[i + 924*t]] : R[B[i + 924*t]] + R[C[i + 924*t]];
R[i + 1778*t] = Op[i + 925*t] ? R[B[i + 925*t]] * R[C[i + 925*t]] : R[B[i + 925*t]] + R[C[i + 925*t]];
R[i + 1779*t] = Op[i + 926*t] ? R[B[i + 926*t]] * R[C[i + 926*t]] : R[B[i + 926*t]] + R[C[i + 926*t]];
R[i + 1780*t] = Op[i + 927*t] ? R[B[i + 927*t]] * R[C[i + 927*t]] : R[B[i + 927*t]] + R[C[i + 927*t]];
R[i + 1781*t] = Op[i + 928*t] ? R[B[i + 928*t]] * R[C[i + 928*t]] : R[B[i + 928*t]] + R[C[i + 928*t]];
R[i + 1782*t] = Op[i + 929*t] ? R[B[i + 929*t]] * R[C[i + 929*t]] : R[B[i + 929*t]] + R[C[i + 929*t]];
R[i + 1783*t] = Op[i + 930*t] ? R[B[i + 930*t]] * R[C[i + 930*t]] : R[B[i + 930*t]] + R[C[i + 930*t]];
R[i + 1784*t] = Op[i + 931*t] ? R[B[i + 931*t]] * R[C[i + 931*t]] : R[B[i + 931*t]] + R[C[i + 931*t]];
R[i + 1785*t] = Op[i + 932*t] ? R[B[i + 932*t]] * R[C[i + 932*t]] : R[B[i + 932*t]] + R[C[i + 932*t]];
R[i + 1786*t] = Op[i + 933*t] ? R[B[i + 933*t]] * R[C[i + 933*t]] : R[B[i + 933*t]] + R[C[i + 933*t]];
R[i + 1787*t] = Op[i + 934*t] ? R[B[i + 934*t]] * R[C[i + 934*t]] : R[B[i + 934*t]] + R[C[i + 934*t]];
R[i + 1788*t] = Op[i + 935*t] ? R[B[i + 935*t]] * R[C[i + 935*t]] : R[B[i + 935*t]] + R[C[i + 935*t]];
R[i + 1789*t] = Op[i + 936*t] ? R[B[i + 936*t]] * R[C[i + 936*t]] : R[B[i + 936*t]] + R[C[i + 936*t]];
R[i + 1790*t] = Op[i + 937*t] ? R[B[i + 937*t]] * R[C[i + 937*t]] : R[B[i + 937*t]] + R[C[i + 937*t]];
R[i + 1791*t] = Op[i + 938*t] ? R[B[i + 938*t]] * R[C[i + 938*t]] : R[B[i + 938*t]] + R[C[i + 938*t]];
R[i + 1792*t] = Op[i + 939*t] ? R[B[i + 939*t]] * R[C[i + 939*t]] : R[B[i + 939*t]] + R[C[i + 939*t]];
R[i + 1793*t] = Op[i + 940*t] ? R[B[i + 940*t]] * R[C[i + 940*t]] : R[B[i + 940*t]] + R[C[i + 940*t]];
R[i + 1794*t] = Op[i + 941*t] ? R[B[i + 941*t]] * R[C[i + 941*t]] : R[B[i + 941*t]] + R[C[i + 941*t]];
R[i + 1795*t] = Op[i + 942*t] ? R[B[i + 942*t]] * R[C[i + 942*t]] : R[B[i + 942*t]] + R[C[i + 942*t]];
R[i + 1796*t] = Op[i + 943*t] ? R[B[i + 943*t]] * R[C[i + 943*t]] : R[B[i + 943*t]] + R[C[i + 943*t]];
__syncthreads();
R[i + 1797*t] = Op[i + 944*t] ? R[B[i + 944*t]] * R[C[i + 944*t]] : R[B[i + 944*t]] + R[C[i + 944*t]];
R[i + 1798*t] = Op[i + 945*t] ? R[B[i + 945*t]] * R[C[i + 945*t]] : R[B[i + 945*t]] + R[C[i + 945*t]];
R[i + 1799*t] = Op[i + 946*t] ? R[B[i + 946*t]] * R[C[i + 946*t]] : R[B[i + 946*t]] + R[C[i + 946*t]];
R[i + 1800*t] = Op[i + 947*t] ? R[B[i + 947*t]] * R[C[i + 947*t]] : R[B[i + 947*t]] + R[C[i + 947*t]];
R[i + 1801*t] = Op[i + 948*t] ? R[B[i + 948*t]] * R[C[i + 948*t]] : R[B[i + 948*t]] + R[C[i + 948*t]];
R[i + 1802*t] = Op[i + 949*t] ? R[B[i + 949*t]] * R[C[i + 949*t]] : R[B[i + 949*t]] + R[C[i + 949*t]];
R[i + 1803*t] = Op[i + 950*t] ? R[B[i + 950*t]] * R[C[i + 950*t]] : R[B[i + 950*t]] + R[C[i + 950*t]];
R[i + 1804*t] = Op[i + 951*t] ? R[B[i + 951*t]] * R[C[i + 951*t]] : R[B[i + 951*t]] + R[C[i + 951*t]];
R[i + 1805*t] = Op[i + 952*t] ? R[B[i + 952*t]] * R[C[i + 952*t]] : R[B[i + 952*t]] + R[C[i + 952*t]];
R[i + 1806*t] = Op[i + 953*t] ? R[B[i + 953*t]] * R[C[i + 953*t]] : R[B[i + 953*t]] + R[C[i + 953*t]];
R[i + 1807*t] = Op[i + 954*t] ? R[B[i + 954*t]] * R[C[i + 954*t]] : R[B[i + 954*t]] + R[C[i + 954*t]];
R[i + 1808*t] = Op[i + 955*t] ? R[B[i + 955*t]] * R[C[i + 955*t]] : R[B[i + 955*t]] + R[C[i + 955*t]];
R[i + 1809*t] = Op[i + 956*t] ? R[B[i + 956*t]] * R[C[i + 956*t]] : R[B[i + 956*t]] + R[C[i + 956*t]];
R[i + 1810*t] = Op[i + 957*t] ? R[B[i + 957*t]] * R[C[i + 957*t]] : R[B[i + 957*t]] + R[C[i + 957*t]];
R[i + 1811*t] = Op[i + 958*t] ? R[B[i + 958*t]] * R[C[i + 958*t]] : R[B[i + 958*t]] + R[C[i + 958*t]];
R[i + 1812*t] = Op[i + 959*t] ? R[B[i + 959*t]] * R[C[i + 959*t]] : R[B[i + 959*t]] + R[C[i + 959*t]];
R[i + 1813*t] = Op[i + 960*t] ? R[B[i + 960*t]] * R[C[i + 960*t]] : R[B[i + 960*t]] + R[C[i + 960*t]];
R[i + 1814*t] = Op[i + 961*t] ? R[B[i + 961*t]] * R[C[i + 961*t]] : R[B[i + 961*t]] + R[C[i + 961*t]];
R[i + 1815*t] = Op[i + 962*t] ? R[B[i + 962*t]] * R[C[i + 962*t]] : R[B[i + 962*t]] + R[C[i + 962*t]];
R[i + 1816*t] = Op[i + 963*t] ? R[B[i + 963*t]] * R[C[i + 963*t]] : R[B[i + 963*t]] + R[C[i + 963*t]];
R[i + 1817*t] = Op[i + 964*t] ? R[B[i + 964*t]] * R[C[i + 964*t]] : R[B[i + 964*t]] + R[C[i + 964*t]];
R[i + 1818*t] = Op[i + 965*t] ? R[B[i + 965*t]] * R[C[i + 965*t]] : R[B[i + 965*t]] + R[C[i + 965*t]];
R[i + 1819*t] = Op[i + 966*t] ? R[B[i + 966*t]] * R[C[i + 966*t]] : R[B[i + 966*t]] + R[C[i + 966*t]];
R[i + 1820*t] = Op[i + 967*t] ? R[B[i + 967*t]] * R[C[i + 967*t]] : R[B[i + 967*t]] + R[C[i + 967*t]];
R[i + 1821*t] = Op[i + 968*t] ? R[B[i + 968*t]] * R[C[i + 968*t]] : R[B[i + 968*t]] + R[C[i + 968*t]];
R[i + 1822*t] = Op[i + 969*t] ? R[B[i + 969*t]] * R[C[i + 969*t]] : R[B[i + 969*t]] + R[C[i + 969*t]];
R[i + 1823*t] = Op[i + 970*t] ? R[B[i + 970*t]] * R[C[i + 970*t]] : R[B[i + 970*t]] + R[C[i + 970*t]];
R[i + 1824*t] = Op[i + 971*t] ? R[B[i + 971*t]] * R[C[i + 971*t]] : R[B[i + 971*t]] + R[C[i + 971*t]];
R[i + 1825*t] = Op[i + 972*t] ? R[B[i + 972*t]] * R[C[i + 972*t]] : R[B[i + 972*t]] + R[C[i + 972*t]];
R[i + 1826*t] = Op[i + 973*t] ? R[B[i + 973*t]] * R[C[i + 973*t]] : R[B[i + 973*t]] + R[C[i + 973*t]];
R[i + 1827*t] = Op[i + 974*t] ? R[B[i + 974*t]] * R[C[i + 974*t]] : R[B[i + 974*t]] + R[C[i + 974*t]];
R[i + 1828*t] = Op[i + 975*t] ? R[B[i + 975*t]] * R[C[i + 975*t]] : R[B[i + 975*t]] + R[C[i + 975*t]];
R[i + 1829*t] = Op[i + 976*t] ? R[B[i + 976*t]] * R[C[i + 976*t]] : R[B[i + 976*t]] + R[C[i + 976*t]];
R[i + 1830*t] = Op[i + 977*t] ? R[B[i + 977*t]] * R[C[i + 977*t]] : R[B[i + 977*t]] + R[C[i + 977*t]];
R[i + 1831*t] = Op[i + 978*t] ? R[B[i + 978*t]] * R[C[i + 978*t]] : R[B[i + 978*t]] + R[C[i + 978*t]];
R[i + 1832*t] = Op[i + 979*t] ? R[B[i + 979*t]] * R[C[i + 979*t]] : R[B[i + 979*t]] + R[C[i + 979*t]];
R[i + 1833*t] = Op[i + 980*t] ? R[B[i + 980*t]] * R[C[i + 980*t]] : R[B[i + 980*t]] + R[C[i + 980*t]];
R[i + 1834*t] = Op[i + 981*t] ? R[B[i + 981*t]] * R[C[i + 981*t]] : R[B[i + 981*t]] + R[C[i + 981*t]];
R[i + 1835*t] = Op[i + 982*t] ? R[B[i + 982*t]] * R[C[i + 982*t]] : R[B[i + 982*t]] + R[C[i + 982*t]];
R[i + 1836*t] = Op[i + 983*t] ? R[B[i + 983*t]] * R[C[i + 983*t]] : R[B[i + 983*t]] + R[C[i + 983*t]];
R[i + 1837*t] = Op[i + 984*t] ? R[B[i + 984*t]] * R[C[i + 984*t]] : R[B[i + 984*t]] + R[C[i + 984*t]];
R[i + 1838*t] = Op[i + 985*t] ? R[B[i + 985*t]] * R[C[i + 985*t]] : R[B[i + 985*t]] + R[C[i + 985*t]];
R[i + 1839*t] = Op[i + 986*t] ? R[B[i + 986*t]] * R[C[i + 986*t]] : R[B[i + 986*t]] + R[C[i + 986*t]];
R[i + 1840*t] = Op[i + 987*t] ? R[B[i + 987*t]] * R[C[i + 987*t]] : R[B[i + 987*t]] + R[C[i + 987*t]];
R[i + 1841*t] = Op[i + 988*t] ? R[B[i + 988*t]] * R[C[i + 988*t]] : R[B[i + 988*t]] + R[C[i + 988*t]];
R[i + 1842*t] = Op[i + 989*t] ? R[B[i + 989*t]] * R[C[i + 989*t]] : R[B[i + 989*t]] + R[C[i + 989*t]];
R[i + 1843*t] = Op[i + 990*t] ? R[B[i + 990*t]] * R[C[i + 990*t]] : R[B[i + 990*t]] + R[C[i + 990*t]];
R[i + 1844*t] = Op[i + 991*t] ? R[B[i + 991*t]] * R[C[i + 991*t]] : R[B[i + 991*t]] + R[C[i + 991*t]];
R[i + 1845*t] = Op[i + 992*t] ? R[B[i + 992*t]] * R[C[i + 992*t]] : R[B[i + 992*t]] + R[C[i + 992*t]];
R[i + 1846*t] = Op[i + 993*t] ? R[B[i + 993*t]] * R[C[i + 993*t]] : R[B[i + 993*t]] + R[C[i + 993*t]];
R[i + 1847*t] = Op[i + 994*t] ? R[B[i + 994*t]] * R[C[i + 994*t]] : R[B[i + 994*t]] + R[C[i + 994*t]];
R[i + 1848*t] = Op[i + 995*t] ? R[B[i + 995*t]] * R[C[i + 995*t]] : R[B[i + 995*t]] + R[C[i + 995*t]];
R[i + 1849*t] = Op[i + 996*t] ? R[B[i + 996*t]] * R[C[i + 996*t]] : R[B[i + 996*t]] + R[C[i + 996*t]];
__syncthreads();
R[i + 1850*t] = Op[i + 997*t] ? R[B[i + 997*t]] * R[C[i + 997*t]] : R[B[i + 997*t]] + R[C[i + 997*t]];
R[i + 1851*t] = Op[i + 998*t] ? R[B[i + 998*t]] * R[C[i + 998*t]] : R[B[i + 998*t]] + R[C[i + 998*t]];
R[i + 1852*t] = Op[i + 999*t] ? R[B[i + 999*t]] * R[C[i + 999*t]] : R[B[i + 999*t]] + R[C[i + 999*t]];
R[i + 1853*t] = Op[i + 1000*t] ? R[B[i + 1000*t]] * R[C[i + 1000*t]] : R[B[i + 1000*t]] + R[C[i + 1000*t]];
R[i + 1854*t] = Op[i + 1001*t] ? R[B[i + 1001*t]] * R[C[i + 1001*t]] : R[B[i + 1001*t]] + R[C[i + 1001*t]];
R[i + 1855*t] = Op[i + 1002*t] ? R[B[i + 1002*t]] * R[C[i + 1002*t]] : R[B[i + 1002*t]] + R[C[i + 1002*t]];
R[i + 1856*t] = Op[i + 1003*t] ? R[B[i + 1003*t]] * R[C[i + 1003*t]] : R[B[i + 1003*t]] + R[C[i + 1003*t]];
R[i + 1857*t] = Op[i + 1004*t] ? R[B[i + 1004*t]] * R[C[i + 1004*t]] : R[B[i + 1004*t]] + R[C[i + 1004*t]];
R[i + 1858*t] = Op[i + 1005*t] ? R[B[i + 1005*t]] * R[C[i + 1005*t]] : R[B[i + 1005*t]] + R[C[i + 1005*t]];
R[i + 1859*t] = Op[i + 1006*t] ? R[B[i + 1006*t]] * R[C[i + 1006*t]] : R[B[i + 1006*t]] + R[C[i + 1006*t]];
R[i + 1860*t] = Op[i + 1007*t] ? R[B[i + 1007*t]] * R[C[i + 1007*t]] : R[B[i + 1007*t]] + R[C[i + 1007*t]];
R[i + 1861*t] = Op[i + 1008*t] ? R[B[i + 1008*t]] * R[C[i + 1008*t]] : R[B[i + 1008*t]] + R[C[i + 1008*t]];
R[i + 1862*t] = Op[i + 1009*t] ? R[B[i + 1009*t]] * R[C[i + 1009*t]] : R[B[i + 1009*t]] + R[C[i + 1009*t]];
R[i + 1863*t] = Op[i + 1010*t] ? R[B[i + 1010*t]] * R[C[i + 1010*t]] : R[B[i + 1010*t]] + R[C[i + 1010*t]];
R[i + 1864*t] = Op[i + 1011*t] ? R[B[i + 1011*t]] * R[C[i + 1011*t]] : R[B[i + 1011*t]] + R[C[i + 1011*t]];
R[i + 1865*t] = Op[i + 1012*t] ? R[B[i + 1012*t]] * R[C[i + 1012*t]] : R[B[i + 1012*t]] + R[C[i + 1012*t]];
R[i + 1866*t] = Op[i + 1013*t] ? R[B[i + 1013*t]] * R[C[i + 1013*t]] : R[B[i + 1013*t]] + R[C[i + 1013*t]];
R[i + 1867*t] = Op[i + 1014*t] ? R[B[i + 1014*t]] * R[C[i + 1014*t]] : R[B[i + 1014*t]] + R[C[i + 1014*t]];
R[i + 1868*t] = Op[i + 1015*t] ? R[B[i + 1015*t]] * R[C[i + 1015*t]] : R[B[i + 1015*t]] + R[C[i + 1015*t]];
R[i + 1869*t] = Op[i + 1016*t] ? R[B[i + 1016*t]] * R[C[i + 1016*t]] : R[B[i + 1016*t]] + R[C[i + 1016*t]];
R[i + 1870*t] = Op[i + 1017*t] ? R[B[i + 1017*t]] * R[C[i + 1017*t]] : R[B[i + 1017*t]] + R[C[i + 1017*t]];
R[i + 1871*t] = Op[i + 1018*t] ? R[B[i + 1018*t]] * R[C[i + 1018*t]] : R[B[i + 1018*t]] + R[C[i + 1018*t]];
R[i + 1872*t] = Op[i + 1019*t] ? R[B[i + 1019*t]] * R[C[i + 1019*t]] : R[B[i + 1019*t]] + R[C[i + 1019*t]];
R[i + 1873*t] = Op[i + 1020*t] ? R[B[i + 1020*t]] * R[C[i + 1020*t]] : R[B[i + 1020*t]] + R[C[i + 1020*t]];
R[i + 1874*t] = Op[i + 1021*t] ? R[B[i + 1021*t]] * R[C[i + 1021*t]] : R[B[i + 1021*t]] + R[C[i + 1021*t]];
R[i + 1875*t] = Op[i + 1022*t] ? R[B[i + 1022*t]] * R[C[i + 1022*t]] : R[B[i + 1022*t]] + R[C[i + 1022*t]];
R[i + 1876*t] = Op[i + 1023*t] ? R[B[i + 1023*t]] * R[C[i + 1023*t]] : R[B[i + 1023*t]] + R[C[i + 1023*t]];
R[i + 1877*t] = Op[i + 1024*t] ? R[B[i + 1024*t]] * R[C[i + 1024*t]] : R[B[i + 1024*t]] + R[C[i + 1024*t]];
R[i + 1878*t] = Op[i + 1025*t] ? R[B[i + 1025*t]] * R[C[i + 1025*t]] : R[B[i + 1025*t]] + R[C[i + 1025*t]];
R[i + 1879*t] = Op[i + 1026*t] ? R[B[i + 1026*t]] * R[C[i + 1026*t]] : R[B[i + 1026*t]] + R[C[i + 1026*t]];
R[i + 1880*t] = Op[i + 1027*t] ? R[B[i + 1027*t]] * R[C[i + 1027*t]] : R[B[i + 1027*t]] + R[C[i + 1027*t]];
R[i + 1881*t] = Op[i + 1028*t] ? R[B[i + 1028*t]] * R[C[i + 1028*t]] : R[B[i + 1028*t]] + R[C[i + 1028*t]];
R[i + 1882*t] = Op[i + 1029*t] ? R[B[i + 1029*t]] * R[C[i + 1029*t]] : R[B[i + 1029*t]] + R[C[i + 1029*t]];
R[i + 1883*t] = Op[i + 1030*t] ? R[B[i + 1030*t]] * R[C[i + 1030*t]] : R[B[i + 1030*t]] + R[C[i + 1030*t]];
R[i + 1884*t] = Op[i + 1031*t] ? R[B[i + 1031*t]] * R[C[i + 1031*t]] : R[B[i + 1031*t]] + R[C[i + 1031*t]];
R[i + 1885*t] = Op[i + 1032*t] ? R[B[i + 1032*t]] * R[C[i + 1032*t]] : R[B[i + 1032*t]] + R[C[i + 1032*t]];
R[i + 1886*t] = Op[i + 1033*t] ? R[B[i + 1033*t]] * R[C[i + 1033*t]] : R[B[i + 1033*t]] + R[C[i + 1033*t]];
R[i + 1887*t] = Op[i + 1034*t] ? R[B[i + 1034*t]] * R[C[i + 1034*t]] : R[B[i + 1034*t]] + R[C[i + 1034*t]];
R[i + 1888*t] = Op[i + 1035*t] ? R[B[i + 1035*t]] * R[C[i + 1035*t]] : R[B[i + 1035*t]] + R[C[i + 1035*t]];
R[i + 1889*t] = Op[i + 1036*t] ? R[B[i + 1036*t]] * R[C[i + 1036*t]] : R[B[i + 1036*t]] + R[C[i + 1036*t]];
R[i + 1890*t] = Op[i + 1037*t] ? R[B[i + 1037*t]] * R[C[i + 1037*t]] : R[B[i + 1037*t]] + R[C[i + 1037*t]];
R[i + 1891*t] = Op[i + 1038*t] ? R[B[i + 1038*t]] * R[C[i + 1038*t]] : R[B[i + 1038*t]] + R[C[i + 1038*t]];
R[i + 1892*t] = Op[i + 1039*t] ? R[B[i + 1039*t]] * R[C[i + 1039*t]] : R[B[i + 1039*t]] + R[C[i + 1039*t]];
R[i + 1893*t] = Op[i + 1040*t] ? R[B[i + 1040*t]] * R[C[i + 1040*t]] : R[B[i + 1040*t]] + R[C[i + 1040*t]];
R[i + 1894*t] = Op[i + 1041*t] ? R[B[i + 1041*t]] * R[C[i + 1041*t]] : R[B[i + 1041*t]] + R[C[i + 1041*t]];
R[i + 1895*t] = Op[i + 1042*t] ? R[B[i + 1042*t]] * R[C[i + 1042*t]] : R[B[i + 1042*t]] + R[C[i + 1042*t]];
R[i + 1896*t] = Op[i + 1043*t] ? R[B[i + 1043*t]] * R[C[i + 1043*t]] : R[B[i + 1043*t]] + R[C[i + 1043*t]];
R[i + 1897*t] = Op[i + 1044*t] ? R[B[i + 1044*t]] * R[C[i + 1044*t]] : R[B[i + 1044*t]] + R[C[i + 1044*t]];
R[i + 1898*t] = Op[i + 1045*t] ? R[B[i + 1045*t]] * R[C[i + 1045*t]] : R[B[i + 1045*t]] + R[C[i + 1045*t]];
R[i + 1899*t] = Op[i + 1046*t] ? R[B[i + 1046*t]] * R[C[i + 1046*t]] : R[B[i + 1046*t]] + R[C[i + 1046*t]];
R[i + 1900*t] = Op[i + 1047*t] ? R[B[i + 1047*t]] * R[C[i + 1047*t]] : R[B[i + 1047*t]] + R[C[i + 1047*t]];
R[i + 1901*t] = Op[i + 1048*t] ? R[B[i + 1048*t]] * R[C[i + 1048*t]] : R[B[i + 1048*t]] + R[C[i + 1048*t]];
R[i + 1902*t] = Op[i + 1049*t] ? R[B[i + 1049*t]] * R[C[i + 1049*t]] : R[B[i + 1049*t]] + R[C[i + 1049*t]];
R[i + 1903*t] = Op[i + 1050*t] ? R[B[i + 1050*t]] * R[C[i + 1050*t]] : R[B[i + 1050*t]] + R[C[i + 1050*t]];
R[i + 1904*t] = Op[i + 1051*t] ? R[B[i + 1051*t]] * R[C[i + 1051*t]] : R[B[i + 1051*t]] + R[C[i + 1051*t]];
R[i + 1905*t] = Op[i + 1052*t] ? R[B[i + 1052*t]] * R[C[i + 1052*t]] : R[B[i + 1052*t]] + R[C[i + 1052*t]];
__syncthreads();
R[i + 1906*t] = Op[i + 1053*t] ? R[B[i + 1053*t]] * R[C[i + 1053*t]] : R[B[i + 1053*t]] + R[C[i + 1053*t]];
R[i + 1907*t] = Op[i + 1054*t] ? R[B[i + 1054*t]] * R[C[i + 1054*t]] : R[B[i + 1054*t]] + R[C[i + 1054*t]];
R[i + 1908*t] = Op[i + 1055*t] ? R[B[i + 1055*t]] * R[C[i + 1055*t]] : R[B[i + 1055*t]] + R[C[i + 1055*t]];
R[i + 1909*t] = Op[i + 1056*t] ? R[B[i + 1056*t]] * R[C[i + 1056*t]] : R[B[i + 1056*t]] + R[C[i + 1056*t]];
R[i + 1910*t] = Op[i + 1057*t] ? R[B[i + 1057*t]] * R[C[i + 1057*t]] : R[B[i + 1057*t]] + R[C[i + 1057*t]];
R[i + 1911*t] = Op[i + 1058*t] ? R[B[i + 1058*t]] * R[C[i + 1058*t]] : R[B[i + 1058*t]] + R[C[i + 1058*t]];
R[i + 1912*t] = Op[i + 1059*t] ? R[B[i + 1059*t]] * R[C[i + 1059*t]] : R[B[i + 1059*t]] + R[C[i + 1059*t]];
R[i + 1913*t] = Op[i + 1060*t] ? R[B[i + 1060*t]] * R[C[i + 1060*t]] : R[B[i + 1060*t]] + R[C[i + 1060*t]];
R[i + 1914*t] = Op[i + 1061*t] ? R[B[i + 1061*t]] * R[C[i + 1061*t]] : R[B[i + 1061*t]] + R[C[i + 1061*t]];
R[i + 1915*t] = Op[i + 1062*t] ? R[B[i + 1062*t]] * R[C[i + 1062*t]] : R[B[i + 1062*t]] + R[C[i + 1062*t]];
R[i + 1916*t] = Op[i + 1063*t] ? R[B[i + 1063*t]] * R[C[i + 1063*t]] : R[B[i + 1063*t]] + R[C[i + 1063*t]];
R[i + 1917*t] = Op[i + 1064*t] ? R[B[i + 1064*t]] * R[C[i + 1064*t]] : R[B[i + 1064*t]] + R[C[i + 1064*t]];
R[i + 1918*t] = Op[i + 1065*t] ? R[B[i + 1065*t]] * R[C[i + 1065*t]] : R[B[i + 1065*t]] + R[C[i + 1065*t]];
R[i + 1919*t] = Op[i + 1066*t] ? R[B[i + 1066*t]] * R[C[i + 1066*t]] : R[B[i + 1066*t]] + R[C[i + 1066*t]];
R[i + 1920*t] = Op[i + 1067*t] ? R[B[i + 1067*t]] * R[C[i + 1067*t]] : R[B[i + 1067*t]] + R[C[i + 1067*t]];
R[i + 1921*t] = Op[i + 1068*t] ? R[B[i + 1068*t]] * R[C[i + 1068*t]] : R[B[i + 1068*t]] + R[C[i + 1068*t]];
R[i + 1922*t] = Op[i + 1069*t] ? R[B[i + 1069*t]] * R[C[i + 1069*t]] : R[B[i + 1069*t]] + R[C[i + 1069*t]];
R[i + 1923*t] = Op[i + 1070*t] ? R[B[i + 1070*t]] * R[C[i + 1070*t]] : R[B[i + 1070*t]] + R[C[i + 1070*t]];
R[i + 1924*t] = Op[i + 1071*t] ? R[B[i + 1071*t]] * R[C[i + 1071*t]] : R[B[i + 1071*t]] + R[C[i + 1071*t]];
R[i + 1925*t] = Op[i + 1072*t] ? R[B[i + 1072*t]] * R[C[i + 1072*t]] : R[B[i + 1072*t]] + R[C[i + 1072*t]];
R[i + 1926*t] = Op[i + 1073*t] ? R[B[i + 1073*t]] * R[C[i + 1073*t]] : R[B[i + 1073*t]] + R[C[i + 1073*t]];
R[i + 1927*t] = Op[i + 1074*t] ? R[B[i + 1074*t]] * R[C[i + 1074*t]] : R[B[i + 1074*t]] + R[C[i + 1074*t]];
R[i + 1928*t] = Op[i + 1075*t] ? R[B[i + 1075*t]] * R[C[i + 1075*t]] : R[B[i + 1075*t]] + R[C[i + 1075*t]];
R[i + 1929*t] = Op[i + 1076*t] ? R[B[i + 1076*t]] * R[C[i + 1076*t]] : R[B[i + 1076*t]] + R[C[i + 1076*t]];
R[i + 1930*t] = Op[i + 1077*t] ? R[B[i + 1077*t]] * R[C[i + 1077*t]] : R[B[i + 1077*t]] + R[C[i + 1077*t]];
R[i + 1931*t] = Op[i + 1078*t] ? R[B[i + 1078*t]] * R[C[i + 1078*t]] : R[B[i + 1078*t]] + R[C[i + 1078*t]];
R[i + 1932*t] = Op[i + 1079*t] ? R[B[i + 1079*t]] * R[C[i + 1079*t]] : R[B[i + 1079*t]] + R[C[i + 1079*t]];
R[i + 1933*t] = Op[i + 1080*t] ? R[B[i + 1080*t]] * R[C[i + 1080*t]] : R[B[i + 1080*t]] + R[C[i + 1080*t]];
R[i + 1934*t] = Op[i + 1081*t] ? R[B[i + 1081*t]] * R[C[i + 1081*t]] : R[B[i + 1081*t]] + R[C[i + 1081*t]];
R[i + 1935*t] = Op[i + 1082*t] ? R[B[i + 1082*t]] * R[C[i + 1082*t]] : R[B[i + 1082*t]] + R[C[i + 1082*t]];
R[i + 1936*t] = Op[i + 1083*t] ? R[B[i + 1083*t]] * R[C[i + 1083*t]] : R[B[i + 1083*t]] + R[C[i + 1083*t]];
R[i + 1937*t] = Op[i + 1084*t] ? R[B[i + 1084*t]] * R[C[i + 1084*t]] : R[B[i + 1084*t]] + R[C[i + 1084*t]];
R[i + 1938*t] = Op[i + 1085*t] ? R[B[i + 1085*t]] * R[C[i + 1085*t]] : R[B[i + 1085*t]] + R[C[i + 1085*t]];
R[i + 1939*t] = Op[i + 1086*t] ? R[B[i + 1086*t]] * R[C[i + 1086*t]] : R[B[i + 1086*t]] + R[C[i + 1086*t]];
R[i + 1940*t] = Op[i + 1087*t] ? R[B[i + 1087*t]] * R[C[i + 1087*t]] : R[B[i + 1087*t]] + R[C[i + 1087*t]];
R[i + 1941*t] = Op[i + 1088*t] ? R[B[i + 1088*t]] * R[C[i + 1088*t]] : R[B[i + 1088*t]] + R[C[i + 1088*t]];
R[i + 1942*t] = Op[i + 1089*t] ? R[B[i + 1089*t]] * R[C[i + 1089*t]] : R[B[i + 1089*t]] + R[C[i + 1089*t]];
R[i + 1943*t] = Op[i + 1090*t] ? R[B[i + 1090*t]] * R[C[i + 1090*t]] : R[B[i + 1090*t]] + R[C[i + 1090*t]];
R[i + 1944*t] = Op[i + 1091*t] ? R[B[i + 1091*t]] * R[C[i + 1091*t]] : R[B[i + 1091*t]] + R[C[i + 1091*t]];
__syncthreads();
R[i + 1945*t] = Op[i + 1092*t] ? R[B[i + 1092*t]] * R[C[i + 1092*t]] : R[B[i + 1092*t]] + R[C[i + 1092*t]];
R[i + 1946*t] = Op[i + 1093*t] ? R[B[i + 1093*t]] * R[C[i + 1093*t]] : R[B[i + 1093*t]] + R[C[i + 1093*t]];
R[i + 1947*t] = Op[i + 1094*t] ? R[B[i + 1094*t]] * R[C[i + 1094*t]] : R[B[i + 1094*t]] + R[C[i + 1094*t]];
R[i + 1948*t] = Op[i + 1095*t] ? R[B[i + 1095*t]] * R[C[i + 1095*t]] : R[B[i + 1095*t]] + R[C[i + 1095*t]];
R[i + 1949*t] = Op[i + 1096*t] ? R[B[i + 1096*t]] * R[C[i + 1096*t]] : R[B[i + 1096*t]] + R[C[i + 1096*t]];
R[i + 1950*t] = Op[i + 1097*t] ? R[B[i + 1097*t]] * R[C[i + 1097*t]] : R[B[i + 1097*t]] + R[C[i + 1097*t]];
R[i + 1951*t] = Op[i + 1098*t] ? R[B[i + 1098*t]] * R[C[i + 1098*t]] : R[B[i + 1098*t]] + R[C[i + 1098*t]];
R[i + 1952*t] = Op[i + 1099*t] ? R[B[i + 1099*t]] * R[C[i + 1099*t]] : R[B[i + 1099*t]] + R[C[i + 1099*t]];
R[i + 1953*t] = Op[i + 1100*t] ? R[B[i + 1100*t]] * R[C[i + 1100*t]] : R[B[i + 1100*t]] + R[C[i + 1100*t]];
R[i + 1954*t] = Op[i + 1101*t] ? R[B[i + 1101*t]] * R[C[i + 1101*t]] : R[B[i + 1101*t]] + R[C[i + 1101*t]];
R[i + 1955*t] = Op[i + 1102*t] ? R[B[i + 1102*t]] * R[C[i + 1102*t]] : R[B[i + 1102*t]] + R[C[i + 1102*t]];
R[i + 1956*t] = Op[i + 1103*t] ? R[B[i + 1103*t]] * R[C[i + 1103*t]] : R[B[i + 1103*t]] + R[C[i + 1103*t]];
R[i + 1957*t] = Op[i + 1104*t] ? R[B[i + 1104*t]] * R[C[i + 1104*t]] : R[B[i + 1104*t]] + R[C[i + 1104*t]];
R[i + 1958*t] = Op[i + 1105*t] ? R[B[i + 1105*t]] * R[C[i + 1105*t]] : R[B[i + 1105*t]] + R[C[i + 1105*t]];
R[i + 1959*t] = Op[i + 1106*t] ? R[B[i + 1106*t]] * R[C[i + 1106*t]] : R[B[i + 1106*t]] + R[C[i + 1106*t]];
R[i + 1960*t] = Op[i + 1107*t] ? R[B[i + 1107*t]] * R[C[i + 1107*t]] : R[B[i + 1107*t]] + R[C[i + 1107*t]];
R[i + 1961*t] = Op[i + 1108*t] ? R[B[i + 1108*t]] * R[C[i + 1108*t]] : R[B[i + 1108*t]] + R[C[i + 1108*t]];
R[i + 1962*t] = Op[i + 1109*t] ? R[B[i + 1109*t]] * R[C[i + 1109*t]] : R[B[i + 1109*t]] + R[C[i + 1109*t]];
R[i + 1963*t] = Op[i + 1110*t] ? R[B[i + 1110*t]] * R[C[i + 1110*t]] : R[B[i + 1110*t]] + R[C[i + 1110*t]];
R[i + 1964*t] = Op[i + 1111*t] ? R[B[i + 1111*t]] * R[C[i + 1111*t]] : R[B[i + 1111*t]] + R[C[i + 1111*t]];
R[i + 1965*t] = Op[i + 1112*t] ? R[B[i + 1112*t]] * R[C[i + 1112*t]] : R[B[i + 1112*t]] + R[C[i + 1112*t]];
R[i + 1966*t] = Op[i + 1113*t] ? R[B[i + 1113*t]] * R[C[i + 1113*t]] : R[B[i + 1113*t]] + R[C[i + 1113*t]];
R[i + 1967*t] = Op[i + 1114*t] ? R[B[i + 1114*t]] * R[C[i + 1114*t]] : R[B[i + 1114*t]] + R[C[i + 1114*t]];
R[i + 1968*t] = Op[i + 1115*t] ? R[B[i + 1115*t]] * R[C[i + 1115*t]] : R[B[i + 1115*t]] + R[C[i + 1115*t]];
R[i + 1969*t] = Op[i + 1116*t] ? R[B[i + 1116*t]] * R[C[i + 1116*t]] : R[B[i + 1116*t]] + R[C[i + 1116*t]];
R[i + 1970*t] = Op[i + 1117*t] ? R[B[i + 1117*t]] * R[C[i + 1117*t]] : R[B[i + 1117*t]] + R[C[i + 1117*t]];
R[i + 1971*t] = Op[i + 1118*t] ? R[B[i + 1118*t]] * R[C[i + 1118*t]] : R[B[i + 1118*t]] + R[C[i + 1118*t]];
R[i + 1972*t] = Op[i + 1119*t] ? R[B[i + 1119*t]] * R[C[i + 1119*t]] : R[B[i + 1119*t]] + R[C[i + 1119*t]];
R[i + 1973*t] = Op[i + 1120*t] ? R[B[i + 1120*t]] * R[C[i + 1120*t]] : R[B[i + 1120*t]] + R[C[i + 1120*t]];
R[i + 1974*t] = Op[i + 1121*t] ? R[B[i + 1121*t]] * R[C[i + 1121*t]] : R[B[i + 1121*t]] + R[C[i + 1121*t]];
R[i + 1975*t] = Op[i + 1122*t] ? R[B[i + 1122*t]] * R[C[i + 1122*t]] : R[B[i + 1122*t]] + R[C[i + 1122*t]];
R[i + 1976*t] = Op[i + 1123*t] ? R[B[i + 1123*t]] * R[C[i + 1123*t]] : R[B[i + 1123*t]] + R[C[i + 1123*t]];
R[i + 1977*t] = Op[i + 1124*t] ? R[B[i + 1124*t]] * R[C[i + 1124*t]] : R[B[i + 1124*t]] + R[C[i + 1124*t]];
R[i + 1978*t] = Op[i + 1125*t] ? R[B[i + 1125*t]] * R[C[i + 1125*t]] : R[B[i + 1125*t]] + R[C[i + 1125*t]];
R[i + 1979*t] = Op[i + 1126*t] ? R[B[i + 1126*t]] * R[C[i + 1126*t]] : R[B[i + 1126*t]] + R[C[i + 1126*t]];
R[i + 1980*t] = Op[i + 1127*t] ? R[B[i + 1127*t]] * R[C[i + 1127*t]] : R[B[i + 1127*t]] + R[C[i + 1127*t]];
R[i + 1981*t] = Op[i + 1128*t] ? R[B[i + 1128*t]] * R[C[i + 1128*t]] : R[B[i + 1128*t]] + R[C[i + 1128*t]];
__syncthreads();
R[i + 1982*t] = Op[i + 1129*t] ? R[B[i + 1129*t]] * R[C[i + 1129*t]] : R[B[i + 1129*t]] + R[C[i + 1129*t]];
R[i + 1983*t] = Op[i + 1130*t] ? R[B[i + 1130*t]] * R[C[i + 1130*t]] : R[B[i + 1130*t]] + R[C[i + 1130*t]];
R[i + 1984*t] = Op[i + 1131*t] ? R[B[i + 1131*t]] * R[C[i + 1131*t]] : R[B[i + 1131*t]] + R[C[i + 1131*t]];
R[i + 1985*t] = Op[i + 1132*t] ? R[B[i + 1132*t]] * R[C[i + 1132*t]] : R[B[i + 1132*t]] + R[C[i + 1132*t]];
R[i + 1986*t] = Op[i + 1133*t] ? R[B[i + 1133*t]] * R[C[i + 1133*t]] : R[B[i + 1133*t]] + R[C[i + 1133*t]];
R[i + 1987*t] = Op[i + 1134*t] ? R[B[i + 1134*t]] * R[C[i + 1134*t]] : R[B[i + 1134*t]] + R[C[i + 1134*t]];
R[i + 1988*t] = Op[i + 1135*t] ? R[B[i + 1135*t]] * R[C[i + 1135*t]] : R[B[i + 1135*t]] + R[C[i + 1135*t]];
R[i + 1989*t] = Op[i + 1136*t] ? R[B[i + 1136*t]] * R[C[i + 1136*t]] : R[B[i + 1136*t]] + R[C[i + 1136*t]];
R[i + 1990*t] = Op[i + 1137*t] ? R[B[i + 1137*t]] * R[C[i + 1137*t]] : R[B[i + 1137*t]] + R[C[i + 1137*t]];
R[i + 1991*t] = Op[i + 1138*t] ? R[B[i + 1138*t]] * R[C[i + 1138*t]] : R[B[i + 1138*t]] + R[C[i + 1138*t]];
R[i + 1992*t] = Op[i + 1139*t] ? R[B[i + 1139*t]] * R[C[i + 1139*t]] : R[B[i + 1139*t]] + R[C[i + 1139*t]];
R[i + 1993*t] = Op[i + 1140*t] ? R[B[i + 1140*t]] * R[C[i + 1140*t]] : R[B[i + 1140*t]] + R[C[i + 1140*t]];
R[i + 1994*t] = Op[i + 1141*t] ? R[B[i + 1141*t]] * R[C[i + 1141*t]] : R[B[i + 1141*t]] + R[C[i + 1141*t]];
R[i + 1995*t] = Op[i + 1142*t] ? R[B[i + 1142*t]] * R[C[i + 1142*t]] : R[B[i + 1142*t]] + R[C[i + 1142*t]];
R[i + 1996*t] = Op[i + 1143*t] ? R[B[i + 1143*t]] * R[C[i + 1143*t]] : R[B[i + 1143*t]] + R[C[i + 1143*t]];
R[i + 1997*t] = Op[i + 1144*t] ? R[B[i + 1144*t]] * R[C[i + 1144*t]] : R[B[i + 1144*t]] + R[C[i + 1144*t]];
R[i + 1998*t] = Op[i + 1145*t] ? R[B[i + 1145*t]] * R[C[i + 1145*t]] : R[B[i + 1145*t]] + R[C[i + 1145*t]];
R[i + 1999*t] = Op[i + 1146*t] ? R[B[i + 1146*t]] * R[C[i + 1146*t]] : R[B[i + 1146*t]] + R[C[i + 1146*t]];
R[i + 2000*t] = Op[i + 1147*t] ? R[B[i + 1147*t]] * R[C[i + 1147*t]] : R[B[i + 1147*t]] + R[C[i + 1147*t]];
R[i + 2001*t] = Op[i + 1148*t] ? R[B[i + 1148*t]] * R[C[i + 1148*t]] : R[B[i + 1148*t]] + R[C[i + 1148*t]];
R[i + 2002*t] = Op[i + 1149*t] ? R[B[i + 1149*t]] * R[C[i + 1149*t]] : R[B[i + 1149*t]] + R[C[i + 1149*t]];
R[i + 2003*t] = Op[i + 1150*t] ? R[B[i + 1150*t]] * R[C[i + 1150*t]] : R[B[i + 1150*t]] + R[C[i + 1150*t]];
R[i + 2004*t] = Op[i + 1151*t] ? R[B[i + 1151*t]] * R[C[i + 1151*t]] : R[B[i + 1151*t]] + R[C[i + 1151*t]];
R[i + 2005*t] = Op[i + 1152*t] ? R[B[i + 1152*t]] * R[C[i + 1152*t]] : R[B[i + 1152*t]] + R[C[i + 1152*t]];
R[i + 2006*t] = Op[i + 1153*t] ? R[B[i + 1153*t]] * R[C[i + 1153*t]] : R[B[i + 1153*t]] + R[C[i + 1153*t]];
R[i + 2007*t] = Op[i + 1154*t] ? R[B[i + 1154*t]] * R[C[i + 1154*t]] : R[B[i + 1154*t]] + R[C[i + 1154*t]];
R[i + 2008*t] = Op[i + 1155*t] ? R[B[i + 1155*t]] * R[C[i + 1155*t]] : R[B[i + 1155*t]] + R[C[i + 1155*t]];
R[i + 2009*t] = Op[i + 1156*t] ? R[B[i + 1156*t]] * R[C[i + 1156*t]] : R[B[i + 1156*t]] + R[C[i + 1156*t]];
R[i + 2010*t] = Op[i + 1157*t] ? R[B[i + 1157*t]] * R[C[i + 1157*t]] : R[B[i + 1157*t]] + R[C[i + 1157*t]];
R[i + 2011*t] = Op[i + 1158*t] ? R[B[i + 1158*t]] * R[C[i + 1158*t]] : R[B[i + 1158*t]] + R[C[i + 1158*t]];
R[i + 2012*t] = Op[i + 1159*t] ? R[B[i + 1159*t]] * R[C[i + 1159*t]] : R[B[i + 1159*t]] + R[C[i + 1159*t]];
R[i + 2013*t] = Op[i + 1160*t] ? R[B[i + 1160*t]] * R[C[i + 1160*t]] : R[B[i + 1160*t]] + R[C[i + 1160*t]];
__syncthreads();
R[i + 2014*t] = Op[i + 1161*t] ? R[B[i + 1161*t]] * R[C[i + 1161*t]] : R[B[i + 1161*t]] + R[C[i + 1161*t]];
R[i + 2015*t] = Op[i + 1162*t] ? R[B[i + 1162*t]] * R[C[i + 1162*t]] : R[B[i + 1162*t]] + R[C[i + 1162*t]];
R[i + 2016*t] = Op[i + 1163*t] ? R[B[i + 1163*t]] * R[C[i + 1163*t]] : R[B[i + 1163*t]] + R[C[i + 1163*t]];
R[i + 2017*t] = Op[i + 1164*t] ? R[B[i + 1164*t]] * R[C[i + 1164*t]] : R[B[i + 1164*t]] + R[C[i + 1164*t]];
R[i + 2018*t] = Op[i + 1165*t] ? R[B[i + 1165*t]] * R[C[i + 1165*t]] : R[B[i + 1165*t]] + R[C[i + 1165*t]];
R[i + 2019*t] = Op[i + 1166*t] ? R[B[i + 1166*t]] * R[C[i + 1166*t]] : R[B[i + 1166*t]] + R[C[i + 1166*t]];
R[i + 2020*t] = Op[i + 1167*t] ? R[B[i + 1167*t]] * R[C[i + 1167*t]] : R[B[i + 1167*t]] + R[C[i + 1167*t]];
R[i + 2021*t] = Op[i + 1168*t] ? R[B[i + 1168*t]] * R[C[i + 1168*t]] : R[B[i + 1168*t]] + R[C[i + 1168*t]];
R[i + 2022*t] = Op[i + 1169*t] ? R[B[i + 1169*t]] * R[C[i + 1169*t]] : R[B[i + 1169*t]] + R[C[i + 1169*t]];
R[i + 2023*t] = Op[i + 1170*t] ? R[B[i + 1170*t]] * R[C[i + 1170*t]] : R[B[i + 1170*t]] + R[C[i + 1170*t]];
R[i + 2024*t] = Op[i + 1171*t] ? R[B[i + 1171*t]] * R[C[i + 1171*t]] : R[B[i + 1171*t]] + R[C[i + 1171*t]];
R[i + 2025*t] = Op[i + 1172*t] ? R[B[i + 1172*t]] * R[C[i + 1172*t]] : R[B[i + 1172*t]] + R[C[i + 1172*t]];
R[i + 2026*t] = Op[i + 1173*t] ? R[B[i + 1173*t]] * R[C[i + 1173*t]] : R[B[i + 1173*t]] + R[C[i + 1173*t]];
R[i + 2027*t] = Op[i + 1174*t] ? R[B[i + 1174*t]] * R[C[i + 1174*t]] : R[B[i + 1174*t]] + R[C[i + 1174*t]];
R[i + 2028*t] = Op[i + 1175*t] ? R[B[i + 1175*t]] * R[C[i + 1175*t]] : R[B[i + 1175*t]] + R[C[i + 1175*t]];
R[i + 2029*t] = Op[i + 1176*t] ? R[B[i + 1176*t]] * R[C[i + 1176*t]] : R[B[i + 1176*t]] + R[C[i + 1176*t]];
R[i + 2030*t] = Op[i + 1177*t] ? R[B[i + 1177*t]] * R[C[i + 1177*t]] : R[B[i + 1177*t]] + R[C[i + 1177*t]];
R[i + 2031*t] = Op[i + 1178*t] ? R[B[i + 1178*t]] * R[C[i + 1178*t]] : R[B[i + 1178*t]] + R[C[i + 1178*t]];
R[i + 2032*t] = Op[i + 1179*t] ? R[B[i + 1179*t]] * R[C[i + 1179*t]] : R[B[i + 1179*t]] + R[C[i + 1179*t]];
__syncthreads();
R[i + 2033*t] = Op[i + 1180*t] ? R[B[i + 1180*t]] * R[C[i + 1180*t]] : R[B[i + 1180*t]] + R[C[i + 1180*t]];
R[i + 2034*t] = Op[i + 1181*t] ? R[B[i + 1181*t]] * R[C[i + 1181*t]] : R[B[i + 1181*t]] + R[C[i + 1181*t]];
R[i + 2035*t] = Op[i + 1182*t] ? R[B[i + 1182*t]] * R[C[i + 1182*t]] : R[B[i + 1182*t]] + R[C[i + 1182*t]];
R[i + 2036*t] = Op[i + 1183*t] ? R[B[i + 1183*t]] * R[C[i + 1183*t]] : R[B[i + 1183*t]] + R[C[i + 1183*t]];
R[i + 2037*t] = Op[i + 1184*t] ? R[B[i + 1184*t]] * R[C[i + 1184*t]] : R[B[i + 1184*t]] + R[C[i + 1184*t]];
R[i + 2038*t] = Op[i + 1185*t] ? R[B[i + 1185*t]] * R[C[i + 1185*t]] : R[B[i + 1185*t]] + R[C[i + 1185*t]];
R[i + 2039*t] = Op[i + 1186*t] ? R[B[i + 1186*t]] * R[C[i + 1186*t]] : R[B[i + 1186*t]] + R[C[i + 1186*t]];
R[i + 2040*t] = Op[i + 1187*t] ? R[B[i + 1187*t]] * R[C[i + 1187*t]] : R[B[i + 1187*t]] + R[C[i + 1187*t]];
R[i + 2041*t] = Op[i + 1188*t] ? R[B[i + 1188*t]] * R[C[i + 1188*t]] : R[B[i + 1188*t]] + R[C[i + 1188*t]];
R[i + 2042*t] = Op[i + 1189*t] ? R[B[i + 1189*t]] * R[C[i + 1189*t]] : R[B[i + 1189*t]] + R[C[i + 1189*t]];
R[i + 2043*t] = Op[i + 1190*t] ? R[B[i + 1190*t]] * R[C[i + 1190*t]] : R[B[i + 1190*t]] + R[C[i + 1190*t]];
R[i + 2044*t] = Op[i + 1191*t] ? R[B[i + 1191*t]] * R[C[i + 1191*t]] : R[B[i + 1191*t]] + R[C[i + 1191*t]];
R[i + 2045*t] = Op[i + 1192*t] ? R[B[i + 1192*t]] * R[C[i + 1192*t]] : R[B[i + 1192*t]] + R[C[i + 1192*t]];
R[i + 2046*t] = Op[i + 1193*t] ? R[B[i + 1193*t]] * R[C[i + 1193*t]] : R[B[i + 1193*t]] + R[C[i + 1193*t]];
R[i + 2047*t] = Op[i + 1194*t] ? R[B[i + 1194*t]] * R[C[i + 1194*t]] : R[B[i + 1194*t]] + R[C[i + 1194*t]];
R[i + 2048*t] = Op[i + 1195*t] ? R[B[i + 1195*t]] * R[C[i + 1195*t]] : R[B[i + 1195*t]] + R[C[i + 1195*t]];
__syncthreads();
R[i + 2049*t] = Op[i + 1196*t] ? R[B[i + 1196*t]] * R[C[i + 1196*t]] : R[B[i + 1196*t]] + R[C[i + 1196*t]];
R[i + 2050*t] = Op[i + 1197*t] ? R[B[i + 1197*t]] * R[C[i + 1197*t]] : R[B[i + 1197*t]] + R[C[i + 1197*t]];
R[i + 2051*t] = Op[i + 1198*t] ? R[B[i + 1198*t]] * R[C[i + 1198*t]] : R[B[i + 1198*t]] + R[C[i + 1198*t]];
R[i + 2052*t] = Op[i + 1199*t] ? R[B[i + 1199*t]] * R[C[i + 1199*t]] : R[B[i + 1199*t]] + R[C[i + 1199*t]];
R[i + 2053*t] = Op[i + 1200*t] ? R[B[i + 1200*t]] * R[C[i + 1200*t]] : R[B[i + 1200*t]] + R[C[i + 1200*t]];
R[i + 2054*t] = Op[i + 1201*t] ? R[B[i + 1201*t]] * R[C[i + 1201*t]] : R[B[i + 1201*t]] + R[C[i + 1201*t]];
R[i + 2055*t] = Op[i + 1202*t] ? R[B[i + 1202*t]] * R[C[i + 1202*t]] : R[B[i + 1202*t]] + R[C[i + 1202*t]];
R[i + 2056*t] = Op[i + 1203*t] ? R[B[i + 1203*t]] * R[C[i + 1203*t]] : R[B[i + 1203*t]] + R[C[i + 1203*t]];
R[i + 2057*t] = Op[i + 1204*t] ? R[B[i + 1204*t]] * R[C[i + 1204*t]] : R[B[i + 1204*t]] + R[C[i + 1204*t]];
R[i + 2058*t] = Op[i + 1205*t] ? R[B[i + 1205*t]] * R[C[i + 1205*t]] : R[B[i + 1205*t]] + R[C[i + 1205*t]];
R[i + 2059*t] = Op[i + 1206*t] ? R[B[i + 1206*t]] * R[C[i + 1206*t]] : R[B[i + 1206*t]] + R[C[i + 1206*t]];
R[i + 2060*t] = Op[i + 1207*t] ? R[B[i + 1207*t]] * R[C[i + 1207*t]] : R[B[i + 1207*t]] + R[C[i + 1207*t]];
R[i + 2061*t] = Op[i + 1208*t] ? R[B[i + 1208*t]] * R[C[i + 1208*t]] : R[B[i + 1208*t]] + R[C[i + 1208*t]];
R[i + 2062*t] = Op[i + 1209*t] ? R[B[i + 1209*t]] * R[C[i + 1209*t]] : R[B[i + 1209*t]] + R[C[i + 1209*t]];
R[i + 2063*t] = Op[i + 1210*t] ? R[B[i + 1210*t]] * R[C[i + 1210*t]] : R[B[i + 1210*t]] + R[C[i + 1210*t]];
R[i + 2064*t] = Op[i + 1211*t] ? R[B[i + 1211*t]] * R[C[i + 1211*t]] : R[B[i + 1211*t]] + R[C[i + 1211*t]];
__syncthreads();
R[i + 2065*t] = Op[i + 1212*t] ? R[B[i + 1212*t]] * R[C[i + 1212*t]] : R[B[i + 1212*t]] + R[C[i + 1212*t]];
R[i + 2066*t] = Op[i + 1213*t] ? R[B[i + 1213*t]] * R[C[i + 1213*t]] : R[B[i + 1213*t]] + R[C[i + 1213*t]];
R[i + 2067*t] = Op[i + 1214*t] ? R[B[i + 1214*t]] * R[C[i + 1214*t]] : R[B[i + 1214*t]] + R[C[i + 1214*t]];
R[i + 2068*t] = Op[i + 1215*t] ? R[B[i + 1215*t]] * R[C[i + 1215*t]] : R[B[i + 1215*t]] + R[C[i + 1215*t]];
R[i + 2069*t] = Op[i + 1216*t] ? R[B[i + 1216*t]] * R[C[i + 1216*t]] : R[B[i + 1216*t]] + R[C[i + 1216*t]];
R[i + 2070*t] = Op[i + 1217*t] ? R[B[i + 1217*t]] * R[C[i + 1217*t]] : R[B[i + 1217*t]] + R[C[i + 1217*t]];
R[i + 2071*t] = Op[i + 1218*t] ? R[B[i + 1218*t]] * R[C[i + 1218*t]] : R[B[i + 1218*t]] + R[C[i + 1218*t]];
R[i + 2072*t] = Op[i + 1219*t] ? R[B[i + 1219*t]] * R[C[i + 1219*t]] : R[B[i + 1219*t]] + R[C[i + 1219*t]];
R[i + 2073*t] = Op[i + 1220*t] ? R[B[i + 1220*t]] * R[C[i + 1220*t]] : R[B[i + 1220*t]] + R[C[i + 1220*t]];
R[i + 2074*t] = Op[i + 1221*t] ? R[B[i + 1221*t]] * R[C[i + 1221*t]] : R[B[i + 1221*t]] + R[C[i + 1221*t]];
R[i + 2075*t] = Op[i + 1222*t] ? R[B[i + 1222*t]] * R[C[i + 1222*t]] : R[B[i + 1222*t]] + R[C[i + 1222*t]];
__syncthreads();
R[i + 2076*t] = Op[i + 1223*t] ? R[B[i + 1223*t]] * R[C[i + 1223*t]] : R[B[i + 1223*t]] + R[C[i + 1223*t]];
R[i + 2077*t] = Op[i + 1224*t] ? R[B[i + 1224*t]] * R[C[i + 1224*t]] : R[B[i + 1224*t]] + R[C[i + 1224*t]];
R[i + 2078*t] = Op[i + 1225*t] ? R[B[i + 1225*t]] * R[C[i + 1225*t]] : R[B[i + 1225*t]] + R[C[i + 1225*t]];
R[i + 2079*t] = Op[i + 1226*t] ? R[B[i + 1226*t]] * R[C[i + 1226*t]] : R[B[i + 1226*t]] + R[C[i + 1226*t]];
R[i + 2080*t] = Op[i + 1227*t] ? R[B[i + 1227*t]] * R[C[i + 1227*t]] : R[B[i + 1227*t]] + R[C[i + 1227*t]];
R[i + 2081*t] = Op[i + 1228*t] ? R[B[i + 1228*t]] * R[C[i + 1228*t]] : R[B[i + 1228*t]] + R[C[i + 1228*t]];
R[i + 2082*t] = Op[i + 1229*t] ? R[B[i + 1229*t]] * R[C[i + 1229*t]] : R[B[i + 1229*t]] + R[C[i + 1229*t]];
R[i + 2083*t] = Op[i + 1230*t] ? R[B[i + 1230*t]] * R[C[i + 1230*t]] : R[B[i + 1230*t]] + R[C[i + 1230*t]];
R[i + 2084*t] = Op[i + 1231*t] ? R[B[i + 1231*t]] * R[C[i + 1231*t]] : R[B[i + 1231*t]] + R[C[i + 1231*t]];
__syncthreads();
R[i + 2085*t] = Op[i + 1232*t] ? R[B[i + 1232*t]] * R[C[i + 1232*t]] : R[B[i + 1232*t]] + R[C[i + 1232*t]];
R[i + 2086*t] = Op[i + 1233*t] ? R[B[i + 1233*t]] * R[C[i + 1233*t]] : R[B[i + 1233*t]] + R[C[i + 1233*t]];
R[i + 2087*t] = Op[i + 1234*t] ? R[B[i + 1234*t]] * R[C[i + 1234*t]] : R[B[i + 1234*t]] + R[C[i + 1234*t]];
R[i + 2088*t] = Op[i + 1235*t] ? R[B[i + 1235*t]] * R[C[i + 1235*t]] : R[B[i + 1235*t]] + R[C[i + 1235*t]];
R[i + 2089*t] = Op[i + 1236*t] ? R[B[i + 1236*t]] * R[C[i + 1236*t]] : R[B[i + 1236*t]] + R[C[i + 1236*t]];
R[i + 2090*t] = Op[i + 1237*t] ? R[B[i + 1237*t]] * R[C[i + 1237*t]] : R[B[i + 1237*t]] + R[C[i + 1237*t]];
R[i + 2091*t] = Op[i + 1238*t] ? R[B[i + 1238*t]] * R[C[i + 1238*t]] : R[B[i + 1238*t]] + R[C[i + 1238*t]];
R[i + 2092*t] = Op[i + 1239*t] ? R[B[i + 1239*t]] * R[C[i + 1239*t]] : R[B[i + 1239*t]] + R[C[i + 1239*t]];
__syncthreads();
R[i + 2093*t] = Op[i + 1240*t] ? R[B[i + 1240*t]] * R[C[i + 1240*t]] : R[B[i + 1240*t]] + R[C[i + 1240*t]];
R[i + 2094*t] = Op[i + 1241*t] ? R[B[i + 1241*t]] * R[C[i + 1241*t]] : R[B[i + 1241*t]] + R[C[i + 1241*t]];
R[i + 2095*t] = Op[i + 1242*t] ? R[B[i + 1242*t]] * R[C[i + 1242*t]] : R[B[i + 1242*t]] + R[C[i + 1242*t]];
R[i + 2096*t] = Op[i + 1243*t] ? R[B[i + 1243*t]] * R[C[i + 1243*t]] : R[B[i + 1243*t]] + R[C[i + 1243*t]];
R[i + 2097*t] = Op[i + 1244*t] ? R[B[i + 1244*t]] * R[C[i + 1244*t]] : R[B[i + 1244*t]] + R[C[i + 1244*t]];
R[i + 2098*t] = Op[i + 1245*t] ? R[B[i + 1245*t]] * R[C[i + 1245*t]] : R[B[i + 1245*t]] + R[C[i + 1245*t]];
R[i + 2099*t] = Op[i + 1246*t] ? R[B[i + 1246*t]] * R[C[i + 1246*t]] : R[B[i + 1246*t]] + R[C[i + 1246*t]];
__syncthreads();
R[i + 2100*t] = Op[i + 1247*t] ? R[B[i + 1247*t]] * R[C[i + 1247*t]] : R[B[i + 1247*t]] + R[C[i + 1247*t]];
R[i + 2101*t] = Op[i + 1248*t] ? R[B[i + 1248*t]] * R[C[i + 1248*t]] : R[B[i + 1248*t]] + R[C[i + 1248*t]];
R[i + 2102*t] = Op[i + 1249*t] ? R[B[i + 1249*t]] * R[C[i + 1249*t]] : R[B[i + 1249*t]] + R[C[i + 1249*t]];
R[i + 2103*t] = Op[i + 1250*t] ? R[B[i + 1250*t]] * R[C[i + 1250*t]] : R[B[i + 1250*t]] + R[C[i + 1250*t]];
R[i + 2104*t] = Op[i + 1251*t] ? R[B[i + 1251*t]] * R[C[i + 1251*t]] : R[B[i + 1251*t]] + R[C[i + 1251*t]];
__syncthreads();
R[i + 2105*t] = Op[i + 1252*t] ? R[B[i + 1252*t]] * R[C[i + 1252*t]] : R[B[i + 1252*t]] + R[C[i + 1252*t]];
R[i + 2106*t] = Op[i + 1253*t] ? R[B[i + 1253*t]] * R[C[i + 1253*t]] : R[B[i + 1253*t]] + R[C[i + 1253*t]];
R[i + 2107*t] = Op[i + 1254*t] ? R[B[i + 1254*t]] * R[C[i + 1254*t]] : R[B[i + 1254*t]] + R[C[i + 1254*t]];
R[i + 2108*t] = Op[i + 1255*t] ? R[B[i + 1255*t]] * R[C[i + 1255*t]] : R[B[i + 1255*t]] + R[C[i + 1255*t]];
__syncthreads();
R[i + 2109*t] = Op[i + 1256*t] ? R[B[i + 1256*t]] * R[C[i + 1256*t]] : R[B[i + 1256*t]] + R[C[i + 1256*t]];
R[i + 2110*t] = Op[i + 1257*t] ? R[B[i + 1257*t]] * R[C[i + 1257*t]] : R[B[i + 1257*t]] + R[C[i + 1257*t]];
R[i + 2111*t] = Op[i + 1258*t] ? R[B[i + 1258*t]] * R[C[i + 1258*t]] : R[B[i + 1258*t]] + R[C[i + 1258*t]];
__syncthreads();
R[i + 2112*t] = Op[i + 1259*t] ? R[B[i + 1259*t]] * R[C[i + 1259*t]] : R[B[i + 1259*t]] + R[C[i + 1259*t]];
R[i + 2113*t] = Op[i + 1260*t] ? R[B[i + 1260*t]] * R[C[i + 1260*t]] : R[B[i + 1260*t]] + R[C[i + 1260*t]];
__syncthreads();
R[i + 2114*t] = Op[i + 1261*t] ? R[B[i + 1261*t]] * R[C[i + 1261*t]] : R[B[i + 1261*t]] + R[C[i + 1261*t]];
R[i + 2115*t] = Op[i + 1262*t] ? R[B[i + 1262*t]] * R[C[i + 1262*t]] : R[B[i + 1262*t]] + R[C[i + 1262*t]];
__syncthreads();
R[i + 2116*t] = Op[i + 1263*t] ? R[B[i + 1263*t]] * R[C[i + 1263*t]] : R[B[i + 1263*t]] + R[C[i + 1263*t]];
__syncthreads();
R[i + 2117*t] = Op[i + 1264*t] ? R[B[i + 1264*t]] * R[C[i + 1264*t]] : R[B[i + 1264*t]] + R[C[i + 1264*t]];
__syncthreads();
R[i + 2118*t] = Op[i + 1265*t] ? R[B[i + 1265*t]] * R[C[i + 1265*t]] : R[B[i + 1265*t]] + R[C[i + 1265*t]];
__syncthreads();
R[i + 2119*t] = Op[i + 1266*t] ? R[B[i + 1266*t]] * R[C[i + 1266*t]] : R[B[i + 1266*t]] + R[C[i + 1266*t]];
__syncthreads();
R[i + 2120*t] = Op[i + 1267*t] ? R[B[i + 1267*t]] * R[C[i + 1267*t]] : R[B[i + 1267*t]] + R[C[i + 1267*t]];
__syncthreads();
R[i + 2121*t] = Op[i + 1268*t] ? R[B[i + 1268*t]] * R[C[i + 1268*t]] : R[B[i + 1268*t]] + R[C[i + 1268*t]];
__syncthreads();
R[i + 2122*t] = Op[i + 1269*t] ? R[B[i + 1269*t]] * R[C[i + 1269*t]] : R[B[i + 1269*t]] + R[C[i + 1269*t]];
__syncthreads();
R[i + 2123*t] = Op[i + 1270*t] ? R[B[i + 1270*t]] * R[C[i + 1270*t]] : R[B[i + 1270*t]] + R[C[i + 1270*t]];
if (i==0) { final += R[2123*t]; }
__syncthreads();
}
if (i==0) { A[0] = final; }
}
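
/* Note (a reading of the generated code above, not part of the original
 * emitter): every unrolled line is an instance of one template -- evaluate
 * slot k of an expression DAG, multiplying when Op[k] is set and adding
 * otherwise -- with __syncthreads() separating dependency levels. A
 * loop-based equivalent of one level looks like this (`first` and `last`
 * are hypothetical level bounds; in the span above the result slot leads
 * the operand slot by a fixed offset of 853):
 *
 *   for (int k = first; k < last; k++)
 *       R[i + (k + 853)*t] = Op[i + k*t] ? R[B[i + k*t]] * R[C[i + k*t]]
 *                                        : R[B[i + k*t]] + R[C[i + k*t]];
 *   __syncthreads();
 */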
|
33776125c7e97c4e3ff0dcaf470ef8703fefd5f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///*****************************************************************************************
// posvis.c
//
// Fill in the portion of a plane-of-sky image due to a particular model component: Assign
// each relevant POS pixel a z-value in observer coordinates (distance from the origin
// towards Earth) and a value of cos(scattering angle).
//
// Return 1 if any portion of this component lies outside the specified POS window,
// 0 otherwise.
//
// If the "src" argument is true, the "observer" is the Sun rather than Earth, and
// "plane-of-sky" becomes "projection as viewed from the Sun."
//
// Modified 2014 February 20 by CM:
// Allow facets that partly project outside the POS frame to contribute to the POS frame
// (thus avoiding see-through "holes" in the model at the edge of a POS image)
//
// Modified 2010 May 18 by CM:
// Bug fix: When checking if a POS pixel hasn't already been assigned
// values during a previous call to posvis for a different component,
// check for fac[i][j] < 0 rather than cosa[i][j] == 0.0, since for
// bistatic situations the latter condition will also be true for
// pixels centered on Earth-facing facets that don't face the Sun
//
// Modified 2009 July 2 by CM:
// Eliminate the check that facets are "active": this term is now being
// interpreted to mean "not lying interior to the model," so the
// check is unnecessary and the determination of active vs. inactive
// status is inaccurate for half-exposed facets at the intersections
// between model components
//
// Modified 2009 April 3 by CM:
// Compute the "posbnd_logfactor" parameter: if the model extends beyond
// the POS frame, posbnd_logfactor is set to the logarithm of the
// ratio of the area that would have been required to "contain" the
// entire model divided by the area of the actual POS frame
// Work with floating-point pixel numbers (imin_dbl, etc.), at least
// initially, in case the sky rendering for a model with illegal
// parameters would involve huge pixel numbers that exceed the
// limits for valid integers
//
// Modified 2007 August 4 by CM:
// Add "orbit_offset" and "body" parameters and remove "facet" parameter
// Add body, bodyill, comp, and compill matrices for POS frames
//
// Modified 2006 June 21 by CM:
// For POS renderings, change res to km_per_pixel
//
// Modified 2005 September 19 by CM:
// Allow for roundoff error when determining which POS pixels project
// onto each model facet
//
// Modified 2005 June 27 by CM:
// Renamed "round" function to "iround" to avoid conflicts
//
// Modified 2005 June 22 by CM:
// Slightly modified some comments
//
// Modified 2005 January 25 by CM:
// Take care of unused and uninitialized variables
//
// Modified 2004 December 19 by CM:
// Added more comments
// Put update of rectangular POS area into "POSrect" routine and applied it
// even to facets which lie outside the POS frame
//
// Modified 2004 Feb 11 by CM:
// Added comments
//
// Modified 2003 May 5 by CM:
// Removed redundant coordinate transformation of the unit normal n
// for the no-pvs_smoothing case
// *****************************************************************************************/
//extern "C" {
//#include "../shape/head.h"
//#include <limits.h>
//}
//
//#define maxbins 100
//__device__ int posvis_tiled_outbnd, posvis_tiled_smooth;
//
///* Note that the following custom atomic functions must be declared in each
// * file where they are needed (a consequence of being static device functions) */
//__device__ static float atomicMaxf(float* address, float val) {
// int* address_as_i = (int*) address;
// int old = *address_as_i, assumed;
// do {
// assumed = old;
// old = ::atomicCAS(address_as_i, assumed,
// __float_as_int(::fmaxf(val, __int_as_float(assumed))));
// } while (assumed != old);
// return __int_as_float(old);
//}
//__device__ static double atomicMax64(double* address, double val)
//{
// unsigned long long* address_as_i = (unsigned long long*) address;
// unsigned long long old = *address_as_i, assumed;
// do {
// assumed = old;
// old = ::atomicCAS(address_as_i, assumed,
// __double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
// } while (assumed != old);
// return __longlong_as_double(old);
//}
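//
///* A minimal usage sketch (an addition, not from the original file; the
// * kernel name and arguments are hypothetical): atomicMaxf lets many
// * threads race to update a single float maximum, e.g. the largest value
// * in an array:
// *
// * __global__ void maxval_krnl(const float *data, float *result, int n) {
// *     int idx = blockIdx.x * blockDim.x + threadIdx.x;
// *     if (idx < n)
// *         atomicMaxf(result, data[idx]);  // CAS loop defined above
// * }
// *
// * *result must be initialized (e.g. to -HUGENUMBER) before the launch. */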
//
//__global__ void posvis_tiled_init_krnl64(
// struct par_t *dpar,
// struct pos_t **pos,
// double4 *ijminmax_overall,
// double3 *oa,
// double3 *usrc,
// int *outbndarr,
// int c,
// int start,
// int src,
// int size,
// int set,
// int src_override) {
//
// /* nfrm_alloc-threaded */
// int f = blockIdx.x * blockDim.x + threadIdx.x + start;
//
// if (f < size) {
// if (f == start) {
// posvis_tiled_outbnd = 0;
// posvis_tiled_smooth = dpar->pos_smooth;
// if (src_override) posvis_tiled_smooth = 0;
// }
// ijminmax_overall[f].w = ijminmax_overall[f].y = HUGENUMBER;
// ijminmax_overall[f].x = ijminmax_overall[f].z = -HUGENUMBER;
// pos[f]->posbnd_logfactor = 0.0;
//
// dev_mtrnsps2(oa, pos[f]->ae, f);
// if (src) {
// /* We're viewing the model from the sun: at the center of each pixel
// * in the projected view, we want cos(incidence angle), distance from
// * the COM towards the sun, and the facet number. */
// dev_mmmul2(oa, pos[f]->se, oa, f); /* oa takes ast into sun coords */
// } else {
// /* We're viewing the model from Earth: at the center of each POS pixel
// * we want cos(scattering angle), distance from the COM towards Earth,
// * and the facet number. For bistatic situations (lightcurves) we also
// * want cos(incidence angle) and the unit vector towards the source. */
// dev_mmmul2(oa, pos[f]->oe, oa, f); /* oa takes ast into obs coords */
// if (pos[f]->bistatic) {
// usrc[f].x = usrc[f].y = 0.0; /* unit vector towards source */
// usrc[f].z = 1.0;
// dev_cotrans1(&usrc[f], pos[f]->se, usrc[f], -1);
// dev_cotrans1(&usrc[f], pos[f]->oe, usrc[f], 1); /* in observer coordinates */
// }
// }
// outbndarr[f] = 0;
// }
//}
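//
///* In matrix terms (a reading of the kernel above, using the repo's own
// * names): dev_mtrnsps2 stores ae^T and dev_mmmul2 left-multiplies it, so
// * the kernel builds  oa = se . ae^T  when src is set (asteroid -> sun
// * coordinates) and  oa = oe . ae^T  otherwise (asteroid -> observer
// * coordinates). For bistatic frames the source direction works out to
// *   usrc = oe . se^T . zhat,
// * i.e. the unit vector towards the sun rotated into observer coordinates. */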
//
//__global__ void transform_facet_normals_krnl64a(
// struct mod_t *dmod,
// struct pos_t **pos,
// struct vertices_t **verts,
// double4 *ijminmax_overall,
// double3 orbit_offs,
// double3 *oa,
// double3 *usrc,
// int *outbndarr,
// int nf,
// int frm,
// int src,
// int blockSize)
//{
// /* This kernel launches 256 threads, performs a grid-stride loop through
// * all model facets, and transforms each facet normal with oa[frm], storing
// * the result in the frame's pos if n.z > 0.0. It also determines and stores
// * the facet and global model bounding box via i1,i2,j1,j2 and xlim/ylim.
// * These quantities are stored in pos_facet_t structures inside each frame's
// * pos. */
//
// /* Declare kernel variables */
// __shared__ int pn;
// __shared__ double kmpxl;
// int imin, jmin, imax, jmax, i1, i2, j1, j2;
// int3 fidx;
// double imin_dbl, jmin_dbl, imax_dbl, jmax_dbl;
// double3 n, v0, v1, v2;
//
// /* Initialize the shared variables (accessed by every thread) */
// if (threadIdx.x==0) {
// pn = pos[frm]->n;
// kmpxl = pos[frm]->km_per_pixel;
// }
// __syncthreads();
//
// /* Do a grid-stride loop on all facets */
// for (int f=threadIdx.x; f<nf; f+=blockSize) {
// /* Get vertex indices of the three vertices making up the facet */
// fidx.x = verts[0]->f[f].v[0]; fidx.y = verts[0]->f[f].v[1];
// fidx.z = verts[0]->f[f].v[2];
//
// /* Copy each vertex over to thread register memory */
// v0.x = verts[0]->v[fidx.x].x[0]; v0.y = verts[0]->v[fidx.x].x[1];
// v0.z = verts[0]->v[fidx.x].x[2]; v1.x = verts[0]->v[fidx.y].x[0];
// v1.y = verts[0]->v[fidx.y].x[1]; v1.z = verts[0]->v[fidx.y].x[2];
// v2.x = verts[0]->v[fidx.z].x[0]; v2.y = verts[0]->v[fidx.z].x[1];
// v2.z = verts[0]->v[fidx.z].x[2];
//
// /* Get the normal to this facet in body-fixed (asteroid) coordinates
// * and convert it to observer coordinates */
// n.x = verts[0]->f[f].n[0];
// n.y = verts[0]->f[f].n[1];
// n.z = verts[0]->f[f].n[2];
// dev_cotrans3(&n, oa, n, 1, frm);
//
// /* Check if this facet is visible - is the facet normal pointing
// * roughly at the observer? */
// if (n.z > 0.0) {
// /* First, store the transformed normal back to the model and increase
// * visible facet counter */
// pos[frm]->facet[f].nt.x = n.x;
// pos[frm]->facet[f].nt.y = n.y;
// pos[frm]->facet[f].nt.z = n.z;
// /* Convert the 3 vertex coordinates from body to observer
// * coordinates; orbit_offset is the center-of-mass offset
// * (in observer coordinates) for this model at this frame's
// * epoch due to orbital motion, in case the model is half of
// * a binary system. */
// dev_cotrans3(&v0, oa, v0, 1, frm);
// dev_cotrans3(&v1, oa, v1, 1, frm);
// dev_cotrans3(&v2, oa, v2, 1, frm);
// v0.x += orbit_offs.x; v0.y += orbit_offs.y; v0.z += orbit_offs.z;
// v1.x += orbit_offs.x; v1.y += orbit_offs.y; v1.z += orbit_offs.z;
// v2.x += orbit_offs.x; v2.y += orbit_offs.y; v2.z += orbit_offs.z;
//
// /* Find rectangular region (in POS pixels) containing the projected
// * facet - use floats in case model has illegal parameters and the
// * pixel numbers exceed the limits for valid integers */
// imin_dbl = floor(MIN(v0.x,MIN(v1.x,v2.x)) / kmpxl - SMALLVAL + 0.5);
// imax_dbl = floor(MAX(v0.x,MAX(v1.x,v2.x)) / kmpxl + SMALLVAL + 0.5);
// jmin_dbl = floor(MIN(v0.y,MIN(v1.y,v2.y)) / kmpxl - SMALLVAL + 0.5);
// jmax_dbl = floor(MAX(v0.y,MAX(v1.y,v2.y)) / kmpxl + SMALLVAL + 0.5);
//
// imin = (imin_dbl < INT_MIN) ? INT_MIN : (int) imin_dbl;
// imax = (imax_dbl > INT_MAX) ? INT_MAX : (int) imax_dbl;
// jmin = (jmin_dbl < INT_MIN) ? INT_MIN : (int) jmin_dbl;
// jmax = (jmax_dbl > INT_MAX) ? INT_MAX : (int) jmax_dbl;
//
// /* Set the outbnd flag if the facet extends beyond the POS window */
// if ((imin < (-pn)) || (imax > pn) || (jmin < (-pn)) || (jmax > pn)) {
// posvis_tiled_outbnd = 1;
// atomicExch(&outbndarr[frm], 1);
// }
//
// /* Figure out if facet projects at least partly within POS window;
// * if it does, look at each "contained" POS pixel and get the
// * z-coordinate and cos(scattering angle) */
// i1 = MAX(imin, -pn); i2 = MIN(imax, pn);
// j1 = MAX(jmin, -pn); j2 = MIN(jmax, pn);
//
// pos[frm]->facet[f].ilim.x = i1;
// pos[frm]->facet[f].ilim.y = i2;
// pos[frm]->facet[f].jlim.x = j1;
// pos[frm]->facet[f].jlim.y = j2;
// pos[frm]->facet[f].v0t = v0;
// pos[frm]->facet[f].v1t = v1;
// pos[frm]->facet[f].v2t = v2;
//
// /* Now keep track of the global region */
// if (i1 > pn || i2 < -pn || j1 > pn || j2 < -pn) {
// /* Facet is entirely outside POS frame: just track POS region */
// dev_POSrect_gpu64(pos, src, imin_dbl, imax_dbl, jmin_dbl,
// jmax_dbl, ijminmax_overall, frm);
//
// } else {
// dev_POSrect_gpu64(pos, src, (double)i1, (double)i2,
// (double)j1, (double)j2, ijminmax_overall, frm);
// }
// }
// else {
// /* The following makes a check in the bin_facets_krnl64 kernel easier */
// pos[frm]->facet[f].nt.x = -1.0;
// pos[frm]->facet[f].nt.y = -1.0;
// pos[frm]->facet[f].nt.z = -1.0;
// }
// }
//}
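//
///* A possible launch for the kernel above (a sketch under assumptions --
// * the frame loop and launch configuration are not from the original
// * file): per the doc comment it runs as one block of 256 threads whose
// * stride loop covers all nf facets, once per frame:
// *
// * for (int f = start; f < nfrm_alloc; f++)
// *     transform_facet_normals_krnl64a<<<1,256>>>(dmod, pos, verts,
// *             ijminmax_overall, orbit_offset, oa, usrc, outbndarr,
// *             nf, f, src, 256);
// */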
//
//__global__ void transform_facet_normals_krnl64b(
// struct mod_t *dmod,
// struct pos_t **pos,
// struct vertices_t **verts,
// double4 *ijminmax_overall,
// double3 orbit_offs,
// double3 *oa_gm,
// int *outbndarr,
// int nf,
// int src)
//{
// /* This kernel launches nframes blocks of threads, performs a grid-stride
// * loop through all model facets, and transforms each facet normal with
// * oa[frm], storing the result in the frame's pos if n.z > 0.0. It also
// * determines and stores the facet and global model bounding box via
// * i1,i2,j1,j2 and xlim/ylim. These quantities are stored in pos_facet_t
// * structures inside each frame's pos. This kernel also uses shared memory
// * for ijminmax_overall_sh as temporary (faster) storage for the pos window
// * calculation. Additionally, the pos->xlim/ylim atomic operations have been
// * moved to the very end of this kernel so they are processed just once per
// * block instead of for every facet. */
//
// /* Declare kernel variables */
// __shared__ int pn;
// __shared__ double kmpxl, oa_sh[3][3];
// __shared__ double4 ijminmax_overall_sh;
// int frm=blockIdx.x, imin, jmin, imax, jmax, i1, i2, j1, j2;
// int3 fidx;
// double imin_dbl, jmin_dbl, imax_dbl, jmax_dbl;
// double3 n, v0, v1, v2;
//
// /* Initialize the shared variables (accessed by every thread) */
// if (threadIdx.x==0) {
// pn = pos[frm]->n;
// kmpxl = pos[frm]->km_per_pixel;
// ijminmax_overall_sh.w = ijminmax_overall_sh.x =
// ijminmax_overall_sh.y = ijminmax_overall_sh.z = 0.0f;
//
// /* Load oa for this frame into shared memory */
// oa_sh[0][0] = oa_gm[3*frm].x; oa_sh[0][1] = oa_gm[3*frm].y; oa_sh[0][2] = oa_gm[3*frm].z;
// oa_sh[1][0] = oa_gm[3*frm+1].x; oa_sh[1][1] = oa_gm[3*frm+1].y; oa_sh[1][2] = oa_gm[3*frm+1].z;
// oa_sh[2][0] = oa_gm[3*frm+2].x; oa_sh[2][1] = oa_gm[3*frm+2].y; oa_sh[2][2] = oa_gm[3*frm+2].z;
// }
// __syncthreads();
//
// /* Do a grid-stride loop on all facets */
// for (int f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Get vertex indices of the three vertices making up the facet */
// fidx.x = verts[0]->f[f].v[0]; fidx.y = verts[0]->f[f].v[1];
// fidx.z = verts[0]->f[f].v[2];
//
// /* Copy each vertex over to thread register memory */
// v0.x = verts[0]->v[fidx.x].x[0]; v0.y = verts[0]->v[fidx.x].x[1];
// v0.z = verts[0]->v[fidx.x].x[2]; v1.x = verts[0]->v[fidx.y].x[0];
// v1.y = verts[0]->v[fidx.y].x[1]; v1.z = verts[0]->v[fidx.y].x[2];
// v2.x = verts[0]->v[fidx.z].x[0]; v2.y = verts[0]->v[fidx.z].x[1];
// v2.z = verts[0]->v[fidx.z].x[2];
//
// /* Get the normal to this facet in body-fixed (asteroid) coordinates
// * and convert it to observer coordinates */
// n.x = verts[0]->f[f].n[0];
// n.y = verts[0]->f[f].n[1];
// n.z = verts[0]->f[f].n[2];
// dev_cotrans1(&n, oa_sh, n, 1);
//
// /* Check if this facet is visible - is the facet normal pointing
// * roughly at the observer? */
// if (n.z > 0.0) {
// /* First, store the transformed normal back to the model and increase
// * visible facet counter */
// pos[frm]->facet[f].nt.x = n.x;
// pos[frm]->facet[f].nt.y = n.y;
// pos[frm]->facet[f].nt.z = n.z;
// /* Convert the 3 vertex coordinates from body to observer
// * coordinates; orbit_offset is the center-of-mass offset
// * (in observer coordinates) for this model at this frame's
// * epoch due to orbital motion, in case the model is half of
// * a binary system. */
// dev_cotrans1(&v0, oa_sh, v0, 1);
// dev_cotrans1(&v1, oa_sh, v1, 1);
// dev_cotrans1(&v2, oa_sh, v2, 1);
// v0.x += orbit_offs.x; v0.y += orbit_offs.y; v0.z += orbit_offs.z;
// v1.x += orbit_offs.x; v1.y += orbit_offs.y; v1.z += orbit_offs.z;
// v2.x += orbit_offs.x; v2.y += orbit_offs.y; v2.z += orbit_offs.z;
//
// /* Find rectangular region (in POS pixels) containing the projected
// * facet - use floats in case model has illegal parameters and the
// * pixel numbers exceed the limits for valid integers */
// imin_dbl = floor(MIN(v0.x,MIN(v1.x,v2.x)) / kmpxl - SMALLVAL + 0.5);
// imax_dbl = floor(MAX(v0.x,MAX(v1.x,v2.x)) / kmpxl + SMALLVAL + 0.5);
// jmin_dbl = floor(MIN(v0.y,MIN(v1.y,v2.y)) / kmpxl - SMALLVAL + 0.5);
// jmax_dbl = floor(MAX(v0.y,MAX(v1.y,v2.y)) / kmpxl + SMALLVAL + 0.5);
//
// imin = (imin_dbl < INT_MIN) ? INT_MIN : (int) imin_dbl;
// imax = (imax_dbl > INT_MAX) ? INT_MAX : (int) imax_dbl;
// jmin = (jmin_dbl < INT_MIN) ? INT_MIN : (int) jmin_dbl;
// jmax = (jmax_dbl > INT_MAX) ? INT_MAX : (int) jmax_dbl;
//
// /* Set the outbnd flag if the facet extends beyond the POS window */
// if ((imin < (-pn)) || (imax > pn) || (jmin < (-pn)) || (jmax > pn)) {
// posvis_tiled_outbnd = 1;
// atomicExch(&outbndarr[frm], 1);
// }
//
// /* Figure out if facet projects at least partly within POS window;
// * if it does, look at each "contained" POS pixel and get the
// * z-coordinate and cos(scattering angle) */
// i1 = MAX(imin, -pn); i2 = MIN(imax, pn);
// j1 = MAX(jmin, -pn); j2 = MIN(jmax, pn);
//
// pos[frm]->facet[f].ilim.x = i1;
// pos[frm]->facet[f].ilim.y = i2;
// pos[frm]->facet[f].jlim.x = j1;
// pos[frm]->facet[f].jlim.y = j2;
// pos[frm]->facet[f].v0t = v0;
// pos[frm]->facet[f].v1t = v1;
// pos[frm]->facet[f].v2t = v2;
//
// /* Now keep track of the global region */
// if (i1 > pn || i2 < -pn || j1 > pn || j2 < -pn) {
// /* Facet is entirely outside POS frame: just track POS region */
// dev_POSrect_gpu64_shared(imin_dbl,imax_dbl, jmin_dbl,jmax_dbl,
// &ijminmax_overall_sh, pn);
//
// } else {
// dev_POSrect_gpu64_shared((double)i1, (double)i2,
// (double)j1, (double)j2, &ijminmax_overall_sh, pn);
// }
// }
// else {
// /* The following makes a check in the bin_facets_krnl64 kernel easier */
//// pos[frm]->facet[f].nt.x = -1.0;
//// pos[frm]->facet[f].nt.y = -1.0;
//// pos[frm]->facet[f].nt.z = -1.0;
// }
// }
// __syncthreads();
//
// /* Now write the POS frame window limits from shared mem back to global mem */
// if (threadIdx.x==0) {
// ijminmax_overall[frm].w = ijminmax_overall_sh.w;
// ijminmax_overall[frm].x = ijminmax_overall_sh.x;
// ijminmax_overall[frm].y = ijminmax_overall_sh.y;
// ijminmax_overall[frm].z = ijminmax_overall_sh.z;
//
// /* Update the subset of the POS frame that contains the target */
// /* imin_dbl - ijminmax_overall[frm].w
// * imax_dbl - ijminmax_overall[frm].x
// * jmin_dbl - ijminmax_overall[frm].y
// * jmax_dbl - ijminmax_overall[frm].z
// */
// int imin = (ijminmax_overall_sh.w < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.w;
// int imax = (ijminmax_overall_sh.x > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.x;
// int jmin = (ijminmax_overall_sh.y < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.y;
// int jmax = (ijminmax_overall_sh.z > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.z;
//
// /* Make sure it's smaller than n */
// imin = MAX(imin,-pn);
// imax = MIN(imax, pn);
// jmin = MAX(jmin,-pn);
// jmax = MIN(jmax, pn);
//
// if (src) {
// atomicMin(&pos[frm]->xlim2[0], imin);
// atomicMax(&pos[frm]->xlim2[1], imax);
// atomicMin(&pos[frm]->ylim2[0], jmin);
// atomicMax(&pos[frm]->ylim2[1], jmax);
// } else {
// atomicMin(&pos[frm]->xlim[0], imin);
// atomicMax(&pos[frm]->xlim[1], imax);
// atomicMin(&pos[frm]->ylim[0], jmin);
// atomicMax(&pos[frm]->ylim[1], jmax);
// }
// }
//}
//
//__global__ void transform_facet_normals_krnl64c(
// struct mod_t *dmod,
// struct pos_t **pos,
// struct vertices_t **verts,
// double4 *ijminmax_overall,
// double3 orbit_offs,
// double3 *oa_gm,
// int *outbndarr,
// int nf,
// int frm,
// int src)
//{
// /* This kernel launches 256 threads, performs a grid-stride loop through
//	 * all model facets and transforms each facet normal with oa[frm], storing
//	 * the result in pos[frm]->facet[f].nt when n.z > 0.0. It also determines and stores the
// * facet and global model bounding box via i1,i2,j1,j2 and xlim/ylim.
// * These quantities are stored in pos_facet_t structures inside each frame's
// * pos. This kernel also uses shared memory for ijminmax_overall_sh, used
// * as temporary (faster) storage for pos window calculation. Additionally,
// * the pos->xlim/ylim atomic operations have been moved to the very end of
// * this kernel to be processed just once instead of for every facet. */
//
// /* Declare kernel variables */
// __shared__ int pn;
// __shared__ double kmpxl, oa_sh[3][3];
// __shared__ double4 ijminmax_overall_sh;
// int imin, jmin, imax, jmax, i1, i2, j1, j2;
// int3 fidx;
// double imin_dbl, jmin_dbl, imax_dbl, jmax_dbl;
// double3 n, v0, v1, v2;
//
// /* Initialize the shared variables (accessed by every thread) */
// if (threadIdx.x==0) {
// pn = pos[frm]->n;
// kmpxl = pos[frm]->km_per_pixel;
// ijminmax_overall_sh.w = ijminmax_overall_sh.x =
// ijminmax_overall_sh.y = ijminmax_overall_sh.z = 0.0f;
//
// /* Load oa for this frame into shared memory */
// oa_sh[0][0] = oa_gm[3*frm].x; oa_sh[0][1] = oa_gm[3*frm].y; oa_sh[0][2] = oa_gm[3*frm].z;
// oa_sh[1][0] = oa_gm[3*frm+1].x; oa_sh[1][1] = oa_gm[3*frm+1].y; oa_sh[1][2] = oa_gm[3*frm+1].z;
// oa_sh[2][0] = oa_gm[3*frm+2].x; oa_sh[2][1] = oa_gm[3*frm+2].y; oa_sh[2][2] = oa_gm[3*frm+2].z;
// }
// __syncthreads();
//
// /* Do a grid-stride loop on all facets */
// for (int f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Get vertex indices of the three vertices making up the facet */
// fidx.x = verts[0]->f[f].v[0]; fidx.y = verts[0]->f[f].v[1];
// fidx.z = verts[0]->f[f].v[2];
//
// /* Copy each vertex over to thread register memory */
// v0.x = verts[0]->v[fidx.x].x[0]; v0.y = verts[0]->v[fidx.x].x[1];
// v0.z = verts[0]->v[fidx.x].x[2]; v1.x = verts[0]->v[fidx.y].x[0];
// v1.y = verts[0]->v[fidx.y].x[1]; v1.z = verts[0]->v[fidx.y].x[2];
// v2.x = verts[0]->v[fidx.z].x[0]; v2.y = verts[0]->v[fidx.z].x[1];
// v2.z = verts[0]->v[fidx.z].x[2];
//
// /* Get the normal to this facet in body-fixed (asteroid) coordinates
// * and convert it to observer coordinates */
// n.x = verts[0]->f[f].n[0];
// n.y = verts[0]->f[f].n[1];
// n.z = verts[0]->f[f].n[2];
// dev_cotrans1(&n, oa_sh, n, 1);
//
// /* Check if this facet is visible - is the facet normal pointing
// * roughly at the observer? */
// if (n.z > 0.0) {
// /* First, store the transformed normal back to the model and increase
// * visible facet counter */
// pos[frm]->facet[f].nt.x = n.x;
// pos[frm]->facet[f].nt.y = n.y;
// pos[frm]->facet[f].nt.z = n.z;
// /* Convert the 3 vertex coordinates from body to observer
// * coordinates; orbit_offset is the center-of-mass offset
// * (in observer coordinates) for this model at this frame's
// * epoch due to orbital motion, in case the model is half of
// * a binary system. */
// dev_cotrans1(&v0, oa_sh, v0, 1);
// dev_cotrans1(&v1, oa_sh, v1, 1);
// dev_cotrans1(&v2, oa_sh, v2, 1);
//			v0.x += orbit_offs.x;	v0.y += orbit_offs.y;	v0.z += orbit_offs.z;
//			v1.x += orbit_offs.x;	v1.y += orbit_offs.y;	v1.z += orbit_offs.z;
//			v2.x += orbit_offs.x;	v2.y += orbit_offs.y;	v2.z += orbit_offs.z;
//
// /* Find rectangular region (in POS pixels) containing the projected
// * facet - use floats in case model has illegal parameters and the
// * pixel numbers exceed the limits for valid integers */
// imin_dbl = floor(MIN(v0.x,MIN(v1.x,v2.x)) / kmpxl - SMALLVAL + 0.5);
// imax_dbl = floor(MAX(v0.x,MAX(v1.x,v2.x)) / kmpxl + SMALLVAL + 0.5);
// jmin_dbl = floor(MIN(v0.y,MIN(v1.y,v2.y)) / kmpxl - SMALLVAL + 0.5);
// jmax_dbl = floor(MAX(v0.y,MAX(v1.y,v2.y)) / kmpxl + SMALLVAL + 0.5);
//
// imin = (imin_dbl < INT_MIN) ? INT_MIN : (int) imin_dbl;
// imax = (imax_dbl > INT_MAX) ? INT_MAX : (int) imax_dbl;
// jmin = (jmin_dbl < INT_MIN) ? INT_MIN : (int) jmin_dbl;
// jmax = (jmax_dbl > INT_MAX) ? INT_MAX : (int) jmax_dbl;
//
// /* Set the outbnd flag if the facet extends beyond the POS window */
// if ((imin < (-pn)) || (imax > pn) || (jmin < (-pn)) || (jmax > pn)) {
// posvis_tiled_outbnd = 1;
// atomicExch(&outbndarr[frm], 1);
// }
//
// /* Figure out if facet projects at least partly within POS window;
// * if it does, look at each "contained" POS pixel and get the
// * z-coordinate and cos(scattering angle) */
// i1 = MAX(imin, -pn); i2 = MIN(imax, pn);
// j1 = MAX(jmin, -pn); j2 = MIN(jmax, pn);
//
// pos[frm]->facet[f].ilim.x = i1;
// pos[frm]->facet[f].ilim.y = i2;
// pos[frm]->facet[f].jlim.x = j1;
// pos[frm]->facet[f].jlim.y = j2;
// pos[frm]->facet[f].v0t = v0;
// pos[frm]->facet[f].v1t = v1;
// pos[frm]->facet[f].v2t = v2;
//
// /* Now keep track of the global region */
// if (i1 > pn || i2 < -pn || j1 > pn || j2 < -pn) {
// /* Facet is entirely outside POS frame: just track POS region */
// dev_POSrect_gpu64_shared(imin_dbl,imax_dbl, jmin_dbl,jmax_dbl,
// &ijminmax_overall_sh, pn);
//
// } else {
// dev_POSrect_gpu64_shared((double)i1, (double)i2,
// (double)j1, (double)j2, &ijminmax_overall_sh, pn);
// }
// }
// else {
// /* The following makes a check in the bin_facets_krnl64 kernel easier */
// pos[frm]->facet[f].nt.x = -1.0;
// pos[frm]->facet[f].nt.y = -1.0;
// pos[frm]->facet[f].nt.z = -1.0;
// }
// }
// __syncthreads();
//
// /* Now write the POS frame window limits from shared mem back to global mem */
// if (threadIdx.x==0) {
// ijminmax_overall[frm].w = ijminmax_overall_sh.w;
// ijminmax_overall[frm].x = ijminmax_overall_sh.x;
// ijminmax_overall[frm].y = ijminmax_overall_sh.y;
// ijminmax_overall[frm].z = ijminmax_overall_sh.z;
//
// /* Update the subset of the POS frame that contains the target */
// /* imin_dbl - ijminmax_overall[frm].w
// * imax_dbl - ijminmax_overall[frm].x
// * jmin_dbl - ijminmax_overall[frm].y
// * jmax_dbl - ijminmax_overall[frm].z
// */
// int imin = (ijminmax_overall_sh.w < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.w;
// int imax = (ijminmax_overall_sh.x > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.x;
// int jmin = (ijminmax_overall_sh.y < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.y;
// int jmax = (ijminmax_overall_sh.z > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.z;
//
// /* Make sure it's smaller than n */
// imin = MAX(imin,-pn);
// imax = MIN(imax, pn);
// jmin = MAX(jmin,-pn);
// jmax = MIN(jmax, pn);
//
// if (src) {
// atomicMin(&pos[frm]->xlim2[0], imin);
// atomicMax(&pos[frm]->xlim2[1], imax);
// atomicMin(&pos[frm]->ylim2[0], jmin);
// atomicMax(&pos[frm]->ylim2[1], jmax);
// } else {
// atomicMin(&pos[frm]->xlim[0], imin);
// atomicMax(&pos[frm]->xlim[1], imax);
// atomicMin(&pos[frm]->ylim[0], jmin);
// atomicMax(&pos[frm]->ylim[1], jmax);
// }
// }
//}
//
//__global__ void bin_facets_krnl64a(struct pos_t **pos,
// struct vertices_t **verts,
// int ***facet_index,
// int **entries,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size)
//{
// /* This kernel is responsible for binning visible model facets according to
//	 * which screen tile they appear on. Each facet can belong to 1, 2, or 4
//	 * different tiles. (If an individual triangle exceeds the tile size,
//	 * this is no longer true.)
//	 * This kernel version has just one thread block with 1024 threads and
//	 * uses a block-stride loop to cover all facets.
//	 */
// int f, current_i, next_i, current_j, next_j, i1, i2, j1, j2, bi, bin, old_indx;
// __shared__ int2 xlim, ylim; /* These are the global pos limits */
// extern __shared__ int addr_index[]; /* Used for the facet_index entries */
//
// /* Initialize shared variables that will be accessed by every thread */
// if (threadIdx.x==0) {
// xlim.x = pos[frm]->xlim[0];
// xlim.y = pos[frm]->xlim[1];
// ylim.x = pos[frm]->ylim[0];
// ylim.y = pos[frm]->ylim[1];
// for (bin=0; bin<n_tiles[frm]; bin++)
// addr_index[bin] = 0;
// }
// __syncthreads();
//
// /* Do grid-stride loop through all facets in model */
// for (f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Weed out any facets not visible to observer */
// if (pos[frm]->facet[f].nt.z > 0.0) {
// bi = 0; /* Bin index for the four facet bin entries*/
// /* Copy facet limits into register memory for faster access */
// i1 = pos[frm]->facet[f].ilim.x;
// i2 = pos[frm]->facet[f].ilim.y;
// j1 = pos[frm]->facet[f].jlim.x;
// j2 = pos[frm]->facet[f].jlim.y;
//
// /* Now check where the current facet lies, stepping through each
// * tile */
// for (int k=0; k<n_tiles_y[frm]; k++) {
// current_j = ylim.x + k * tile_size;
// next_j = current_j + tile_size;
// for (int n=0; n<n_tiles_x[frm]; n++) {
// bin = k*n_tiles_x[frm] + n;
// current_i = xlim.x + n * tile_size;
// next_i = current_i + tile_size;
//
// /* If i1 or i2 AND j1 or j2 fall into this tile, register it */
// if ((i1>=current_i && i1<next_i) || (i2>=current_i && i2<next_i)) {
// if ((j1>=current_j && j1<next_j) || (j2>=current_j && j2<next_j)) {
// pos[frm]->facet[f].bin[bi] = bin;
// old_indx = atomicAdd(&addr_index[bin], 1);
// facet_index[frm][bin][old_indx] = f;
// atomicAdd(&entries[frm][bin], 1);
// bi++;
// }
// }
// }
// }
// }
// }
//}
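//
///* Worked example of the tile-membership claim above (an added sketch; the
// * facet's bin[] field is inferred from the bin[bi] usage): with tile_size
// * (span) = 32 and tile columns starting at xlim.x = -75, the first column
// * covers i in [-75,-44] and the second i in [-43,-12]. A facet with
// * i-bounds (-60,-50) registers in one column; bounds (-50,-40) straddle
// * the edge and register in two. Straddling an edge in both i and j yields
// * the 4-bin case. Nothing checks bi against 4, which quietly assumes
// * every facet is smaller than one tile. */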
//
//__global__ void bin_facets_krnl64b(struct pos_t **pos,
// struct vertices_t **verts,
// int ***facet_index,
// int **entries,
// int **addr_index,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size)
//{
// /* This kernel is responsible for binning visible model facets according to
//	 * which screen tile they appear on. Each facet can belong to 1, 2, or 4
//	 * different tiles. (If an individual triangle exceeds the tile size,
//	 * this is no longer true.)
// * The kernel version uses nf-threads with as many thread blocks as it
// * takes, considering the previously defined maxThreadsPerBlock. Because of
// * this, the addr_index array is in global memory (instead of shared). */
//
// int f, current_i, next_i, current_j, next_j, i1, i2, j1, j2, bi, bin, old_indx;
// __shared__ int2 xlim, ylim; /* These are the global pos limits */
//
// f = blockDim.x * blockIdx.x + threadIdx.x;
//
// /* Initialize shared variables that will be accessed by every thread */
// if (threadIdx.x==0) {
// xlim.x = pos[frm]->xlim[0];
// xlim.y = pos[frm]->xlim[1];
// ylim.x = pos[frm]->ylim[0];
// ylim.y = pos[frm]->ylim[1];
//// for (bin=0; bin<n_tiles[frm]; bin++)
//// addr_index[frm][bin] = 0;
// }
// __syncthreads();
//
//	/* Each thread handles exactly one facet; no stride loop is needed */
// if (f < nf) {
// /* Weed out any facets not visible to observer */
// if (pos[frm]->facet[f].nt.z > 0.0) {
// bi = 0; /* Bin index for the four facet bin entries*/
// /* Copy facet limits into register memory for faster access */
// i1 = pos[frm]->facet[f].ilim.x;
// i2 = pos[frm]->facet[f].ilim.y;
// j1 = pos[frm]->facet[f].jlim.x;
// j2 = pos[frm]->facet[f].jlim.y;
//
// /* Now check where the current facet lies, stepping through each
// * tile */
// for (int k=0; k<n_tiles_y[frm]; k++) {
// current_j = ylim.x + k * tile_size;
// next_j = current_j + tile_size;
// for (int n=0; n<n_tiles_x[frm]; n++) {
// bin = k*n_tiles_x[frm] + n;
// current_i = xlim.x + n * tile_size;
// next_i = current_i + tile_size;
//
// /* If i1 or i2 AND j1 or j2 fall into this tile, register it */
// if ((i1>=current_i && i1<next_i) || (i2>=current_i && i2<next_i)) {
// if ((j1>=current_j && j1<next_j) || (j2>=current_j && j2<next_j)) {
// pos[frm]->facet[f].bin[bi] = bin;
// old_indx = atomicAdd(&addr_index[frm][bin], 1);
// facet_index[frm][bin][old_indx] = f;
// atomicAdd(&entries[frm][bin], 1);
// bi++;
// }
// }
// }
// }
// }
// }
//}
//
//__global__ void bin_facets_krnl64c(struct pos_t **pos,
// struct vertices_t **verts,
// int ***facet_index,
// int **entries,
// int nf,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size)
//{
// /* This kernel is responsible for binning visible model facets according to
//	 * which screen tile they appear on. Each facet can belong to 1, 2, or 4
//	 * different tiles. (If an individual triangle exceeds the tile size,
//	 * this is no longer true.)
//	 * This kernel version uses nframes thread blocks to cover all frames in
//	 * one go. Each block has its own __shared__ addr_index array and uses a
//	 * block-stride loop to cover all facets.
//	 */
// int f, frm, current_i, next_i, current_j, next_j, i1, i2, j1, j2, bi, bin, old_indx;
// __shared__ int2 xlim, ylim; /* These are the global pos limits */
//	__shared__ int addr_index[160];	/* Room for up to 160 tiles; at a
//			32x32-pixel tile size that covers e.g. a 16x10-tile POS of
//			512x320 pixels (a 40x40-tile, 1280x1280-pixel POS would need
//			1600 entries) */
// frm = blockIdx.x;
//
// /* Initialize shared variables that will be accessed by every thread */
// if (threadIdx.x==0) {
// xlim.x = pos[frm]->xlim[0];
// xlim.y = pos[frm]->xlim[1];
// ylim.x = pos[frm]->ylim[0];
// ylim.y = pos[frm]->ylim[1];
// for (bin=0; bin<n_tiles[frm]; bin++)
// addr_index[bin] = 0;
// }
// __syncthreads();
//
// /* Do grid-stride loop through all facets in model */
// for (f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Weed out any facets not visible to observer */
// if (pos[frm]->facet[f].nt.z > 0.0) {
// bi = 0; /* Bin index for the four facet bin entries*/
// /* Copy facet limits into register memory for faster access */
// i1 = pos[frm]->facet[f].ilim.x;
// i2 = pos[frm]->facet[f].ilim.y;
// j1 = pos[frm]->facet[f].jlim.x;
// j2 = pos[frm]->facet[f].jlim.y;
//
// /* Now check where the current facet lies, stepping through each
// * tile */
// for (int k=0; k<n_tiles_y[frm]; k++) {
// current_j = ylim.x + k * tile_size;
// next_j = current_j + tile_size;
// for (int n=0; n<n_tiles_x[frm]; n++) {
// bin = k*n_tiles_x[frm] + n;
// current_i = xlim.x + n * tile_size;
// next_i = current_i + tile_size;
//
// /* If i1 or i2 AND j1 or j2 fall into this tile, register it */
// if ((i1>=current_i && i1<next_i) || (i2>=current_i && i2<next_i)) {
// if ((j1>=current_j && j1<next_j) || (j2>=current_j && j2<next_j)) {
// pos[frm]->facet[f].bin[bi] = bin;
// old_indx = atomicAdd(&addr_index[bin], 1);
// facet_index[frm][bin][old_indx] = f;
// atomicAdd(&entries[frm][bin], 1);
// bi++;
// }
// }
// }
// }
// }
// }
//}
//
//__global__ void radar_raster_krnl64(struct pos_t **pos,
// struct vertices_t **verts,
// double3 *oa_gm,
// int ***facet_index,
// int **entries,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size,
// int tile_x) {
//
// /* This kernel performs the rasterization tile by tile. Each thread block
// * is responsible for one tile. */
// /* Determine which tile this thread block is responsible for and
// * which element of the thread block this thread is. */
// int bin = blockIdx.x;
// int index = threadIdx.x;
//
// /* Declare the shared memory variables and others */
// __shared__ double pos_z[32][32];//[55][55]; /* One per thread block */
// __shared__ double pos_cose[32][32];//[55][55]; /* One per thread block */
// __shared__ int2 bn;
// __shared__ int xlim, ylim, offsetx, offsety;
// __shared__ double kmpxl;
// __shared__ double oa_sh[3][3];
// int i, j, ig, jg, i1, i2, j1, j2; /* ig,jg are global indices */
// int tile_i1, tile_i2, tile_j1, tile_j2, fct_indx;
// double3 v0, v1, v2, n, tv0, tv1, tv2, n1n0;
// int3 fidx;
//
//	/* Initialize the shared memory arrays with a block-stride loop */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// pos_z[i][j] = -1e20;
// pos_cose[i][j] = 0.0;
// }
// __syncthreads();
//
// /* Load variables used by every thread (per block) to shared memory for
// * faster access */
// if (threadIdx.x==0) {
// xlim = pos[frm]->xlim[0];
// ylim = pos[frm]->ylim[0];
// kmpxl = pos[frm]->km_per_pixel;
// bn.x = bin % n_tiles_x[frm];
// bn.y = bin / n_tiles_x[frm];
//
// /* Calculate the pixel offsets needed to go back and forth between
// * tiled POS space for this block's tile and global POS space */
// offsetx = xlim + tile_x * bn.x;
// offsety = ylim + tile_x * bn.y;
//
// /* Load oa for this frame into shared memory */
// if (posvis_tiled_smooth) {
// oa_sh[0][0] = oa_gm[3*frm].x; oa_sh[0][1] = oa_gm[3*frm].y; oa_sh[0][2] = oa_gm[3*frm].z;
// oa_sh[1][0] = oa_gm[3*frm+1].x; oa_sh[1][1] = oa_gm[3*frm+1].y; oa_sh[1][2] = oa_gm[3*frm+1].z;
// oa_sh[2][0] = oa_gm[3*frm+2].x; oa_sh[2][1] = oa_gm[3*frm+2].y; oa_sh[2][2] = oa_gm[3*frm+2].z;
// }
//
// }
// __syncthreads();
//
//	/* Using a block-stride loop, step through all facet entries of this
//	 * bin; each thread block is responsible for one bin/tile */
// for (index=threadIdx.x; index<entries[frm][bin]; index+=blockDim.x) {
//
// /* Load facet index into registers */
// fct_indx = facet_index[frm][bin][index];
//
// /* Load transformed facet vertices into registers */
// v0 = pos[frm]->facet[fct_indx].v0t;
// v1 = pos[frm]->facet[fct_indx].v1t;
// v2 = pos[frm]->facet[fct_indx].v2t;
// n = pos[frm]->facet[fct_indx].nt;
//
// /* Calculate and store the boundaries of this tile */
// tile_i1 = offsetx;
// tile_i2 = tile_i1 + tile_x;
// tile_j1 = offsety;
// tile_j2 = tile_j1 + tile_x;
//
// /* Load this facet's boundaries and clamp them if needed, then
// * convert to local shared memory array addressing */
// i1 = max(pos[frm]->facet[fct_indx].ilim.x, tile_i1);
// i2 = min(pos[frm]->facet[fct_indx].ilim.y, (tile_i2-1));
// j1 = max(pos[frm]->facet[fct_indx].jlim.x, tile_j1);
// j2 = min(pos[frm]->facet[fct_indx].jlim.y, (tile_j2-1));
//
// /* Precalculate s and t components for the pixel loop */
// double a, b, c, d, e, h, ti, tj, si, sj, si0, sj0, ti0, tj0, sz, tz, den, s, t, z, old;
// a = i1*kmpxl - v0.x;
// b = v2.y - v1.y;
// c = v2.x - v1.x;
// d = j1*kmpxl - v0.y;
// e = v1.x - v0.x;
// h = v1.y - v0.y;
// den = e*b - c*h;
// ti = -h*kmpxl/den;
// tj = e*kmpxl/den;
// si = b*kmpxl/den;
// sj = -c*kmpxl/den;
// si0 = (a*b - c*d)/den;
// ti0 = (e*d -a*h)/den;
// sz = v1.z - v0.z;
// tz = v2.z - v1.z;
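//
//		/* Derivation sketch (added comment, not in the original source):
//		 * each POS pixel center p = (i*kmpxl, j*kmpxl) is expressed as
//		 *     p = v0 + s*(v1 - v0) + t*(v2 - v1),
//		 * which in x and y gives the 2x2 linear system
//		 *     a = s*e + t*c,      d = s*h + t*b
//		 * with a,b,c,d,e,h as defined above. Cramer's rule yields
//		 *     s = (a*b - c*d)/den,  t = (e*d - a*h)/den,  den = e*b - c*h,
//		 * and since a and d grow by exactly kmpxl per i- and j-step, s and
//		 * t are updated incrementally via (si,ti) per column and (sj,tj)
//		 * per row. The pixel lies on the facet iff 0 <= s <= 1 and
//		 * 0 <= t <= s, which is precisely the test in the pixel loop
//		 * below. */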
//
// /* Now convert i1, i2, j1, j2 to shared-memory tile coordinates */
// i1 -= (offsetx);
// i2 -= (offsetx);
// j1 -= (offsety);
// j2 -= (offsety);
//
// /* Pre-calculate some quantities for cosine smoothing if enabled */
// if (posvis_tiled_smooth) {
// /* Assign temp. normal components as float3 */
// fidx.x = verts[0]->f[fct_indx].v[0];
// fidx.y = verts[0]->f[fct_indx].v[1];
// fidx.z = verts[0]->f[fct_indx].v[2];
//
// tv0.x = verts[0]->v[fidx.x].n[0]; tv0.y = verts[0]->v[fidx.x].n[1];
// tv0.z = verts[0]->v[fidx.x].n[2]; tv1.x = verts[0]->v[fidx.y].n[0];
// tv1.y = verts[0]->v[fidx.y].n[1]; tv1.z = verts[0]->v[fidx.y].n[2];
// tv2.x = verts[0]->v[fidx.z].n[0]; tv2.y = verts[0]->v[fidx.z].n[1];
// tv2.z = verts[0]->v[fidx.z].n[2];
// n1n0.x = tv1.x - tv0.x; n1n0.y = tv1.y - tv0.y; n1n0.z = tv1.z - tv0.z;
// tv2.x -= tv1.x; tv2.y -= tv1.y; tv2.z -= tv1.z;
// }
//
// /* Facet is at least partly within POS frame: find all POS
// * pixels whose centers project onto this facet */
// for (i=i1; i<=i2; i++) {
//
// sj0 = si0; /* Initialize this loop's base sj0, tj0 */
// tj0 = ti0;
//
// for (j=j1; j<=j2; j++) {
//
// /* Calculate s and t parameters */
// s = sj0;
// t = tj0;
//
//				if ((s >= -SMALLVAL) && (s <= 1.0 + SMALLVAL)) {
//
// if( (t >= -SMALLVAL) && (t <= s + SMALLVAL)) {
//
// /* Compute z-coordinate of pixel center: its
// * distance measured from the origin towards
// * Earth. */
// z = v0.z + s*sz + t*tz;
//
// /* Compare calculated z to stored shared memory z
// * array at this address and store the bigger value */
// old = atomicMax64(&pos_z[i][j], z);
//
// if (old < z){
//
// if (posvis_tiled_smooth) {
// /* Get pvs_smoothed version of facet unit
// * normal: Take the linear combination
// * of the three vertex normals; trans-
// * form from body to observer coordina-
// * tes; and make sure that it points
// * somewhat in our direction. */
// n.x = tv0.x + s * n1n0.x + t * tv2.x;
// n.y = tv0.y + s * n1n0.y + t * tv2.y;
// n.z = tv0.z + s * n1n0.z + t * tv2.z;
// dev_cotrans1(&n, oa_sh, n, 1);
//// dev_cotrans3(&n, oa_gm, n, 1, frm);
// dev_normalize3(&n);
// }
//
// /* Determine scattering angles. */
// if (n.z > 0.0) {
// atomicExch((unsigned long long int*)&pos_cose[i][j],
// __double_as_longlong(n.z));
// }
// /* Keeping track of facets may not be required. */
//// atomicExch(&pos[frm]->f[i][j], f);
//
// } /* end if (no other facet yet blocks this facet from view) */
// } /* end if 0 <= t <= s (facet center is "in" this POS pixel) */
// } /* end if 0 <= s <= 1 */
//
// sj0 += sj;
// tj0 += tj;
// } /* end j-loop over POS rows */
// /* Modify s and t step-wise for the next i-iteration of the pixel loop */
// si0 += si;
// ti0 += ti;
//
// } /* end i-loop over POS columns */
// }
// __syncthreads();
//
// /* Now write the shared memory array tiles into the global memory z buffer
// * and cosine array, again with a block-stride loop */
//	if (entries[frm][bin] > 0)	/* skip empty bins; facet index 0 is valid,
//				 * so testing facet_index[frm][bin][0] != 0 would wrongly
//				 * skip any bin whose first registered facet is #0 */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// ig = i + offsetx;
// jg = j + offsety;
// if (pos_z[i][j]!=-1e20)
// pos[frm]->z[ig][jg] = pos_z[i][j];
// if (pos_cose[i][j]!=0.0)
// pos[frm]->cose[ig][jg] = pos_cose[i][j];
// }
// __syncthreads();
//}
//
//__global__ void lightcurve_raster_krnl64(struct pos_t **pos,
// struct vertices_t **verts,
// double3 *oa,
// double3 *usrc,
// int ***facet_index,
// int **entries,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size,
// int tile_x,
// int src) {
//
// /* This kernel performs the rasterization tile by tile. Each thread block
// * is responsible for one tile. */
// /* Determine which tile this thread block is responsible for and
// * which element of the thread block this thread is. */
// int bin = blockIdx.x;
// int index = threadIdx.x;
//
// /* Declare the shared memory variables and others */
//	__shared__ double pos_z[32][32];//[55][55];	/* One per thread block */
//	__shared__ double pos_cose[32][32];//[55][55];	/* One per thread block */
//	__shared__ double pos_cosi[32][32];	/* Note: pos_cose and pos_cosi are
//			effectively unused in this kernel - the pixel loop below writes
//			cose/cosill/cosi straight to global memory with atomicExch, so
//			the shared-to-global writeback at the end copies nothing new */
// __shared__ int2 bn;
// __shared__ int xlim, ylim, offsetx, offsety, bistatic;
// __shared__ double kmpxl;
// int i, j, ig, jg, i1, i2, j1, j2; /* ig,jg are global indices */
// int tile_i1, tile_i2, tile_j1, tile_j2, fct_indx;
// double3 v0, v1, v2, n, tv0, tv1, tv2, n1n0;
// int3 fidx;
//
//	/* Initialize the shared memory arrays with a block-stride loop */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// pos_z[i][j] = -1e20;
// pos_cose[i][j] = 0.0;
// }
// __syncthreads();
//
// /* Load variables used by every thread (per block) to shared memory for
// * faster access */
// if (threadIdx.x==0) {
// bistatic = pos[frm]->bistatic;
// xlim = pos[frm]->xlim[0];
// ylim = pos[frm]->ylim[0];
// kmpxl = pos[frm]->km_per_pixel;
// bn.x = bin % n_tiles_x[frm];
// bn.y = bin / n_tiles_x[frm];
//
// /* Calculate the pixel offsets needed to go back and forth between
// * tiled POS space for this block's tile and global POS space */
// offsetx = xlim + tile_x * bn.x;
// offsety = ylim + tile_x * bn.y;
// }
// __syncthreads();
//
//	/* Using a block-stride loop, step through all facet entries of this
//	 * bin; each thread block is responsible for one bin/tile */
// for (index=threadIdx.x; index<entries[frm][bin]; index+=blockDim.x) {
//
// /* Load facet index into registers */
// fct_indx = facet_index[frm][bin][index];
//
// /* Load transformed facet vertices into registers */
// v0 = pos[frm]->facet[fct_indx].v0t;
// v1 = pos[frm]->facet[fct_indx].v1t;
// v2 = pos[frm]->facet[fct_indx].v2t;
// n = pos[frm]->facet[fct_indx].nt;
//
// /* Calculate and store the boundaries of this tile */
// tile_i1 = offsetx;
// tile_i2 = tile_i1 + tile_x;
// tile_j1 = offsety;
// tile_j2 = tile_j1 + tile_x;
//
// /* Load this facet's boundaries and clamp them if needed, then
// * convert to local shared memory array addressing */
// i1 = max(pos[frm]->facet[fct_indx].ilim.x, tile_i1);
// i2 = min(pos[frm]->facet[fct_indx].ilim.y, (tile_i2-1));
// j1 = max(pos[frm]->facet[fct_indx].jlim.x, tile_j1);
// j2 = min(pos[frm]->facet[fct_indx].jlim.y, (tile_j2-1));
//
// /* Pre-calculate s and t components for the pixel loop */
// double a, b, c, d, e, h, ti, tj, si, sj, si0, sj0, ti0, tj0, sz, tz, den, s, t, z, old;
// a = i1*kmpxl - v0.x;
// b = v2.y - v1.y;
// c = v2.x - v1.x;
// d = j1*kmpxl - v0.y;
// e = v1.x - v0.x;
// h = v1.y - v0.y;
// den = e*b - c*h;
// ti = -h*kmpxl/den;
// tj = e*kmpxl/den;
// si = b*kmpxl/den;
// sj = -c*kmpxl/den;
// si0 = (a*b - c*d)/den;
// ti0 = (e*d -a*h)/den;
// sz = v1.z - v0.z;
// tz = v2.z - v1.z;
//
// /* Now convert i1, i2, j1, j2 to shared-memory tile coordinates */
// i1 -= (offsetx);
// i2 -= (offsetx);
// j1 -= (offsety);
// j2 -= (offsety);
//
// /* Pre-calculate some quantities for cosine smoothing if enabled */
// if (posvis_tiled_smooth) {
// /* Assign temp. normal components as float3 */
// fidx.x = verts[0]->f[fct_indx].v[0];
// fidx.y = verts[0]->f[fct_indx].v[1];
// fidx.z = verts[0]->f[fct_indx].v[2];
//
// tv0.x = verts[0]->v[fidx.x].n[0]; tv0.y = verts[0]->v[fidx.x].n[1];
// tv0.z = verts[0]->v[fidx.x].n[2]; tv1.x = verts[0]->v[fidx.y].n[0];
// tv1.y = verts[0]->v[fidx.y].n[1]; tv1.z = verts[0]->v[fidx.y].n[2];
// tv2.x = verts[0]->v[fidx.z].n[0]; tv2.y = verts[0]->v[fidx.z].n[1];
// tv2.z = verts[0]->v[fidx.z].n[2];
// n1n0.x = tv1.x - tv0.x; n1n0.y = tv1.y - tv0.y; n1n0.z = tv1.z - tv0.z;
// tv2.x -= tv1.x; tv2.y -= tv1.y; tv2.z -= tv1.z;
// }
//
// /* Facet is at least partly within POS frame: find all POS
// * pixels whose centers project onto this facet */
// for (i=i1; i<=i2; i++) {
//
// sj0 = si0; /* Initialize this loop's base sj0, tj0 */
// tj0 = ti0;
//
// for (j=j1; j<=j2; j++) {
//
// /* Calculate s and t parameters */
// s = sj0;
// t = tj0;
//
//				if ((s >= -SMALLVAL) && (s <= 1.0 + SMALLVAL)) {
//
// if( (t >= -SMALLVAL) && (t <= s + SMALLVAL)) {
//
// /* Compute z-coordinate of pixel center: its
// * distance measured from the origin towards
// * Earth. */
// z = v0.z + s*sz + t*tz;
//
// /* Compare calculated z to stored shared memory z
// * array at this address and store the bigger value */
// old = atomicMax64(&pos_z[i][j], z);
//
// if (old < z){
// if (posvis_tiled_smooth) {
// /* Get pvs_smoothed version of facet unit
// * normal: Take the linear combination
// * of the three vertex normals; trans-
// * form from body to observer coordina-
// * tes; and make sure that it points
// * somewhat in our direction. */
// n.x = tv0.x + s * n1n0.x + t * tv2.x;
// n.y = tv0.y + s * n1n0.y + t * tv2.y;
// n.z = tv0.z + s * n1n0.z + t * tv2.z;
// dev_cotrans3(&n, oa, n, 1, frm);
// dev_normalize3(&n);
// }
//
// /* Determine scattering angles. */
// if (n.z > 0.0) {
// if (src)
// atomicExch((unsigned long long int*)&pos[frm]->cosill[i][j],
// __double_as_longlong(n.z));
// else
// atomicExch((unsigned long long int*)&pos[frm]->cose[i][j],
// __double_as_longlong(n.z));
//
// if ((!src) && (bistatic)) {
//
// double temp = dev_dot_d3(n,usrc[frm]);
// atomicExch((unsigned long long int*)&pos[frm]->cosi[i][j],
// __double_as_longlong(temp));
// if (pos[frm]->cosi[i][j] <= 0.0)
// pos[frm]->cose[i][j] = 0.0;
// }
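//						/* Added note: the read of pos[frm]->cosi[i][j]
//						 * above is not atomic with the preceding
//						 * atomicExch; a thread rasterizing an overlapping
//						 * facet can swap in a new cosi value in between,
//						 * so the cose zeroing may act on a neighbor's
//						 * result. */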
//
// }
// /* Keeping track of facets may not be required. */
//// atomicExch(&pos[frm]->f[i][j], f);
//
// } /* end if (no other facet yet blocks this facet from view) */
// } /* end if 0 <= t <= s (facet center is "in" this POS pixel) */
// } /* end if 0 <= s <= 1 */
//
// sj0 += sj;
// tj0 += tj;
// } /* end j-loop over POS rows */
// /* Modify s and t step-wise for the next i-iteration of the pixel loop */
// si0 += si;
// ti0 += ti;
//
// } /* end i-loop over POS columns */
// }
// __syncthreads();
//
// /* Now write the shared memory array tiles into the global memory z buffer
// * and cosine array, again with a block-stride loop */
//	if (entries[frm][bin] > 0)	/* skip empty bins; facet index 0 is valid,
//				 * so testing facet_index[frm][bin][0] != 0 would wrongly
//				 * skip any bin whose first registered facet is #0 */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// ig = i + offsetx;
// jg = j + offsety;
// if (pos_z[i][j]!=-1e20)
// pos[frm]->z[ig][jg] = pos_z[i][j];
// if (pos_cose[i][j]!=0.0)
// pos[frm]->cose[ig][jg] = pos_cose[i][j];
// }
// __syncthreads();
//}
//
//__global__ void posvis_outbnd_tiled_krnl64(struct pos_t **pos,
// int *outbndarr, double4 *ijminmax_overall, int size, int start) {
// /* nfrm_alloc-threaded kernel */
// int posn, f = blockIdx.x * blockDim.x + threadIdx.x + start;
// double xfactor, yfactor;
//	if (f < size) {
// if (outbndarr[f]) {
// /* ijminmax_overall.w = imin_overall
// * ijminmax_overall.x = imax_overall
// * ijminmax_overall.y = jmin_overall
// * ijminmax_overall.z = jmax_overall */
// posn = pos[f]->n;
// xfactor = (MAX( ijminmax_overall[f].x, posn) -
// MIN( ijminmax_overall[f].w, -posn) + 1) / (2*posn+1);
// yfactor = (MAX( ijminmax_overall[f].z, posn) -
// MIN( ijminmax_overall[f].y, -posn) + 1) / (2*posn+1);
// pos[f]->posbnd_logfactor = log(xfactor*yfactor);
// }
// }
//}
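//
///* Numeric sketch of posbnd_logfactor (added; the values are assumed for
// * illustration): for a (2*75+1) = 151-pixel POS frame (posn = 75) with
// * overall facet limits imin = -80, imax = 100, jmin = -75, jmax = 75:
// *     xfactor = (100 - (-80) + 1) / 151 = 181/151 ~ 1.199
// *     yfactor = ( 75 - (-75) + 1) / 151 = 1.0
// *     posbnd_logfactor = log(1.199 * 1.0) ~ 0.181
// * i.e. the log of the factor by which the POS frame area would have to
// * grow to contain the whole model. */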
//
//__host__ int posvis_tiled_gpu64(
// struct par_t *dpar,
// struct mod_t *dmod,
// struct dat_t *ddat,
// struct pos_t **pos,
// struct vertices_t **verts,
// double3 orbit_offset,
// int *posn,
// int *outbndarr,
// int set,
// int nfrm_alloc,
// int src,
// int nf,
// int body, int comp, unsigned char type, hipStream_t *pv_stream,
// int src_override) {
//
// dim3 BLK,THD, BLKfrm, THD64, *BLKtile, BLKaf, THDaf;
// double4 *ijminmax_overall;
// double3 *oa, *usrc;
// int ***facet_index, *xspan, *yspan, *n_tiles_x, *n_tiles_y, *n_tiles, **entries;
// int f, outbnd, start, sharedMem, oasize, span, tile_size, **addr_index;
//
// oasize=nfrm_alloc*3;
// span=32;
// tile_size=span*span;
//
// /* Launch parameters for the facet_streams kernel */
// THD.x = 256; THD64.x = 64;
// BLK.x = floor((THD.x - 1 + nf) / THD.x);
// BLKfrm.x = floor((THD64.x - 1 + nfrm_alloc)/THD64.x);
// THDaf.x = 1024;
// BLKaf.x = nfrm_alloc;
//
// /* Set up the offset addressing for lightcurves if this is a lightcurve */
// if (type == LGHTCRV) start = 1; /* fixes the lightcurve offsets */
// else start = 0;
//
// /* Allocate temporary arrays/structs */
// cudaCalloc1((void**)&ijminmax_overall, sizeof(double4), nfrm_alloc);
// cudaCalloc1((void**)&oa, sizeof(double3), oasize);
// cudaCalloc1((void**)&usrc, sizeof(double3), nfrm_alloc);
// cudaCalloc1((void**)&xspan, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&yspan, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&n_tiles, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&n_tiles_x, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&n_tiles_y, sizeof(int), nfrm_alloc);
// /* Allocate the frame portion of the facet index triple pointer and
// * the bin entries counter */
// cudaCalloc((void**)&facet_index, sizeof(int**), nfrm_alloc);
// cudaCalloc1((void**)&entries, sizeof(int*), nfrm_alloc);
//// cudaCalloc1((void**)&addr_index, sizeof(int*), nfrm_alloc);
// cudaCalloc((void**)&BLKtile, sizeof(dim3), nfrm_alloc);
//
// /* Initialize/pre-calculate values for rasterization */
// posvis_tiled_init_krnl64<<<BLKfrm,THD64>>>(dpar, pos, ijminmax_overall, oa, usrc,
// outbndarr, comp, start, src, nfrm_alloc, set, src_override);
// checkErrorAfterKernelLaunch("posvis_tiled_init_krnl64");
//
// /* Transform facet normals and determine bounding for facets and pos. */
// for (f=start; f<nfrm_alloc; f++)
// transform_facet_normals_krnl64c<<<1,THD,0,pv_stream[f]>>>(dmod, pos,
// verts, ijminmax_overall, orbit_offset, oa, outbndarr, nf,
// f, src);
//	checkErrorAfterKernelLaunch("transform_facet_normals_krnl64c");
//// /* Transform facet normals and determine bounding for facets and pos. */
//// for (f=start; f<nfrm_alloc; f++)
//// transform_facet_normals_krnl64b<<<BLK,THD,0,pv_stream[f]>>>(dmod, pos,
//// verts, ijminmax_overall, orbit_offset, oa, usrc, outbndarr, nf,
//// f, src);
//// checkErrorAfterKernelLaunch("transform_facet_normals_krnl64b");
//// transform_facet_normals_krnl64b<<<BLKaf,THD>>>(dmod, pos, verts, ijminmax_overall,
//// orbit_offset, oa, outbndarr, nf, src);
//// checkErrorAfterKernelLaunch("transform_facet_normals_krnl64a");
//
// for (f=start; f<nfrm_alloc; f++)
// hipStreamSynchronize(pv_stream[f]);
//
// /* Now calculate the tiling parameters to cover the POS view */
// for (f=start; f<nfrm_alloc; f++) {
// xspan[f] = pos[f]->xlim[1] - pos[f]->xlim[0] + 1;
// yspan[f] = pos[f]->ylim[1] - pos[f]->ylim[0] + 1;
// n_tiles_x[f] = (xspan[f]/span) + 1;
// n_tiles_y[f] = (yspan[f]/span) + 1;
// n_tiles[f] = n_tiles_x[f] * n_tiles_y[f];
// BLKtile[f].x = n_tiles[f];
// BLKtile[f].y = BLKtile[f].z = 1;
//
//		/* Now allocate the tiles section of the facet index and then step
//		 * through each tile section to allocate space for 4*1024 facet
//		 * indices per bin - 4x headroom over the 1024 threads a raster
//		 * thread block launches with */
// /* Allocate the entries array to keep track of how many facets each bin holds */
//// cudaCalloc((void**)&addr_index[f], sizeof(int), n_tiles[f]);
// cudaCalloc((void**)&entries[f], sizeof(int), n_tiles[f]);
// cudaCalloc((void**)&facet_index[f], sizeof(int*),n_tiles[f]);
// for (int ti=0; ti<n_tiles[f]; ti++)
// cudaCalloc((void**)&facet_index[f][ti], sizeof(int), 4*1024);
// }
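//
//	/* Example of the tiling arithmetic (added; values assumed): a frame
//	 * with xlim = [-75, 75] has xspan = 151, so n_tiles_x = 151/32 + 1 = 5
//	 * tile columns - integer division plus one to cover the ragged edge
//	 * tile. A square frame then gives n_tiles = 25, i.e. BLKtile[f].x = 25
//	 * thread blocks for that frame's raster kernel. */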
//
// bin_facets_krnl64c<<<BLKaf,THDaf>>>(pos, verts, facet_index,
// entries, nf, n_tiles, n_tiles_x, n_tiles_y, span);
//	checkErrorAfterKernelLaunch("bin_facets_krnl64c");
//
//	/* Binning is done; now rasterize each frame, tile by tile */
// for (f=start; f<nfrm_alloc; f++) {
//// sharedMem = sizeof(int)*n_tiles[f];
//// bin_facets_krnl64a<<<1,THD,sharedMem,pv_stream[f]>>>(pos, verts, facet_index,
//// entries, nf, f, n_tiles, n_tiles_x, n_tiles_y, span);
//// bin_facets_krnl64b<<<BLK,THD,sharedMem,pv_stream[f]>>>(pos, verts, facet_index,
//// entries, addr_index, nf, f, n_tiles, n_tiles_x, n_tiles_y, span);
// radar_raster_krnl64<<<BLKtile[f],THD,0,pv_stream[f]>>>(pos, verts, oa,
// facet_index, entries, nf, f, n_tiles, n_tiles_x,
// n_tiles_y, tile_size, span);
// }
//	checkErrorAfterKernelLaunch("radar_raster_krnl64");
//
// for (f=start; f<nfrm_alloc; f++)
// hipStreamSynchronize(pv_stream[f]);
//
// /* Take care of any posbnd flags */
// posvis_outbnd_tiled_krnl64<<<BLKfrm,THD64>>>(pos,
// outbndarr, ijminmax_overall, nfrm_alloc, start);
//	checkErrorAfterKernelLaunch("posvis_outbnd_tiled_krnl64");
//	gpuErrchk(hipMemcpyFromSymbol(&outbnd, HIP_SYMBOL(posvis_tiled_outbnd), sizeof(int), 0,
// hipMemcpyDeviceToHost));
//
//// int n = 75;
//// int npixels = 151*151;
//// f = 0;
//// dbg_print_pos_arrays_full64(pos, 0, npixels, n);
//// dbg_print_pos_arrays_full64(pos, 1, npixels, n);
//// dbg_print_pos_arrays_full64(pos, 2, npixels, n);
//// dbg_print_pos_arrays_full64(pos, 3, npixels, n);
//
// /* Free temp arrays, destroy streams and timers, as applicable */
// hipFree(oa);
// hipFree(usrc);
// hipFree(xspan);
// hipFree(yspan);
// hipFree(n_tiles);
// hipFree(entries);
// hipFree(BLKtile);
// hipFree(n_tiles_x);
// hipFree(n_tiles_y);
//// hipFree(addr_index);
// hipFree(facet_index);
// hipFree(ijminmax_overall);
//
// return outbnd;
//}
| 33776125c7e97c4e3ff0dcaf470ef8703fefd5f6.cu | ///*****************************************************************************************
// posvis.c
//
// Fill in the portion of a plane-of-sky image due to a particular model component: Assign
// each relevant POS pixel a z-value in observer coordinates (distance from the origin
// towards Earth) and a value of cos(scattering angle).
//
// Return 1 if any portion of this component lies outside the specified POS window,
// 0 otherwise.
//
// If the "src" argument is true, the "observer" is the Sun rather than Earth, and
// "plane-of-sky" becomes "projection as viewed from the Sun."
//
// Modified 2014 February 20 by CM:
// Allow facets that partly project outside the POS frame to contribute to the POS frame
// (thus avoiding see-through "holes" in the model at the edge of a POS image)
//
// Modified 2010 May 18 by CM:
// Bug fix: When checking if a POS pixel hasn't already been assigned
// values during a previous call to posvis for a different component,
// check for fac[i][j] < 0 rather than cosa[i][j] == 0.0, since for
// bistatic situations the latter condition will also be true for
// pixels centered on Earth-facing facets that don't face the Sun
//
// Modified 2009 July 2 by CM:
// Eliminate the check that facets are "active": this term is now being
// interpreted to mean "not lying interior to the model," so the
// check is unnecessary and the determination of active vs. inactive
// status is inaccurate for half-exposed facets at the intersections
// between model components
//
// Modified 2009 April 3 by CM:
// Compute the "posbnd_logfactor" parameter: if the model extends beyond
// the POS frame, posbnd_logfactor is set to the logarithm of the
// ratio of the area that would have been required to "contain" the
// entire model divided by the area of the actual POS frame
// Work with floating-point pixel numbers (imin_dbl, etc.), at least
// initially, in case the sky rendering for a model with illegal
// parameters would involve huge pixel numbers that exceed the
// limits for valid integers
//
// Modified 2007 August 4 by CM:
// Add "orbit_offset" and "body" parameters and remove "facet" parameter
// Add body, bodyill, comp, and compill matrices for POS frames
//
// Modified 2006 June 21 by CM:
// For POS renderings, change res to km_per_pixel
//
// Modified 2005 September 19 by CM:
// Allow for roundoff error when determining which POS pixels project
// onto each model facet
//
// Modified 2005 June 27 by CM:
// Renamed "round" function to "iround" to avoid conflicts
//
// Modified 2005 June 22 by CM:
// Slightly modified some comments
//
// Modified 2005 January 25 by CM:
// Take care of unused and uninitialized variables
//
// Modified 2004 December 19 by CM:
// Added more comments
// Put update of rectangular POS area into "POSrect" routine and applied it
// even to facets which lie outside the POS frame
//
// Modified 2004 Feb 11 by CM:
// Added comments
//
// Modified 2003 May 5 by CM:
// Removed redundant coordinate transformation of the unit normal n
// for the no-pvs_smoothing case
// *****************************************************************************************/
//extern "C" {
//#include "../shape/head.h"
//#include <limits.h>
//}
//
//#define maxbins 100
//__device__ int posvis_tiled_outbnd, posvis_tiled_smooth;
//
///* Note that the following custom atomic functions must be declared in each
// * file it is needed (consequence of being a static device function) */
//__device__ static float atomicMaxf(float* address, float val) {
// int* address_as_i = (int*) address;
// int old = *address_as_i, assumed;
// do {
// assumed = old;
// old = ::atomicCAS(address_as_i, assumed,
// __float_as_int(::fmaxf(val, __int_as_float(assumed))));
// } while (assumed != old);
// return __int_as_float(old);
//}
//__device__ static double atomicMax64(double* address, double val)
//{
// unsigned long long* address_as_i = (unsigned long long*) address;
// unsigned long long old = *address_as_i, assumed;
// do {
// assumed = old;
// old = ::atomicCAS(address_as_i, assumed,
//				__double_as_longlong(::fmax(val, __longlong_as_double(assumed))));
// } while (assumed != old);
// return __longlong_as_double(old);
//}
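//
///* For symmetry, a minimum version would follow the same compare-and-swap
// * pattern (an added sketch; it is not defined or used anywhere in this
// * file):
// *
// *	__device__ static double atomicMin64(double* address, double val)
// *	{
// *		unsigned long long* address_as_i = (unsigned long long*) address;
// *		unsigned long long old = *address_as_i, assumed;
// *		do {
// *			assumed = old;
// *			old = ::atomicCAS(address_as_i, assumed,
// *					__double_as_longlong(::fmin(val, __longlong_as_double(assumed))));
// *		} while (assumed != old);
// *		return __longlong_as_double(old);
// *	}
// *
// * Reinterpreting the double's bits as unsigned long long is what makes
// * atomicCAS applicable; the loop retries until no other thread changed
// * the value between the read and the swap. */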
//
//__global__ void posvis_tiled_init_krnl64(
// struct par_t *dpar,
// struct pos_t **pos,
// double4 *ijminmax_overall,
// double3 *oa,
// double3 *usrc,
// int *outbndarr,
// int c,
// int start,
// int src,
// int size,
// int set,
// int src_override) {
//
// /* nfrm_alloc-threaded */
// int f = blockIdx.x * blockDim.x + threadIdx.x + start;
//
// if (f < size) {
// if (f == start) {
// posvis_tiled_outbnd = 0;
// posvis_tiled_smooth = dpar->pos_smooth;
// if (src_override) posvis_tiled_smooth = 0;
// }
// ijminmax_overall[f].w = ijminmax_overall[f].y = HUGENUMBER;
// ijminmax_overall[f].x = ijminmax_overall[f].z = -HUGENUMBER;
// pos[f]->posbnd_logfactor = 0.0;
//
// dev_mtrnsps2(oa, pos[f]->ae, f);
// if (src) {
// /* We're viewing the model from the sun: at the center of each pixel
// * in the projected view, we want cos(incidence angle), distance from
// * the COM towards the sun, and the facet number. */
// dev_mmmul2(oa, pos[f]->se, oa, f); /* oa takes ast into sun coords */
// } else {
// /* We're viewing the model from Earth: at the center of each POS pixel
// * we want cos(scattering angle), distance from the COM towards Earth,
// * and the facet number. For bistatic situations (lightcurves) we also
// want cos(incidence angle) and the unit vector towards the source. */
// dev_mmmul2(oa, pos[f]->oe, oa, f); /* oa takes ast into obs coords */
// if (pos[f]->bistatic) {
// usrc[f].x = usrc[f].y = 0.0; /* unit vector towards source */
// usrc[f].z = 1.0;
// dev_cotrans1(&usrc[f], pos[f]->se, usrc[f], -1);
// dev_cotrans1(&usrc[f], pos[f]->oe, usrc[f], 1); /* in observer coordinates */
// }
// }
// outbndarr[f] = 0;
// }
//}
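//
///* Sketch of the resulting transform (added comment; dev_mtrnsps2 and
// * dev_mmmul2 are assumed to be transpose-into and multiply-into helpers
// * operating on the 3x3 block stored at oa[3*f]):
// *
// *     oa[f] = se[f] * transpose(ae[f])   (src: body -> sun coordinates)
// *     oa[f] = oe[f] * transpose(ae[f])   (else: body -> observer coords)
// *
// * so the dev_cotrans calls in the kernels below map body-fixed vectors
// * into the frame of whoever views the model, and usrc[f] becomes the
// * unit vector toward the source expressed in observer coordinates for
// * the bistatic (lightcurve) case. */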
//
//__global__ void transform_facet_normals_krnl64a(
// struct mod_t *dmod,
// struct pos_t **pos,
// struct vertices_t **verts,
// double4 *ijminmax_overall,
// double3 orbit_offs,
// double3 *oa,
// double3 *usrc,
// int *outbndarr,
// int nf,
// int frm,
// int src,
// int blockSize)
//{
// /* This kernel launches 256 threads, performs a grid-stride loop through
//	 * all model facets and transforms each facet normal with oa[frm], storing
//	 * the result in pos[frm]->facet[f].nt when n.z > 0.0. It also determines and stores the
// * facet and global model bounding box via i1,i2,j1,j2 and xlim/ylim.
// * These quantities are stored in pos_facet_t structures inside each frame's
// * pos. */
//
// /* Declare kernel variables */
// __shared__ int pn;
// __shared__ double kmpxl;
// int imin, jmin, imax, jmax, i1, i2, j1, j2;
// int3 fidx;
// double imin_dbl, jmin_dbl, imax_dbl, jmax_dbl;
// double3 n, v0, v1, v2;
//
// /* Initialize the shared variables (accessed by every thread) */
// if (threadIdx.x==0) {
// pn = pos[frm]->n;
// kmpxl = pos[frm]->km_per_pixel;
// }
// __syncthreads();
//
// /* Do a grid-stride loop on all facets */
// for (int f=threadIdx.x; f<nf; f+=blockSize) {
// /* Get vertex indices of the three vertices making up the facet */
// fidx.x = verts[0]->f[f].v[0]; fidx.y = verts[0]->f[f].v[1];
// fidx.z = verts[0]->f[f].v[2];
//
// /* Copy each vertex over to thread register memory */
// v0.x = verts[0]->v[fidx.x].x[0]; v0.y = verts[0]->v[fidx.x].x[1];
// v0.z = verts[0]->v[fidx.x].x[2]; v1.x = verts[0]->v[fidx.y].x[0];
// v1.y = verts[0]->v[fidx.y].x[1]; v1.z = verts[0]->v[fidx.y].x[2];
// v2.x = verts[0]->v[fidx.z].x[0]; v2.y = verts[0]->v[fidx.z].x[1];
// v2.z = verts[0]->v[fidx.z].x[2];
//
// /* Get the normal to this facet in body-fixed (asteroid) coordinates
// * and convert it to observer coordinates */
// n.x = verts[0]->f[f].n[0];
// n.y = verts[0]->f[f].n[1];
// n.z = verts[0]->f[f].n[2];
// dev_cotrans3(&n, oa, n, 1, frm);
//
// /* Check if this facet is visible - is the facet normal pointing
// * roughly at the observer? */
// if (n.z > 0.0) {
// /* First, store the transformed normal back to the model and increase
// * visible facet counter */
// pos[frm]->facet[f].nt.x = n.x;
// pos[frm]->facet[f].nt.y = n.y;
// pos[frm]->facet[f].nt.z = n.z;
// /* Convert the 3 vertex coordinates from body to observer
// * coordinates; orbit_offset is the center-of-mass offset
// * (in observer coordinates) for this model at this frame's
// * epoch due to orbital motion, in case the model is half of
// * a binary system. */
// dev_cotrans3(&v0, oa, v0, 1, frm);
// dev_cotrans3(&v1, oa, v1, 1, frm);
// dev_cotrans3(&v2, oa, v2, 1, frm);
//			v0.x += orbit_offs.x;	v0.y += orbit_offs.y;	v0.z += orbit_offs.z;
//			v1.x += orbit_offs.x;	v1.y += orbit_offs.y;	v1.z += orbit_offs.z;
//			v2.x += orbit_offs.x;	v2.y += orbit_offs.y;	v2.z += orbit_offs.z;
//
// /* Find rectangular region (in POS pixels) containing the projected
// * facet - use floats in case model has illegal parameters and the
// * pixel numbers exceed the limits for valid integers */
// imin_dbl = floor(MIN(v0.x,MIN(v1.x,v2.x)) / kmpxl - SMALLVAL + 0.5);
// imax_dbl = floor(MAX(v0.x,MAX(v1.x,v2.x)) / kmpxl + SMALLVAL + 0.5);
// jmin_dbl = floor(MIN(v0.y,MIN(v1.y,v2.y)) / kmpxl - SMALLVAL + 0.5);
// jmax_dbl = floor(MAX(v0.y,MAX(v1.y,v2.y)) / kmpxl + SMALLVAL + 0.5);
//
// imin = (imin_dbl < INT_MIN) ? INT_MIN : (int) imin_dbl;
// imax = (imax_dbl > INT_MAX) ? INT_MAX : (int) imax_dbl;
// jmin = (jmin_dbl < INT_MIN) ? INT_MIN : (int) jmin_dbl;
// jmax = (jmax_dbl > INT_MAX) ? INT_MAX : (int) jmax_dbl;
//
// /* Set the outbnd flag if the facet extends beyond the POS window */
// if ((imin < (-pn)) || (imax > pn) || (jmin < (-pn)) || (jmax > pn)) {
// posvis_tiled_outbnd = 1;
// atomicExch(&outbndarr[frm], 1);
// }
//
// /* Figure out if facet projects at least partly within POS window;
// * if it does, look at each "contained" POS pixel and get the
// * z-coordinate and cos(scattering angle) */
// i1 = MAX(imin, -pn); i2 = MIN(imax, pn);
// j1 = MAX(jmin, -pn); j2 = MIN(jmax, pn);
//
// pos[frm]->facet[f].ilim.x = i1;
// pos[frm]->facet[f].ilim.y = i2;
// pos[frm]->facet[f].jlim.x = j1;
// pos[frm]->facet[f].jlim.y = j2;
// pos[frm]->facet[f].v0t = v0;
// pos[frm]->facet[f].v1t = v1;
// pos[frm]->facet[f].v2t = v2;
//
// /* Now keep track of the global region */
// if (i1 > pn || i2 < -pn || j1 > pn || j2 < -pn) {
// /* Facet is entirely outside POS frame: just track POS region */
// dev_POSrect_gpu64(pos, src, imin_dbl, imax_dbl, jmin_dbl,
// jmax_dbl, ijminmax_overall, frm);
//
// } else {
// dev_POSrect_gpu64(pos, src, (double)i1, (double)i2,
// (double)j1, (double)j2, ijminmax_overall, frm);
// }
// }
// else {
// /* The following makes a check in the bin_facets_krnl64 kernel easier */
// pos[frm]->facet[f].nt.x = -1.0;
// pos[frm]->facet[f].nt.y = -1.0;
// pos[frm]->facet[f].nt.z = -1.0;
// }
// }
//}
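//
///* Rounding sketch (added; SMALLVAL is assumed to be a small positive
// * epsilon from the shape headers): imin_dbl = floor(x/kmpxl - SMALLVAL + 0.5)
// * is round-to-nearest with an outward bias, so a vertex sitting on a
// * pixel boundary is credited to both neighbors. For kmpxl = 0.1 km and
// * v.x = 0.25 km, x/kmpxl = 2.5, and the facet's minimum column becomes
// * floor(2.5 - eps + 0.5) = 2 while its maximum becomes
// * floor(2.5 + eps + 0.5) = 3 - both adjacent pixels are kept. */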
//
//__global__ void transform_facet_normals_krnl64b(
// struct mod_t *dmod,
// struct pos_t **pos,
// struct vertices_t **verts,
// double4 *ijminmax_overall,
// double3 orbit_offs,
// double3 *oa_gm,
// int *outbndarr,
// int nf,
// int src)
//{
// /* This kernel launches nframes blocks of threads, performs a grid-stride loop through
//	 * all model facets and transforms each facet normal with oa[frm], storing
//	 * the result in pos[frm]->facet[f].nt when n.z > 0.0. It also determines and stores the
// * facet and global model bounding box via i1,i2,j1,j2 and xlim/ylim.
// * These quantities are stored in pos_facet_t structures inside each frame's
// * pos. This kernel also uses shared memory for ijminmax_overall_sh, used
// * as temporary (faster) storage for pos window calculation. Additionally,
// * the pos->xlim/ylim atomic operations have been moved to the very end of
// * this kernel to be processed just once instead of for every facet. */
//
// /* Declare kernel variables */
// __shared__ int pn;
// __shared__ double kmpxl, oa_sh[3][3];
// __shared__ double4 ijminmax_overall_sh;
// int frm=blockIdx.x, imin, jmin, imax, jmax, i1, i2, j1, j2;
// int3 fidx;
// double imin_dbl, jmin_dbl, imax_dbl, jmax_dbl;
// double3 n, v0, v1, v2;
//
// /* Initialize the shared variables (accessed by every thread) */
// if (threadIdx.x==0) {
// pn = pos[frm]->n;
// kmpxl = pos[frm]->km_per_pixel;
// ijminmax_overall_sh.w = ijminmax_overall_sh.x =
// ijminmax_overall_sh.y = ijminmax_overall_sh.z = 0.0f;
//
// /* Load oa for this frame into shared memory */
// oa_sh[0][0] = oa_gm[3*frm].x; oa_sh[0][1] = oa_gm[3*frm].y; oa_sh[0][2] = oa_gm[3*frm].z;
// oa_sh[1][0] = oa_gm[3*frm+1].x; oa_sh[1][1] = oa_gm[3*frm+1].y; oa_sh[1][2] = oa_gm[3*frm+1].z;
// oa_sh[2][0] = oa_gm[3*frm+2].x; oa_sh[2][1] = oa_gm[3*frm+2].y; oa_sh[2][2] = oa_gm[3*frm+2].z;
// }
// __syncthreads();
//
// /* Do a grid-stride loop on all facets */
// for (int f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Get vertex indices of the three vertices making up the facet */
// fidx.x = verts[0]->f[f].v[0]; fidx.y = verts[0]->f[f].v[1];
// fidx.z = verts[0]->f[f].v[2];
//
// /* Copy each vertex over to thread register memory */
// v0.x = verts[0]->v[fidx.x].x[0]; v0.y = verts[0]->v[fidx.x].x[1];
// v0.z = verts[0]->v[fidx.x].x[2]; v1.x = verts[0]->v[fidx.y].x[0];
// v1.y = verts[0]->v[fidx.y].x[1]; v1.z = verts[0]->v[fidx.y].x[2];
// v2.x = verts[0]->v[fidx.z].x[0]; v2.y = verts[0]->v[fidx.z].x[1];
// v2.z = verts[0]->v[fidx.z].x[2];
//
// /* Get the normal to this facet in body-fixed (asteroid) coordinates
// * and convert it to observer coordinates */
// n.x = verts[0]->f[f].n[0];
// n.y = verts[0]->f[f].n[1];
// n.z = verts[0]->f[f].n[2];
// dev_cotrans1(&n, oa_sh, n, 1);
//
// /* Check if this facet is visible - is the facet normal pointing
// * roughly at the observer? */
// if (n.z > 0.0) {
// /* First, store the transformed normal back to the model and increase
// * visible facet counter */
// pos[frm]->facet[f].nt.x = n.x;
// pos[frm]->facet[f].nt.y = n.y;
// pos[frm]->facet[f].nt.z = n.z;
// /* Convert the 3 vertex coordinates from body to observer
// * coordinates; orbit_offset is the center-of-mass offset
// * (in observer coordinates) for this model at this frame's
// * epoch due to orbital motion, in case the model is half of
// * a binary system. */
// dev_cotrans1(&v0, oa_sh, v0, 1);
// dev_cotrans1(&v1, oa_sh, v1, 1);
// dev_cotrans1(&v2, oa_sh, v2, 1);
//			v0.x += orbit_offs.x;	v0.y += orbit_offs.y;	v0.z += orbit_offs.z;
//			v1.x += orbit_offs.x;	v1.y += orbit_offs.y;	v1.z += orbit_offs.z;
//			v2.x += orbit_offs.x;	v2.y += orbit_offs.y;	v2.z += orbit_offs.z;
//
// /* Find rectangular region (in POS pixels) containing the projected
// * facet - use floats in case model has illegal parameters and the
// * pixel numbers exceed the limits for valid integers */
// imin_dbl = floor(MIN(v0.x,MIN(v1.x,v2.x)) / kmpxl - SMALLVAL + 0.5);
// imax_dbl = floor(MAX(v0.x,MAX(v1.x,v2.x)) / kmpxl + SMALLVAL + 0.5);
// jmin_dbl = floor(MIN(v0.y,MIN(v1.y,v2.y)) / kmpxl - SMALLVAL + 0.5);
// jmax_dbl = floor(MAX(v0.y,MAX(v1.y,v2.y)) / kmpxl + SMALLVAL + 0.5);
//
// imin = (imin_dbl < INT_MIN) ? INT_MIN : (int) imin_dbl;
// imax = (imax_dbl > INT_MAX) ? INT_MAX : (int) imax_dbl;
// jmin = (jmin_dbl < INT_MIN) ? INT_MIN : (int) jmin_dbl;
// jmax = (jmax_dbl > INT_MAX) ? INT_MAX : (int) jmax_dbl;
//
// /* Set the outbnd flag if the facet extends beyond the POS window */
// if ((imin < (-pn)) || (imax > pn) || (jmin < (-pn)) || (jmax > pn)) {
// posvis_tiled_outbnd = 1;
// atomicExch(&outbndarr[frm], 1);
// }
//
// /* Figure out if facet projects at least partly within POS window;
// * if it does, look at each "contained" POS pixel and get the
// * z-coordinate and cos(scattering angle) */
// i1 = MAX(imin, -pn); i2 = MIN(imax, pn);
// j1 = MAX(jmin, -pn); j2 = MIN(jmax, pn);
//
// pos[frm]->facet[f].ilim.x = i1;
// pos[frm]->facet[f].ilim.y = i2;
// pos[frm]->facet[f].jlim.x = j1;
// pos[frm]->facet[f].jlim.y = j2;
// pos[frm]->facet[f].v0t = v0;
// pos[frm]->facet[f].v1t = v1;
// pos[frm]->facet[f].v2t = v2;
//
// /* Now keep track of the global region */
// if (i1 > pn || i2 < -pn || j1 > pn || j2 < -pn) {
// /* Facet is entirely outside POS frame: just track POS region */
// dev_POSrect_gpu64_shared(imin_dbl,imax_dbl, jmin_dbl,jmax_dbl,
// &ijminmax_overall_sh, pn);
//
// } else {
// dev_POSrect_gpu64_shared((double)i1, (double)i2,
// (double)j1, (double)j2, &ijminmax_overall_sh, pn);
// }
// }
// else {
// /* The following makes a check in the bin_facets_krnl64 kernel easier */
//// pos[frm]->facet[f].nt.x = -1.0;
//// pos[frm]->facet[f].nt.y = -1.0;
//// pos[frm]->facet[f].nt.z = -1.0;
// }
// }
// __syncthreads();
//
// /* Now write the POS frame window limits from shared mem back to global mem */
// if (threadIdx.x==0) {
// ijminmax_overall[frm].w = ijminmax_overall_sh.w;
// ijminmax_overall[frm].x = ijminmax_overall_sh.x;
// ijminmax_overall[frm].y = ijminmax_overall_sh.y;
// ijminmax_overall[frm].z = ijminmax_overall_sh.z;
//
// /* Update the subset of the POS frame that contains the target */
// /* imin_dbl - ijminmax_overall[frm].w
// * imax_dbl - ijminmax_overall[frm].x
// * jmin_dbl - ijminmax_overall[frm].y
// * jmax_dbl - ijminmax_overall[frm].z
// */
// int imin = (ijminmax_overall_sh.w < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.w;
// int imax = (ijminmax_overall_sh.x > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.x;
// int jmin = (ijminmax_overall_sh.y < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.y;
// int jmax = (ijminmax_overall_sh.z > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.z;
//
// /* Make sure it's smaller than n */
// imin = MAX(imin,-pn);
// imax = MIN(imax, pn);
// jmin = MAX(jmin,-pn);
// jmax = MIN(jmax, pn);
//
// if (src) {
// atomicMin(&pos[frm]->xlim2[0], imin);
// atomicMax(&pos[frm]->xlim2[1], imax);
// atomicMin(&pos[frm]->ylim2[0], jmin);
// atomicMax(&pos[frm]->ylim2[1], jmax);
// } else {
// atomicMin(&pos[frm]->xlim[0], imin);
// atomicMax(&pos[frm]->xlim[1], imax);
// atomicMin(&pos[frm]->ylim[0], jmin);
// atomicMax(&pos[frm]->ylim[1], jmax);
// }
// }
//}
//
//__global__ void transform_facet_normals_krnl64c(
// struct mod_t *dmod,
// struct pos_t **pos,
// struct vertices_t **verts,
// double4 *ijminmax_overall,
// double3 orbit_offs,
// double3 *oa_gm,
// int *outbndarr,
// int nf,
// int frm,
// int src)
//{
// /* This kernel launches 256 threads, performs a grid-stride loop through
//	 * all model facets and transforms each facet normal with oa[frm], storing
//	 * the result in pos[frm]->facet[f].nt when n.z > 0.0. It also determines and stores the
// * facet and global model bounding box via i1,i2,j1,j2 and xlim/ylim.
// * These quantities are stored in pos_facet_t structures inside each frame's
// * pos. This kernel also uses shared memory for ijminmax_overall_sh, used
// * as temporary (faster) storage for pos window calculation. Additionally,
// * the pos->xlim/ylim atomic operations have been moved to the very end of
// * this kernel to be processed just once instead of for every facet. */
//
// /* Declare kernel variables */
// __shared__ int pn;
// __shared__ double kmpxl, oa_sh[3][3];
// __shared__ double4 ijminmax_overall_sh;
// int imin, jmin, imax, jmax, i1, i2, j1, j2;
// int3 fidx;
// double imin_dbl, jmin_dbl, imax_dbl, jmax_dbl;
// double3 n, v0, v1, v2;
//
// /* Initialize the shared variables (accessed by every thread) */
// if (threadIdx.x==0) {
// pn = pos[frm]->n;
// kmpxl = pos[frm]->km_per_pixel;
// ijminmax_overall_sh.w = ijminmax_overall_sh.x =
// ijminmax_overall_sh.y = ijminmax_overall_sh.z = 0.0f;
//
// /* Load oa for this frame into shared memory */
// oa_sh[0][0] = oa_gm[3*frm].x; oa_sh[0][1] = oa_gm[3*frm].y; oa_sh[0][2] = oa_gm[3*frm].z;
// oa_sh[1][0] = oa_gm[3*frm+1].x; oa_sh[1][1] = oa_gm[3*frm+1].y; oa_sh[1][2] = oa_gm[3*frm+1].z;
// oa_sh[2][0] = oa_gm[3*frm+2].x; oa_sh[2][1] = oa_gm[3*frm+2].y; oa_sh[2][2] = oa_gm[3*frm+2].z;
// }
// __syncthreads();
//
// /* Do a grid-stride loop on all facets */
// for (int f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Get vertex indices of the three vertices making up the facet */
// fidx.x = verts[0]->f[f].v[0]; fidx.y = verts[0]->f[f].v[1];
// fidx.z = verts[0]->f[f].v[2];
//
// /* Copy each vertex over to thread register memory */
// v0.x = verts[0]->v[fidx.x].x[0]; v0.y = verts[0]->v[fidx.x].x[1];
// v0.z = verts[0]->v[fidx.x].x[2]; v1.x = verts[0]->v[fidx.y].x[0];
// v1.y = verts[0]->v[fidx.y].x[1]; v1.z = verts[0]->v[fidx.y].x[2];
// v2.x = verts[0]->v[fidx.z].x[0]; v2.y = verts[0]->v[fidx.z].x[1];
// v2.z = verts[0]->v[fidx.z].x[2];
//
// /* Get the normal to this facet in body-fixed (asteroid) coordinates
// * and convert it to observer coordinates */
// n.x = verts[0]->f[f].n[0];
// n.y = verts[0]->f[f].n[1];
// n.z = verts[0]->f[f].n[2];
// dev_cotrans1(&n, oa_sh, n, 1);
//
// /* Check if this facet is visible - is the facet normal pointing
// * roughly at the observer? */
// if (n.z > 0.0) {
//			/* First, store the transformed normal for later use by the
//			 * tile rasterization kernels */
// pos[frm]->facet[f].nt.x = n.x;
// pos[frm]->facet[f].nt.y = n.y;
// pos[frm]->facet[f].nt.z = n.z;
// /* Convert the 3 vertex coordinates from body to observer
// * coordinates; orbit_offset is the center-of-mass offset
// * (in observer coordinates) for this model at this frame's
// * epoch due to orbital motion, in case the model is half of
// * a binary system. */
// dev_cotrans1(&v0, oa_sh, v0, 1);
// dev_cotrans1(&v1, oa_sh, v1, 1);
// dev_cotrans1(&v2, oa_sh, v2, 1);
//			v0.x += orbit_offs.x;	v0.y += orbit_offs.y;	v0.z += orbit_offs.z;
//			v1.x += orbit_offs.x;	v1.y += orbit_offs.y;	v1.z += orbit_offs.z;
//			v2.x += orbit_offs.x;	v2.y += orbit_offs.y;	v2.z += orbit_offs.z;
//
// /* Find rectangular region (in POS pixels) containing the projected
// * facet - use floats in case model has illegal parameters and the
// * pixel numbers exceed the limits for valid integers */
// imin_dbl = floor(MIN(v0.x,MIN(v1.x,v2.x)) / kmpxl - SMALLVAL + 0.5);
// imax_dbl = floor(MAX(v0.x,MAX(v1.x,v2.x)) / kmpxl + SMALLVAL + 0.5);
// jmin_dbl = floor(MIN(v0.y,MIN(v1.y,v2.y)) / kmpxl - SMALLVAL + 0.5);
// jmax_dbl = floor(MAX(v0.y,MAX(v1.y,v2.y)) / kmpxl + SMALLVAL + 0.5);
//
// imin = (imin_dbl < INT_MIN) ? INT_MIN : (int) imin_dbl;
// imax = (imax_dbl > INT_MAX) ? INT_MAX : (int) imax_dbl;
// jmin = (jmin_dbl < INT_MIN) ? INT_MIN : (int) jmin_dbl;
// jmax = (jmax_dbl > INT_MAX) ? INT_MAX : (int) jmax_dbl;
//
// /* Set the outbnd flag if the facet extends beyond the POS window */
// if ((imin < (-pn)) || (imax > pn) || (jmin < (-pn)) || (jmax > pn)) {
// posvis_tiled_outbnd = 1;
// atomicExch(&outbndarr[frm], 1);
// }
//
// /* Figure out if facet projects at least partly within POS window;
// * if it does, look at each "contained" POS pixel and get the
// * z-coordinate and cos(scattering angle) */
// i1 = MAX(imin, -pn); i2 = MIN(imax, pn);
// j1 = MAX(jmin, -pn); j2 = MIN(jmax, pn);
//
// pos[frm]->facet[f].ilim.x = i1;
// pos[frm]->facet[f].ilim.y = i2;
// pos[frm]->facet[f].jlim.x = j1;
// pos[frm]->facet[f].jlim.y = j2;
// pos[frm]->facet[f].v0t = v0;
// pos[frm]->facet[f].v1t = v1;
// pos[frm]->facet[f].v2t = v2;
//
// /* Now keep track of the global region */
// if (i1 > pn || i2 < -pn || j1 > pn || j2 < -pn) {
// /* Facet is entirely outside POS frame: just track POS region */
// dev_POSrect_gpu64_shared(imin_dbl,imax_dbl, jmin_dbl,jmax_dbl,
// &ijminmax_overall_sh, pn);
//
// } else {
// dev_POSrect_gpu64_shared((double)i1, (double)i2,
// (double)j1, (double)j2, &ijminmax_overall_sh, pn);
// }
// }
// else {
// /* The following makes a check in the bin_facets_krnl64 kernel easier */
// pos[frm]->facet[f].nt.x = -1.0;
// pos[frm]->facet[f].nt.y = -1.0;
// pos[frm]->facet[f].nt.z = -1.0;
// }
// }
// __syncthreads();
//
// /* Now write the POS frame window limits from shared mem back to global mem */
// if (threadIdx.x==0) {
// ijminmax_overall[frm].w = ijminmax_overall_sh.w;
// ijminmax_overall[frm].x = ijminmax_overall_sh.x;
// ijminmax_overall[frm].y = ijminmax_overall_sh.y;
// ijminmax_overall[frm].z = ijminmax_overall_sh.z;
//
// /* Update the subset of the POS frame that contains the target */
// /* imin_dbl - ijminmax_overall[frm].w
// * imax_dbl - ijminmax_overall[frm].x
// * jmin_dbl - ijminmax_overall[frm].y
// * jmax_dbl - ijminmax_overall[frm].z
// */
// int imin = (ijminmax_overall_sh.w < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.w;
// int imax = (ijminmax_overall_sh.x > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.x;
// int jmin = (ijminmax_overall_sh.y < INT_MIN) ? INT_MIN : (int) ijminmax_overall_sh.y;
// int jmax = (ijminmax_overall_sh.z > INT_MAX) ? INT_MAX : (int) ijminmax_overall_sh.z;
//
// /* Make sure it's smaller than n */
// imin = MAX(imin,-pn);
// imax = MIN(imax, pn);
// jmin = MAX(jmin,-pn);
// jmax = MIN(jmax, pn);
//
// if (src) {
// atomicMin(&pos[frm]->xlim2[0], imin);
// atomicMax(&pos[frm]->xlim2[1], imax);
// atomicMin(&pos[frm]->ylim2[0], jmin);
// atomicMax(&pos[frm]->ylim2[1], jmax);
// } else {
// atomicMin(&pos[frm]->xlim[0], imin);
// atomicMax(&pos[frm]->xlim[1], imax);
// atomicMin(&pos[frm]->ylim[0], jmin);
// atomicMax(&pos[frm]->ylim[1], jmax);
// }
// }
//}
//
//__global__ void bin_facets_krnl64a(struct pos_t **pos,
// struct vertices_t **verts,
// int ***facet_index,
// int **entries,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size)
//{
// /* This kernel is responsible for binning visible model facets according to
//	 * which screen tile they appear on. Each facet can belong to 1, 2, or 4
//	 * different tiles. (If an individual triangle exceeds the tile size,
//	 * this is no longer true.)
// * The kernel version has just one thread block with 1024 threads. It uses a grid-
// * stride loop to cover all facets
// */
// int f, current_i, next_i, current_j, next_j, i1, i2, j1, j2, bi, bin, old_indx;
// __shared__ int2 xlim, ylim; /* These are the global pos limits */
// extern __shared__ int addr_index[]; /* Used for the facet_index entries */
//
// /* Initialize shared variables that will be accessed by every thread */
// if (threadIdx.x==0) {
// xlim.x = pos[frm]->xlim[0];
// xlim.y = pos[frm]->xlim[1];
// ylim.x = pos[frm]->ylim[0];
// ylim.y = pos[frm]->ylim[1];
// for (bin=0; bin<n_tiles[frm]; bin++)
// addr_index[bin] = 0;
// }
// __syncthreads();
//
// /* Do grid-stride loop through all facets in model */
// for (f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Weed out any facets not visible to observer */
// if (pos[frm]->facet[f].nt.z > 0.0) {
// bi = 0; /* Bin index for the four facet bin entries*/
// /* Copy facet limits into register memory for faster access */
// i1 = pos[frm]->facet[f].ilim.x;
// i2 = pos[frm]->facet[f].ilim.y;
// j1 = pos[frm]->facet[f].jlim.x;
// j2 = pos[frm]->facet[f].jlim.y;
//
// /* Now check where the current facet lies, stepping through each
// * tile */
// for (int k=0; k<n_tiles_y[frm]; k++) {
// current_j = ylim.x + k * tile_size;
// next_j = current_j + tile_size;
// for (int n=0; n<n_tiles_x[frm]; n++) {
// bin = k*n_tiles_x[frm] + n;
// current_i = xlim.x + n * tile_size;
// next_i = current_i + tile_size;
//
// /* If i1 or i2 AND j1 or j2 fall into this tile, register it */
// if ((i1>=current_i && i1<next_i) || (i2>=current_i && i2<next_i)) {
// if ((j1>=current_j && j1<next_j) || (j2>=current_j && j2<next_j)) {
// pos[frm]->facet[f].bin[bi] = bin;
// old_indx = atomicAdd(&addr_index[bin], 1);
// facet_index[frm][bin][old_indx] = f;
// atomicAdd(&entries[frm][bin], 1);
// bi++;
// }
// }
// }
// }
// }
// }
//}
//
//__global__ void bin_facets_krnl64b(struct pos_t **pos,
// struct vertices_t **verts,
// int ***facet_index,
// int **entries,
// int **addr_index,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size)
//{
// /* This kernel is responsible for binning visible model facets according to
//	 * which screen tile they appear on. Each facet can belong to 1, 2, or 4
//	 * different tiles. (If an individual triangle exceeds the tile size,
//	 * this is no longer true.)
// * The kernel version uses nf-threads with as many thread blocks as it
// * takes, considering the previously defined maxThreadsPerBlock. Because of
// * this, the addr_index array is in global memory (instead of shared). */
//
// int f, current_i, next_i, current_j, next_j, i1, i2, j1, j2, bi, bin, old_indx;
// __shared__ int2 xlim, ylim; /* These are the global pos limits */
//
// f = blockDim.x * blockIdx.x + threadIdx.x;
//
// /* Initialize shared variables that will be accessed by every thread */
// if (threadIdx.x==0) {
// xlim.x = pos[frm]->xlim[0];
// xlim.y = pos[frm]->xlim[1];
// ylim.x = pos[frm]->ylim[0];
// ylim.y = pos[frm]->ylim[1];
//// for (bin=0; bin<n_tiles[frm]; bin++)
//// addr_index[frm][bin] = 0;
// }
// __syncthreads();
//
// /* Do grid-stride loop through all facets in model */
// if (f < nf) {
// /* Weed out any facets not visible to observer */
// if (pos[frm]->facet[f].nt.z > 0.0) {
// bi = 0; /* Bin index for the four facet bin entries*/
// /* Copy facet limits into register memory for faster access */
// i1 = pos[frm]->facet[f].ilim.x;
// i2 = pos[frm]->facet[f].ilim.y;
// j1 = pos[frm]->facet[f].jlim.x;
// j2 = pos[frm]->facet[f].jlim.y;
//
// /* Now check where the current facet lies, stepping through each
// * tile */
// for (int k=0; k<n_tiles_y[frm]; k++) {
// current_j = ylim.x + k * tile_size;
// next_j = current_j + tile_size;
// for (int n=0; n<n_tiles_x[frm]; n++) {
// bin = k*n_tiles_x[frm] + n;
// current_i = xlim.x + n * tile_size;
// next_i = current_i + tile_size;
//
// /* If i1 or i2 AND j1 or j2 fall into this tile, register it */
// if ((i1>=current_i && i1<next_i) || (i2>=current_i && i2<next_i)) {
// if ((j1>=current_j && j1<next_j) || (j2>=current_j && j2<next_j)) {
// pos[frm]->facet[f].bin[bi] = bin;
// old_indx = atomicAdd(&addr_index[frm][bin], 1);
// facet_index[frm][bin][old_indx] = f;
// atomicAdd(&entries[frm][bin], 1);
// bi++;
// }
// }
// }
// }
// }
// }
//}
//
//__global__ void bin_facets_krnl64c(struct pos_t **pos,
// struct vertices_t **verts,
// int ***facet_index,
// int **entries,
// int nf,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size)
//{
// /* This kernel is responsible for binning visible model facets according to
//	 * which screen tile they appear on. Each facet can belong to 1, 2, or 4
//	 * different tiles. (If an individual triangle exceeds the tile size,
//	 * this is no longer true.)
// * The kernel version uses nframes-thread blocks to cover all frames in one
// * go. Each block has its own __shared___ addr_index array and each of them
// * uses a grid-stride loop to cover all facets
// */
// int f, frm, current_i, next_i, current_j, next_j, i1, i2, j1, j2, bi, bin, old_indx;
// __shared__ int2 xlim, ylim; /* These are the global pos limits */
//	__shared__ int addr_index[1600];	/* This allows for 40x40 tiles (at 32x32
//			tile size), allowing for a maximum POS resolution of 1280x1280 pixels */
// frm = blockIdx.x;
//
// /* Initialize shared variables that will be accessed by every thread */
// if (threadIdx.x==0) {
// xlim.x = pos[frm]->xlim[0];
// xlim.y = pos[frm]->xlim[1];
// ylim.x = pos[frm]->ylim[0];
// ylim.y = pos[frm]->ylim[1];
// for (bin=0; bin<n_tiles[frm]; bin++)
// addr_index[bin] = 0;
// }
// __syncthreads();
//
// /* Do grid-stride loop through all facets in model */
// for (f=threadIdx.x; f<nf; f+=blockDim.x) {
// /* Weed out any facets not visible to observer */
// if (pos[frm]->facet[f].nt.z > 0.0) {
// bi = 0; /* Bin index for the four facet bin entries*/
// /* Copy facet limits into register memory for faster access */
// i1 = pos[frm]->facet[f].ilim.x;
// i2 = pos[frm]->facet[f].ilim.y;
// j1 = pos[frm]->facet[f].jlim.x;
// j2 = pos[frm]->facet[f].jlim.y;
//
// /* Now check where the current facet lies, stepping through each
// * tile */
// for (int k=0; k<n_tiles_y[frm]; k++) {
// current_j = ylim.x + k * tile_size;
// next_j = current_j + tile_size;
// for (int n=0; n<n_tiles_x[frm]; n++) {
// bin = k*n_tiles_x[frm] + n;
// current_i = xlim.x + n * tile_size;
// next_i = current_i + tile_size;
//
// /* If i1 or i2 AND j1 or j2 fall into this tile, register it */
// if ((i1>=current_i && i1<next_i) || (i2>=current_i && i2<next_i)) {
// if ((j1>=current_j && j1<next_j) || (j2>=current_j && j2<next_j)) {
// pos[frm]->facet[f].bin[bi] = bin;
// old_indx = atomicAdd(&addr_index[bin], 1);
// facet_index[frm][bin][old_indx] = f;
// atomicAdd(&entries[frm][bin], 1);
// bi++;
// }
// }
// }
// }
// }
// }
//}
//
//__global__ void radar_raster_krnl64(struct pos_t **pos,
// struct vertices_t **verts,
// double3 *oa_gm,
// int ***facet_index,
// int **entries,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size,
// int tile_x) {
//
// /* This kernel performs the rasterization tile by tile. Each thread block
// * is responsible for one tile. */
// /* Determine which tile this thread block is responsible for and
// * which element of the thread block this thread is. */
// int bin = blockIdx.x;
// int index = threadIdx.x;
//
// /* Declare the shared memory variables and others */
// __shared__ double pos_z[32][32];//[55][55]; /* One per thread block */
// __shared__ double pos_cose[32][32];//[55][55]; /* One per thread block */
// __shared__ int2 bn;
// __shared__ int xlim, ylim, offsetx, offsety;
// __shared__ double kmpxl;
// __shared__ double oa_sh[3][3];
// int i, j, ig, jg, i1, i2, j1, j2; /* ig,jg are global indices */
// int tile_i1, tile_i2, tile_j1, tile_j2, fct_indx;
// double3 v0, v1, v2, n, tv0, tv1, tv2, n1n0;
// int3 fidx;
//
// /* Initialize the shared memory arrays with grid-stride loop */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// pos_z[i][j] = -1e20;
// pos_cose[i][j] = 0.0;
// }
// __syncthreads();
//
// /* Load variables used by every thread (per block) to shared memory for
// * faster access */
// if (threadIdx.x==0) {
// xlim = pos[frm]->xlim[0];
// ylim = pos[frm]->ylim[0];
// kmpxl = pos[frm]->km_per_pixel;
// bn.x = bin % n_tiles_x[frm];
// bn.y = bin / n_tiles_x[frm];
//
// /* Calculate the pixel offsets needed to go back and forth between
// * tiled POS space for this block's tile and global POS space */
// offsetx = xlim + tile_x * bn.x;
// offsety = ylim + tile_x * bn.y;
//
// /* Load oa for this frame into shared memory */
// if (posvis_tiled_smooth) {
// oa_sh[0][0] = oa_gm[3*frm].x; oa_sh[0][1] = oa_gm[3*frm].y; oa_sh[0][2] = oa_gm[3*frm].z;
// oa_sh[1][0] = oa_gm[3*frm+1].x; oa_sh[1][1] = oa_gm[3*frm+1].y; oa_sh[1][2] = oa_gm[3*frm+1].z;
// oa_sh[2][0] = oa_gm[3*frm+2].x; oa_sh[2][1] = oa_gm[3*frm+2].y; oa_sh[2][2] = oa_gm[3*frm+2].z;
// }
//
// }
// __syncthreads();
//
// /* Using grid-stride loop, step through all facet entries for each bin where
// * each thread block is responsible for one bin/tile */
// for (index=threadIdx.x; index<entries[frm][bin]; index+=blockDim.x) {
//
// /* Load facet index into registers */
// fct_indx = facet_index[frm][bin][index];
//
// /* Load transformed facet vertices into registers */
// v0 = pos[frm]->facet[fct_indx].v0t;
// v1 = pos[frm]->facet[fct_indx].v1t;
// v2 = pos[frm]->facet[fct_indx].v2t;
// n = pos[frm]->facet[fct_indx].nt;
//
// /* Calculate and store the boundaries of this tile */
// tile_i1 = offsetx;
// tile_i2 = tile_i1 + tile_x;
// tile_j1 = offsety;
// tile_j2 = tile_j1 + tile_x;
//
// /* Load this facet's boundaries and clamp them if needed, then
// * convert to local shared memory array addressing */
// i1 = max(pos[frm]->facet[fct_indx].ilim.x, tile_i1);
// i2 = min(pos[frm]->facet[fct_indx].ilim.y, (tile_i2-1));
// j1 = max(pos[frm]->facet[fct_indx].jlim.x, tile_j1);
// j2 = min(pos[frm]->facet[fct_indx].jlim.y, (tile_j2-1));
//
// /* Precalculate s and t components for the pixel loop */
// double a, b, c, d, e, h, ti, tj, si, sj, si0, sj0, ti0, tj0, sz, tz, den, s, t, z, old;
// a = i1*kmpxl - v0.x;
// b = v2.y - v1.y;
// c = v2.x - v1.x;
// d = j1*kmpxl - v0.y;
// e = v1.x - v0.x;
// h = v1.y - v0.y;
// den = e*b - c*h;
// ti = -h*kmpxl/den;
// tj = e*kmpxl/den;
// si = b*kmpxl/den;
// sj = -c*kmpxl/den;
// si0 = (a*b - c*d)/den;
// ti0 = (e*d -a*h)/den;
// sz = v1.z - v0.z;
// tz = v2.z - v1.z;
//
// /* Now convert i1, i2, j1, j2 to shared-memory tile coordinates */
// i1 -= (offsetx);
// i2 -= (offsetx);
// j1 -= (offsety);
// j2 -= (offsety);
//
// /* Pre-calculate some quantities for cosine smoothing if enabled */
// if (posvis_tiled_smooth) {
// /* Assign temp. normal components as float3 */
// fidx.x = verts[0]->f[fct_indx].v[0];
// fidx.y = verts[0]->f[fct_indx].v[1];
// fidx.z = verts[0]->f[fct_indx].v[2];
//
// tv0.x = verts[0]->v[fidx.x].n[0]; tv0.y = verts[0]->v[fidx.x].n[1];
// tv0.z = verts[0]->v[fidx.x].n[2]; tv1.x = verts[0]->v[fidx.y].n[0];
// tv1.y = verts[0]->v[fidx.y].n[1]; tv1.z = verts[0]->v[fidx.y].n[2];
// tv2.x = verts[0]->v[fidx.z].n[0]; tv2.y = verts[0]->v[fidx.z].n[1];
// tv2.z = verts[0]->v[fidx.z].n[2];
// n1n0.x = tv1.x - tv0.x; n1n0.y = tv1.y - tv0.y; n1n0.z = tv1.z - tv0.z;
// tv2.x -= tv1.x; tv2.y -= tv1.y; tv2.z -= tv1.z;
// }
//
// /* Facet is at least partly within POS frame: find all POS
// * pixels whose centers project onto this facet */
// for (i=i1; i<=i2; i++) {
//
// sj0 = si0; /* Initialize this loop's base sj0, tj0 */
// tj0 = ti0;
//
// for (j=j1; j<=j2; j++) {
//
// /* Calculate s and t parameters */
// s = sj0;
// t = tj0;
//
// if ((s >= -SMALLVAL) && (s <= 1.0 + SMALLVAL)) {// &&
//
// if( (t >= -SMALLVAL) && (t <= s + SMALLVAL)) {
//
// /* Compute z-coordinate of pixel center: its
// * distance measured from the origin towards
// * Earth. */
// z = v0.z + s*sz + t*tz;
//
// /* Compare calculated z to stored shared memory z
// * array at this address and store the bigger value */
// old = atomicMax64(&pos_z[i][j], z);
//
// if (old < z){
//
// if (posvis_tiled_smooth) {
// /* Get pvs_smoothed version of facet unit
// * normal: Take the linear combination
// * of the three vertex normals; trans-
// * form from body to observer coordina-
// * tes; and make sure that it points
// * somewhat in our direction. */
// n.x = tv0.x + s * n1n0.x + t * tv2.x;
// n.y = tv0.y + s * n1n0.y + t * tv2.y;
// n.z = tv0.z + s * n1n0.z + t * tv2.z;
// dev_cotrans1(&n, oa_sh, n, 1);
//// dev_cotrans3(&n, oa_gm, n, 1, frm);
// dev_normalize3(&n);
// }
//
// /* Determine scattering angles. */
// if (n.z > 0.0) {
// atomicExch((unsigned long long int*)&pos_cose[i][j],
// __double_as_longlong(n.z));
// }
// /* Keeping track of facets may not be required. */
//// atomicExch(&pos[frm]->f[i][j], f);
//
// } /* end if (no other facet yet blocks this facet from view) */
// } /* end if 0 <= t <= s (facet center is "in" this POS pixel) */
// } /* end if 0 <= s <= 1 */
//
// sj0 += sj;
// tj0 += tj;
// } /* end j-loop over POS rows */
// /* Modify s and t step-wise for the next i-iteration of the pixel loop */
// si0 += si;
// ti0 += ti;
//
// } /* end i-loop over POS columns */
// }
// __syncthreads();
//
// /* Now write the shared memory array tiles into the global memory z buffer
// * and cosine array, again with a block-stride loop */
//	if (entries[frm][bin] != 0)	/* skip empty bins; facet index 0 is valid */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// ig = i + offsetx;
// jg = j + offsety;
// if (pos_z[i][j]!=-1e20)
// pos[frm]->z[ig][jg] = pos_z[i][j];
// if (pos_cose[i][j]!=0.0)
// pos[frm]->cose[ig][jg] = pos_cose[i][j];
// }
// __syncthreads();
//}
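//
///* Editor's note: atomicMax64 on a double is not a CUDA built-in; it is a
// * project-side helper. A minimal sketch, assuming the usual atomicCAS loop
// * on the value's 64-bit pattern (treat the names as hypothetical if the
// * project's own definition differs):
// *
// *   __device__ double atomicMax64(double *address, double val) {
// *       unsigned long long int *addr = (unsigned long long int *) address;
// *       unsigned long long int old = *addr, assumed;
// *       do {
// *           assumed = old;
// *           old = atomicCAS(addr, assumed,
// *                   __double_as_longlong(fmax(val, __longlong_as_double(assumed))));
// *       } while (assumed != old);  // retry until no concurrent writer intervened
// *       return __longlong_as_double(old);
// *   }
// */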
//
//__global__ void lightcurve_raster_krnl64(struct pos_t **pos,
// struct vertices_t **verts,
// double3 *oa,
// double3 *usrc,
// int ***facet_index,
// int **entries,
// int nf,
// int frm,
// int *n_tiles,
// int *n_tiles_x,
// int *n_tiles_y,
// int tile_size,
// int tile_x,
// int src) {
//
// /* This kernel performs the rasterization tile by tile. Each thread block
// * is responsible for one tile. */
// /* Determine which tile this thread block is responsible for and
// * which element of the thread block this thread is. */
// int bin = blockIdx.x;
// int index = threadIdx.x;
//
// /* Declare the shared memory variables and others */
// __shared__ double pos_z[32][32];//[55][55]; /* One per thread block */
// __shared__ double pos_cose[32][32];//[55][55]; /* One per thread block */
// __shared__ double pos_cosi[32][32];
// __shared__ int2 bn;
// __shared__ int xlim, ylim, offsetx, offsety, bistatic;
// __shared__ double kmpxl;
// int i, j, ig, jg, i1, i2, j1, j2; /* ig,jg are global indices */
// int tile_i1, tile_i2, tile_j1, tile_j2, fct_indx;
// double3 v0, v1, v2, n, tv0, tv1, tv2, n1n0;
// int3 fidx;
//
// /* Initialize the shared memory arrays with grid-stride loop */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// pos_z[i][j] = -1e20;
// pos_cose[i][j] = 0.0;
// }
// __syncthreads();
//
// /* Load variables used by every thread (per block) to shared memory for
// * faster access */
// if (threadIdx.x==0) {
// bistatic = pos[frm]->bistatic;
// xlim = pos[frm]->xlim[0];
// ylim = pos[frm]->ylim[0];
// kmpxl = pos[frm]->km_per_pixel;
// bn.x = bin % n_tiles_x[frm];
// bn.y = bin / n_tiles_x[frm];
//
// /* Calculate the pixel offsets needed to go back and forth between
// * tiled POS space for this block's tile and global POS space */
// offsetx = xlim + tile_x * bn.x;
// offsety = ylim + tile_x * bn.y;
// }
// __syncthreads();
//
// /* Using grid-stride loop, step through all facet entries for each bin where
// * each thread block is responsible for one bin/tile */
// for (index=threadIdx.x; index<entries[frm][bin]; index+=blockDim.x) {
//
// /* Load facet index into registers */
// fct_indx = facet_index[frm][bin][index];
//
// /* Load transformed facet vertices into registers */
// v0 = pos[frm]->facet[fct_indx].v0t;
// v1 = pos[frm]->facet[fct_indx].v1t;
// v2 = pos[frm]->facet[fct_indx].v2t;
// n = pos[frm]->facet[fct_indx].nt;
//
// /* Calculate and store the boundaries of this tile */
// tile_i1 = offsetx;
// tile_i2 = tile_i1 + tile_x;
// tile_j1 = offsety;
// tile_j2 = tile_j1 + tile_x;
//
// /* Load this facet's boundaries and clamp them if needed, then
// * convert to local shared memory array addressing */
// i1 = max(pos[frm]->facet[fct_indx].ilim.x, tile_i1);
// i2 = min(pos[frm]->facet[fct_indx].ilim.y, (tile_i2-1));
// j1 = max(pos[frm]->facet[fct_indx].jlim.x, tile_j1);
// j2 = min(pos[frm]->facet[fct_indx].jlim.y, (tile_j2-1));
//
// /* Pre-calculate s and t components for the pixel loop */
// double a, b, c, d, e, h, ti, tj, si, sj, si0, sj0, ti0, tj0, sz, tz, den, s, t, z, old;
// a = i1*kmpxl - v0.x;
// b = v2.y - v1.y;
// c = v2.x - v1.x;
// d = j1*kmpxl - v0.y;
// e = v1.x - v0.x;
// h = v1.y - v0.y;
// den = e*b - c*h;
// ti = -h*kmpxl/den;
// tj = e*kmpxl/den;
// si = b*kmpxl/den;
// sj = -c*kmpxl/den;
// si0 = (a*b - c*d)/den;
// ti0 = (e*d -a*h)/den;
// sz = v1.z - v0.z;
// tz = v2.z - v1.z;
//
// /* Now convert i1, i2, j1, j2 to shared-memory tile coordinates */
// i1 -= (offsetx);
// i2 -= (offsetx);
// j1 -= (offsety);
// j2 -= (offsety);
//
// /* Pre-calculate some quantities for cosine smoothing if enabled */
// if (posvis_tiled_smooth) {
// /* Assign temp. normal components as float3 */
// fidx.x = verts[0]->f[fct_indx].v[0];
// fidx.y = verts[0]->f[fct_indx].v[1];
// fidx.z = verts[0]->f[fct_indx].v[2];
//
// tv0.x = verts[0]->v[fidx.x].n[0]; tv0.y = verts[0]->v[fidx.x].n[1];
// tv0.z = verts[0]->v[fidx.x].n[2]; tv1.x = verts[0]->v[fidx.y].n[0];
// tv1.y = verts[0]->v[fidx.y].n[1]; tv1.z = verts[0]->v[fidx.y].n[2];
// tv2.x = verts[0]->v[fidx.z].n[0]; tv2.y = verts[0]->v[fidx.z].n[1];
// tv2.z = verts[0]->v[fidx.z].n[2];
// n1n0.x = tv1.x - tv0.x; n1n0.y = tv1.y - tv0.y; n1n0.z = tv1.z - tv0.z;
// tv2.x -= tv1.x; tv2.y -= tv1.y; tv2.z -= tv1.z;
// }
//
// /* Facet is at least partly within POS frame: find all POS
// * pixels whose centers project onto this facet */
// for (i=i1; i<=i2; i++) {
//
// sj0 = si0; /* Initialize this loop's base sj0, tj0 */
// tj0 = ti0;
//
// for (j=j1; j<=j2; j++) {
//
// /* Calculate s and t parameters */
// s = sj0;
// t = tj0;
//
// if ((s >= -SMALLVAL) && (s <= 1.0 + SMALLVAL)) {// &&
//
// if( (t >= -SMALLVAL) && (t <= s + SMALLVAL)) {
//
// /* Compute z-coordinate of pixel center: its
// * distance measured from the origin towards
// * Earth. */
// z = v0.z + s*sz + t*tz;
//
// /* Compare calculated z to stored shared memory z
// * array at this address and store the bigger value */
// old = atomicMax64(&pos_z[i][j], z);
//
// if (old < z){
// if (posvis_tiled_smooth) {
// /* Get pvs_smoothed version of facet unit
// * normal: Take the linear combination
// * of the three vertex normals; trans-
// * form from body to observer coordina-
// * tes; and make sure that it points
// * somewhat in our direction. */
// n.x = tv0.x + s * n1n0.x + t * tv2.x;
// n.y = tv0.y + s * n1n0.y + t * tv2.y;
// n.z = tv0.z + s * n1n0.z + t * tv2.z;
// dev_cotrans3(&n, oa, n, 1, frm);
// dev_normalize3(&n);
// }
//
// /* Determine scattering angles. */
// if (n.z > 0.0) {
// if (src)
// atomicExch((unsigned long long int*)&pos[frm]->cosill[i][j],
// __double_as_longlong(n.z));
// else
// atomicExch((unsigned long long int*)&pos[frm]->cose[i][j],
// __double_as_longlong(n.z));
//
// if ((!src) && (bistatic)) {
//
// double temp = dev_dot_d3(n,usrc[frm]);
// atomicExch((unsigned long long int*)&pos[frm]->cosi[i][j],
// __double_as_longlong(temp));
// if (pos[frm]->cosi[i][j] <= 0.0)
// pos[frm]->cose[i][j] = 0.0;
// }
//
// }
// /* Keeping track of facets may not be required. */
//// atomicExch(&pos[frm]->f[i][j], f);
//
// } /* end if (no other facet yet blocks this facet from view) */
// } /* end if 0 <= t <= s (facet center is "in" this POS pixel) */
// } /* end if 0 <= s <= 1 */
//
// sj0 += sj;
// tj0 += tj;
// } /* end j-loop over POS rows */
// /* Modify s and t step-wise for the next i-iteration of the pixel loop */
// si0 += si;
// ti0 += ti;
//
// } /* end i-loop over POS columns */
// }
// __syncthreads();
//
// /* Now write the shared memory array tiles into the global memory z buffer
// * and cosine array, again with a block-stride loop */
//	if (entries[frm][bin] != 0)	/* skip empty bins; facet index 0 is valid */
// for (int index=threadIdx.x; index<tile_size; index+=blockDim.x) {
// i = index % tile_x;
// j = index / tile_x;
// ig = i + offsetx;
// jg = j + offsety;
// if (pos_z[i][j]!=-1e20)
// pos[frm]->z[ig][jg] = pos_z[i][j];
// if (pos_cose[i][j]!=0.0)
// pos[frm]->cose[ig][jg] = pos_cose[i][j];
// }
// __syncthreads();
//}
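//
///* Editor's note: the atomicExch((unsigned long long int *)&x,
// * __double_as_longlong(v)) pattern used above stores a double atomically by
// * reinterpreting its bits, since atomicExch has no double overload; only the
// * store itself is atomic, not any surrounding read-modify-write. */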
//
//__global__ void posvis_outbnd_tiled_krnl64(struct pos_t **pos,
// int *outbndarr, double4 *ijminmax_overall, int size, int start) {
// /* nfrm_alloc-threaded kernel */
// int posn, f = blockIdx.x * blockDim.x + threadIdx.x + start;
// double xfactor, yfactor;
// if (f <size) {
// if (outbndarr[f]) {
// /* ijminmax_overall.w = imin_overall
// * ijminmax_overall.x = imax_overall
// * ijminmax_overall.y = jmin_overall
// * ijminmax_overall.z = jmax_overall */
// posn = pos[f]->n;
// xfactor = (MAX( ijminmax_overall[f].x, posn) -
// MIN( ijminmax_overall[f].w, -posn) + 1) / (2*posn+1);
// yfactor = (MAX( ijminmax_overall[f].z, posn) -
// MIN( ijminmax_overall[f].y, -posn) + 1) / (2*posn+1);
// pos[f]->posbnd_logfactor = log(xfactor*yfactor);
// }
// }
//}
//
//__host__ int posvis_tiled_gpu64(
// struct par_t *dpar,
// struct mod_t *dmod,
// struct dat_t *ddat,
// struct pos_t **pos,
// struct vertices_t **verts,
// double3 orbit_offset,
// int *posn,
// int *outbndarr,
// int set,
// int nfrm_alloc,
// int src,
// int nf,
// int body, int comp, unsigned char type, cudaStream_t *pv_stream,
// int src_override) {
//
// dim3 BLK,THD, BLKfrm, THD64, *BLKtile, BLKaf, THDaf;
// double4 *ijminmax_overall;
// double3 *oa, *usrc;
// int ***facet_index, *xspan, *yspan, *n_tiles_x, *n_tiles_y, *n_tiles, **entries;
// int f, outbnd, start, sharedMem, oasize, span, tile_size, **addr_index;
//
// oasize=nfrm_alloc*3;
// span=32;
// tile_size=span*span;
//
// /* Launch parameters for the facet_streams kernel */
// THD.x = 256; THD64.x = 64;
// BLK.x = floor((THD.x - 1 + nf) / THD.x);
// BLKfrm.x = floor((THD64.x - 1 + nfrm_alloc)/THD64.x);
// THDaf.x = 1024;
// BLKaf.x = nfrm_alloc;
//
// /* Set up the offset addressing for lightcurves if this is a lightcurve */
// if (type == LGHTCRV) start = 1; /* fixes the lightcurve offsets */
// else start = 0;
//
// /* Allocate temporary arrays/structs */
// cudaCalloc1((void**)&ijminmax_overall, sizeof(double4), nfrm_alloc);
// cudaCalloc1((void**)&oa, sizeof(double3), oasize);
// cudaCalloc1((void**)&usrc, sizeof(double3), nfrm_alloc);
// cudaCalloc1((void**)&xspan, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&yspan, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&n_tiles, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&n_tiles_x, sizeof(int), nfrm_alloc);
// cudaCalloc1((void**)&n_tiles_y, sizeof(int), nfrm_alloc);
// /* Allocate the frame portion of the facet index triple pointer and
// * the bin entries counter */
// cudaCalloc((void**)&facet_index, sizeof(int**), nfrm_alloc);
// cudaCalloc1((void**)&entries, sizeof(int*), nfrm_alloc);
//// cudaCalloc1((void**)&addr_index, sizeof(int*), nfrm_alloc);
// cudaCalloc((void**)&BLKtile, sizeof(dim3), nfrm_alloc);
//
// /* Initialize/pre-calculate values for rasterization */
// posvis_tiled_init_krnl64<<<BLKfrm,THD64>>>(dpar, pos, ijminmax_overall, oa, usrc,
// outbndarr, comp, start, src, nfrm_alloc, set, src_override);
// checkErrorAfterKernelLaunch("posvis_tiled_init_krnl64");
//
// /* Transform facet normals and determine bounding for facets and pos. */
// for (f=start; f<nfrm_alloc; f++)
// transform_facet_normals_krnl64c<<<1,THD,0,pv_stream[f]>>>(dmod, pos,
// verts, ijminmax_overall, orbit_offset, oa, outbndarr, nf,
// f, src);
//	checkErrorAfterKernelLaunch("transform_facet_normals_krnl64c");
//// /* Transform facet normals and determine bounding for facets and pos. */
//// for (f=start; f<nfrm_alloc; f++)
//// transform_facet_normals_krnl64b<<<BLK,THD,0,pv_stream[f]>>>(dmod, pos,
//// verts, ijminmax_overall, orbit_offset, oa, usrc, outbndarr, nf,
//// f, src);
//// checkErrorAfterKernelLaunch("transform_facet_normals_krnl64b");
//// transform_facet_normals_krnl64b<<<BLKaf,THD>>>(dmod, pos, verts, ijminmax_overall,
//// orbit_offset, oa, outbndarr, nf, src);
//// checkErrorAfterKernelLaunch("transform_facet_normals_krnl64a");
//
// for (f=start; f<nfrm_alloc; f++)
// cudaStreamSynchronize(pv_stream[f]);
//
// /* Now calculate the tiling parameters to cover the POS view */
// for (f=start; f<nfrm_alloc; f++) {
// xspan[f] = pos[f]->xlim[1] - pos[f]->xlim[0] + 1;
// yspan[f] = pos[f]->ylim[1] - pos[f]->ylim[0] + 1;
// n_tiles_x[f] = (xspan[f]/span) + 1;
// n_tiles_y[f] = (yspan[f]/span) + 1;
// n_tiles[f] = n_tiles_x[f] * n_tiles_y[f];
// BLKtile[f].x = n_tiles[f];
// BLKtile[f].y = BLKtile[f].z = 1;
//
// /* Now allocate the tiles section of the facet index and then step
// * through each tile section to allocate enough space for 1024
// * facet indices. This is the maximum number of entries allowable
// * per thread block */
// /* Allocate the entries array to keep track of how many facets each bin holds */
//// cudaCalloc((void**)&addr_index[f], sizeof(int), n_tiles[f]);
// cudaCalloc((void**)&entries[f], sizeof(int), n_tiles[f]);
// cudaCalloc((void**)&facet_index[f], sizeof(int*),n_tiles[f]);
// for (int ti=0; ti<n_tiles[f]; ti++)
// cudaCalloc((void**)&facet_index[f][ti], sizeof(int), 4*1024);
// }
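//
//	/* Worked example (editor's note): with xlim = [-75, 75] the span is
//	 * 151 pixels, so span = 32 gives n_tiles_x = 151/32 + 1 = 5 tiles;
//	 * a 151x151 POS window therefore uses 5*5 = 25 tiles/thread blocks. */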
//
// bin_facets_krnl64c<<<BLKaf,THDaf>>>(pos, verts, facet_index,
// entries, nf, n_tiles, n_tiles_x, n_tiles_y, span);
//	checkErrorAfterKernelLaunch("bin_facets_krnl64c");
//
//	/* Now rasterize each frame, tile by tile */
// for (f=start; f<nfrm_alloc; f++) {
//// sharedMem = sizeof(int)*n_tiles[f];
//// bin_facets_krnl64a<<<1,THD,sharedMem,pv_stream[f]>>>(pos, verts, facet_index,
//// entries, nf, f, n_tiles, n_tiles_x, n_tiles_y, span);
//// bin_facets_krnl64b<<<BLK,THD,sharedMem,pv_stream[f]>>>(pos, verts, facet_index,
//// entries, addr_index, nf, f, n_tiles, n_tiles_x, n_tiles_y, span);
// radar_raster_krnl64<<<BLKtile[f],THD,0,pv_stream[f]>>>(pos, verts, oa,
// facet_index, entries, nf, f, n_tiles, n_tiles_x,
// n_tiles_y, tile_size, span);
// }
//	checkErrorAfterKernelLaunch("radar_raster_krnl64");
//
// for (f=start; f<nfrm_alloc; f++)
// cudaStreamSynchronize(pv_stream[f]);
//
// /* Take care of any posbnd flags */
// posvis_outbnd_tiled_krnl64<<<BLKfrm,THD64>>>(pos,
// outbndarr, ijminmax_overall, nfrm_alloc, start);
//	checkErrorAfterKernelLaunch("posvis_outbnd_tiled_krnl64");
// gpuErrchk(cudaMemcpyFromSymbol(&outbnd, posvis_tiled_outbnd, sizeof(int), 0,
// cudaMemcpyDeviceToHost));
//
//// int n = 75;
//// int npixels = 151*151;
//// f = 0;
//// dbg_print_pos_arrays_full64(pos, 0, npixels, n);
//// dbg_print_pos_arrays_full64(pos, 1, npixels, n);
//// dbg_print_pos_arrays_full64(pos, 2, npixels, n);
//// dbg_print_pos_arrays_full64(pos, 3, npixels, n);
//
// /* Free temp arrays, destroy streams and timers, as applicable */
// cudaFree(oa);
// cudaFree(usrc);
// cudaFree(xspan);
// cudaFree(yspan);
// cudaFree(n_tiles);
// cudaFree(entries);
// cudaFree(BLKtile);
// cudaFree(n_tiles_x);
// cudaFree(n_tiles_y);
//// cudaFree(addr_index);
// cudaFree(facet_index);
// cudaFree(ijminmax_overall);
//
// return outbnd;
//}
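//
///* Editor's note: checkErrorAfterKernelLaunch() and gpuErrchk() are
// * project-side error helpers, not CUDA built-ins. A minimal sketch, on the
// * assumption that the first simply wraps cudaGetLastError():
// *
// *   void checkErrorAfterKernelLaunch(const char *name) {
// *       cudaError_t err = cudaGetLastError();
// *       if (err != cudaSuccess)
// *           fprintf(stderr, "%s failed: %s\n", name, cudaGetErrorString(err));
// *   }
// */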
|
99e0d422b852ce24f247ecf236a7fed17114592e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/apply_gradient_descent_impl.cuh"
template <typename T>
__global__ void ApplyGradientDescent(const size_t size, T *var, const T *alpha, const T *delta, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
const T alpha_value = alpha[0];
var[pos] -= alpha_value * delta[pos];
output[pos] = var[pos];
}
}
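// Grid-stride note (editor's): the loop in ApplyGradientDescent strides by
// blockDim.x * gridDim.x, so the kernel stays correct even when size exceeds
// the total number of launched threads.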
template <typename T>
void CalApplyGradientDescent(const size_t &size, T *var, const T *alpha, const T *delta, T *output,
hipStream_t cuda_stream) {
hipLaunchKernelGGL(( ApplyGradientDescent), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, var, alpha, delta, output);
}
template void CalApplyGradientDescent<float>(const size_t &size, float *var, const float *alpha, const float *delta,
float *output, hipStream_t cuda_stream);
template void CalApplyGradientDescent<half>(const size_t &size, half *var, const half *alpha, const half *delta,
half *output, hipStream_t cuda_stream);
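
// Editor's note: a hypothetical host-side usage sketch, not part of the
// original file; names and sizes are illustrative only. Enable with
// -DAPPLY_GD_USAGE_EXAMPLE.
#ifdef APPLY_GD_USAGE_EXAMPLE
#include <hip/hip_runtime.h>
#include <vector>
int main() {
  const size_t n = 1024;
  std::vector<float> h_var(n, 1.0f), h_delta(n, 0.5f);
  const float h_alpha = 0.1f;
  float *d_var, *d_alpha, *d_delta, *d_out;
  hipMalloc((void **)&d_var, n * sizeof(float));
  hipMalloc((void **)&d_alpha, sizeof(float));
  hipMalloc((void **)&d_delta, n * sizeof(float));
  hipMalloc((void **)&d_out, n * sizeof(float));
  hipMemcpy(d_var, h_var.data(), n * sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_alpha, &h_alpha, sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_delta, h_delta.data(), n * sizeof(float), hipMemcpyHostToDevice);
  CalApplyGradientDescent(n, d_var, d_alpha, d_delta, d_out, 0);  // default stream
  hipDeviceSynchronize();  // every var[i] becomes 1.0 - 0.1 * 0.5 = 0.95
  hipFree(d_var); hipFree(d_alpha); hipFree(d_delta); hipFree(d_out);
  return 0;
}
#endif  // APPLY_GD_USAGE_EXAMPLE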
| 99e0d422b852ce24f247ecf236a7fed17114592e.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "backend/kernel_compiler/gpu/cuda_impl/apply_gradient_descent_impl.cuh"
template <typename T>
__global__ void ApplyGradientDescent(const size_t size, T *var, const T *alpha, const T *delta, T *output) {
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) {
const T alpha_value = alpha[0];
var[pos] -= alpha_value * delta[pos];
output[pos] = var[pos];
}
}
template <typename T>
void CalApplyGradientDescent(const size_t &size, T *var, const T *alpha, const T *delta, T *output,
cudaStream_t cuda_stream) {
ApplyGradientDescent<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, var, alpha, delta, output);
}
template void CalApplyGradientDescent<float>(const size_t &size, float *var, const float *alpha, const float *delta,
float *output, cudaStream_t cuda_stream);
template void CalApplyGradientDescent<half>(const size_t &size, half *var, const half *alpha, const half *delta,
half *output, cudaStream_t cuda_stream);
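
// Editor's note: hypothetical usage sketch, not part of the original file;
// the CUDA-side counterpart of the sketch in the paired .hip file, with
// cuda* calls in place of hip* ones. Illustrative names only.
#ifdef APPLY_GD_USAGE_EXAMPLE
#include <cuda_runtime.h>
int main() {
  const size_t n = 1024;
  float *d_var, *d_alpha, *d_delta, *d_out;
  cudaMalloc((void **)&d_var, n * sizeof(float));
  cudaMalloc((void **)&d_alpha, sizeof(float));
  cudaMalloc((void **)&d_delta, n * sizeof(float));
  cudaMalloc((void **)&d_out, n * sizeof(float));
  // ... fill d_var, d_alpha and d_delta as in the HIP sketch ...
  CalApplyGradientDescent(n, d_var, d_alpha, d_delta, d_out, 0);
  cudaDeviceSynchronize();
  cudaFree(d_var); cudaFree(d_alpha); cudaFree(d_delta); cudaFree(d_out);
  return 0;
}
#endif  // APPLY_GD_USAGE_EXAMPLE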
|
642f47209a310344e880a360872422b81fe030d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
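
// Worked example (editor's note): for a 7x7 pooled output over an ROI spanning
// 14x21 input pixels, bin_size_h = 14/7 = 2.0 and bin_size_w = 21/7 = 3.0, so
// pooled cell (ph=2, pw=1) max-pools input rows [4,6) and cols [3,6) relative
// to (roi_start_h, roi_start_w).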
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ROIPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe | 642f47209a310344e880a360872422b81fe030d4.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/roi_pooling_layer.hpp"
using std::max;
using std::min;
namespace caffe {
template <typename Dtype>
__global__ void ROIPoolForward(const int nthreads, const Dtype* bottom_data,
const Dtype spatial_scale, const int channels, const int height,
const int width, const int pooled_height, const int pooled_width,
const Dtype* bottom_rois, Dtype* top_data, int* argmax_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, ph, pw) is an element in the pooled output
int pw = index % pooled_width;
int ph = (index / pooled_width) % pooled_height;
int c = (index / pooled_width / pooled_height) % channels;
int n = index / pooled_width / pooled_height / channels;
bottom_rois += n * 5;
int roi_batch_ind = bottom_rois[0];
int roi_start_w = round(bottom_rois[1] * spatial_scale);
int roi_start_h = round(bottom_rois[2] * spatial_scale);
int roi_end_w = round(bottom_rois[3] * spatial_scale);
int roi_end_h = round(bottom_rois[4] * spatial_scale);
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int hstart = static_cast<int>(floor(static_cast<Dtype>(ph)
* bin_size_h));
int wstart = static_cast<int>(floor(static_cast<Dtype>(pw)
* bin_size_w));
int hend = static_cast<int>(ceil(static_cast<Dtype>(ph + 1)
* bin_size_h));
int wend = static_cast<int>(ceil(static_cast<Dtype>(pw + 1)
* bin_size_w));
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart + roi_start_h, 0), height);
hend = min(max(hend + roi_start_h, 0), height);
wstart = min(max(wstart + roi_start_w, 0), width);
wend = min(max(wend + roi_start_w, 0), width);
bool is_empty = (hend <= hstart) || (wend <= wstart);
// Define an empty pooling region to be zero
Dtype maxval = is_empty ? 0 : -FLT_MAX;
// If nothing is pooled, argmax = -1 causes nothing to be backprop'd
int maxidx = -1;
bottom_data += (roi_batch_ind * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
int bottom_index = h * width + w;
if (bottom_data[bottom_index] > maxval) {
maxval = bottom_data[bottom_index];
maxidx = bottom_index;
}
}
}
top_data[index] = maxval;
argmax_data[index] = maxidx;
}
}
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_rois = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int* argmax_data = max_idx_.mutable_gpu_data();
int count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, spatial_scale_, channels_, height_, width_,
pooled_height_, pooled_width_, bottom_rois, top_data, argmax_data);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void ROIPoolBackward(const int nthreads, const Dtype* top_diff,
const int* argmax_data, const int num_rois, const Dtype spatial_scale,
const int channels, const int height, const int width,
const int pooled_height, const int pooled_width, Dtype* bottom_diff,
const Dtype* bottom_rois) {
CUDA_KERNEL_LOOP(index, nthreads) {
// (n, c, h, w) coords in bottom data
int w = index % width;
int h = (index / width) % height;
int c = (index / width / height) % channels;
int n = index / width / height / channels;
Dtype gradient = 0;
// Accumulate gradient over all ROIs that pooled this element
for (int roi_n = 0; roi_n < num_rois; ++roi_n) {
const Dtype* offset_bottom_rois = bottom_rois + roi_n * 5;
int roi_batch_ind = offset_bottom_rois[0];
// Skip if ROI's batch index doesn't match n
if (n != roi_batch_ind) {
continue;
}
int roi_start_w = round(offset_bottom_rois[1] * spatial_scale);
int roi_start_h = round(offset_bottom_rois[2] * spatial_scale);
int roi_end_w = round(offset_bottom_rois[3] * spatial_scale);
int roi_end_h = round(offset_bottom_rois[4] * spatial_scale);
// Skip if ROI doesn't include (h, w)
const bool in_roi = (w >= roi_start_w && w <= roi_end_w &&
h >= roi_start_h && h <= roi_end_h);
if (!in_roi) {
continue;
}
int offset = (roi_n * channels + c) * pooled_height * pooled_width;
const Dtype* offset_top_diff = top_diff + offset;
const int* offset_argmax_data = argmax_data + offset;
// Compute feasible set of pooled units that could have pooled
// this bottom unit
// Force malformed ROIs to be 1x1
int roi_width = max(roi_end_w - roi_start_w + 1, 1);
int roi_height = max(roi_end_h - roi_start_h + 1, 1);
Dtype bin_size_h = static_cast<Dtype>(roi_height)
/ static_cast<Dtype>(pooled_height);
Dtype bin_size_w = static_cast<Dtype>(roi_width)
/ static_cast<Dtype>(pooled_width);
int phstart = floor(static_cast<Dtype>(h - roi_start_h) / bin_size_h);
int phend = ceil(static_cast<Dtype>(h - roi_start_h + 1) / bin_size_h);
int pwstart = floor(static_cast<Dtype>(w - roi_start_w) / bin_size_w);
int pwend = ceil(static_cast<Dtype>(w - roi_start_w + 1) / bin_size_w);
phstart = min(max(phstart, 0), pooled_height);
phend = min(max(phend, 0), pooled_height);
pwstart = min(max(pwstart, 0), pooled_width);
pwend = min(max(pwend, 0), pooled_width);
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
if (offset_argmax_data[ph * pooled_width + pw] == (h * width + w)) {
gradient += offset_top_diff[ph * pooled_width + pw];
}
}
}
}
bottom_diff[index] = gradient;
}
}
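
// Worked example (editor's note): the backward pass inverts the forward
// binning -- with bin_size_h = 2.0 and h - roi_start_h = 5, the only feasible
// pooled row is ph in [floor(5/2.0), ceil(6/2.0)) = [2, 3), and the argmax
// comparison then decides whether this bottom unit actually fed that cell.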
template <typename Dtype>
void ROIPoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0]) {
return;
}
const Dtype* bottom_rois = bottom[1]->gpu_data();
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int count = bottom[0]->count();
caffe_gpu_set(count, Dtype(0.), bottom_diff);
const int* argmax_data = max_idx_.gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ROIPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, argmax_data, top[0]->num(), spatial_scale_, channels_,
height_, width_, pooled_height_, pooled_width_, bottom_diff, bottom_rois);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(ROIPoolingLayer);
} // namespace caffe |
5cf38c452867404be7862a215f5dcf33b603547a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=1024 --blockDim=512
#include "common.h"
__global__ void bitonicMergeShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint dir
)
{
__requires(arrayLength == 2048);
__requires(size == 1024);
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
//Bitonic merge
uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
uint ddd = dir ^ ((comparatorI & (size / 2)) != 0);
for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
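//Editor's note: UMAD, Comparator and SHARED_SIZE_LIMIT come from common.h.
//A sketch of how the CUDA sortingNetworks sample typically defines them --
//treat this as an assumption if this project's common.h differs:
//
//  #define SHARED_SIZE_LIMIT 1024U
//  #define UMUL(a, b) __umul24((a), (b))
//  #define UMAD(a, b, c) ( UMUL((a), (b)) + (c) )
//
//  __device__ inline void Comparator(uint &keyA, uint &valA,
//                                    uint &keyB, uint &valB, uint dir) {
//      uint t;
//      if ((keyA > keyB) == dir) {  // swap so the pair ends up ordered per dir
//          t = keyA; keyA = keyB; keyB = t;
//          t = valA; valA = valB; valB = t;
//      }
//  }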
| 5cf38c452867404be7862a215f5dcf33b603547a.cu | //pass
//--gridDim=1024 --blockDim=512
#include "common.h"
__global__ void bitonicMergeShared(
uint *d_DstKey,
uint *d_DstVal,
uint *d_SrcKey,
uint *d_SrcVal,
uint arrayLength,
uint size,
uint dir
)
{
__requires(arrayLength == 2048);
__requires(size == 1024);
//Shared memory storage for current subarray
__shared__ uint s_key[SHARED_SIZE_LIMIT];
__shared__ uint s_val[SHARED_SIZE_LIMIT];
d_SrcKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_SrcVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstKey += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
d_DstVal += blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = d_SrcKey[ 0];
s_val[threadIdx.x + 0] = d_SrcVal[ 0];
s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcKey[(SHARED_SIZE_LIMIT / 2)];
s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)] = d_SrcVal[(SHARED_SIZE_LIMIT / 2)];
//Bitonic merge
uint comparatorI = UMAD(blockIdx.x, blockDim.x, threadIdx.x) & ((arrayLength / 2) - 1);
uint ddd = dir ^ ((comparatorI & (size / 2)) != 0);
for (uint stride = SHARED_SIZE_LIMIT / 2; stride > 0; stride >>= 1)
{
__syncthreads();
uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(
s_key[pos + 0], s_val[pos + 0],
s_key[pos + stride], s_val[pos + stride],
ddd
);
}
__syncthreads();
d_DstKey[ 0] = s_key[threadIdx.x + 0];
d_DstVal[ 0] = s_val[threadIdx.x + 0];
d_DstKey[(SHARED_SIZE_LIMIT / 2)] = s_key[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
d_DstVal[(SHARED_SIZE_LIMIT / 2)] = s_val[threadIdx.x + (SHARED_SIZE_LIMIT / 2)];
}
|
f74f3dd3e23b740f9111cb22198d9219cd98407b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda/implementacion/kernel_procesar_archivo.h"
namespace clasificador_de_distribuciones
{
namespace cuda
{
namespace implementacion
{
__device__ void InicializarVariablesCompartidas(size_t* n_eventos_bloque,
short* estado_bloque,
size_t* posicion_error_bloque)
{
*n_eventos_bloque = 0;
*estado_bloque = 0;
*posicion_error_bloque = 0;
}
__device__ void CopiarSeccionDeArchivo(size_t hilo,
size_t ini, size_t fin,
size_t desplazamiento,
char* archivo_gl, size_t l_archivo,
char* texto_bloque)
{
if(hilo == kHilosXBloque - 1)
fin += 25;
for(size_t i = ini; i < fin && i + desplazamiento < l_archivo ; i++)
{
texto_bloque[i] = archivo_gl[i+desplazamiento];
}
}
__device__ size_t SiguienteSalto (size_t i, size_t l_bloque, char* texto_bloque)
{
for(; i<=l_bloque && texto_bloque[i] != '\n' && texto_bloque[i] != '\r';
++i);
if( texto_bloque[i] == '\r' && texto_bloque[i+1] == '\n' )
++i;
return ++i;
}
__device__ void ProcesarSeccion ( size_t ini, size_t fin,
char* texto_bloque,
Evento* eventos_bloque,
size_t* n_eventos_bloque,
short* estado_bloque)
{
int id_grupo;
int id_resultado;
for ( size_t i = ini; i <= fin && *estado_bloque == 0 ;i++ )
{
id_grupo = id_resultado = 0;
for(; texto_bloque[i] >= '0' && texto_bloque[i] <= '9'; i++)
{
id_grupo*=10;
id_grupo+=texto_bloque[i]-'0';
}
i++;
for(; texto_bloque[i] >= '0' && texto_bloque[i] <= '9'; i++)
{
id_resultado*=10;
id_resultado+=texto_bloque[i]-'0';
}
if(texto_bloque[i] == '\r' && texto_bloque[i+1] == '\n')
{
i++;
}
unsigned int pos = atomicInc( (unsigned int*) n_eventos_bloque,
kHilosXBloque * kLineasXHiloEst );
eventos_bloque[pos].id_grupo_ = id_grupo;
eventos_bloque[pos].valor_ = id_resultado;
}
}
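
// Editor's note: ProcesarSeccion assumes each line has the form
// "<id_grupo><sep><valor>", e.g. "42;17\n" yields id_grupo = 42 and
// valor_ = 17; the single i++ between the two digit loops skips exactly one
// separator character, so the `separador` argument of ProcesarArchivo is
// effectively unused here.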
__device__ void CopiarEventosBloque (size_t hilo, size_t bloque,
Evento* eventos_bloque,
size_t n_eventos_bloque,
Evento* eventos, size_t ancho_eventos,
size_t* n_eventos)
{
size_t eventos_x_hilo = n_eventos_bloque / kHilosXBloque;
if(hilo == 0)
{
n_eventos[bloque] = n_eventos_bloque;
}
Evento* fila_eventos = (Evento*)((char*)eventos + bloque * ancho_eventos);
for( size_t i=eventos_x_hilo*hilo; i < eventos_x_hilo*(hilo+1) ; i++ )
fila_eventos[i] = eventos_bloque[i];
if(hilo == kHilosXBloque - 1)
{
for(size_t i=eventos_x_hilo*(hilo+1); i < n_eventos_bloque; i++)
{
fila_eventos[i] = eventos_bloque[i];
}
}
}
__global__ void ProcesarArchivo (char* archivo, size_t l_archivo,
char separador,
Evento* eventos, size_t ancho_eventos,
size_t* n_eventos,
short* estado, int* posicion_error)
{
__shared__ size_t n_eventos_bloque;
__shared__ short estado_bloque;
__shared__ size_t posicion_error_bloque;
__shared__ char texto_bloque[kHilosXBloque * kCaracteresXHilo + 25];
__shared__ Evento eventos_bloque[kHilosXBloque * kLineasXHiloEst];
size_t hilo = threadIdx.x;
size_t bloque = blockIdx.x;
size_t ini = kCaracteresXHilo * hilo;
size_t fin = kCaracteresXHilo * (hilo + 1);
size_t desplazamiento = kCaracteresXHilo * kHilosXBloque * bloque;
if(hilo == 0)
{
InicializarVariablesCompartidas(&n_eventos_bloque,
&estado_bloque,
&posicion_error_bloque);
}
__syncthreads();
CopiarSeccionDeArchivo(hilo,
ini, fin,
desplazamiento,
archivo, l_archivo,
texto_bloque);
__syncthreads();
size_t l_bloque = min (kCaracteresXHilo*kHilosXBloque + 25U,
l_archivo - desplazamiento);
fin = min(fin, l_bloque-1);
if(ini < l_bloque)
{
if(hilo != 0 || bloque != 0)
{
ini = SiguienteSalto(ini, l_bloque, texto_bloque);
}
ProcesarSeccion (ini, fin,
texto_bloque,
eventos_bloque,
&n_eventos_bloque,
&estado_bloque);
}
__syncthreads();
CopiarEventosBloque(hilo, bloque,
eventos_bloque,
n_eventos_bloque,
eventos, ancho_eventos,
n_eventos);
}
} // namespace implementacion
} // namespace cuda
} // namespace clasificador_de_distribuciones | f74f3dd3e23b740f9111cb22198d9219cd98407b.cu | #include "cuda/implementacion/kernel_procesar_archivo.h"
namespace clasificador_de_distribuciones
{
namespace cuda
{
namespace implementacion
{
__device__ void InicializarVariablesCompartidas(size_t* n_eventos_bloque,
short* estado_bloque,
size_t* posicion_error_bloque)
{
*n_eventos_bloque = 0;
*estado_bloque = 0;
*posicion_error_bloque = 0;
}
__device__ void CopiarSeccionDeArchivo(size_t hilo,
size_t ini, size_t fin,
size_t desplazamiento,
char* archivo_gl, size_t l_archivo,
char* texto_bloque)
{
if(hilo == kHilosXBloque - 1)
fin += 25;
for(size_t i = ini; i < fin && i + desplazamiento < l_archivo ; i++)
{
texto_bloque[i] = archivo_gl[i+desplazamiento];
}
}
__device__ size_t SiguienteSalto (size_t i, size_t l_bloque, char* texto_bloque)
{
for(; i<=l_bloque && texto_bloque[i] != '\n' && texto_bloque[i] != '\r';
++i);
if( texto_bloque[i] == '\r' && texto_bloque[i+1] == '\n' )
++i;
return ++i;
}
__device__ void ProcesarSeccion ( size_t ini, size_t fin,
char* texto_bloque,
Evento* eventos_bloque,
size_t* n_eventos_bloque,
short* estado_bloque)
{
int id_grupo;
int id_resultado;
for ( size_t i = ini; i <= fin && *estado_bloque == 0 ;i++ )
{
id_grupo = id_resultado = 0;
for(; texto_bloque[i] >= '0' && texto_bloque[i] <= '9'; i++)
{
id_grupo*=10;
id_grupo+=texto_bloque[i]-'0';
}
i++;
for(; texto_bloque[i] >= '0' && texto_bloque[i] <= '9'; i++)
{
id_resultado*=10;
id_resultado+=texto_bloque[i]-'0';
}
if(texto_bloque[i] == '\r' && texto_bloque[i+1] == '\n')
{
i++;
}
unsigned int pos = atomicInc( (unsigned int*) n_eventos_bloque,
kHilosXBloque * kLineasXHiloEst );
eventos_bloque[pos].id_grupo_ = id_grupo;
eventos_bloque[pos].valor_ = id_resultado;
}
}
__device__ void CopiarEventosBloque (size_t hilo, size_t bloque,
Evento* eventos_bloque,
size_t n_eventos_bloque,
Evento* eventos, size_t ancho_eventos,
size_t* n_eventos)
{
size_t eventos_x_hilo = n_eventos_bloque / kHilosXBloque;
if(hilo == 0)
{
n_eventos[bloque] = n_eventos_bloque;
}
Evento* fila_eventos = (Evento*)((char*)eventos + bloque * ancho_eventos);
for( size_t i=eventos_x_hilo*hilo; i < eventos_x_hilo*(hilo+1) ; i++ )
fila_eventos[i] = eventos_bloque[i];
if(hilo == kHilosXBloque - 1)
{
for(size_t i=eventos_x_hilo*(hilo+1); i < n_eventos_bloque; i++)
{
fila_eventos[i] = eventos_bloque[i];
}
}
}
__global__ void ProcesarArchivo (char* archivo, size_t l_archivo,
char separador,
Evento* eventos, size_t ancho_eventos,
size_t* n_eventos,
short* estado, int* posicion_error)
{
__shared__ size_t n_eventos_bloque;
__shared__ short estado_bloque;
__shared__ size_t posicion_error_bloque;
__shared__ char texto_bloque[kHilosXBloque * kCaracteresXHilo + 25];
__shared__ Evento eventos_bloque[kHilosXBloque * kLineasXHiloEst];
size_t hilo = threadIdx.x;
size_t bloque = blockIdx.x;
size_t ini = kCaracteresXHilo * hilo;
size_t fin = kCaracteresXHilo * (hilo + 1);
size_t desplazamiento = kCaracteresXHilo * kHilosXBloque * bloque;
if(hilo == 0)
{
InicializarVariablesCompartidas(&n_eventos_bloque,
&estado_bloque,
&posicion_error_bloque);
}
__syncthreads();
CopiarSeccionDeArchivo(hilo,
ini, fin,
desplazamiento,
archivo, l_archivo,
texto_bloque);
__syncthreads();
size_t l_bloque = min (kCaracteresXHilo*kHilosXBloque + 25U,
l_archivo - desplazamiento);
fin = min(fin, l_bloque-1);
if(ini < l_bloque)
{
if(hilo != 0 || bloque != 0)
{
ini = SiguienteSalto(ini, l_bloque, texto_bloque);
}
ProcesarSeccion (ini, fin,
texto_bloque,
eventos_bloque,
&n_eventos_bloque,
&estado_bloque);
}
__syncthreads();
CopiarEventosBloque(hilo, bloque,
eventos_bloque,
n_eventos_bloque,
eventos, ancho_eventos,
n_eventos);
}
} // namespace implementacion
} // namespace cuda
} // namespace clasificador_de_distribuciones |
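A minimal host-side launch sketch for the ProcesarArchivo kernel above may clarify how the grid is sized: each block consumes kHilosXBloque * kCaracteresXHilo bytes of the file, and each block writes one pitched row of Evento records. Everything below is an illustrative assumption — the constant values, the Evento layout, and the pitched allocation are guesses, not taken from the project's kernel_procesar_archivo.h.
#include <cuda_runtime.h>
// Illustrative values; the real ones live in kernel_procesar_archivo.h.
static const unsigned kHilosXBloque = 256;
static const unsigned kCaracteresXHilo = 64;
static const unsigned kLineasXHiloEst = 8;
struct Evento { int id_grupo_; int valor_; }; // assumed field layout
__global__ void ProcesarArchivo(char* archivo, size_t l_archivo, char separador,
Evento* eventos, size_t ancho_eventos,
size_t* n_eventos, short* estado,
int* posicion_error); // kernel defined above
void LanzarProcesarArchivo(char* d_archivo, size_t l_archivo)
{
// One block per kHilosXBloque * kCaracteresXHilo bytes of input.
size_t bytes_x_bloque = (size_t)kHilosXBloque * kCaracteresXHilo;
unsigned n_bloques = (unsigned)((l_archivo + bytes_x_bloque - 1) / bytes_x_bloque);
// One pitched row of Evento per block, matching the kernel's
// "(char*)eventos + bloque * ancho_eventos" indexing.
Evento* d_eventos; size_t ancho_eventos;
cudaMallocPitch((void**)&d_eventos, &ancho_eventos,
kHilosXBloque * kLineasXHiloEst * sizeof(Evento), n_bloques);
size_t* d_n_eventos; short* d_estado; int* d_pos_error;
cudaMalloc((void**)&d_n_eventos, n_bloques * sizeof(size_t));
cudaMalloc((void**)&d_estado, sizeof(short));
cudaMalloc((void**)&d_pos_error, sizeof(int));
// ';' is a guess for the separator; the kernel body shown above
// does not actually read this parameter.
ProcesarArchivo<<<n_bloques, kHilosXBloque>>>(
d_archivo, l_archivo, ';',
d_eventos, ancho_eventos,
d_n_eventos, d_estado, d_pos_error);
cudaDeviceSynchronize();
}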
27fbf916ff5c8466b147ed003fa576532bdfd53a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
__global__ void add_kernel(int *a, int *b, int *c) {
*c = *a + *b;
}
int main() {
// on Host
int a, b, c;
// device copies of a, b, c
int *d_a, *d_b, *d_c;
int size = sizeof(int);
// allocate memory on device
// use a pointer to the address to be populated
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// initialize a, b
a = 4;
b = 2;
// copy memory from host to device
hipMemcpy(d_a, &a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, &b, size, hipMemcpyHostToDevice);
// launch the kernel to compute
hipLaunchKernelGGL(( add_kernel), dim3(1), dim3(1), 0, 0, d_a, d_b, d_c);
// the result is still stored in device memory,
// so we have to copy it back to host memory
hipMemcpy(&c, d_c, size, hipMemcpyDeviceToHost);
printf("res: %d\n", c);
// all done, so free all the device memory we allocated
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
} | 27fbf916ff5c8466b147ed003fa576532bdfd53a.cu | #include <stdio.h>
__global__ void add_kernel(int *a, int *b, int *c) {
*c = *a + *b;
}
int main() {
// on Host
int a, b, c;
// device copies of a, b, c
int *d_a, *d_b, *d_c;
int size = sizeof(int);
// allocate memory on device
// use a pointer to the address to be populated
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// initialize a, b
a = 4;
b = 2;
// copy memory from host to device
cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
// launch the kernel to compute
add_kernel<<<1, 1>>>(d_a, d_b, d_c);
// the result is still stored in device memory,
// so we have to copy it back to host memory
cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
printf("res: %d\n", c);
// all done, so free all the device memory we allocated
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
} |
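The vector-add example above checks no return codes; below is a minimal hedged sketch of the same flow with a small error-checking macro (the CUDA_CHECK name is invented here, not taken from this file).
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Abort with a readable message when a runtime call fails.
#define CUDA_CHECK(call) \
do { \
cudaError_t err_ = (call); \
if (err_ != cudaSuccess) { \
fprintf(stderr, "CUDA error %s at %s:%d\n", \
cudaGetErrorString(err_), __FILE__, __LINE__); \
exit(EXIT_FAILURE); \
} \
} while (0)
// Applied to the calls from the example:
// CUDA_CHECK(cudaMalloc((void **)&d_a, size));
// CUDA_CHECK(cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice));
// add_kernel<<<1, 1>>>(d_a, d_b, d_c);
// CUDA_CHECK(cudaGetLastError()); // catches launch failures
// CUDA_CHECK(cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost));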
89f39b37b755908268e6b4a04ee836d96d8fd517.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* -----------------------------------------------------------------
* Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for a CUDA implementation
* of the NVECTOR package.
* -----------------------------------------------------------------*/
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <limits>
#include <nvector/nvector_cuda.h>
#include "VectorArrayKernels.cuh"
#include "VectorKernels.cuh"
#include "sundials_cuda.h"
#include "sundials_debug.h"
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
extern "C" {
using namespace sundials;
using namespace sundials::nvector_cuda;
/*
* Macro definitions
*/
#define NVEC_CUDA_CONTENT(x) ((N_VectorContent_Cuda)(x->content))
#define NVEC_CUDA_PRIVATE(x) ((N_PrivateVectorContent_Cuda)(NVEC_CUDA_CONTENT(x)->priv))
#define NVEC_CUDA_MEMSIZE(x) (NVEC_CUDA_CONTENT(x)->length * sizeof(realtype))
#define NVEC_CUDA_MEMHELP(x) (NVEC_CUDA_CONTENT(x)->mem_helper)
#define NVEC_CUDA_HDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->host_data->ptr)
#define NVEC_CUDA_DDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->device_data->ptr)
#define NVEC_CUDA_HBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_host->ptr)
#define NVEC_CUDA_DBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_dev->ptr)
#define NVEC_CUDA_STREAM(x) (NVEC_CUDA_CONTENT(x)->stream_exec_policy->stream())
/*
* Private structure definition
*/
struct _N_PrivateVectorContent_Cuda
{
booleantype use_managed_mem; /* indicates if the data pointers and buffer pointers are managed memory */
size_t reduce_buffer_allocated_bytes; /* current size of the reduction buffer */
SUNMemory reduce_buffer_dev; /* device buffer used for reductions */
SUNMemory reduce_buffer_host; /* host buffer used for reductions */
};
typedef struct _N_PrivateVectorContent_Cuda *N_PrivateVectorContent_Cuda;
/*
* Private function definitions
*/
static int AllocateData(N_Vector v);
static int InitializeReductionBuffer(N_Vector v, const realtype value);
static void FreeReductionBuffer(N_Vector v);
static int CopyReductionBufferFromDevice(N_Vector v, size_t n = 1);
static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block,
size_t& shMemSize, hipStream_t& stream, size_t n = 0);
static void PostKernelLaunch();
/*
* Private functions needed for N_VMakeWithManagedAllocator_Cuda
* backwards compatibility.
*/
/* DEPRECATION NOTICE: The 4 functions below can be removed once
N_VMakeWithManagedAllocator_Cuda (deprecated) is removed in the
next major release. The UserAllocHelper struct can also be removed. */
/* Struct that we use to pack up the user
provided alloc and free functions. */
typedef struct _UserAllocHelper
{
void* (*userallocfn)(size_t);
void (*userfreefn)(void*);
} UserAllocHelper;
static int UserAlloc(SUNMemoryHelper helper, SUNMemory* memptr,
size_t memsize, SUNMemoryType mem_type)
{
UserAllocHelper* ua = (UserAllocHelper*) helper->content;
SUNMemory mem = SUNMemoryNewEmpty();
mem->type = SUNMEMTYPE_UVM;
mem->ptr = ua->userallocfn(memsize);
mem->own = SUNTRUE;
if (mem->ptr == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in UserAlloc: user provided alloc failed\n");
free(mem);
return(-1);
}
*memptr = mem;
return(0);
}
static int UserDealloc(SUNMemoryHelper helper, SUNMemory mem)
{
UserAllocHelper* ua = (UserAllocHelper*) helper->content;
if (mem->own)
{
ua->userfreefn(mem->ptr);
mem->ptr = NULL;
}
free(mem);
return(0);
}
static SUNMemoryHelper HelperClone(SUNMemoryHelper helper)
{
UserAllocHelper* uaclone;
UserAllocHelper* ua = (UserAllocHelper*) helper->content;
SUNMemoryHelper hclone = SUNMemoryHelper_NewEmpty();
SUNMemoryHelper_CopyOps(helper, hclone);
uaclone = (UserAllocHelper*) malloc(sizeof(UserAllocHelper));
uaclone->userallocfn = ua->userallocfn;
uaclone->userfreefn = ua->userfreefn;
hclone->content = uaclone;
return(hclone);
}
static int HelperDestroy(SUNMemoryHelper helper)
{
free(helper->content);
helper->content = NULL;
free(helper->ops);
free(helper);
return(0);
}
N_Vector N_VNewEmpty_Cuda()
{
N_Vector v;
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
/* constructors, destructors, and utility operations */
v->ops->nvgetvectorid = N_VGetVectorID_Cuda;
v->ops->nvclone = N_VClone_Cuda;
v->ops->nvcloneempty = N_VCloneEmpty_Cuda;
v->ops->nvdestroy = N_VDestroy_Cuda;
v->ops->nvspace = N_VSpace_Cuda;
v->ops->nvgetlength = N_VGetLength_Cuda;
v->ops->nvgetarraypointer = N_VGetHostArrayPointer_Cuda;
v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_Cuda;
v->ops->nvsetarraypointer = N_VSetHostArrayPointer_Cuda;
/* standard vector operations */
v->ops->nvlinearsum = N_VLinearSum_Cuda;
v->ops->nvconst = N_VConst_Cuda;
v->ops->nvprod = N_VProd_Cuda;
v->ops->nvdiv = N_VDiv_Cuda;
v->ops->nvscale = N_VScale_Cuda;
v->ops->nvabs = N_VAbs_Cuda;
v->ops->nvinv = N_VInv_Cuda;
v->ops->nvaddconst = N_VAddConst_Cuda;
v->ops->nvdotprod = N_VDotProd_Cuda;
v->ops->nvmaxnorm = N_VMaxNorm_Cuda;
v->ops->nvmin = N_VMin_Cuda;
v->ops->nvl1norm = N_VL1Norm_Cuda;
v->ops->nvinvtest = N_VInvTest_Cuda;
v->ops->nvconstrmask = N_VConstrMask_Cuda;
v->ops->nvminquotient = N_VMinQuotient_Cuda;
v->ops->nvwrmsnormmask = N_VWrmsNormMask_Cuda;
v->ops->nvwrmsnorm = N_VWrmsNorm_Cuda;
v->ops->nvwl2norm = N_VWL2Norm_Cuda;
v->ops->nvcompare = N_VCompare_Cuda;
/* fused and vector array operations are disabled (NULL) by default */
/* local reduction operations */
v->ops->nvdotprodlocal = N_VDotProd_Cuda;
v->ops->nvmaxnormlocal = N_VMaxNorm_Cuda;
v->ops->nvminlocal = N_VMin_Cuda;
v->ops->nvl1normlocal = N_VL1Norm_Cuda;
v->ops->nvinvtestlocal = N_VInvTest_Cuda;
v->ops->nvconstrmasklocal = N_VConstrMask_Cuda;
v->ops->nvminquotientlocal = N_VMinQuotient_Cuda;
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Cuda;
v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Cuda;
/* XBraid interface operations */
v->ops->nvbufsize = N_VBufSize_Cuda;
v->ops->nvbufpack = N_VBufPack_Cuda;
v->ops->nvbufunpack = N_VBufUnpack_Cuda;
/* print operation for debugging */
v->ops->nvprint = N_VPrint_Cuda;
v->ops->nvprintfile = N_VPrintFile_Cuda;
/* Create content */
v->content = (N_VectorContent_Cuda) malloc(sizeof(_N_VectorContent_Cuda));
if (v->content == NULL)
{
N_VDestroy(v);
return(NULL);
}
NVEC_CUDA_CONTENT(v)->priv = malloc(sizeof(_N_PrivateVectorContent_Cuda));
if (NVEC_CUDA_CONTENT(v)->priv == NULL)
{
N_VDestroy(v);
return(NULL);
}
NVEC_CUDA_CONTENT(v)->length = 0;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = NULL;
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = NULL;
NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
return(v);
}
N_Vector N_VNew_Cuda(sunindextype length)
{
N_Vector v;
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VNewWithMemHelp_Cuda(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper)
{
N_Vector v;
if (helper == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper is NULL\n");
return(NULL);
}
if (!SUNMemoryHelper_ImplementsRequiredOps(helper))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper doesn't implement all required ops\n");
return(NULL);
}
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = helper;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = use_managed_mem;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VNewManaged_Cuda(sunindextype length)
{
N_Vector v;
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VMake_Cuda(sunindextype length, realtype *h_vdata, realtype *d_vdata)
{
N_Vector v;
if (h_vdata == NULL || d_vdata == NULL) return(NULL);
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(h_vdata, SUNMEMTYPE_HOST);
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap(d_vdata, SUNMEMTYPE_DEVICE);
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (NVEC_CUDA_CONTENT(v)->device_data == NULL ||
NVEC_CUDA_CONTENT(v)->host_data == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: SUNMemoryHelper_Wrap returned NULL\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VMakeManaged_Cuda(sunindextype length, realtype *vdata)
{
N_Vector v;
if (vdata == NULL) return(NULL);
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(vdata, SUNMEMTYPE_UVM);
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data);
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (NVEC_CUDA_CONTENT(v)->device_data == NULL ||
NVEC_CUDA_CONTENT(v)->host_data == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: SUNMemoryHelper_Wrap returned NULL\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VMakeWithManagedAllocator_Cuda(sunindextype length,
void* (*allocfn)(size_t),
void (*freefn)(void*))
{
UserAllocHelper* ua;
N_Vector v;
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeWithManagedAllocator_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
ua = (UserAllocHelper*) malloc(sizeof(UserAllocHelper));
ua->userallocfn = allocfn;
ua->userfreefn = freefn;
NVEC_CUDA_MEMHELP(v)->content = (void*) ua;
NVEC_CUDA_MEMHELP(v)->ops->alloc = UserAlloc;
NVEC_CUDA_MEMHELP(v)->ops->dealloc = UserDealloc;
NVEC_CUDA_MEMHELP(v)->ops->clone = HelperClone;
NVEC_CUDA_MEMHELP(v)->ops->destroy = HelperDestroy;
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeWithManagedAllocator_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
/* ----------------------------------------------------------------------------
* Set pointer to the raw host data. Does not free the existing pointer.
*/
void N_VSetHostArrayPointer_Cuda(realtype* h_vdata, N_Vector v)
{
if (N_VIsManagedMemory_Cuda(v))
{
if (NVEC_CUDA_CONTENT(v)->host_data)
{
NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata;
NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) h_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_UVM);
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data);
}
}
else
{
if (NVEC_CUDA_CONTENT(v)->host_data)
{
NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_HOST);
}
}
}
/* ----------------------------------------------------------------------------
* Set pointer to the raw device data
*/
void N_VSetDeviceArrayPointer_Cuda(realtype* d_vdata, N_Vector v)
{
if (N_VIsManagedMemory_Cuda(v))
{
if (NVEC_CUDA_CONTENT(v)->device_data)
{
NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata;
NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) d_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_UVM);
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->device_data);
}
}
else
{
if (NVEC_CUDA_CONTENT(v)->device_data)
{
NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_DEVICE);
}
}
}
/* ----------------------------------------------------------------------------
* Return a flag indicating if the memory for the vector data is managed
*/
booleantype N_VIsManagedMemory_Cuda(N_Vector x)
{
return NVEC_CUDA_PRIVATE(x)->use_managed_mem;
}
int N_VSetKernelExecPolicy_Cuda(N_Vector x,
SUNCudaExecPolicy* stream_exec_policy,
SUNCudaExecPolicy* reduce_exec_policy)
{
if (x == NULL || stream_exec_policy == NULL || reduce_exec_policy == NULL)
return(-1);
if (NVEC_CUDA_CONTENT(x)->own_exec)
{
delete NVEC_CUDA_CONTENT(x)->stream_exec_policy;
delete NVEC_CUDA_CONTENT(x)->reduce_exec_policy;
}
NVEC_CUDA_CONTENT(x)->stream_exec_policy = stream_exec_policy;
NVEC_CUDA_CONTENT(x)->reduce_exec_policy = reduce_exec_policy;
NVEC_CUDA_CONTENT(x)->own_exec = SUNFALSE;
return(0);
}
/*
* ----------------------------------------------------------------------------
* DEPRECATED: will be removed in SUNDIALS v6.
* Sets the hipStream_t to use for execution of the CUDA kernels.
*/
void N_VSetCudaStream_Cuda(N_Vector x, hipStream_t *stream)
{
const CudaExecPolicy* xs = NVEC_CUDA_CONTENT(x)->stream_exec_policy;
const CudaExecPolicy* xr = NVEC_CUDA_CONTENT(x)->reduce_exec_policy;
CudaThreadDirectExecPolicy* s =
new CudaThreadDirectExecPolicy(xs->blockSize(), *stream);
CudaBlockReduceExecPolicy* r =
new CudaBlockReduceExecPolicy(xr->blockSize(), xr->gridSize(), *stream);
N_VSetKernelExecPolicy_Cuda(x, s, r);
NVEC_CUDA_CONTENT(x)->own_exec = SUNTRUE;
}
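/* Illustrative sketch (not part of the original file): the non-deprecated
 * replacement is to construct both exec policies with the desired stream
 * and hand them to N_VSetKernelExecPolicy_Cuda. Unlike the function above,
 * the vector then does NOT take ownership (own_exec is set to SUNFALSE),
 * so the policies must outlive the vector and be deleted by the caller. */
static void ExampleSetStreamViaPolicies(N_Vector x, hipStream_t stream)
{
  /* 256 is an illustrative block size, matching the defaults used above */
  CudaThreadDirectExecPolicy* s = new CudaThreadDirectExecPolicy(256, stream);
  CudaBlockReduceExecPolicy* r = new CudaBlockReduceExecPolicy(256, 0, stream);
  N_VSetKernelExecPolicy_Cuda(x, s, r);
  /* ... use x ...; after N_VDestroy(x): delete s; delete r; */
}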
/* ----------------------------------------------------------------------------
* Copy vector data to the device
*/
void N_VCopyToDevice_Cuda(N_Vector x)
{
int copy_fail;
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
NVEC_CUDA_CONTENT(x)->device_data,
NVEC_CUDA_CONTENT(x)->host_data,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyToDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n");
}
/* we synchronize with respect to the host, but only in this stream */
SUNDIALS_CUDA_VERIFY(hipStreamSynchronize(*NVEC_CUDA_STREAM(x)));
}
/* ----------------------------------------------------------------------------
* Copy vector data from the device to the host
*/
void N_VCopyFromDevice_Cuda(N_Vector x)
{
int copy_fail;
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
NVEC_CUDA_CONTENT(x)->host_data,
NVEC_CUDA_CONTENT(x)->device_data,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyFromDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n");
}
/* we synchronize with respect to the host, but only in this stream */
SUNDIALS_CUDA_VERIFY(hipStreamSynchronize(*NVEC_CUDA_STREAM(x)));
}
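/* Illustrative round trip (not part of the original file): write on the
 * host, push to the device, compute there, and pull the result back. */
static void ExampleHostDeviceRoundTrip(N_Vector x)
{
  sunindextype i, n = N_VGetLength_Cuda(x);
  realtype* h = N_VGetHostArrayPointer_Cuda(x);
  for (i = 0; i < n; i++) h[i] = RCONST(1.0);
  N_VCopyToDevice_Cuda(x);            /* host -> device on x's stream */
  N_VScale_Cuda(RCONST(2.0), x, x);   /* kernel runs on the device */
  N_VCopyFromDevice_Cuda(x);          /* device -> host; h[i] is now 2.0 */
}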
/* ----------------------------------------------------------------------------
* Function to print a CUDA-based vector to stdout
*/
void N_VPrint_Cuda(N_Vector x)
{
N_VPrintFile_Cuda(x, stdout);
}
/* ----------------------------------------------------------------------------
* Function to print a CUDA-based vector to outfile
*/
void N_VPrintFile_Cuda(N_Vector x, FILE *outfile)
{
sunindextype i;
for (i = 0; i < NVEC_CUDA_CONTENT(x)->length; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
fprintf(outfile, "%35.32Lg\n", NVEC_CUDA_HDATAp(x)[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
fprintf(outfile, "%19.16g\n", NVEC_CUDA_HDATAp(x)[i]);
#else
fprintf(outfile, "%11.8g\n", NVEC_CUDA_HDATAp(x)[i]);
#endif
}
fprintf(outfile, "\n");
return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
N_Vector N_VCloneEmpty_Cuda(N_Vector w)
{
N_Vector v;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
/* Attach operations */
if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }
/* Set content */
NVEC_CUDA_CONTENT(v)->length = NVEC_CUDA_CONTENT(w)->length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = NULL;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = NVEC_CUDA_PRIVATE(w)->use_managed_mem;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
return(v);
}
N_Vector N_VClone_Cuda(N_Vector w)
{
N_Vector v;
v = NULL;
v = N_VCloneEmpty_Cuda(w);
if (v == NULL) return(NULL);
NVEC_CUDA_MEMHELP(v) = SUNMemoryHelper_Clone(NVEC_CUDA_MEMHELP(w));
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = NVEC_CUDA_CONTENT(w)->stream_exec_policy->clone();
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NVEC_CUDA_CONTENT(w)->reduce_exec_policy->clone();
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: SUNMemoryHelper_Clone returned NULL\n");
N_VDestroy(v);
return(NULL);
}
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
void N_VDestroy_Cuda(N_Vector v)
{
N_VectorContent_Cuda vc;
N_PrivateVectorContent_Cuda vcp;
if (v == NULL) return;
/* free ops structure */
if (v->ops != NULL)
{
free(v->ops);
v->ops = NULL;
}
/* extract content */
vc = NVEC_CUDA_CONTENT(v);
if (vc == NULL)
{
free(v);
v = NULL;
return;
}
/* free private content */
vcp = (N_PrivateVectorContent_Cuda) vc->priv;
if (vcp != NULL)
{
/* free items in private content */
FreeReductionBuffer(v);
free(vcp);
vc->priv = NULL;
}
/* free items in content */
if (vc->own_exec)
{
delete vc->stream_exec_policy;
vc->stream_exec_policy = NULL;
delete vc->reduce_exec_policy;
vc->reduce_exec_policy = NULL;
}
if (NVEC_CUDA_MEMHELP(v))
{
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vc->host_data);
vc->host_data = NULL;
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vc->device_data);
vc->device_data = NULL;
if (vc->own_helper) SUNMemoryHelper_Destroy(vc->mem_helper);
vc->mem_helper = NULL;
}
/* free content struct */
free(vc);
/* free vector */
free(v);
return;
}
void N_VSpace_Cuda(N_Vector X, sunindextype *lrw, sunindextype *liw)
{
*lrw = NVEC_CUDA_CONTENT(X)->length;
*liw = 2;
}
void N_VConst_Cuda(realtype a, N_Vector X)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( setConstKernel), dim3(grid), dim3(block), shMemSize, stream,
a,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VLinearSum_Cuda(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( linearSumKernel), dim3(grid), dim3(block), shMemSize, stream,
a,
NVEC_CUDA_DDATAp(X),
b,
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VProd_Cuda(N_Vector X, N_Vector Y, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( prodKernel), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VDiv_Cuda(N_Vector X, N_Vector Y, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( divKernel), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VScale_Cuda(realtype a, N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( scaleKernel), dim3(grid), dim3(block), shMemSize, stream,
a,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VAbs_Cuda(N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( absKernel), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VInv_Cuda(N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( invKernel), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VAddConst_Cuda(N_Vector X, realtype b, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( addConstKernel), dim3(grid), dim3(block), shMemSize, stream,
b,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
realtype N_VDotProd_Cuda(N_Vector X, N_Vector Y)
{
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( dotProdKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VMaxNorm_Cuda(N_Vector X)
{
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( maxNormKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWSqrSumLocal_Cuda(N_Vector X, N_Vector W)
{
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( wL2NormSquareKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(W),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWrmsNorm_Cuda(N_Vector X, N_Vector W)
{
const realtype sum = N_VWSqrSumLocal_Cuda(X, W);
return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length);
}
realtype N_VWSqrSumMaskLocal_Cuda(N_Vector X, N_Vector W, N_Vector Id)
{
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( wL2NormSquareMaskKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(W),
NVEC_CUDA_DDATAp(Id),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWrmsNormMask_Cuda(N_Vector X, N_Vector W, N_Vector Id)
{
const realtype sum = N_VWSqrSumMaskLocal_Cuda(X, W, Id);
return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length);
}
realtype N_VMin_Cuda(N_Vector X)
{
const realtype maxVal = std::numeric_limits<realtype>::max();
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, maxVal))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( findMinKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
maxVal,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWL2Norm_Cuda(N_Vector X, N_Vector W)
{
const realtype sum = N_VWSqrSumLocal_Cuda(X, W);
return std::sqrt(sum);
}
realtype N_VL1Norm_Cuda(N_Vector X)
{
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( L1NormKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
void N_VCompare_Cuda(realtype c, N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( compareKernel), dim3(grid), dim3(block), shMemSize, stream,
c,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
booleantype N_VInvTest_Cuda(N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( invTestKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return (gpu_result < HALF);
}
booleantype N_VConstrMask_Cuda(N_Vector C, N_Vector X, N_Vector M)
{
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( constrMaskKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
NVEC_CUDA_DDATAp(C),
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(M),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return (gpu_result < HALF);
}
realtype N_VMinQuotient_Cuda(N_Vector num, N_Vector denom)
{
// Starting value for min reduction
const realtype maxVal = std::numeric_limits<realtype>::max();
size_t grid, block, shMemSize;
hipStream_t stream;
if (InitializeReductionBuffer(num, maxVal))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(num, true, grid, block, shMemSize, stream);
hipLaunchKernelGGL(( minQuotientKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
maxVal,
NVEC_CUDA_DDATAp(num),
NVEC_CUDA_DDATAp(denom),
NVEC_CUDA_DBUFFERp(num),
NVEC_CUDA_CONTENT(num)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(num);
realtype gpu_result = NVEC_CUDA_HBUFFERp(num)[0];
return gpu_result;
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
int N_VLinearCombination_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector Z)
{
hipError_t err;
// Copy c array to device
realtype* d_c;
err = hipMalloc((void**) &d_c, nvec*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters and launch
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(X[0], false, grid, block, shMemSize, stream)) return(-1);
hipLaunchKernelGGL(( linearCombinationKernel), dim3(grid), dim3(block), shMemSize, stream,
nvec,
d_c,
d_Xd,
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(Z)->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
// Free device arrays
err = hipFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VScaleAddMulti_Cuda(int nvec, realtype* c, N_Vector X, N_Vector* Y,
N_Vector* Z)
{
hipError_t err;
// Copy c array to device
realtype* d_c;
err = hipMalloc((void**) &d_c, nvec*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Yd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Yd[i] = NVEC_CUDA_DDATAp(Y[i]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Yd;
err = hipMalloc((void**) &d_Yd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) return(-1);
hipLaunchKernelGGL(( scaleAddMultiKernel), dim3(grid), dim3(block), shMemSize, stream,
nvec,
d_c,
NVEC_CUDA_DDATAp(X),
d_Yd,
d_Zd,
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Free host array
delete[] h_Yd;
delete[] h_Zd;
// Free device arrays
err = hipFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VDotProdMulti_Cuda(int nvec, N_Vector X, N_Vector* Y, realtype* dots)
{
hipError_t err;
// Create array of device pointers on host
realtype** h_Yd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Yd[i] = NVEC_CUDA_DDATAp(Y[i]);
// Copy array of device pointers to device from host
realtype** d_Yd;
err = hipMalloc((void**) &d_Yd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) return(-1);
grid = nvec;
// Allocate reduction buffer on device
realtype* d_buff;
err = hipMalloc((void**) &d_buff, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemsetAsync(d_buff, 0, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
hipLaunchKernelGGL(( dotProdMultiKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
nvec,
NVEC_CUDA_DDATAp(X),
d_Yd,
d_buff,
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Copy the GPU result to the CPU.
err = hipMemcpy(dots, d_buff, grid*sizeof(realtype), hipMemcpyDeviceToHost);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Free host array
delete[] h_Yd;
// Free device arrays
err = hipFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_buff);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
/*
* -----------------------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------------------
*/
int N_VLinearSumVectorArray_Cuda(int nvec, realtype a, N_Vector* X, realtype b,
N_Vector* Y, N_Vector* Z)
{
hipError_t err;
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Yd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Yd[i] = NVEC_CUDA_DDATAp(Y[i]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Yd;
err = hipMalloc((void**) &d_Yd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
hipLaunchKernelGGL(( linearSumVectorArrayKernel), dim3(grid), dim3(block), shMemSize, stream,
nvec,
a,
d_Xd,
b,
d_Yd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
delete[] h_Yd;
delete[] h_Zd;
// Free device arrays
err = hipFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VScaleVectorArray_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
hipError_t err;
// Copy c array to device
realtype* d_c;
err = hipMalloc((void**) &d_c, nvec*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_c, c, nvec*sizeof(realtype), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
hipLaunchKernelGGL(( scaleVectorArrayKernel), dim3(grid), dim3(block), shMemSize, stream,
nvec,
d_c,
d_Xd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
delete[] h_Zd;
// Free device arrays
err = hipFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VConstVectorArray_Cuda(int nvec, realtype c, N_Vector* Z)
{
hipError_t err;
// Create array of device pointers on host
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Zd;
err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
hipLaunchKernelGGL(( constVectorArrayKernel), dim3(grid), dim3(block), shMemSize, stream,
nvec,
c,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Zd;
// Free device arrays
err = hipFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VWrmsNormVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W,
realtype* norms)
{
hipError_t err;
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Wd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Wd[i] = NVEC_CUDA_DDATAp(W[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Wd;
err = hipMalloc((void**) &d_Wd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Wd, h_Wd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(X[0], true, grid, block, shMemSize, stream)) return(-1);
grid = nvec;
// Allocate reduction buffer on device
realtype* d_buff;
err = hipMalloc((void**) &d_buff, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemsetAsync(d_buff, 0, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
hipLaunchKernelGGL(( wL2NormSquareVectorArrayKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
nvec,
d_Xd,
d_Wd,
d_buff,
NVEC_CUDA_CONTENT(X[0])->length
);
PostKernelLaunch();
// Copy the GPU result to the CPU.
err = hipMemcpy(norms, d_buff, grid*sizeof(realtype), hipMemcpyDeviceToHost);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Finish computation
for (int k=0; k<nvec; ++k)
norms[k] = std::sqrt(norms[k]/NVEC_CUDA_CONTENT(X[0])->length);
// Free host array
delete[] h_Xd;
delete[] h_Wd;
// Free device arrays
err = hipFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Wd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_buff);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VWrmsNormMaskVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W,
N_Vector id, realtype* norms)
{
hipError_t err;
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Wd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Wd[i] = NVEC_CUDA_DDATAp(W[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Wd;
err = hipMalloc((void**) &d_Wd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Wd, h_Wd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(X[0], true, grid, block, shMemSize, stream)) return(-1);
grid = nvec;
// Allocate reduction buffer on device
realtype* d_buff;
err = hipMalloc((void**) &d_buff, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemsetAsync(d_buff, 0, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
hipLaunchKernelGGL(( wL2NormSquareMaskVectorArrayKernel<realtype, sunindextype>), dim3(grid), dim3(block), shMemSize, stream,
nvec,
d_Xd,
d_Wd,
NVEC_CUDA_DDATAp(id),
d_buff,
NVEC_CUDA_CONTENT(X[0])->length
);
PostKernelLaunch();
// Copy the GPU result to the CPU.
err = hipMemcpy(norms, d_buff, grid*sizeof(realtype), hipMemcpyDeviceToHost);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Finish computation
for (int k=0; k<nvec; ++k)
norms[k] = std::sqrt(norms[k]/NVEC_CUDA_CONTENT(X[0])->length);
// Free host array
delete[] h_Xd;
delete[] h_Wd;
// Free device arrays
err = hipFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Wd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_buff);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VScaleAddMultiVectorArray_Cuda(int nvec, int nsum, realtype* c,
N_Vector* X, N_Vector** Y, N_Vector** Z)
{
hipError_t err;
// Copy c array to device
realtype* d_c;
err = hipMalloc((void**) &d_c, nsum*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_c, c, nsum*sizeof(realtype), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Yd = new realtype*[nsum*nvec];
for (int j=0; j<nvec; j++)
for (int i=0; i<nsum; i++)
h_Yd[j*nsum+i] = NVEC_CUDA_DDATAp(Y[i][j]);
realtype** h_Zd = new realtype*[nsum*nvec];
for (int j=0; j<nvec; j++)
for (int i=0; i<nsum; i++)
h_Zd[j*nsum+i] = NVEC_CUDA_DDATAp(Z[i][j]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = hipMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Yd;
err = hipMalloc((void**) &d_Yd, nsum*nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Yd, h_Yd, nsum*nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = hipMalloc((void**) &d_Zd, nsum*nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Zd, h_Zd, nsum*nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(Z[0][0], false, grid, block, shMemSize, stream)) return(-1);
hipLaunchKernelGGL(( scaleAddMultiVectorArrayKernel), dim3(grid), dim3(block), shMemSize, stream,
nvec,
nsum,
d_c,
d_Xd,
d_Yd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0][0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
delete[] h_Yd;
delete[] h_Zd;
// Free device arrays
err = hipFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VLinearCombinationVectorArray_Cuda(int nvec, int nsum, realtype* c,
N_Vector** X, N_Vector* Z)
{
hipError_t err;
// Copy c array to device
realtype* d_c;
err = hipMalloc((void**) &d_c, nsum*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_c, c, nsum*sizeof(realtype), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nsum*nvec];
for (int j=0; j<nvec; j++)
for (int i=0; i<nsum; i++)
h_Xd[j*nsum+i] = NVEC_CUDA_DDATAp(X[i][j]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = hipMalloc((void**) &d_Xd, nsum*nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Xd, h_Xd, nsum*nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = hipMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), hipMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
hipStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
hipLaunchKernelGGL(( linearCombinationVectorArrayKernel), dim3(grid), dim3(block), shMemSize, stream,
nvec,
nsum,
d_c,
d_Xd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
delete[] h_Zd;
// Free device arrays
err = hipFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = hipFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return hipGetLastError();
}
/*
* -----------------------------------------------------------------
* OPTIONAL XBraid interface operations
* -----------------------------------------------------------------
*/
int N_VBufSize_Cuda(N_Vector x, sunindextype *size)
{
if (x == NULL) return(-1);
*size = (sunindextype)NVEC_CUDA_MEMSIZE(x);
return(0);
}
int N_VBufPack_Cuda(N_Vector x, void *buf)
{
int copy_fail = 0;
hipError_t cuerr;
if (x == NULL || buf == NULL) return(-1);
SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST);
if (buf_mem == NULL) return(-1);
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
buf_mem,
NVEC_CUDA_CONTENT(x)->device_data,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
/* we synchronize with respect to the host, but only in this stream */
cuerr = hipStreamSynchronize(*NVEC_CUDA_STREAM(x));
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem);
return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0);
}
int N_VBufUnpack_Cuda(N_Vector x, void *buf)
{
int copy_fail = 0;
hipError_t cuerr;
if (x == NULL || buf == NULL) return(-1);
SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST);
if (buf_mem == NULL) return(-1);
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
NVEC_CUDA_CONTENT(x)->device_data,
buf_mem,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
/* we synchronize with respect to the host, but only in this stream */
cuerr = hipStreamSynchronize(*NVEC_CUDA_STREAM(x));
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem);
return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0);
}
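/* Illustrative sketch (not part of the original file): a minimal round trip
 * through the buffer operations above, packing x into a host buffer and
 * unpacking it into y. The helper name and the use of malloc/free are
 * assumptions for illustration only. */
#if 0
static int exampleBufRoundTrip(N_Vector x, N_Vector y)
{
  sunindextype bytes;
  void* buf;
  if (N_VBufSize_Cuda(x, &bytes)) return(-1);
  buf = malloc((size_t) bytes);
  if (buf == NULL) return(-1);
  if (N_VBufPack_Cuda(x, buf) || N_VBufUnpack_Cuda(y, buf))
  {
    free(buf);
    return(-1);
  }
  free(buf);
  return(0);
}
#endif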
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
int N_VEnableFusedOps_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
if (tf)
{
/* enable all fused vector operations */
v->ops->nvlinearcombination = N_VLinearCombination_Cuda;
v->ops->nvscaleaddmulti = N_VScaleAddMulti_Cuda;
v->ops->nvdotprodmulti = N_VDotProdMulti_Cuda;
/* enable all vector array operations */
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Cuda;
v->ops->nvscalevectorarray = N_VScaleVectorArray_Cuda;
v->ops->nvconstvectorarray = N_VConstVectorArray_Cuda;
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Cuda;
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Cuda;
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Cuda;
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Cuda;
}
else
{
/* disable all fused vector operations */
v->ops->nvlinearcombination = NULL;
v->ops->nvscaleaddmulti = NULL;
v->ops->nvdotprodmulti = NULL;
/* disable all vector array operations */
v->ops->nvlinearsumvectorarray = NULL;
v->ops->nvscalevectorarray = NULL;
v->ops->nvconstvectorarray = NULL;
v->ops->nvwrmsnormvectorarray = NULL;
v->ops->nvwrmsnormmaskvectorarray = NULL;
v->ops->nvscaleaddmultivectorarray = NULL;
v->ops->nvlinearcombinationvectorarray = NULL;
}
/* return success */
return(0);
}
int N_VEnableLinearCombination_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombination = N_VLinearCombination_Cuda;
else
v->ops->nvlinearcombination = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMulti_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmulti = N_VScaleAddMulti_Cuda;
else
v->ops->nvscaleaddmulti = NULL;
/* return success */
return(0);
}
int N_VEnableDotProdMulti_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvdotprodmulti = N_VDotProdMulti_Cuda;
else
v->ops->nvdotprodmulti = NULL;
/* return success */
return(0);
}
int N_VEnableLinearSumVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Cuda;
else
v->ops->nvlinearsumvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscalevectorarray = N_VScaleVectorArray_Cuda;
else
v->ops->nvscalevectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableConstVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvconstvectorarray = N_VConstVectorArray_Cuda;
else
v->ops->nvconstvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Cuda;
else
v->ops->nvwrmsnormvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormMaskVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Cuda;
else
v->ops->nvwrmsnormmaskvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMultiVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Cuda;
else
v->ops->nvscaleaddmultivectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableLinearCombinationVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Cuda;
else
v->ops->nvlinearcombinationvectorarray = NULL;
/* return success */
return(0);
}
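/* Illustrative sketch (not part of the original file): the fused and vector
 * array operations above are opt-in, so a typical caller enables them right
 * after creating a vector. N_VNew_Cuda is defined earlier in this file; the
 * length used here is arbitrary. */
#if 0
static void exampleEnableFusedOps(void)
{
  N_Vector v = N_VNew_Cuda(1000);
  if (v == NULL) return;
  if (N_VEnableFusedOps_Cuda(v, SUNTRUE))
  {
    N_VDestroy(v);
    return;
  }
  /* ... use the fused operations through the generic N_V* interface ... */
  N_VDestroy(v);
}
#endif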
/*
* Private helper functions.
*/
int AllocateData(N_Vector v)
{
int alloc_fail = 0;
N_VectorContent_Cuda vc = NVEC_CUDA_CONTENT(v);
N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v);
if (N_VGetLength_Cuda(v) == 0) return(0);
if (vcp->use_managed_mem)
{
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data),
NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_UVM);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed for SUNMEMTYPE_UVM\n");
}
vc->host_data = SUNMemoryHelper_Alias(vc->device_data);
}
else
{
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->host_data),
NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_HOST);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n");
}
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data),
NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n");
}
}
return(alloc_fail ? -1 : 0);
}
/*
* Initializes the internal buffer used for reductions.
* If the buffer is already allocated, it will only be reallocated
* if it is no longer large enough. This may occur if the length
* of the vector is increased. The buffer is initialized to the
* value given.
*/
int InitializeReductionBuffer(N_Vector v, const realtype value)
{
int alloc_fail = 0, copy_fail = 0;
size_t bytes = sizeof(realtype);
booleantype need_to_allocate = SUNFALSE;
N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v);
SUNMemory value_mem = SUNMemoryHelper_Wrap((void*) &value, SUNMEMTYPE_HOST);
/* we allocate if the existing reduction buffer is not large enough */
if (vcp->reduce_buffer_allocated_bytes < bytes)
{
FreeReductionBuffer(v);
need_to_allocate = SUNTRUE;
}
if (need_to_allocate)
{
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v),
&(vcp->reduce_buffer_host), bytes,
SUNMEMTYPE_PINNED);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("WARNING in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n");
/* try to allocate just plain host memory instead */
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v),
&(vcp->reduce_buffer_host), bytes,
SUNMEMTYPE_HOST);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n");
}
}
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v),
&(vcp->reduce_buffer_dev), bytes,
SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n");
}
}
if (!alloc_fail)
{
/* store the size of the buffer */
vcp->reduce_buffer_allocated_bytes = bytes;
/* initialize the memory with the value */
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v),
vcp->reduce_buffer_dev, value_mem,
bytes, (void*) NVEC_CUDA_STREAM(v));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_CopyAsync failed\n");
}
}
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), value_mem);
return((alloc_fail || copy_fail) ? -1 : 0);
}
/* Free the reduction buffer */
void FreeReductionBuffer(N_Vector v)
{
N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v);
if (vcp == NULL) return;
if (vcp->reduce_buffer_dev != NULL)
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_dev);
vcp->reduce_buffer_dev = NULL;
if (vcp->reduce_buffer_host != NULL)
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_host);
vcp->reduce_buffer_host = NULL;
}
/* Copy the reduction buffer from the device to the host. */
int CopyReductionBufferFromDevice(N_Vector v, size_t n)
{
int copy_fail;
hipError_t cuerr;
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v),
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host,
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev,
n*sizeof(realtype),
(void*) NVEC_CUDA_STREAM(v));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in CopyReductionBufferFromDevice: SUNMemoryHelper_CopyAsync returned nonzero\n");
}
/* we synchronize with respect to the host, but only in this stream */
cuerr = hipStreamSynchronize(*NVEC_CUDA_STREAM(v));
return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0);
}
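/* Note: the helpers above are used together in a fixed pattern by the
 * reduction operations in this file (e.g. N_VDotProd_Cuda):
 *   InitializeReductionBuffer(v, identity);  // device buffer := identity
 *   <launch a reduction kernel that writes into NVEC_CUDA_DBUFFERp(v)>
 *   CopyReductionBufferFromDevice(v);        // async copy + stream sync
 *   result = NVEC_CUDA_HBUFFERp(v)[0];       // read the result on the host
 */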
/* Get the kernel launch parameters based on the kernel type (reduction or not),
* using the appropriate kernel execution policy.
*/
static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid,
size_t& block, size_t& shMemSize,
hipStream_t& stream, size_t n)
{
n = (n == 0) ? NVEC_CUDA_CONTENT(v)->length : n;
if (reduction)
{
SUNCudaExecPolicy* reduce_exec_policy = NVEC_CUDA_CONTENT(v)->reduce_exec_policy;
grid = reduce_exec_policy->gridSize(n);
block = reduce_exec_policy->blockSize();
shMemSize = 0;
stream = *(reduce_exec_policy->stream());
if (block % CUDA_WARP_SIZE)
{
#ifdef SUNDIALS_DEBUG
throw std::runtime_error("the block size must be a multiple of the CUDA warp size");
#endif
return(-1);
}
}
else
{
SUNCudaExecPolicy* stream_exec_policy = NVEC_CUDA_CONTENT(v)->stream_exec_policy;
grid = stream_exec_policy->gridSize(n);
block = stream_exec_policy->blockSize();
shMemSize = 0;
stream = *(stream_exec_policy->stream());
}
if (grid == 0)
{
#ifdef SUNDIALS_DEBUG
throw std::runtime_error("the grid size must be > 0");
#endif
return(-1);
}
if (block == 0)
{
#ifdef SUNDIALS_DEBUG
throw std::runtime_error("the block size must be > 0");
#endif
return(-1);
}
return(0);
}
/* Should be called after a kernel launch.
* If SUNDIALS_DEBUG_CUDA_LASTERROR is not defined, then the function does nothing.
* If it is defined, the function will synchronize and check the last CUDA error.
*/
void PostKernelLaunch()
{
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
hipDeviceSynchronize();
SUNDIALS_CUDA_VERIFY(hipGetLastError());
#endif
}
} // extern "C"
| 89f39b37b755908268e6b4a04ee836d96d8fd517.cu | /* -----------------------------------------------------------------
* Programmer(s): Slaven Peles, and Cody J. Balos @ LLNL
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* This is the implementation file for a CUDA implementation
* of the NVECTOR package.
* -----------------------------------------------------------------*/
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <limits>
#include <nvector/nvector_cuda.h>
#include "VectorArrayKernels.cuh"
#include "VectorKernels.cuh"
#include "sundials_cuda.h"
#include "sundials_debug.h"
#define ZERO RCONST(0.0)
#define HALF RCONST(0.5)
extern "C" {
using namespace sundials;
using namespace sundials::nvector_cuda;
/*
* Macro definitions
*/
#define NVEC_CUDA_CONTENT(x) ((N_VectorContent_Cuda)(x->content))
#define NVEC_CUDA_PRIVATE(x) ((N_PrivateVectorContent_Cuda)(NVEC_CUDA_CONTENT(x)->priv))
#define NVEC_CUDA_MEMSIZE(x) (NVEC_CUDA_CONTENT(x)->length * sizeof(realtype))
#define NVEC_CUDA_MEMHELP(x) (NVEC_CUDA_CONTENT(x)->mem_helper)
#define NVEC_CUDA_HDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->host_data->ptr)
#define NVEC_CUDA_DDATAp(x) ((realtype*) NVEC_CUDA_CONTENT(x)->device_data->ptr)
#define NVEC_CUDA_HBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_host->ptr)
#define NVEC_CUDA_DBUFFERp(x) ((realtype*) NVEC_CUDA_PRIVATE(x)->reduce_buffer_dev->ptr)
#define NVEC_CUDA_STREAM(x) (NVEC_CUDA_CONTENT(x)->stream_exec_policy->stream())
/*
* Private structure definition
*/
struct _N_PrivateVectorContent_Cuda
{
booleantype use_managed_mem; /* indicates if the data pointers and buffer pointers are managed memory */
size_t reduce_buffer_allocated_bytes; /* current size of the reduction buffer */
SUNMemory reduce_buffer_dev; /* device buffer used for reductions */
SUNMemory reduce_buffer_host; /* host buffer used for reductions */
};
typedef struct _N_PrivateVectorContent_Cuda *N_PrivateVectorContent_Cuda;
/*
* Private function definitions
*/
static int AllocateData(N_Vector v);
static int InitializeReductionBuffer(N_Vector v, const realtype value);
static void FreeReductionBuffer(N_Vector v);
static int CopyReductionBufferFromDevice(N_Vector v, size_t n = 1);
static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid, size_t& block,
size_t& shMemSize, cudaStream_t& stream, size_t n = 0);
static void PostKernelLaunch();
/*
* Private functions needed for N_VMakeWithManagedAllocator_Cuda
* backwards compatibility.
*/
/* DEPRECATION NOTICE: The 4 functions below can be removed once
N_VMakeWithManagedAllocator_Cuda (deprecated) is removed in the
next major release. The UserAllocHelper struct can also be removed. */
/* Struct that we use to pack up the user
provided alloc and free functions. */
typedef struct _UserAllocHelper
{
void* (*userallocfn)(size_t);
void (*userfreefn)(void*);
} UserAllocHelper;
static int UserAlloc(SUNMemoryHelper helper, SUNMemory* memptr,
size_t memsize, SUNMemoryType mem_type)
{
UserAllocHelper* ua = (UserAllocHelper*) helper->content;
SUNMemory mem = SUNMemoryNewEmpty();
mem->type = SUNMEMTYPE_UVM;
mem->ptr = ua->userallocfn(memsize);
mem->own = SUNTRUE;
if (mem->ptr == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in UserAlloc: user provided alloc failed\n");
free(mem);
return(-1);
}
*memptr = mem;
return(0);
}
static int UserDealloc(SUNMemoryHelper helper, SUNMemory mem)
{
UserAllocHelper* ua = (UserAllocHelper*) helper->content;
if (mem->own)
{
ua->userfreefn(mem->ptr);
mem->ptr = NULL;
}
free(mem);
return(0);
}
static SUNMemoryHelper HelperClone(SUNMemoryHelper helper)
{
UserAllocHelper* uaclone;
UserAllocHelper* ua = (UserAllocHelper*) helper->content;
SUNMemoryHelper hclone = SUNMemoryHelper_NewEmpty();
SUNMemoryHelper_CopyOps(helper, hclone);
uaclone = (UserAllocHelper*) malloc(sizeof(UserAllocHelper));
uaclone->userallocfn = ua->userallocfn;
uaclone->userfreefn = ua->userfreefn;
hclone->content = uaclone;
return(hclone);
}
static int HelperDestroy(SUNMemoryHelper helper)
{
free(helper->content);
helper->content = NULL;
free(helper->ops);
free(helper);
return(0);
}
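/* Illustrative sketch (not part of the original file): user-supplied managed
 * allocation routines of the shape consumed by N_VMakeWithManagedAllocator_Cuda
 * below. The function names are hypothetical. */
#if 0
static void* exampleManagedAlloc(size_t bytes)
{
  void* ptr = NULL;
  if (cudaMallocManaged(&ptr, bytes) != cudaSuccess) return(NULL);
  return(ptr);
}
static void exampleManagedFree(void* ptr)
{
  (void) cudaFree(ptr);
}
/* usage:
   N_Vector v = N_VMakeWithManagedAllocator_Cuda(length, exampleManagedAlloc,
                                                 exampleManagedFree);        */
#endif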
N_Vector N_VNewEmpty_Cuda()
{
N_Vector v;
/* Create vector */
v = NULL;
v = N_VNewEmpty();
if (v == NULL) return(NULL);
/* Attach operations */
/* constructors, destructors, and utility operations */
v->ops->nvgetvectorid = N_VGetVectorID_Cuda;
v->ops->nvclone = N_VClone_Cuda;
v->ops->nvcloneempty = N_VCloneEmpty_Cuda;
v->ops->nvdestroy = N_VDestroy_Cuda;
v->ops->nvspace = N_VSpace_Cuda;
v->ops->nvgetlength = N_VGetLength_Cuda;
v->ops->nvgetarraypointer = N_VGetHostArrayPointer_Cuda;
v->ops->nvgetdevicearraypointer = N_VGetDeviceArrayPointer_Cuda;
v->ops->nvsetarraypointer = N_VSetHostArrayPointer_Cuda;
/* standard vector operations */
v->ops->nvlinearsum = N_VLinearSum_Cuda;
v->ops->nvconst = N_VConst_Cuda;
v->ops->nvprod = N_VProd_Cuda;
v->ops->nvdiv = N_VDiv_Cuda;
v->ops->nvscale = N_VScale_Cuda;
v->ops->nvabs = N_VAbs_Cuda;
v->ops->nvinv = N_VInv_Cuda;
v->ops->nvaddconst = N_VAddConst_Cuda;
v->ops->nvdotprod = N_VDotProd_Cuda;
v->ops->nvmaxnorm = N_VMaxNorm_Cuda;
v->ops->nvmin = N_VMin_Cuda;
v->ops->nvl1norm = N_VL1Norm_Cuda;
v->ops->nvinvtest = N_VInvTest_Cuda;
v->ops->nvconstrmask = N_VConstrMask_Cuda;
v->ops->nvminquotient = N_VMinQuotient_Cuda;
v->ops->nvwrmsnormmask = N_VWrmsNormMask_Cuda;
v->ops->nvwrmsnorm = N_VWrmsNorm_Cuda;
v->ops->nvwl2norm = N_VWL2Norm_Cuda;
v->ops->nvcompare = N_VCompare_Cuda;
/* fused and vector array operations are disabled (NULL) by default */
/* local reduction operations */
v->ops->nvdotprodlocal = N_VDotProd_Cuda;
v->ops->nvmaxnormlocal = N_VMaxNorm_Cuda;
v->ops->nvminlocal = N_VMin_Cuda;
v->ops->nvl1normlocal = N_VL1Norm_Cuda;
v->ops->nvinvtestlocal = N_VInvTest_Cuda;
v->ops->nvconstrmasklocal = N_VConstrMask_Cuda;
v->ops->nvminquotientlocal = N_VMinQuotient_Cuda;
v->ops->nvwsqrsumlocal = N_VWSqrSumLocal_Cuda;
v->ops->nvwsqrsummasklocal = N_VWSqrSumMaskLocal_Cuda;
/* XBraid interface operations */
v->ops->nvbufsize = N_VBufSize_Cuda;
v->ops->nvbufpack = N_VBufPack_Cuda;
v->ops->nvbufunpack = N_VBufUnpack_Cuda;
/* print operation for debugging */
v->ops->nvprint = N_VPrint_Cuda;
v->ops->nvprintfile = N_VPrintFile_Cuda;
/* Create content */
v->content = (N_VectorContent_Cuda) malloc(sizeof(_N_VectorContent_Cuda));
if (v->content == NULL)
{
N_VDestroy(v);
return(NULL);
}
NVEC_CUDA_CONTENT(v)->priv = malloc(sizeof(_N_PrivateVectorContent_Cuda));
if (NVEC_CUDA_CONTENT(v)->priv == NULL)
{
N_VDestroy(v);
return(NULL);
}
NVEC_CUDA_CONTENT(v)->length = 0;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = NULL;
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = NULL;
NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
return(v);
}
N_Vector N_VNew_Cuda(sunindextype length)
{
N_Vector v;
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNew_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
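/* Illustrative sketch (not part of the original file): typical lifecycle of
 * an unmanaged vector created with N_VNew_Cuda. The length is arbitrary. */
#if 0
static void exampleVectorLifecycle(void)
{
  sunindextype i;
  N_Vector v = N_VNew_Cuda(100);
  if (v == NULL) return;
  realtype* h = N_VGetHostArrayPointer_Cuda(v);
  for (i = 0; i < 100; i++) h[i] = RCONST(1.0);
  N_VCopyToDevice_Cuda(v);       /* host -> device */
  N_VConst_Cuda(RCONST(2.0), v); /* kernels operate on the device data */
  N_VCopyFromDevice_Cuda(v);     /* device -> host */
  N_VDestroy(v);
}
#endif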
N_Vector N_VNewWithMemHelp_Cuda(sunindextype length, booleantype use_managed_mem, SUNMemoryHelper helper)
{
N_Vector v;
if (helper == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper is NULL\n");
return(NULL);
}
if (!SUNMemoryHelper_ImplementsRequiredOps(helper))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: helper doesn't implement all required ops\n");
return(NULL);
}
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = helper;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->own_helper = SUNFALSE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = use_managed_mem;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewWithMemHelp_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VNewManaged_Cuda(sunindextype length)
{
N_Vector v;
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VNewManaged_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VMake_Cuda(sunindextype length, realtype *h_vdata, realtype *d_vdata)
{
N_Vector v;
if (h_vdata == NULL || d_vdata == NULL) return(NULL);
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(h_vdata, SUNMEMTYPE_HOST);
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap(d_vdata, SUNMEMTYPE_DEVICE);
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNFALSE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (NVEC_CUDA_CONTENT(v)->device_data == NULL ||
NVEC_CUDA_CONTENT(v)->host_data == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMake_Cuda: SUNMemoryHelper_Wrap returned NULL\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
N_Vector N_VMakeManaged_Cuda(sunindextype length, realtype *vdata)
{
N_Vector v;
if (vdata == NULL) return(NULL);
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap(vdata, SUNMEMTYPE_UVM);
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data);
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
if (NVEC_CUDA_CONTENT(v)->device_data == NULL ||
NVEC_CUDA_CONTENT(v)->host_data == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeManaged_Cuda: SUNMemoryHelper_Wrap returned NULL\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
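/* Illustrative sketch (not part of the original file): wrapping caller-owned
 * managed (UVM) memory. The vector does not take ownership, so the caller
 * must free vdata with cudaFree after destroying the vector. */
#if 0
static N_Vector exampleWrapManaged(sunindextype n)
{
  realtype* vdata = NULL;
  if (cudaMallocManaged((void**) &vdata, n*sizeof(realtype)) != cudaSuccess)
    return(NULL);
  return(N_VMakeManaged_Cuda(n, vdata));
}
#endif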
N_Vector N_VMakeWithManagedAllocator_Cuda(sunindextype length,
void* (*allocfn)(size_t),
void (*freefn)(void*))
{
UserAllocHelper* ua;
N_Vector v;
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
NVEC_CUDA_CONTENT(v)->length = length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = new CudaThreadDirectExecPolicy(256);
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = new CudaBlockReduceExecPolicy(256);
NVEC_CUDA_CONTENT(v)->mem_helper = SUNMemoryHelper_Cuda();
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeWithManagedAllocator_Cuda: memory helper is NULL\n");
N_VDestroy(v);
return(NULL);
}
ua = (UserAllocHelper*) malloc(sizeof(UserAllocHelper));
ua->userallocfn = allocfn;
ua->userfreefn = freefn;
NVEC_CUDA_MEMHELP(v)->content = (void*) ua;
NVEC_CUDA_MEMHELP(v)->ops->alloc = UserAlloc;
NVEC_CUDA_MEMHELP(v)->ops->dealloc = UserDealloc;
NVEC_CUDA_MEMHELP(v)->ops->clone = HelperClone;
NVEC_CUDA_MEMHELP(v)->ops->destroy = HelperDestroy;
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMakeWithManagedAllocator_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
/* ----------------------------------------------------------------------------
* Set pointer to the raw host data. Does not free the existing pointer.
*/
void N_VSetHostArrayPointer_Cuda(realtype* h_vdata, N_Vector v)
{
if (N_VIsManagedMemory_Cuda(v))
{
if (NVEC_CUDA_CONTENT(v)->host_data)
{
NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata;
NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) h_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_UVM);
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->host_data);
}
}
else
{
if (NVEC_CUDA_CONTENT(v)->host_data)
{
NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) h_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Wrap((void*) h_vdata, SUNMEMTYPE_HOST);
}
}
}
/* ----------------------------------------------------------------------------
* Set pointer to the raw device data
*/
void N_VSetDeviceArrayPointer_Cuda(realtype* d_vdata, N_Vector v)
{
if (N_VIsManagedMemory_Cuda(v))
{
if (NVEC_CUDA_CONTENT(v)->device_data)
{
NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata;
NVEC_CUDA_CONTENT(v)->host_data->ptr = (void*) d_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_UVM);
NVEC_CUDA_CONTENT(v)->host_data = SUNMemoryHelper_Alias(NVEC_CUDA_CONTENT(v)->device_data);
}
}
else
{
if (NVEC_CUDA_CONTENT(v)->device_data)
{
NVEC_CUDA_CONTENT(v)->device_data->ptr = (void*) d_vdata;
}
else
{
NVEC_CUDA_CONTENT(v)->device_data = SUNMemoryHelper_Wrap((void*) d_vdata, SUNMEMTYPE_DEVICE);
}
}
}
/* ----------------------------------------------------------------------------
* Return a flag indicating if the memory for the vector data is managed
*/
booleantype N_VIsManagedMemory_Cuda(N_Vector x)
{
return NVEC_CUDA_PRIVATE(x)->use_managed_mem;
}
int N_VSetKernelExecPolicy_Cuda(N_Vector x,
SUNCudaExecPolicy* stream_exec_policy,
SUNCudaExecPolicy* reduce_exec_policy)
{
if (x == NULL || stream_exec_policy == NULL || reduce_exec_policy == NULL)
return(-1);
if (NVEC_CUDA_CONTENT(x)->own_exec)
{
delete NVEC_CUDA_CONTENT(x)->stream_exec_policy;
delete NVEC_CUDA_CONTENT(x)->reduce_exec_policy;
}
NVEC_CUDA_CONTENT(x)->stream_exec_policy = stream_exec_policy;
NVEC_CUDA_CONTENT(x)->reduce_exec_policy = reduce_exec_policy;
NVEC_CUDA_CONTENT(x)->own_exec = SUNFALSE;
return(0);
}
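/* Illustrative sketch (not part of the original file): attaching custom
 * execution policies that put all kernels for v on a user-created stream.
 * The block size of 512 is an arbitrary choice, and the grid size of 0 is
 * assumed to request the policy's default; the vector does not take
 * ownership, so the policy objects must outlive it. */
#if 0
static int exampleSetPolicies(N_Vector v, cudaStream_t stream)
{
  static CudaThreadDirectExecPolicy stream_policy(512, stream);
  static CudaBlockReduceExecPolicy reduce_policy(512, 0, stream);
  return(N_VSetKernelExecPolicy_Cuda(v, &stream_policy, &reduce_policy));
}
#endif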
/*
* ----------------------------------------------------------------------------
* DEPRECATED: will be removed in SUNDIALS v6.
* Sets the cudaStream_t to use for execution of the CUDA kernels.
*/
void N_VSetCudaStream_Cuda(N_Vector x, cudaStream_t *stream)
{
const CudaExecPolicy* xs = NVEC_CUDA_CONTENT(x)->stream_exec_policy;
const CudaExecPolicy* xr = NVEC_CUDA_CONTENT(x)->reduce_exec_policy;
CudaThreadDirectExecPolicy* s =
new CudaThreadDirectExecPolicy(xs->blockSize(), *stream);
CudaBlockReduceExecPolicy* r =
new CudaBlockReduceExecPolicy(xr->blockSize(), xr->gridSize(), *stream);
N_VSetKernelExecPolicy_Cuda(x, s, r);
NVEC_CUDA_CONTENT(x)->own_exec = SUNTRUE;
}
/* ----------------------------------------------------------------------------
* Copy vector data to the device
*/
void N_VCopyToDevice_Cuda(N_Vector x)
{
int copy_fail;
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
NVEC_CUDA_CONTENT(x)->device_data,
NVEC_CUDA_CONTENT(x)->host_data,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyToDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n");
}
/* we synchronize with respect to the host, but only in this stream */
SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(x)));
}
/* ----------------------------------------------------------------------------
* Copy vector data from the device to the host
*/
void N_VCopyFromDevice_Cuda(N_Vector x)
{
int copy_fail;
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
NVEC_CUDA_CONTENT(x)->host_data,
NVEC_CUDA_CONTENT(x)->device_data,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VCopyFromDevice_Cuda: SUNMemoryHelper_CopyAsync returned nonzero\n");
}
/* we synchronize with respect to the host, but only in this stream */
SUNDIALS_CUDA_VERIFY(cudaStreamSynchronize(*NVEC_CUDA_STREAM(x)));
}
/* ----------------------------------------------------------------------------
 * Function to print a CUDA-based vector to stdout
*/
void N_VPrint_Cuda(N_Vector x)
{
N_VPrintFile_Cuda(x, stdout);
}
/* ----------------------------------------------------------------------------
 * Function to print a CUDA-based vector to outfile
*/
void N_VPrintFile_Cuda(N_Vector x, FILE *outfile)
{
sunindextype i;
for (i = 0; i < NVEC_CUDA_CONTENT(x)->length; i++) {
#if defined(SUNDIALS_EXTENDED_PRECISION)
fprintf(outfile, "%35.32Lg\n", NVEC_CUDA_HDATAp(x)[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
fprintf(outfile, "%19.16g\n", NVEC_CUDA_HDATAp(x)[i]);
#else
fprintf(outfile, "%11.8g\n", NVEC_CUDA_HDATAp(x)[i]);
#endif
}
fprintf(outfile, "\n");
return;
}
/*
* -----------------------------------------------------------------
* implementation of vector operations
* -----------------------------------------------------------------
*/
N_Vector N_VCloneEmpty_Cuda(N_Vector w)
{
N_Vector v;
if (w == NULL) return(NULL);
/* Create vector */
v = NULL;
v = N_VNewEmpty_Cuda();
if (v == NULL) return(NULL);
/* Attach operations */
if (N_VCopyOps(w, v)) { N_VDestroy(v); return(NULL); }
/* Set content */
NVEC_CUDA_CONTENT(v)->length = NVEC_CUDA_CONTENT(w)->length;
NVEC_CUDA_CONTENT(v)->host_data = NULL;
NVEC_CUDA_CONTENT(v)->device_data = NULL;
NVEC_CUDA_CONTENT(v)->mem_helper = NULL;
NVEC_CUDA_CONTENT(v)->own_exec = SUNTRUE;
NVEC_CUDA_PRIVATE(v)->use_managed_mem = NVEC_CUDA_PRIVATE(w)->use_managed_mem;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host = NULL;
NVEC_CUDA_PRIVATE(v)->reduce_buffer_allocated_bytes = 0;
return(v);
}
N_Vector N_VClone_Cuda(N_Vector w)
{
N_Vector v;
v = NULL;
v = N_VCloneEmpty_Cuda(w);
if (v == NULL) return(NULL);
NVEC_CUDA_MEMHELP(v) = SUNMemoryHelper_Clone(NVEC_CUDA_MEMHELP(w));
NVEC_CUDA_CONTENT(v)->own_helper = SUNTRUE;
NVEC_CUDA_CONTENT(v)->stream_exec_policy = NVEC_CUDA_CONTENT(w)->stream_exec_policy->clone();
NVEC_CUDA_CONTENT(v)->reduce_exec_policy = NVEC_CUDA_CONTENT(w)->reduce_exec_policy->clone();
if (NVEC_CUDA_MEMHELP(v) == NULL)
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: SUNMemoryHelper_Clone returned NULL\n");
N_VDestroy(v);
return(NULL);
}
if (AllocateData(v))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VClone_Cuda: AllocateData returned nonzero\n");
N_VDestroy(v);
return(NULL);
}
return(v);
}
void N_VDestroy_Cuda(N_Vector v)
{
N_VectorContent_Cuda vc;
N_PrivateVectorContent_Cuda vcp;
if (v == NULL) return;
/* free ops structure */
if (v->ops != NULL)
{
free(v->ops);
v->ops = NULL;
}
/* extract content */
vc = NVEC_CUDA_CONTENT(v);
if (vc == NULL)
{
free(v);
v = NULL;
return;
}
/* free private content */
vcp = (N_PrivateVectorContent_Cuda) vc->priv;
if (vcp != NULL)
{
/* free items in private content */
FreeReductionBuffer(v);
free(vcp);
vc->priv = NULL;
}
/* free items in content */
if (vc->own_exec)
{
delete vc->stream_exec_policy;
vc->stream_exec_policy = NULL;
delete vc->reduce_exec_policy;
vc->reduce_exec_policy = NULL;
}
if (NVEC_CUDA_MEMHELP(v))
{
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vc->host_data);
vc->host_data = NULL;
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vc->device_data);
vc->device_data = NULL;
if (vc->own_helper) SUNMemoryHelper_Destroy(vc->mem_helper);
vc->mem_helper = NULL;
}
/* free content struct */
free(vc);
/* free vector */
free(v);
return;
}
void N_VSpace_Cuda(N_Vector X, sunindextype *lrw, sunindextype *liw)
{
*lrw = NVEC_CUDA_CONTENT(X)->length;
*liw = 2;
}
void N_VConst_Cuda(realtype a, N_Vector X)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
setConstKernel<<<grid, block, shMemSize, stream>>>
(
a,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VLinearSum_Cuda(realtype a, N_Vector X, realtype b, N_Vector Y, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
linearSumKernel<<<grid, block, shMemSize, stream>>>
(
a,
NVEC_CUDA_DDATAp(X),
b,
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VProd_Cuda(N_Vector X, N_Vector Y, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
prodKernel<<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VDiv_Cuda(N_Vector X, N_Vector Y, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
divKernel<<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VScale_Cuda(realtype a, N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
scaleKernel<<<grid, block, shMemSize, stream>>>
(
a,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VAbs_Cuda(N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
absKernel<<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VInv_Cuda(N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
invKernel<<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
void N_VAddConst_Cuda(N_Vector X, realtype b, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
addConstKernel<<<grid, block, shMemSize, stream>>>
(
b,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
realtype N_VDotProd_Cuda(N_Vector X, N_Vector Y)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VDotProd_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
dotProdKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Y),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VMaxNorm_Cuda(N_Vector X)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMaxNorm_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
maxNormKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWSqrSumLocal_Cuda(N_Vector X, N_Vector W)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumLocal_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
wL2NormSquareKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(W),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWrmsNorm_Cuda(N_Vector X, N_Vector W)
{
const realtype sum = N_VWSqrSumLocal_Cuda(X, W);
return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length);
}
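/* Note: for a vector of length n, N_VWrmsNorm_Cuda returns
 *   sqrt( (1/n) * sum_{i=0}^{n-1} (x_i * w_i)^2 ),
 * i.e. the square root of the mean of the weighted squares accumulated by
 * N_VWSqrSumLocal_Cuda above. */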
realtype N_VWSqrSumMaskLocal_Cuda(N_Vector X, N_Vector W, N_Vector Id)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VWSqrSumMaskLocal_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
wL2NormSquareMaskKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(W),
NVEC_CUDA_DDATAp(Id),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWrmsNormMask_Cuda(N_Vector X, N_Vector W, N_Vector Id)
{
const realtype sum = N_VWSqrSumMaskLocal_Cuda(X, W, Id);
return std::sqrt(sum/NVEC_CUDA_CONTENT(X)->length);
}
realtype N_VMin_Cuda(N_Vector X)
{
const realtype maxVal = std::numeric_limits<realtype>::max();
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, maxVal))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMin_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
findMinKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
maxVal,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
realtype N_VWL2Norm_Cuda(N_Vector X, N_Vector W)
{
const realtype sum = N_VWSqrSumLocal_Cuda(X, W);
return std::sqrt(sum);
}
realtype N_VL1Norm_Cuda(N_Vector X)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VL1Norm_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
L1NormKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return gpu_result;
}
void N_VCompare_Cuda(realtype c, N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
GetKernelParameters(X, false, grid, block, shMemSize, stream);
compareKernel<<<grid, block, shMemSize, stream>>>
(
c,
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
}
booleantype N_VInvTest_Cuda(N_Vector X, N_Vector Z)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VInvTest_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
invTestKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return (gpu_result < HALF);
}
booleantype N_VConstrMask_Cuda(N_Vector C, N_Vector X, N_Vector M)
{
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(X, ZERO))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VConstrMask_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(X, true, grid, block, shMemSize, stream);
constrMaskKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
NVEC_CUDA_DDATAp(C),
NVEC_CUDA_DDATAp(X),
NVEC_CUDA_DDATAp(M),
NVEC_CUDA_DBUFFERp(X),
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(X);
realtype gpu_result = NVEC_CUDA_HBUFFERp(X)[0];
return (gpu_result < HALF);
}
realtype N_VMinQuotient_Cuda(N_Vector num, N_Vector denom)
{
// Starting value for min reduction
const realtype maxVal = std::numeric_limits<realtype>::max();
size_t grid, block, shMemSize;
cudaStream_t stream;
if (InitializeReductionBuffer(num, maxVal))
{
SUNDIALS_DEBUG_PRINT("ERROR in N_VMinQuotient_Cuda: InitializeReductionBuffer returned nonzero\n");
}
GetKernelParameters(num, true, grid, block, shMemSize, stream);
minQuotientKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
maxVal,
NVEC_CUDA_DDATAp(num),
NVEC_CUDA_DDATAp(denom),
NVEC_CUDA_DBUFFERp(num),
NVEC_CUDA_CONTENT(num)->length
);
PostKernelLaunch();
// Get result from the GPU
CopyReductionBufferFromDevice(num);
realtype gpu_result = NVEC_CUDA_HBUFFERp(num)[0];
return gpu_result;
}
/*
* -----------------------------------------------------------------
* fused vector operations
* -----------------------------------------------------------------
*/
int N_VLinearCombination_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector Z)
{
cudaError_t err;
// Copy c array to device
realtype* d_c;
err = cudaMalloc((void**) &d_c, nvec*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_c, c, nvec*sizeof(realtype), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters and launch
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(X[0], false, grid, block, shMemSize, stream)) return(-1);
linearCombinationKernel<<<grid, block, shMemSize, stream>>>
(
nvec,
d_c,
d_Xd,
NVEC_CUDA_DDATAp(Z),
NVEC_CUDA_CONTENT(Z)->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
// Free device arrays
err = cudaFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
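/* Illustrative sketch (not part of the original file): forming the fused
 * combination Z = 2*X0 - X1 with N_VLinearCombination_Cuda above. */
#if 0
static int exampleLinearCombination(N_Vector X0, N_Vector X1, N_Vector Z)
{
  realtype c[2] = {RCONST(2.0), RCONST(-1.0)};
  N_Vector X[2] = {X0, X1};
  return(N_VLinearCombination_Cuda(2, c, X, Z));
}
#endif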
int N_VScaleAddMulti_Cuda(int nvec, realtype* c, N_Vector X, N_Vector* Y,
N_Vector* Z)
{
cudaError_t err;
// Copy c array to device
realtype* d_c;
err = cudaMalloc((void**) &d_c, nvec*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_c, c, nvec*sizeof(realtype), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Yd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Yd[i] = NVEC_CUDA_DDATAp(Y[i]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Yd;
err = cudaMalloc((void**) &d_Yd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) return(-1);
scaleAddMultiKernel<<<grid, block, shMemSize, stream>>>
(
nvec,
d_c,
NVEC_CUDA_DDATAp(X),
d_Yd,
d_Zd,
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Free host array
delete[] h_Yd;
delete[] h_Zd;
// Free device arrays
err = cudaFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VDotProdMulti_Cuda(int nvec, N_Vector X, N_Vector* Y, realtype* dots)
{
cudaError_t err;
// Create array of device pointers on host
realtype** h_Yd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Yd[i] = NVEC_CUDA_DDATAp(Y[i]);
// Copy array of device pointers to device from host
realtype** d_Yd;
err = cudaMalloc((void**) &d_Yd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(X, false, grid, block, shMemSize, stream)) return(-1);
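  // Override the grid so that the j-th entry of the device buffer receives
  // the j-th dot product (d_buff has one slot per vector in Y).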
grid = nvec;
// Allocate reduction buffer on device
realtype* d_buff;
err = cudaMalloc((void**) &d_buff, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemsetAsync(d_buff, 0, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
dotProdMultiKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
nvec,
NVEC_CUDA_DDATAp(X),
d_Yd,
d_buff,
NVEC_CUDA_CONTENT(X)->length
);
PostKernelLaunch();
// Copy the GPU result to the CPU.
err = cudaMemcpy(dots, d_buff, grid*sizeof(realtype), cudaMemcpyDeviceToHost);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Free host array
delete[] h_Yd;
// Free device arrays
err = cudaFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_buff);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
/*
* -----------------------------------------------------------------------------
* vector array operations
* -----------------------------------------------------------------------------
*/
int N_VLinearSumVectorArray_Cuda(int nvec, realtype a, N_Vector* X, realtype b,
N_Vector* Y, N_Vector* Z)
{
cudaError_t err;
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Yd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Yd[i] = NVEC_CUDA_DDATAp(Y[i]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Yd;
err = cudaMalloc((void**) &d_Yd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Yd, h_Yd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
linearSumVectorArrayKernel<<<grid, block, shMemSize, stream>>>
(
nvec,
a,
d_Xd,
b,
d_Yd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
delete[] h_Yd;
delete[] h_Zd;
// Free device arrays
err = cudaFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VScaleVectorArray_Cuda(int nvec, realtype* c, N_Vector* X, N_Vector* Z)
{
cudaError_t err;
// Copy c array to device
realtype* d_c;
err = cudaMalloc((void**) &d_c, nvec*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_c, c, nvec*sizeof(realtype), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
scaleVectorArrayKernel<<<grid, block, shMemSize, stream>>>
(
nvec,
d_c,
d_Xd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Xd;
delete[] h_Zd;
// Free device arrays
err = cudaFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VConstVectorArray_Cuda(int nvec, realtype c, N_Vector* Z)
{
cudaError_t err;
// Create array of device pointers on host
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Zd;
err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
constVectorArrayKernel<<<grid, block, shMemSize, stream>>>
(
nvec,
c,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host array
delete[] h_Zd;
// Free device arrays
err = cudaFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VWrmsNormVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W,
realtype* norms)
{
cudaError_t err;
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Wd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Wd[i] = NVEC_CUDA_DDATAp(W[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Wd;
err = cudaMalloc((void**) &d_Wd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Wd, h_Wd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(X[0], true, grid, block, shMemSize, stream)) return(-1);
grid = nvec;
// Allocate reduction buffer on device
realtype* d_buff;
err = cudaMalloc((void**) &d_buff, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemsetAsync(d_buff, 0, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
wL2NormSquareVectorArrayKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
nvec,
d_Xd,
d_Wd,
d_buff,
NVEC_CUDA_CONTENT(X[0])->length
);
PostKernelLaunch();
// Copy the GPU result to the CPU.
err = cudaMemcpy(norms, d_buff, grid*sizeof(realtype), cudaMemcpyDeviceToHost);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Finish computation
for (int k=0; k<nvec; ++k)
norms[k] = std::sqrt(norms[k]/NVEC_CUDA_CONTENT(X[0])->length);
// Free host array
delete[] h_Xd;
delete[] h_Wd;
// Free device arrays
err = cudaFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Wd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_buff);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VWrmsNormMaskVectorArray_Cuda(int nvec, N_Vector* X, N_Vector* W,
N_Vector id, realtype* norms)
{
cudaError_t err;
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Wd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Wd[i] = NVEC_CUDA_DDATAp(W[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Wd;
err = cudaMalloc((void**) &d_Wd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Wd, h_Wd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(X[0], true, grid, block, shMemSize, stream)) return(-1);
grid = nvec;
// Allocate reduction buffer on device
realtype* d_buff;
err = cudaMalloc((void**) &d_buff, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemsetAsync(d_buff, 0, grid*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
wL2NormSquareMaskVectorArrayKernel<realtype, sunindextype><<<grid, block, shMemSize, stream>>>
(
nvec,
d_Xd,
d_Wd,
NVEC_CUDA_DDATAp(id),
d_buff,
NVEC_CUDA_CONTENT(X[0])->length
);
PostKernelLaunch();
// Copy the GPU result to the CPU.
err = cudaMemcpy(norms, d_buff, grid*sizeof(realtype), cudaMemcpyDeviceToHost);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Finish computation
for (int k=0; k<nvec; ++k)
norms[k] = std::sqrt(norms[k]/NVEC_CUDA_CONTENT(X[0])->length);
// Free host arrays
delete[] h_Xd;
delete[] h_Wd;
// Free device arrays
err = cudaFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Wd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_buff);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
int N_VScaleAddMultiVectorArray_Cuda(int nvec, int nsum, realtype* c,
N_Vector* X, N_Vector** Y, N_Vector** Z)
{
cudaError_t err;
// Copy c array to device
realtype* d_c;
err = cudaMalloc((void**) &d_c, nsum*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_c, c, nsum*sizeof(realtype), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Xd[i] = NVEC_CUDA_DDATAp(X[i]);
realtype** h_Yd = new realtype*[nsum*nvec];
for (int j=0; j<nvec; j++)
for (int i=0; i<nsum; i++)
h_Yd[j*nsum+i] = NVEC_CUDA_DDATAp(Y[i][j]);
realtype** h_Zd = new realtype*[nsum*nvec];
for (int j=0; j<nvec; j++)
for (int i=0; i<nsum; i++)
h_Zd[j*nsum+i] = NVEC_CUDA_DDATAp(Z[i][j]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = cudaMalloc((void**) &d_Xd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Xd, h_Xd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Yd;
err = cudaMalloc((void**) &d_Yd, nsum*nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Yd, h_Yd, nsum*nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = cudaMalloc((void**) &d_Zd, nsum*nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Zd, h_Zd, nsum*nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(Z[0][0], false, grid, block, shMemSize, stream)) return(-1);
scaleAddMultiVectorArrayKernel<<<grid, block, shMemSize, stream>>>
(
nvec,
nsum,
d_c,
d_Xd,
d_Yd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0][0])->length
);
PostKernelLaunch();
// Free host arrays
delete[] h_Xd;
delete[] h_Yd;
delete[] h_Zd;
// Free device arrays
err = cudaFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Yd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return(0);
}
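/* Layout sketch for the flattened pointer arrays above (an illustrative
 * note, not part of the original source): Y and Z are indexed as [sum][vec]
 * on the host but stored vector-major on the device, e.g. for nvec = 2 and
 * nsum = 3:
 *
 *   h_Yd = { Y[0][0], Y[1][0], Y[2][0],     all sums for vector 0
 *            Y[0][1], Y[1][1], Y[2][1] }    all sums for vector 1
 *
 * so the kernel can walk the nsum contributions for vector-array entry j
 * contiguously as d_Yd[j*nsum + 0] .. d_Yd[j*nsum + nsum-1]. */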
int N_VLinearCombinationVectorArray_Cuda(int nvec, int nsum, realtype* c,
N_Vector** X, N_Vector* Z)
{
cudaError_t err;
// Copy c array to device
realtype* d_c;
err = cudaMalloc((void**) &d_c, nsum*sizeof(realtype));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_c, c, nsum*sizeof(realtype), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Create array of device pointers on host
realtype** h_Xd = new realtype*[nsum*nvec];
for (int j=0; j<nvec; j++)
for (int i=0; i<nsum; i++)
h_Xd[j*nsum+i] = NVEC_CUDA_DDATAp(X[i][j]);
realtype** h_Zd = new realtype*[nvec];
for (int i=0; i<nvec; i++)
h_Zd[i] = NVEC_CUDA_DDATAp(Z[i]);
// Copy array of device pointers to device from host
realtype** d_Xd;
err = cudaMalloc((void**) &d_Xd, nsum*nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Xd, h_Xd, nsum*nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
realtype** d_Zd;
err = cudaMalloc((void**) &d_Zd, nvec*sizeof(realtype*));
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaMemcpy(d_Zd, h_Zd, nvec*sizeof(realtype*), cudaMemcpyHostToDevice);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
// Set kernel parameters
size_t grid, block, shMemSize;
cudaStream_t stream;
if (GetKernelParameters(Z[0], false, grid, block, shMemSize, stream)) return(-1);
linearCombinationVectorArrayKernel<<<grid, block, shMemSize, stream>>>
(
nvec,
nsum,
d_c,
d_Xd,
d_Zd,
NVEC_CUDA_CONTENT(Z[0])->length
);
PostKernelLaunch();
// Free host arrays
delete[] h_Xd;
delete[] h_Zd;
// Free device arrays
err = cudaFree(d_c);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Xd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
err = cudaFree(d_Zd);
if (!SUNDIALS_CUDA_VERIFY(err)) return(-1);
return cudaGetLastError();
}
/*
* -----------------------------------------------------------------
* OPTIONAL XBraid interface operations
* -----------------------------------------------------------------
*/
int N_VBufSize_Cuda(N_Vector x, sunindextype *size)
{
if (x == NULL) return(-1);
*size = (sunindextype)NVEC_CUDA_MEMSIZE(x);
return(0);
}
int N_VBufPack_Cuda(N_Vector x, void *buf)
{
int copy_fail = 0;
cudaError_t cuerr;
if (x == NULL || buf == NULL) return(-1);
SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST);
if (buf_mem == NULL) return(-1);
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
buf_mem,
NVEC_CUDA_CONTENT(x)->device_data,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
/* we synchronize with respect to the host, but only in this stream */
cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(x));
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem);
return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0);
}
int N_VBufUnpack_Cuda(N_Vector x, void *buf)
{
int copy_fail = 0;
cudaError_t cuerr;
if (x == NULL || buf == NULL) return(-1);
SUNMemory buf_mem = SUNMemoryHelper_Wrap(buf, SUNMEMTYPE_HOST);
if (buf_mem == NULL) return(-1);
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(x),
NVEC_CUDA_CONTENT(x)->device_data,
buf_mem,
NVEC_CUDA_MEMSIZE(x),
(void*) NVEC_CUDA_STREAM(x));
/* we synchronize with respect to the host, but only in this stream */
cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(x));
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(x), buf_mem);
return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0);
}
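/* Round-trip sketch for the buffer operations above (an illustrative
 * assumption, not part of the original source; error checking omitted).
 * The size query pairs with pack/unpack, and both copies synchronize on
 * the vector's stream before returning: */
#if 0
sunindextype bytes;
N_VBufSize_Cuda(x, &bytes);          /* device data size of x, in bytes  */
void* buf = malloc((size_t) bytes);
N_VBufPack_Cuda(x, buf);             /* device data of x -> host buffer  */
N_VBufUnpack_Cuda(y, buf);           /* host buffer -> device data of y  */
free(buf);
#endif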
/*
* -----------------------------------------------------------------
* Enable / Disable fused and vector array operations
* -----------------------------------------------------------------
*/
int N_VEnableFusedOps_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
if (tf)
{
/* enable all fused vector operations */
v->ops->nvlinearcombination = N_VLinearCombination_Cuda;
v->ops->nvscaleaddmulti = N_VScaleAddMulti_Cuda;
v->ops->nvdotprodmulti = N_VDotProdMulti_Cuda;
/* enable all vector array operations */
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Cuda;
v->ops->nvscalevectorarray = N_VScaleVectorArray_Cuda;
v->ops->nvconstvectorarray = N_VConstVectorArray_Cuda;
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Cuda;
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Cuda;
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Cuda;
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Cuda;
}
else
{
/* disable all fused vector operations */
v->ops->nvlinearcombination = NULL;
v->ops->nvscaleaddmulti = NULL;
v->ops->nvdotprodmulti = NULL;
/* disable all vector array operations */
v->ops->nvlinearsumvectorarray = NULL;
v->ops->nvscalevectorarray = NULL;
v->ops->nvconstvectorarray = NULL;
v->ops->nvwrmsnormvectorarray = NULL;
v->ops->nvwrmsnormmaskvectorarray = NULL;
v->ops->nvscaleaddmultivectorarray = NULL;
v->ops->nvlinearcombinationvectorarray = NULL;
}
/* return success */
return(0);
}
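/* Usage sketch (an illustrative assumption, not part of the original
 * source): the fused kernels are opt-in, so an application typically
 * enables them right after constructing the vector and before handing it
 * to an integrator. 'length' is a placeholder value here. */
#if 0
N_Vector v = N_VNew_Cuda(length);
if (N_VEnableFusedOps_Cuda(v, SUNTRUE) != 0) { /* handle the error */ }
#endif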
int N_VEnableLinearCombination_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombination = N_VLinearCombination_Cuda;
else
v->ops->nvlinearcombination = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMulti_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmulti = N_VScaleAddMulti_Cuda;
else
v->ops->nvscaleaddmulti = NULL;
/* return success */
return(0);
}
int N_VEnableDotProdMulti_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvdotprodmulti = N_VDotProdMulti_Cuda;
else
v->ops->nvdotprodmulti = NULL;
/* return success */
return(0);
}
int N_VEnableLinearSumVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearsumvectorarray = N_VLinearSumVectorArray_Cuda;
else
v->ops->nvlinearsumvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscalevectorarray = N_VScaleVectorArray_Cuda;
else
v->ops->nvscalevectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableConstVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvconstvectorarray = N_VConstVectorArray_Cuda;
else
v->ops->nvconstvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormvectorarray = N_VWrmsNormVectorArray_Cuda;
else
v->ops->nvwrmsnormvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableWrmsNormMaskVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvwrmsnormmaskvectorarray = N_VWrmsNormMaskVectorArray_Cuda;
else
v->ops->nvwrmsnormmaskvectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableScaleAddMultiVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvscaleaddmultivectorarray = N_VScaleAddMultiVectorArray_Cuda;
else
v->ops->nvscaleaddmultivectorarray = NULL;
/* return success */
return(0);
}
int N_VEnableLinearCombinationVectorArray_Cuda(N_Vector v, booleantype tf)
{
/* check that vector is non-NULL */
if (v == NULL) return(-1);
/* check that ops structure is non-NULL */
if (v->ops == NULL) return(-1);
/* enable/disable operation */
if (tf)
v->ops->nvlinearcombinationvectorarray = N_VLinearCombinationVectorArray_Cuda;
else
v->ops->nvlinearcombinationvectorarray = NULL;
/* return success */
return(0);
}
/*
* Private helper functions.
*/
int AllocateData(N_Vector v)
{
int alloc_fail = 0;
N_VectorContent_Cuda vc = NVEC_CUDA_CONTENT(v);
N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v);
if (N_VGetLength_Cuda(v) == 0) return(0);
if (vcp->use_managed_mem)
{
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data),
NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_UVM);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed for SUNMEMTYPE_UVM\n");
}
vc->host_data = SUNMemoryHelper_Alias(vc->device_data);
}
else
{
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->host_data),
NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_HOST);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n");
}
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v), &(vc->device_data),
NVEC_CUDA_MEMSIZE(v), SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in AllocateData: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n");
}
}
return(alloc_fail ? -1 : 0);
}
/*
* Initializes the internal buffer used for reductions.
* If the buffer is already allocated, it will only be reallocated
* if it is no longer large enough. This may occur if the length
* of the vector is increased. The buffer is initialized to the
* value given.
*/
int InitializeReductionBuffer(N_Vector v, const realtype value)
{
int alloc_fail = 0, copy_fail = 0;
size_t bytes = sizeof(realtype);
booleantype need_to_allocate = SUNFALSE;
N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v);
SUNMemory value_mem = SUNMemoryHelper_Wrap((void*) &value, SUNMEMTYPE_HOST);
/* we allocate if the existing reduction buffer is not large enough */
if (vcp->reduce_buffer_allocated_bytes < bytes)
{
FreeReductionBuffer(v);
need_to_allocate = SUNTRUE;
}
if (need_to_allocate)
{
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v),
&(vcp->reduce_buffer_host), bytes,
SUNMEMTYPE_PINNED);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("WARNING in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_PINNED, using SUNMEMTYPE_HOST instead\n");
/* try to allocate just plain host memory instead */
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v),
&(vcp->reduce_buffer_host), bytes,
SUNMEMTYPE_HOST);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_HOST\n");
}
}
alloc_fail = SUNMemoryHelper_Alloc(NVEC_CUDA_MEMHELP(v),
&(vcp->reduce_buffer_dev), bytes,
SUNMEMTYPE_DEVICE);
if (alloc_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_Alloc failed to alloc SUNMEMTYPE_DEVICE\n");
}
}
if (!alloc_fail)
{
/* store the size of the buffer */
vcp->reduce_buffer_allocated_bytes = bytes;
/* initialize the memory with the value */
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v),
vcp->reduce_buffer_dev, value_mem,
bytes, (void*) NVEC_CUDA_STREAM(v));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in InitializeReductionBuffer: SUNMemoryHelper_CopyAsync failed\n");
}
}
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), value_mem);
return((alloc_fail || copy_fail) ? -1 : 0);
}
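/* Lifecycle sketch (an illustrative note, not part of the original source):
 * a reduction such as N_VDotProd typically
 *   1. calls InitializeReductionBuffer(v, value) to (re)allocate -- pinned
 *      host memory when available, plain host memory otherwise -- and seed
 *      the device buffer,
 *   2. launches its kernel, which accumulates into reduce_buffer_dev,
 *   3. calls CopyReductionBufferFromDevice(v, 1) to stream-synchronously
 *      read the scalar back through reduce_buffer_host.
 * Reallocation only happens when a larger buffer is required. */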
/* Free the reduction buffer
*/
void FreeReductionBuffer(N_Vector v)
{
N_PrivateVectorContent_Cuda vcp = NVEC_CUDA_PRIVATE(v);
if (vcp == NULL) return;
if (vcp->reduce_buffer_dev != NULL)
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_dev);
vcp->reduce_buffer_dev = NULL;
if (vcp->reduce_buffer_host != NULL)
SUNMemoryHelper_Dealloc(NVEC_CUDA_MEMHELP(v), vcp->reduce_buffer_host);
vcp->reduce_buffer_host = NULL;
}
/* Copy the reduction buffer from the device to the host.
*/
int CopyReductionBufferFromDevice(N_Vector v, size_t n)
{
int copy_fail;
cudaError_t cuerr;
copy_fail = SUNMemoryHelper_CopyAsync(NVEC_CUDA_MEMHELP(v),
NVEC_CUDA_PRIVATE(v)->reduce_buffer_host,
NVEC_CUDA_PRIVATE(v)->reduce_buffer_dev,
n*sizeof(realtype),
(void*) NVEC_CUDA_STREAM(v));
if (copy_fail)
{
SUNDIALS_DEBUG_PRINT("ERROR in CopyReductionBufferFromDevice: SUNMemoryHelper_CopyAsync returned nonzero\n");
}
/* we synchronize with respect to the host, but only in this stream */
cuerr = cudaStreamSynchronize(*NVEC_CUDA_STREAM(v));
return (!SUNDIALS_CUDA_VERIFY(cuerr) || copy_fail ? -1 : 0);
}
/* Get the kernel launch parameters based on the kernel type (reduction or not),
* using the appropriate kernel execution policy.
*/
static int GetKernelParameters(N_Vector v, booleantype reduction, size_t& grid,
size_t& block, size_t& shMemSize,
cudaStream_t& stream, size_t n)
{
n = (n == 0) ? NVEC_CUDA_CONTENT(v)->length : n;
if (reduction)
{
SUNCudaExecPolicy* reduce_exec_policy = NVEC_CUDA_CONTENT(v)->reduce_exec_policy;
grid = reduce_exec_policy->gridSize(n);
block = reduce_exec_policy->blockSize();
shMemSize = 0;
stream = *(reduce_exec_policy->stream());
if (block % CUDA_WARP_SIZE)
{
#ifdef SUNDIALS_DEBUG
throw std::runtime_error("the block size must be a multiple of the CUDA warp size");
#endif
return(-1);
}
}
else
{
SUNCudaExecPolicy* stream_exec_policy = NVEC_CUDA_CONTENT(v)->stream_exec_policy;
grid = stream_exec_policy->gridSize(n);
block = stream_exec_policy->blockSize();
shMemSize = 0;
stream = *(stream_exec_policy->stream());
}
if (grid == 0)
{
#ifdef SUNDIALS_DEBUG
throw std::runtime_error("the grid size must be > 0");
#endif
return(-1);
}
if (block == 0)
{
#ifdef SUNDIALS_DEBUG
throw std::runtime_error("the block size must be > 0");
#endif
return(-1);
}
return(0);
}
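/* Sketch of what an execution policy's gridSize() typically computes for
 * the calls above (an illustrative assumption; policies are
 * user-replaceable): a ceiling division of the work size by the block size,
 *
 *   grid = (n + block - 1) / block;   // at least 1 block when n > 0
 *
 * with reductions additionally requiring block to be a multiple of
 * CUDA_WARP_SIZE so the warp-level reduction steps line up. */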
/* Should be called after a kernel launch.
* If SUNDIALS_DEBUG_CUDA_LASTERROR is not defined, then the function does nothing.
* If it is defined, the function will synchronize and check the last CUDA error.
*/
void PostKernelLaunch()
{
#ifdef SUNDIALS_DEBUG_CUDA_LASTERROR
cudaDeviceSynchronize();
SUNDIALS_CUDA_VERIFY(cudaGetLastError());
#endif
}
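/* Illustrative note (not in the original source): because the body above
 * compiles away unless SUNDIALS_DEBUG_CUDA_LASTERROR is defined, release
 * builds pay no device synchronization after each launch, while debug
 * builds trade throughput for immediately attributed launch errors. */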
} // extern "C"
|
e7a5d6ddc14bcdaca19d68e0f8f6e4af5b591c15.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <SFML/Graphics.hpp>
#include "aux_functions_gpu.h"
#define CONFIG 1
#if CONFIG == 0
#include "aux_declarations_gpu_rtsim_1.h"
#elif CONFIG == 1
#include "aux_declarations_gpu_rtsim_2.h"
#elif CONFIG == 2
#include "aux_declarations_gpu_rtsim_3.h"
#else
#include "aux_declarations_gpu_rtsim_1.h"
#endif
std::vector <sf::CircleShape> body_graphics;
enum nBody_config
{
NBODY_CONFIG_RANDOM,
NBODY_CONFIG_SPIRAL,
NBODY_CONFIG_EXPAND,
NBODY_NUM_CONFIGS
};
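/* Note (added for clarity, not in the original source): the CONFIG macro
 * above is compared against this enum in initialize_bodies(), so CONFIG 0
 * selects NBODY_CONFIG_RANDOM, 1 selects NBODY_CONFIG_SPIRAL, and 2 selects
 * NBODY_CONFIG_EXPAND; any other value hits the default branch. Only the
 * SPIRAL branch pushes shapes into body_graphics, so the render loop in
 * main() is only safe with CONFIG == 1 as written. */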
// main function
int main (const int argc, const char** argv) {
printf("\n");
int nBodies = 30000;
float dt = DT; // time step
int nIters = 1000, limit_iter = 0; // simulation iterations
int iter = 0, i = 0, grid_size = 0, stop = 0;
double total_time_gpu = 0;
float* x = NULL;
float* y = NULL;
body_pos *h_body_pos, *d_body_pos;
body_parameters *h_body_par, *d_body_par;
// reading the arguments (argv data)
switch(argc)
{
case 1:
printf("------------------------------------------------------\n\n");
printf("Default values considered, nBodies: 30000.\n\n");
printf("------------------------------------------------------\n\n");
break;
case 2:
nBodies = atoi(argv[1]);
printf("------------------------------------------------------\n\n");
printf("Values considered, nBodies: %i.\n\n", nBodies);
printf("------------------------------------------------------\n\n");
break;
case 3:
nBodies = atoi(argv[1]);
nIters = atoi(argv[2]);
limit_iter = 1;
printf("------------------------------------------------------\n\n");
printf("Values considered, nBodies: %i, nIters: %i.\n\n", nBodies, nIters);
printf("------------------------------------------------------\n\n");
break;
default:
printf("ERR: Invalid number of arguments passed.\n"
"Aborting...\n");
return -1;
}
// initialising the animation window
sf::ContextSettings settings;
settings.antialiasingLevel = 8;
sf::RenderWindow window(sf::VideoMode(X_RES, Y_RES), "N-Body Simulation", sf::Style::Default, settings);
// host side memory allocation
size_t pos_bytes = nBodies*sizeof(body_pos);
size_t par_bytes = nBodies*sizeof(body_parameters);
hipHostMalloc((body_pos **) &h_body_pos, pos_bytes);
hipHostMalloc((body_parameters **) &h_body_par, par_bytes);
x = new float[nBodies];
y = new float[nBodies];
// Init mass / pos / vel / acc data
initialize_bodies(h_body_pos, h_body_par, nBodies);
for (i = 0; i < nBodies; i++) {
x[i] = h_body_pos[i].x;
y[i] = h_body_pos[i].y;
}
// Device side memory allocation
hipMalloc((body_pos **) &d_body_pos, pos_bytes);
hipMalloc((body_parameters **) &d_body_par, par_bytes);
// determining the grid size
grid_size = (nBodies+BLOCK_SIZE-1)/BLOCK_SIZE;
// initializing the dim3 variables
dim3 block( BLOCK_SIZE, 1, 1 ) ;
dim3 grid( grid_size, 1, 1);
// starting the iterations
printf("---------GPU Data---------\n");
// memcopy (host -> device)
hipMemcpy(d_body_pos, h_body_pos, pos_bytes, hipMemcpyHostToDevice);
hipMemcpy(d_body_par, h_body_par, par_bytes, hipMemcpyHostToDevice);
while((window.isOpen() && !stop) ||
((limit_iter == 1) && (iter < nIters))) {
// print statements
if(nIters >= 3 && iter%(nIters/3)==0) { // guard against division by zero for small nIters
printf("iter:%i\n",iter);
printf("MASS 0\t\t\tMASS 1\t\t\tMASS 2\n");
printf("x:%.04f\t\tx:%.04f\t\tx:%.04f\n",h_body_pos[0].x,h_body_pos[1].x,h_body_pos[2].x);
printf("y:%.04f\t\ty:%.04f\t\ty:%.04f\n",h_body_pos[0].y,h_body_pos[1].y,h_body_pos[2].y);
printf("z:%.04f\t\tz:%.04f\t\tz:%.04f\n",h_body_pos[0].z,h_body_pos[1].z,h_body_pos[2].z);
printf("\n");
}
sf::Event event;
while (window.pollEvent(event))
{
if (event.type == sf::Event::Closed)
window.close();
}
double timeStampA = getTimeStamp();
// kernel call
hipLaunchKernelGGL(( nbody_acc_vel), dim3(grid), dim3(block), 0, 0, d_body_pos,d_body_par,dt,nBodies);
hipDeviceSynchronize();
hipLaunchKernelGGL(( nbody_integration), dim3(grid), dim3(block), 0, 0, d_body_pos,d_body_par,dt,nBodies);
hipDeviceSynchronize();
// memcopy (device -> host)
hipMemcpy(h_body_pos, d_body_pos, pos_bytes, hipMemcpyDeviceToHost);
double timeStampB = getTimeStamp();
gpuErrchk(hipPeekAtLastError());
for (i = 0; i < nBodies; i++){
body_graphics[i].move(h_body_pos[i].x - x[i] , h_body_pos[i].y - y[i]);
x[i] = h_body_pos[i].x;
y[i] = h_body_pos[i].y;
}
window.clear();
for (i = 0; i < nBodies; i++)
window.draw(body_graphics[i]);
window.display();
total_time_gpu = total_time_gpu + (timeStampB - timeStampA);
iter++;
if ((limit_iter == 1) && (iter == nIters)) {
stop = 1;
window.close();
}
}
printf("\n");
printf("GPU -- Total Time Taken: %lf\n\n", total_time_gpu);
// free memory
delete [] x;
delete [] y;
hipHostFree(h_body_pos);
hipHostFree(h_body_par);
hipFree(d_body_pos);
hipFree(d_body_par);
hipDeviceReset();
return 0;
}
void initialize_bodies(body_pos *b_pos, body_parameters *b_par, int n) {
int i = 0;
srand(1000);
// setting up the base body shape
sf::CircleShape shape_green(SIZE_OF_BODIES);
shape_green.setFillColor(sf::Color::Green);
sf::CircleShape shape_red(SIZE_OF_BODIES);
shape_red.setFillColor(sf::Color::Red);
for (i = 0; i < n; i++) {
b_par[i].ax = 0.0f;
b_par[i].ay = 0.0f;
b_par[i].vx = 0.0f;
b_par[i].vy = 0.0f;
}
switch(CONFIG)
{
case(NBODY_CONFIG_RANDOM):
for (i = 0; i < n; i++) {
if (i%50 == 0)
b_par[i].m = 10000*((rand() / (float)RAND_MAX) * MASS);
else
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
b_pos[i].x = ((rand() / (float)RAND_MAX) * X_RES);
b_pos[i].y = ((rand() / (float)RAND_MAX) * Y_RES);
}
break;
case(NBODY_CONFIG_SPIRAL):
for (i = 0; i < n; i++) {
b_pos[i].x = rand() % (MAX_NUMBER_X + 1 - MINIMUM_NUMBER_X) + MINIMUM_NUMBER_X;
b_pos[i].y = rand() % (MAX_NUMBER_Y + 1 - MINIMUM_NUMBER_Y) + MINIMUM_NUMBER_Y;
if (i == 0) {
b_par[i].m = 1000000* MASS;
b_pos[i].x = X_RES / 2;
b_pos[i].y = Y_RES / 2;
body_graphics.push_back(shape_red);
} else if (i%2500 == 0) {
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);//100000*MASS;
body_graphics.push_back(shape_red);
}
else {
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
body_graphics.push_back(shape_green);
}
body_graphics[i].setPosition(b_pos[i].x, b_pos[i].y);
float dist_x = b_pos[i].x - b_pos[0].x;
float dist_y = b_pos[i].y - b_pos[0].y;
float dist_sqr = dist_x*dist_x + dist_y*dist_y;
float dist = sqrtf(dist_sqr);
float vel_mag = sqrtf(G*b_par[0].m/dist);
b_par[i].vx = dist_y/dist*vel_mag*1.2;
b_par[i].vy = -1*dist_x/dist*vel_mag*1.2;
}
printf("Finished NBODY_CONFIG_SPIRAL Init\n");
break;
case(NBODY_CONFIG_EXPAND):
for (i = 0; i < n; i++) {
b_pos[i].x = rand() % (MAX_NUMBER_X + 1 - MINIMUM_NUMBER_X) + MINIMUM_NUMBER_X;
b_pos[i].y = rand() % (MAX_NUMBER_Y + 1 - MINIMUM_NUMBER_Y) + MINIMUM_NUMBER_Y;
if (i == 0) {
b_par[i].m = 10000*((rand() / (float)RAND_MAX) * MASS);
b_pos[i].x = X_RES / 2;
b_pos[i].y = Y_RES / 2;
} else if (i%150 == 0)
b_par[i].m = 10000*MASS;//((rand() / (float)RAND_MAX) * MASS);
else
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
// b_pos[i].x = ((rand() / (float)RAND_MAX) * X_RES);
// b_pos[i].y = ((rand() / (float)RAND_MAX) * Y_RES);
// b_pos[i].z = ((rand() / (float)RAND_MAX) * 500.0f);
b_par[i].ax = 0.0f;
b_par[i].ay = 0.0f;
// b_par[i].az = 0.0f;
b_par[i].vx = 0.0f;
b_par[i].vy = 0.0f;
// b_par[i].vz = 0.0f;
}
break;
default:
for (i = 0; i < n; i++) {
b_pos[i].x = rand() % (MAX_NUMBER_X + 1 - MINIMUM_NUMBER_X) + MINIMUM_NUMBER_X;
b_pos[i].y = rand() % (MAX_NUMBER_Y + 1 - MINIMUM_NUMBER_Y) + MINIMUM_NUMBER_Y;
if (i == 0) {
b_par[i].m = 100000*((rand() / (float)RAND_MAX) * MASS);
b_pos[i].x = X_RES / 2;
b_pos[i].y = Y_RES / 2;
} else if (i%50 == 0)
b_par[i].m = 10000*((rand() / (float)RAND_MAX) * MASS);
else
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
// b_pos[i].x = ((rand() / (float)RAND_MAX) * X_RES);
// b_pos[i].y = ((rand() / (float)RAND_MAX) * Y_RES);
// b_pos[i].z = ((rand() / (float)RAND_MAX) * 500.0f);
b_par[i].ax = 0.0f;
b_par[i].ay = 0.0f;
// b_par[i].az = 0.0f;
b_par[i].vx = 0.0f;
b_par[i].vy = 0.0f;
// b_par[i].vz = 0.0f;
}
break;
}
}
__global__ void nbody_acc_vel(body_pos* b_pos, body_parameters* b_par, float dt, int n) {
int idx = threadIdx.x + blockIdx.x*blockDim.x ;
if (idx >= n) return; // guard: the grid is rounded up past the last body
int j = 0;
float dx = 0.0f,
dy = 0.0f,
// dz = 0.0f,
sx = 0.0f,
sy = 0.0f,
// sz = 0.0f,
distSqr = 0.0f,
distSqr3 = 0.0f,
invDist3 = 0.0f;
for (j = 0; j < n; j++) {
dx = b_pos[j].x - b_pos[idx].x;
dy = b_pos[j].y - b_pos[idx].y;
// dz = b_pos[j].z - b_pos[idx].z;
distSqr = dx*dx + dy*dy /* + dz*dz */ + EPS;
distSqr3 = distSqr * distSqr * distSqr;
invDist3 = (G * b_par[j].m)/sqrt(distSqr3);
sx += dx * invDist3; sy += dy * invDist3;
// sz += dz * invDist3;
}
// acceleration calculation
b_par[idx].ax = sx;
b_par[idx].ay = sy;
// b_par[idx].az += sz;
// velocity calculation
b_par[idx].vx += b_par[idx].ax * dt * DAMPING;
b_par[idx].vy += b_par[idx].ay * dt * DAMPING;
// b_par[idx].vz += b_par[idx].az * dt;
}
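/* Worked formula for the accumulation above (added for clarity, not in the
 * original source): with the softening term EPS added to the squared
 * distance to avoid the 1/r^3 singularity, each body receives
 *
 *   a_i = sum_j G * m_j * (r_j - r_i) / (|r_j - r_i|^2 + EPS)^(3/2),
 *
 * followed by the damped explicit Euler velocity update
 *   v_i += a_i * dt * DAMPING. */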
__global__ void nbody_integration(body_pos* b_pos, body_parameters* b_par, float dt, int n) {
int idx = threadIdx.x + blockIdx.x*blockDim.x ;
if (idx > 0 && idx < n) { // body 0 (central mass) stays fixed; also guard padded threads
// integrate and find the new positions
b_pos[idx].x += b_par[idx].vx*dt + b_par[idx].ax*dt*dt/2;
b_pos[idx].y += b_par[idx].vy*dt + b_par[idx].ay*dt*dt/2;
// b_pos[idx].z += b_par[idx].vz*dt + b_par[idx].az*dt*dt/2;
}
}
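/* Worked formula for the update above (added for clarity, not in the
 * original source): a second-order Taylor step
 *
 *   x_i(t + dt) = x_i(t) + v_i(t) * dt + a_i(t) * dt^2 / 2,
 *
 * with body 0 deliberately held in place by the guard in the kernel. */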
| e7a5d6ddc14bcdaca19d68e0f8f6e4af5b591c15.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <vector>
#include <SFML/Graphics.hpp>
#include "aux_functions_gpu.h"
#define CONFIG 1
#if CONFIG == 0
#include "aux_declarations_gpu_rtsim_1.h"
#elif CONFIG == 1
#include "aux_declarations_gpu_rtsim_2.h"
#elif CONFIG == 2
#include "aux_declarations_gpu_rtsim_3.h"
#else
#include "aux_declarations_gpu_rtsim_1.h"
#endif
std::vector <sf::CircleShape> body_graphics;
enum nBody_config
{
NBODY_CONFIG_RANDOM,
NBODY_CONFIG_SPIRAL,
NBODY_CONFIG_EXPAND,
NBODY_NUM_CONFIGS
};
// main function
int main (const int argc, const char** argv) {
printf("\n");
int nBodies = 30000;
float dt = DT; // time step
int nIters = 1000, limit_iter = 0; // simulation iterations
int iter = 0, i = 0, grid_size = 0, stop = 0;
double total_time_gpu = 0;
float* x = NULL;
float* y = NULL;
body_pos *h_body_pos, *d_body_pos;
body_parameters *h_body_par, *d_body_par;
// reading the arguments (argv data)
switch(argc)
{
case 1:
printf("------------------------------------------------------\n\n");
printf("Default values considered, nBodies: 30000.\n\n");
printf("------------------------------------------------------\n\n");
break;
case 2:
nBodies = atoi(argv[1]);
printf("------------------------------------------------------\n\n");
printf("Values considered, nBodies: %i.\n\n", nBodies);
printf("------------------------------------------------------\n\n");
break;
case 3:
nBodies = atoi(argv[1]);
nIters = atoi(argv[2]);
limit_iter = 1;
printf("------------------------------------------------------\n\n");
printf("Values considered, nBodies: %i, nIters: %i.\n\n", nBodies, nIters);
printf("------------------------------------------------------\n\n");
break;
default:
printf("ERR: Invalid number of arguments passed.\n"
"Aborting...\n");
return -1;
}
// initialising the animation window
sf::ContextSettings settings;
settings.antialiasingLevel = 8;
sf::RenderWindow window(sf::VideoMode(X_RES, Y_RES), "N-Body Simulation", sf::Style::Default, settings);
// host side memory allocation
size_t pos_bytes = nBodies*sizeof(body_pos);
size_t par_bytes = nBodies*sizeof(body_parameters);
cudaMallocHost((body_pos **) &h_body_pos, pos_bytes);
cudaMallocHost((body_parameters **) &h_body_par, par_bytes);
x = new float[nBodies];
y = new float[nBodies];
// Init mass / pos / vel / acc data
initialize_bodies(h_body_pos, h_body_par, nBodies);
for (i = 0; i < nBodies; i++) {
x[i] = h_body_pos[i].x;
y[i] = h_body_pos[i].y;
}
// Device side memory allocation
cudaMalloc((body_pos **) &d_body_pos, pos_bytes);
cudaMalloc((body_parameters **) &d_body_par, par_bytes);
// determining the grid size
grid_size = (nBodies+BLOCK_SIZE-1)/BLOCK_SIZE;
// initializing the dim3 variables
dim3 block( BLOCK_SIZE, 1, 1 ) ;
dim3 grid( grid_size, 1, 1);
// starting the iterations
printf("---------GPU Data---------\n");
// memcopy (host -> device)
cudaMemcpy(d_body_pos, h_body_pos, pos_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_body_par, h_body_par, par_bytes, cudaMemcpyHostToDevice);
while((window.isOpen() && !stop) ||
((limit_iter == 1) && (iter < nIters))) {
// print statements
if(nIters >= 3 && iter%(nIters/3)==0) { // guard against division by zero for small nIters
printf("iter:%i\n",iter);
printf("MASS 0\t\t\tMASS 1\t\t\tMASS 2\n");
printf("x:%.04f\t\tx:%.04f\t\tx:%.04f\n",h_body_pos[0].x,h_body_pos[1].x,h_body_pos[2].x);
printf("y:%.04f\t\ty:%.04f\t\ty:%.04f\n",h_body_pos[0].y,h_body_pos[1].y,h_body_pos[2].y);
printf("z:%.04f\t\tz:%.04f\t\tz:%.04f\n",h_body_pos[0].z,h_body_pos[1].z,h_body_pos[2].z);
printf("\n");
}
sf::Event event;
while (window.pollEvent(event))
{
if (event.type == sf::Event::Closed)
window.close();
}
double timeStampA = getTimeStamp();
// kernel call
nbody_acc_vel<<<grid, block>>>(d_body_pos,d_body_par,dt,nBodies);
cudaDeviceSynchronize();
nbody_integration<<<grid, block>>>(d_body_pos,d_body_par,dt,nBodies);
cudaDeviceSynchronize();
// memcopy (device -> host)
cudaMemcpy(h_body_pos, d_body_pos, pos_bytes, cudaMemcpyDeviceToHost);
double timeStampB = getTimeStamp();
gpuErrchk(cudaPeekAtLastError());
for (i = 0; i < nBodies; i++){
body_graphics[i].move(h_body_pos[i].x - x[i] , h_body_pos[i].y - y[i]);
x[i] = h_body_pos[i].x;
y[i] = h_body_pos[i].y;
}
window.clear();
for (i = 0; i < nBodies; i++)
window.draw(body_graphics[i]);
window.display();
total_time_gpu = total_time_gpu + (timeStampB - timeStampA);
iter++;
if ((limit_iter == 1) && (iter == nIters)) {
stop = 1;
window.close();
}
}
printf("\n");
printf("GPU -- Total Time Taken: %lf\n\n", total_time_gpu);
// free memory
delete [] x;
delete [] y;
cudaFreeHost(h_body_pos);
cudaFreeHost(h_body_par);
cudaFree(d_body_pos);
cudaFree(d_body_par);
cudaDeviceReset();
return 0;
}
void initialize_bodies(body_pos *b_pos, body_parameters *b_par, int n) {
int i = 0;
srand(1000);
// setting up the base body shape
sf::CircleShape shape_green(SIZE_OF_BODIES);
shape_green.setFillColor(sf::Color::Green);
sf::CircleShape shape_red(SIZE_OF_BODIES);
shape_red.setFillColor(sf::Color::Red);
for (i = 0; i < n; i++) {
b_par[i].ax = 0.0f;
b_par[i].ay = 0.0f;
b_par[i].vx = 0.0f;
b_par[i].vy = 0.0f;
}
switch(CONFIG)
{
case(NBODY_CONFIG_RANDOM):
for (i = 0; i < n; i++) {
if (i%50 == 0)
b_par[i].m = 10000*((rand() / (float)RAND_MAX) * MASS);
else
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
b_pos[i].x = ((rand() / (float)RAND_MAX) * X_RES);
b_pos[i].y = ((rand() / (float)RAND_MAX) * Y_RES);
}
break;
case(NBODY_CONFIG_SPIRAL):
for (i = 0; i < n; i++) {
b_pos[i].x = rand() % (MAX_NUMBER_X + 1 - MINIMUM_NUMBER_X) + MINIMUM_NUMBER_X;
b_pos[i].y = rand() % (MAX_NUMBER_Y + 1 - MINIMUM_NUMBER_Y) + MINIMUM_NUMBER_Y;
if (i == 0) {
b_par[i].m = 1000000* MASS;
b_pos[i].x = X_RES / 2;
b_pos[i].y = Y_RES / 2;
body_graphics.push_back(shape_red);
} else if (i%2500 == 0) {
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);//100000*MASS;
body_graphics.push_back(shape_red);
}
else {
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
body_graphics.push_back(shape_green);
}
body_graphics[i].setPosition(b_pos[i].x, b_pos[i].y);
float dist_x = b_pos[i].x - b_pos[0].x;
float dist_y = b_pos[i].y - b_pos[0].y;
float dist_sqr = dist_x*dist_x + dist_y*dist_y;
float dist = sqrtf(dist_sqr);
float vel_mag = sqrtf(G*b_par[0].m/dist);
b_par[i].vx = dist_y/dist*vel_mag*1.2;
b_par[i].vy = -1*dist_x/dist*vel_mag*1.2;
}
printf("Finished NBODY_CONFIG_SPIRAL Init\n");
break;
case(NBODY_CONFIG_EXPAND):
for (i = 0; i < n; i++) {
b_pos[i].x = rand() % (MAX_NUMBER_X + 1 - MINIMUM_NUMBER_X) + MINIMUM_NUMBER_X;
b_pos[i].y = rand() % (MAX_NUMBER_Y + 1 - MINIMUM_NUMBER_Y) + MINIMUM_NUMBER_Y;
if (i == 0) {
b_par[i].m = 10000*((rand() / (float)RAND_MAX) * MASS);
b_pos[i].x = X_RES / 2;
b_pos[i].y = Y_RES / 2;
} else if (i%150 == 0)
b_par[i].m = 10000*MASS;//((rand() / (float)RAND_MAX) * MASS);
else
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
// b_pos[i].x = ((rand() / (float)RAND_MAX) * X_RES);
// b_pos[i].y = ((rand() / (float)RAND_MAX) * Y_RES);
// b_pos[i].z = ((rand() / (float)RAND_MAX) * 500.0f);
b_par[i].ax = 0.0f;
b_par[i].ay = 0.0f;
// b_par[i].az = 0.0f;
b_par[i].vx = 0.0f;
b_par[i].vy = 0.0f;
// b_par[i].vz = 0.0f;
}
break;
default:
for (i = 0; i < n; i++) {
b_pos[i].x = rand() % (MAX_NUMBER_X + 1 - MINIMUM_NUMBER_X) + MINIMUM_NUMBER_X;
b_pos[i].y = rand() % (MAX_NUMBER_Y + 1 - MINIMUM_NUMBER_Y) + MINIMUM_NUMBER_Y;
if (i == 0) {
b_par[i].m = 100000*((rand() / (float)RAND_MAX) * MASS);
b_pos[i].x = X_RES / 2;
b_pos[i].y = Y_RES / 2;
} else if (i%50 == 0)
b_par[i].m = 10000*((rand() / (float)RAND_MAX) * MASS);
else
b_par[i].m = ((rand() / (float)RAND_MAX) * MASS);
// b_pos[i].x = ((rand() / (float)RAND_MAX) * X_RES);
// b_pos[i].y = ((rand() / (float)RAND_MAX) * Y_RES);
// b_pos[i].z = ((rand() / (float)RAND_MAX) * 500.0f);
b_par[i].ax = 0.0f;
b_par[i].ay = 0.0f;
// b_par[i].az = 0.0f;
b_par[i].vx = 0.0f;
b_par[i].vy = 0.0f;
// b_par[i].vz = 0.0f;
}
break;
}
}
__global__ void nbody_acc_vel(body_pos* b_pos, body_parameters* b_par, float dt, int n) {
int idx = threadIdx.x + blockIdx.x*blockDim.x ;
if (idx >= n) return; // guard: the grid is rounded up past the last body
int j = 0;
float dx = 0.0f,
dy = 0.0f,
// dz = 0.0f,
sx = 0.0f,
sy = 0.0f,
// sz = 0.0f,
distSqr = 0.0f,
distSqr3 = 0.0f,
invDist3 = 0.0f;
for (j = 0; j < n; j++) {
dx = b_pos[j].x - b_pos[idx].x;
dy = b_pos[j].y - b_pos[idx].y;
// dz = b_pos[j].z - b_pos[idx].z;
distSqr = dx*dx + dy*dy /* + dz*dz */ + EPS;
distSqr3 = distSqr * distSqr * distSqr;
invDist3 = (G * b_par[j].m)/sqrt(distSqr3);
sx += dx * invDist3; sy += dy * invDist3;
// sz += dz * invDist3;
}
// acceleration calculation
b_par[idx].ax = sx;
b_par[idx].ay = sy;
// b_par[idx].az += sz;
// velocity calculation
b_par[idx].vx += b_par[idx].ax * dt * DAMPING;
b_par[idx].vy += b_par[idx].ay * dt * DAMPING;
// b_par[idx].vz += b_par[idx].az * dt;
}
__global__ void nbody_integration(body_pos* b_pos, body_parameters* b_par, float dt, int n) {
int idx = threadIdx.x + blockIdx.x*blockDim.x ;
if (idx > 0 && idx < n) { // body 0 (central mass) stays fixed; also guard padded threads
// integrate and find the new positions
b_pos[idx].x += b_par[idx].vx*dt + b_par[idx].ax*dt*dt/2;
b_pos[idx].y += b_par[idx].vy*dt + b_par[idx].ay*dt*dt/2;
// b_pos[idx].z += b_par[idx].vz*dt + b_par[idx].az*dt*dt/2;
}
}
|
33ea3d517e9f2c08c8887e238b42ab49c9513597.hip | // !!! This is a file automatically generated by hipify!!!
#include <THH/THHTensorTopK.cuh>
#include <THH/THHTensor.hpp>
#include <THH/generic/THHTensorTopK.hip>
#include <THH/THHGenerateFloatType.h>
| 33ea3d517e9f2c08c8887e238b42ab49c9513597.cu | #include <THC/THCTensorTopK.cuh>
#include <THC/THCTensor.hpp>
#include <THC/generic/THCTensorTopK.cu>
#include <THC/THCGenerateFloatType.h>
|
e430244871e400bf4f4f8b89ad8c8b1f5b7604c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/legacy/cuda_utils.hpp>
#include <type_traits>
#include <thrust/device_vector.h>
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/legacy/column.hpp>
#include <utility/utility.hpp>
#include <cuspatial/hausdorff.hpp>
namespace {
const unsigned int NUM_THREADS = 1024;
template <typename T>
__global__ void kernel_Hausdorff_Full(
int num_traj,
T *xx,
T *yy,
uint32_t *pos,
T *results
)
{
int bidx = blockIdx.y*gridDim.x+blockIdx.x;
if (bidx < num_traj*num_traj)
{
int seg_id_left = bidx/num_traj;
int seg_id_right =bidx%num_traj;
__shared__ T sdata[NUM_THREADS];
sdata[threadIdx.x] = -1;
__syncthreads();
int start_left = seg_id_left == 0 ? 0 : pos[seg_id_left-1];
int stop_left = pos[seg_id_left];
int start_right = seg_id_right == 0 ? 0 : pos[seg_id_right-1];
int stop_right = pos[seg_id_right];
T dist = 1e20;
int max_threads = 0;
{
max_threads = stop_left-start_left;
if (threadIdx.x < max_threads)
{
T my_xx = xx[start_left+threadIdx.x];
T my_yy = yy[start_left+threadIdx.x];
for (int i = start_right; i < stop_right; i++)
{
T other_xx = xx[i];
T other_yy = yy[i];
T new_dist = (my_xx-other_xx)*(my_xx-other_xx)
+ (my_yy-other_yy)*(my_yy-other_yy);
dist= min(dist, new_dist);//dist < new_dist ? dist : new_dist;
}
}
}
if (dist > 1e10)
dist = -1;
if(threadIdx.x < max_threads)
sdata[threadIdx.x] = dist;
__syncthreads();
//reduction
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
T tmp = sdata[threadIdx.x + offset];
T tmp2 = sdata[threadIdx.x];
sdata[threadIdx.x] = max(tmp2, tmp);
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0)
results[bidx] = (sdata[0]>=0)?sqrt(sdata[0]):1e10;
}
}
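/* Worked definition for the kernel above (added for clarity, not part of
 * the original source): block bidx computes one entry of the directed
 * Hausdorff distance matrix between point sets A (left) and B (right),
 *
 *   h(A, B) = max_{a in A} min_{b in B} ||a - b||_2,
 *
 * with one thread per point of A taking the min over B in squared
 * coordinates, a shared-memory tree reduction taking the max, and the
 * square root applied once at the end. Note this assumes each set has at
 * most NUM_THREADS (1024) points, since points beyond blockDim.x are never
 * visited; empty results fall back to the 1e10 sentinel. */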
struct Hausdorff_functor {
template <typename T>
static constexpr bool is_supported()
{
return std::is_floating_point<T>::value;
}
template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr>
gdf_column operator()(const gdf_column& x, const gdf_column& y,
const gdf_column& vertex_counts)
{
gdf_column d_matrix;
memset(&d_matrix,0,sizeof(gdf_column));
int num_set=vertex_counts.size;
int block_sz = num_set*num_set;
hipStream_t stream{0};
auto exec_policy = rmm::exec_policy(stream)->on(stream);
T *temp_matrix{nullptr};
RMM_TRY( RMM_ALLOC(&temp_matrix, block_sz * sizeof(T), stream) );
uint32_t *vertex_positions{nullptr};
RMM_TRY( RMM_ALLOC((void**)&vertex_positions, sizeof(uint32_t)*num_set, stream) );
uint32_t *vertex_counts_ptr=static_cast<uint32_t*>(vertex_counts.data);
thrust::inclusive_scan(exec_policy,vertex_counts_ptr,vertex_counts_ptr+num_set,vertex_positions);
int block_x = block_sz, block_y = 1;
if (block_sz > 65535)
{
block_y = ceil((float)block_sz/65535.0);
block_x = 65535;
}
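/* Illustrative note (not in the original source): CUDA grids are capped at
 * 65535 blocks in the y dimension (and in x on very old architectures), so
 * the num_set^2 blocks are folded into a 2D launch here; the kernel
 * linearizes them back with bidx = blockIdx.y*gridDim.x + blockIdx.x. */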
dim3 grid(block_x, block_y);
dim3 block(NUM_THREADS);
hipLaunchKernelGGL(( kernel_Hausdorff_Full<T>) , dim3(grid),dim3(block) , 0, 0, num_set,
static_cast<T*>(x.data), static_cast<T*>(y.data),
vertex_positions,temp_matrix);
CUDA_TRY( hipDeviceSynchronize() );
RMM_TRY( RMM_FREE(vertex_positions, stream) );
gdf_column_view_augmented(&d_matrix, temp_matrix, nullptr, block_sz,
x.dtype, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "hausdorff_matrix");
return d_matrix;
}
template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr>
gdf_column operator()(const gdf_column& x,const gdf_column& y,const gdf_column& vertex_counts)
{
CUDF_FAIL("Non-floating point operation is not supported");
}
};
} // namespace anonymous
/**
* @brief compute Hausdorff distances among all pairs of a set of trajectories
* see hausdorff.hpp
*/
namespace cuspatial {
gdf_column directed_hausdorff_distance(const gdf_column& x,const gdf_column& y,const gdf_column& vertex_counts)
{
CUDF_EXPECTS(x.data != nullptr &&y.data!=nullptr && vertex_counts.data!=nullptr,
"x/y/vertex_counts data can not be null");
CUDF_EXPECTS(x.size == y.size, "x/y must have the same size");
//future versions might allow x/y/vertex_counts have null_count>0, which might be useful for taking query results as inputs
CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0 && vertex_counts.null_count==0,
"this version does not support x/y/vertex_counts contains nulls");
CUDF_EXPECTS(x.size >= vertex_counts.size ,"one trajectory must have at least one point");
gdf_column dist =cudf::type_dispatcher(x.dtype, Hausdorff_functor(), x,y,vertex_counts);
return dist;
}//hausdorff_distance
}// namespace cuspatial
| e430244871e400bf4f4f8b89ad8c8b1f5b7604c4.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/utilities/legacy/type_dispatcher.hpp>
#include <utilities/legacy/cuda_utils.hpp>
#include <type_traits>
#include <thrust/device_vector.h>
#include <rmm/thrust_rmm_allocator.h>
#include <cudf/legacy/column.hpp>
#include <utility/utility.hpp>
#include <cuspatial/hausdorff.hpp>
namespace {
const unsigned int NUM_THREADS = 1024;
template <typename T>
__global__ void kernel_Hausdorff_Full(
int num_traj,
T *xx,
T *yy,
uint32_t *pos,
T *results
)
{
int bidx = blockIdx.y*gridDim.x+blockIdx.x;
if (bidx < num_traj*num_traj)
{
int seg_id_left = bidx/num_traj;
int seg_id_right =bidx%num_traj;
__shared__ T sdata[NUM_THREADS];
sdata[threadIdx.x] = -1;
__syncthreads();
int start_left = seg_id_left == 0 ? 0 : pos[seg_id_left-1];
int stop_left = pos[seg_id_left];
int start_right = seg_id_right == 0 ? 0 : pos[seg_id_right-1];
int stop_right = pos[seg_id_right];
T dist = 1e20;
int max_threads = 0;
{
max_threads = stop_left-start_left;
if (threadIdx.x < max_threads)
{
T my_xx = xx[start_left+threadIdx.x];
T my_yy = yy[start_left+threadIdx.x];
for (int i = start_right; i < stop_right; i++)
{
T other_xx = xx[i];
T other_yy = yy[i];
T new_dist = (my_xx-other_xx)*(my_xx-other_xx)
+ (my_yy-other_yy)*(my_yy-other_yy);
dist= min(dist, new_dist);//dist < new_dist ? dist : new_dist;
}
}
}
if (dist > 1e10)
dist = -1;
if(threadIdx.x < max_threads)
sdata[threadIdx.x] = dist;
__syncthreads();
//reduction
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
T tmp = sdata[threadIdx.x + offset];
T tmp2 = sdata[threadIdx.x];
sdata[threadIdx.x] = max(tmp2, tmp);
}
__syncthreads();
}
__syncthreads();
if (threadIdx.x == 0)
results[bidx] = (sdata[0]>=0)?sqrt(sdata[0]):1e10;
}
}
struct Hausdorff_functor {
template <typename T>
static constexpr bool is_supported()
{
return std::is_floating_point<T>::value;
}
template <typename T, std::enable_if_t< is_supported<T>() >* = nullptr>
gdf_column operator()(const gdf_column& x, const gdf_column& y,
const gdf_column& vertex_counts)
{
gdf_column d_matrix;
memset(&d_matrix,0,sizeof(gdf_column));
int num_set=vertex_counts.size;
int block_sz = num_set*num_set;
cudaStream_t stream{0};
auto exec_policy = rmm::exec_policy(stream)->on(stream);
T *temp_matrix{nullptr};
RMM_TRY( RMM_ALLOC(&temp_matrix, block_sz * sizeof(T), stream) );
uint32_t *vertex_positions{nullptr};
RMM_TRY( RMM_ALLOC((void**)&vertex_positions, sizeof(uint32_t)*num_set, stream) );
uint32_t *vertex_counts_ptr=static_cast<uint32_t*>(vertex_counts.data);
thrust::inclusive_scan(exec_policy,vertex_counts_ptr,vertex_counts_ptr+num_set,vertex_positions);
int block_x = block_sz, block_y = 1;
if (block_sz > 65535)
{
block_y = ceil((float)block_sz/65535.0);
block_x = 65535;
}
dim3 grid(block_x, block_y);
dim3 block(NUM_THREADS);
kernel_Hausdorff_Full<T> <<< grid,block >>> (num_set,
static_cast<T*>(x.data), static_cast<T*>(y.data),
vertex_positions,temp_matrix);
CUDA_TRY( cudaDeviceSynchronize() );
RMM_TRY( RMM_FREE(vertex_positions, stream) );
gdf_column_view_augmented(&d_matrix, temp_matrix, nullptr, block_sz,
x.dtype, 0,
gdf_dtype_extra_info{TIME_UNIT_NONE}, "hausdorff_matrix");
return d_matrix;
}
template <typename T, std::enable_if_t< !is_supported<T>() >* = nullptr>
gdf_column operator()(const gdf_column& x,const gdf_column& y,const gdf_column& vertex_counts)
{
CUDF_FAIL("Non-floating point operation is not supported");
}
};
} // namespace anonymous
/**
* @brief compute Hausdorff distances among all pairs of a set of trajectories
* see hausdorff.hpp
*/
namespace cuspatial {
gdf_column directed_hausdorff_distance(const gdf_column& x,const gdf_column& y,const gdf_column& vertex_counts)
{
CUDF_EXPECTS(x.data != nullptr &&y.data!=nullptr && vertex_counts.data!=nullptr,
"x/y/vertex_counts data can not be null");
CUDF_EXPECTS(x.size == y.size, "x/y must have the same size");
//future versions might allow x/y/vertex_counts have null_count>0, which might be useful for taking query results as inputs
CUDF_EXPECTS(x.null_count == 0 && y.null_count == 0 && vertex_counts.null_count==0,
"this version does not support x/y/vertex_counts contains nulls");
CUDF_EXPECTS(x.size >= vertex_counts.size ,"one trajectory must have at least one point");
gdf_column dist =cudf::type_dispatcher(x.dtype, Hausdorff_functor(), x,y,vertex_counts);
return dist;
}//hausdorff_distance
}// namespace cuspatial
|
51fa95dbee12b1fa2136bb476c4aa73bf1c7098a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialGridSamplerGaussian.cu"
#else
static inline void THNN_(SpatialGridSamplerGaussian_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *gradOutput) {
THCUNN_argCheck(state, THCTensor_(nDimension)(state, input) == 4, 2, input,
"4D input tensor expected but got: %s");
THCUNN_argCheck(state, THCTensor_(nDimension)(state, grid) == 4, 2, grid,
"4D grid tensor expected but got: %s");
int64_t nbatch = THCTensor_(size)(state, input, 0);
int64_t channels = THCTensor_(size)(state, input, 1);
int64_t iheight = THCTensor_(size)(state, input, 2);
int64_t iwidth = THCTensor_(size)(state, input, 3);
int64_t oheight = THCTensor_(size)(state, grid, 1);
int64_t owidth = THCTensor_(size)(state, grid, 2);
THCUNN_check_dim_size(state, grid, 4, 0, nbatch);
THCUNN_check_dim_size(state, grid, 4, 3, 2);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 4, 0, nbatch);
THCUNN_check_dim_size(state, gradOutput, 4, 1, channels);
THCUNN_check_dim_size(state, gradOutput, 4, 2, oheight);
THCUNN_check_dim_size(state, gradOutput, 4, 3, owidth);
}
}
TH_API void THNN_(SpatialGridSamplerGaussian_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *output,
int kernel_size,
float kernel_std,
int padding_mode) {
THCUNN_assertSameGPU(state, 3, input, grid, output);
THNN_(SpatialGridSamplerGaussian_shapeCheck)(state, input, grid, NULL);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state,grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
// resize output to the same shape as input
THCTensor_(resize4d)(state, output, N, C, H, W);
THCTensor_(zero)(state, output);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devOutput = toDeviceTensor<real, 4>(state, output);
int count = static_cast<int>(N*H*W);
hipLaunchKernelGGL(( SpatialGridSamplerGaussian_updateOutput_kernel)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
count, devInput, devGrid, devOutput, kernel_size, kernel_std, padding_mode);
THCudaCheck(hipGetLastError());
}
TH_API void THNN_(SpatialGridSamplerGaussian_updateGradInput)(
THCState *state,
THCTensor *input, THCTensor *gradInput,
THCTensor *grid, THCTensor *gradGrid,
THCTensor *gradOutput,
int kernel_size,
float kernel_std,
int padding_mode) {
THCUNN_assertSameGPU(state, 5, input, gradInput, grid, gradGrid, gradOutput);
THNN_(SpatialGridSamplerGaussian_shapeCheck)(state, input, grid, gradOutput);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state, grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
THCTensor_(resize4d)(state, gradInput, N, C, IH, IW);
THCTensor_(resize4d)(state, gradGrid, N, H, W, 2);
THCTensor_(zero)(state, gradInput);
THCTensor_(zero)(state, gradGrid);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGradInput = toDeviceTensor<real, 4>(state, gradInput);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devGradGrid = toDeviceTensor<real, 4>(state, gradGrid);
THCDeviceTensor<real, 4> devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
int count = static_cast<int>(N*H*W);
hipLaunchKernelGGL(( SpatialGridSamplerGaussian_updateGradInput_kernel)
, dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state),
count, devInput, devGradInput, devGrid, devGradGrid, devGradOutput,kernel_size, kernel_std, padding_mode);
THCudaCheck(hipGetLastError());
}
#endif
| 51fa95dbee12b1fa2136bb476c4aa73bf1c7098a.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialGridSamplerGaussian.cu"
#else
static inline void THNN_(SpatialGridSamplerGaussian_shapeCheck)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *gradOutput) {
THCUNN_argCheck(state, THCTensor_(nDimension)(state, input) == 4, 2, input,
"4D input tensor expected but got: %s");
THCUNN_argCheck(state, THCTensor_(nDimension)(state, grid) == 4, 2, grid,
"4D grid tensor expected but got: %s");
int64_t nbatch = THCTensor_(size)(state, input, 0);
int64_t channels = THCTensor_(size)(state, input, 1);
int64_t iheight = THCTensor_(size)(state, input, 2);
int64_t iwidth = THCTensor_(size)(state, input, 3);
int64_t oheight = THCTensor_(size)(state, grid, 1);
int64_t owidth = THCTensor_(size)(state, grid, 2);
THCUNN_check_dim_size(state, grid, 4, 0, nbatch);
THCUNN_check_dim_size(state, grid, 4, 3, 2);
if (gradOutput != NULL) {
THCUNN_check_dim_size(state, gradOutput, 4, 0, nbatch);
THCUNN_check_dim_size(state, gradOutput, 4, 1, channels);
THCUNN_check_dim_size(state, gradOutput, 4, 2, oheight);
THCUNN_check_dim_size(state, gradOutput, 4, 3, owidth);
}
}
TH_API void THNN_(SpatialGridSamplerGaussian_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *grid,
THCTensor *output,
int kernel_size,
float kernel_std,
int padding_mode) {
THCUNN_assertSameGPU(state, 3, input, grid, output);
THNN_(SpatialGridSamplerGaussian_shapeCheck)(state, input, grid, NULL);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state,grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
// resize output to the same shape as input
THCTensor_(resize4d)(state, output, N, C, H, W);
THCTensor_(zero)(state, output);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devOutput = toDeviceTensor<real, 4>(state, output);
int count = static_cast<int>(N*H*W);
SpatialGridSamplerGaussian_updateOutput_kernel
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
count, devInput, devGrid, devOutput, kernel_size, kernel_std, padding_mode);
THCudaCheck(cudaGetLastError());
}
TH_API void THNN_(SpatialGridSamplerGaussian_updateGradInput)(
THCState *state,
THCTensor *input, THCTensor *gradInput,
THCTensor *grid, THCTensor *gradGrid,
THCTensor *gradOutput,
int kernel_size,
float kernel_std,
int padding_mode) {
THCUNN_assertSameGPU(state, 5, input, gradInput, grid, gradGrid, gradOutput);
THNN_(SpatialGridSamplerGaussian_shapeCheck)(state, input, grid, gradOutput);
int64_t N = THCTensor_(size)(state, input, 0);
int64_t C = THCTensor_(size)(state, input, 1);
int64_t IH = THCTensor_(size)(state, input, 2);
int64_t IW = THCTensor_(size)(state, input, 3);
int64_t H = THCTensor_(size)(state, grid, 1);
int64_t W = THCTensor_(size)(state, grid, 2);
THCTensor_(resize4d)(state, gradInput, N, C, IH, IW);
THCTensor_(resize4d)(state, gradGrid, N, H, W, 2);
THCTensor_(zero)(state, gradInput);
THCTensor_(zero)(state, gradGrid);
THCDeviceTensor<real, 4> devInput = toDeviceTensor<real, 4>(state, input);
THCDeviceTensor<real, 4> devGradInput = toDeviceTensor<real, 4>(state, gradInput);
THCDeviceTensor<real, 4> devGrid = toDeviceTensor<real, 4>(state, grid);
THCDeviceTensor<real, 4> devGradGrid = toDeviceTensor<real, 4>(state, gradGrid);
THCDeviceTensor<real, 4> devGradOutput = toDeviceTensor<real, 4>(state, gradOutput);
int count = static_cast<int>(N*H*W);
SpatialGridSamplerGaussian_updateGradInput_kernel
<<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state)>>>(
count, devInput, devGradInput, devGrid, devGradGrid, devGradOutput,kernel_size, kernel_std, padding_mode);
THCudaCheck(cudaGetLastError());
}
#endif
|
34348c576961a3e135f2c947e394c704f8bcecd2.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <rocblas.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "tools/util/half.h"
#include "tools/util/host_tensor.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/gemm/volta884_gemm_traits.h"
#include "cutlass/gemm/gemm.h"
#include "tools/test/unit/gemm/gemm_testbed.h"
#include "tools/test/unit/gemm/run_gemm.h"
#if CUTLASS_ENABLE_TENSOR_CORE_MMA
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 530
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Contiguous - h884gemm
//
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_h884gemm_64x64x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
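////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Reading the instantiations in this file: the test name encodes the threadblock tile
// as MxNxK, while the first Shape<> lists the same tile as <K, N, M> (e.g. the
// 128x64x32 test below uses Shape<32, 64, 128>); the second Shape<> appears to be the
// warp-level tile, the three `half` parameters give the operand and accumulator/output
// scalar types, and the trailing 2 is presumably the pipeline stage count. The
// 520x264x136 problem size is deliberately not a multiple of any tile dimension, so
// these tests also exercise the partial-tile (boundary) code paths.
//
////////////////////////////////////////////////////////////////////////////////////////////////////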
TEST(Volta884_h884gemm_128x64x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_64x128x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x128x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_256x128x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x256x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_64x64x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x64x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_64x128x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x128x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_256x128x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x256x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 530
#endif // defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
| 34348c576961a3e135f2c947e394c704f8bcecd2.cu | /***************************************************************************************************
* Copyright (c) 2017-2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
#include <cublas_v2.h>
#include <cstring>
#include "cutlass_unit_test.h"
#include "tools/util/half.h"
#include "tools/util/host_tensor.h"
#include "tools/util/tensor_view_io.h"
#include "cutlass/gemm/volta884_gemm_traits.h"
#include "cutlass/gemm/gemm.h"
#include "tools/test/unit/gemm/gemm_testbed.h"
#include "tools/test/unit/gemm/run_gemm.h"
#if CUTLASS_ENABLE_TENSOR_CORE_MMA
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 530
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Contiguous - h884gemm
//
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
TEST(Volta884_h884gemm_64x64x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x64x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_64x128x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x128x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_256x128x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x256x32_nt, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kColumnMajor,
cutlass::MatrixLayout::kRowMajor,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_64x64x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x64x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 64, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_64x128x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 64>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x128x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_256x128x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 128, 256>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
TEST(Volta884_h884gemm_128x256x32_tn, 520x264x136) {
typedef cutlass::gemm::Volta884GemmTraits<
cutlass::MatrixLayout::kRowMajor,
cutlass::MatrixLayout::kColumnMajor,
cutlass::Shape<32, 256, 128>,
cutlass::Shape<32, 64, 64>,
half,
half,
half,
2
> GemmTraits;
run_gemm<GemmTraits>(520, 264, 136);
}
#endif // #if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 530
#endif // defined(CUTLASS_ENABLE_TENSOR_CORE_MMA)
|
06c7faa0de9da44ae455624f60d5724419c05aa3.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "convert_to_luv_gpu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *in_img = NULL;
hipMalloc(&in_img, XSIZE*YSIZE);
float *out_img = NULL;
hipMalloc(&out_img, XSIZE*YSIZE*sizeof(float)); // float buffer: the byte count needs sizeof(float) per element
int cols = YSIZE;
int rows = XSIZE;
bool use_rgb = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
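// The two loops above just round iXSIZE/iYSIZE up to the next multiple of the block
// dimensions -- equivalently iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX --
// so the grid computed below covers the whole matrix with whole blocks.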
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(convert_to_luv_gpu_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, in_img, out_img, cols, rows, use_rgb);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(convert_to_luv_gpu_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, in_img, out_img, cols, rows, use_rgb);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(convert_to_luv_gpu_kernel, dim3(gridBlock), dim3(threadBlock), 0, 0, in_img, out_img, cols, rows, use_rgb);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 06c7faa0de9da44ae455624f60d5724419c05aa3.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "convert_to_luv_gpu_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *in_img = NULL;
cudaMalloc(&in_img, XSIZE*YSIZE);
float *out_img = NULL;
cudaMalloc(&out_img, XSIZE*YSIZE*sizeof(float)); // float buffer: the byte count needs sizeof(float) per element
int cols = YSIZE;
int rows = XSIZE;
bool use_rgb = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
convert_to_luv_gpu_kernel<<<gridBlock,threadBlock>>>(in_img,out_img,cols,rows,use_rgb);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
convert_to_luv_gpu_kernel<<<gridBlock,threadBlock>>>(in_img,out_img,cols,rows,use_rgb);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
convert_to_luv_gpu_kernel<<<gridBlock,threadBlock>>>(in_img,out_img,cols,rows,use_rgb);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
f1e70b2f88686b88e77e1a45ca8fbfd11b3293b3.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <getopt.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "utils.h"
#define DEFAULT_ITERS 10
#define DEFAULT_MIN_MSG_SIZE 1
#define DEFAULT_MAX_MSG_SIZE 128 * 1024 * 1024
typedef enum { PUSH = 0, PULL = 1 } dir_t;
__global__ void test_kernel(void *data_d_local, long long int ncycles) {
long long int sclk = clock64();
long long int cyc = 0;
while (cyc < ncycles) {
cyc = clock64() - sclk;
}
*(long long int *)data_d_local = cyc;
}
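/*
 * test_kernel busy-waits for roughly `ncycles` GPU clock ticks using clock64()
 * and then writes the elapsed count to memory -- presumably so the spin loop
 * cannot be optimized away. It stands in for "real" GPU work ahead of each
 * put/get in the latency loops below.
 */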
int lat(void *data_d, void *data_d_local, int sizeBytes, int pe, int iter, dir_t dir,
hipStream_t strm, hipEvent_t sev, hipEvent_t eev, float *ms1, float *ms2, int ng, int nb,
long long int ncycles) {
int status = 0;
int peer = !pe;
if (dir == PUSH) {
CUDA_CHECK(hipEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
hipLaunchKernelGGL(( test_kernel), dim3(ng), dim3(nb), 0, strm, data_d_local, ncycles);
nvshmemx_putmem_on_stream((void *)data_d, (void *)data_d_local, sizeBytes, peer, strm);
}
CUDA_CHECK(hipEventRecord(eev, strm));
CUDA_CHECK(hipEventSynchronize(eev));
CUDA_CHECK(hipEventElapsedTime(ms1, sev, eev));
CUDA_CHECK(hipEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
hipLaunchKernelGGL(( test_kernel), dim3(ng), dim3(nb), 0, strm, data_d_local, ncycles);
CUDA_CHECK(hipStreamSynchronize(strm));
nvshmem_putmem((void *)data_d, (void *)data_d_local, sizeBytes, peer);
}
CUDA_CHECK(hipEventRecord(eev, strm));
CUDA_CHECK(hipEventSynchronize(eev));
CUDA_CHECK(hipEventElapsedTime(ms2, sev, eev));
} else {
CUDA_CHECK(hipEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
nvshmemx_getmem_on_stream((void *)data_d_local, (void *)data_d, sizeBytes, peer, strm);
hipLaunchKernelGGL(( test_kernel), dim3(ng), dim3(nb), 0, strm, data_d_local, ncycles);
}
CUDA_CHECK(hipEventRecord(eev, strm));
CUDA_CHECK(hipEventSynchronize(eev));
CUDA_CHECK(hipEventElapsedTime(ms1, sev, eev));
CUDA_CHECK(hipEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
nvshmem_getmem((void *)data_d_local, (void *)data_d, sizeBytes,
peer); // shmem_getmem is blocking, so nvshmem_quiet is not needed
hipLaunchKernelGGL(( test_kernel), dim3(ng), dim3(nb), 0, strm, data_d_local, ncycles);
}
CUDA_CHECK(hipEventRecord(eev, strm));
CUDA_CHECK(hipEventSynchronize(eev));
CUDA_CHECK(hipEventElapsedTime(ms2, sev, eev));
}
return status;
}
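/*
 * lat() times the same transfer two ways: ms1 covers the *_on_stream variants,
 * which are simply enqueued on the stream behind the delay kernel, while ms2
 * covers the host-blocking nvshmem_putmem/nvshmem_getmem calls (the push path
 * adds an explicit hipStreamSynchronize first). The gap between the two numbers
 * therefore shows the cost of synchronizing with the host on every iteration.
 */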
int main(int argc, char *argv[]) {
int status = 0;
int mype, npes;
char *data_d = NULL, *data_d_local = NULL;
uint64_t *size_array = NULL;
double *offs_latency_array = NULL;
double *ons_latency_array = NULL;
int num_entries;
int i;
dir_t dir = PUSH;
int iter = DEFAULT_ITERS;
int min_msg_size = DEFAULT_MIN_MSG_SIZE;
int max_msg_size = DEFAULT_MAX_MSG_SIZE;
int nb = 1, nt = 32;
    long long int ncycles = 1;
    hipStream_t strm = NULL; // declared (and null-initialized) before the gotos below so the finalize path can safely test it
init_wrapper(&argc, &argv);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
if (npes != 2) {
fprintf(stderr, "This test requires exactly two processes \n");
status = -1;
goto finalize;
}
while (1) {
int c;
c = getopt(argc, argv, "s:S:n:i:d:b:t:c:h");
if (c == -1) break;
switch (c) {
case 's':
min_msg_size = strtol(optarg, NULL, 0);
break;
case 'S':
max_msg_size = strtol(optarg, NULL, 0);
break;
case 'n':
iter = strtol(optarg, NULL, 0);
break;
case 'd':
dir = (dir_t)strtol(optarg, NULL, 0);
break;
case 'b':
nb = strtol(optarg, NULL, 0);
break;
case 't':
nt = strtol(optarg, NULL, 0);
break;
case 'c':
ncycles = strtol(optarg, NULL, 0);
break;
default:
case 'h':
printf(
"-n [Iterations] -S [Max message size] -s [Min message size] -i [Put/Get issue type : ON_STREAM(0) otherwise 1] -d [Direction of copy : PUSH(0) or PULL(1)] -b [# blocks] \
-t [# threads] -c [# cycles to wait in the the kernel]\n");
goto finalize;
}
}
num_entries = floor(log2((float)max_msg_size)) - floor(log2((float)min_msg_size)) + 1;
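    // One table entry per power-of-two size: with the defaults (1 B .. 128 MiB = 2^27 B)
    // this is 27 - 0 + 1 = 28 rows.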
size_array = (uint64_t *)calloc(sizeof(uint64_t), num_entries);
if (!size_array) {
status = -1;
goto finalize;
}
offs_latency_array = (double *)calloc(sizeof(double), num_entries);
if (!offs_latency_array) {
status = -1;
goto finalize;
}
ons_latency_array = (double *)calloc(sizeof(double), num_entries);
if (!ons_latency_array) {
status = -1;
goto finalize;
}
data_d = (char *)nvshmem_malloc(max_msg_size);
CUDA_CHECK(hipMemset(data_d, 0, max_msg_size));
data_d_local = (char *)nvshmem_malloc(max_msg_size);
    CUDA_CHECK(hipMemset(data_d_local, 0, max_msg_size)); // was zeroing data_d twice; data_d_local is the buffer allocated just above
    CUDA_CHECK(hipStreamCreateWithFlags(&strm, hipStreamNonBlocking));
CUDA_CHECK(hipDeviceSynchronize());
if (mype == 0) {
float ms1, ms2;
hipEvent_t sev, eev;
CUDA_CHECK(hipEventCreate(&sev));
CUDA_CHECK(hipEventCreate(&eev));
i = 0;
for (int size = min_msg_size; size <= max_msg_size; size *= 2) {
size_array[i] = size;
lat(data_d, data_d_local, size, mype, iter, dir, strm, sev, eev, &ms1, &ms2, nb, nt,
ncycles);
ons_latency_array[i] = ms1 / iter * 1000;
offs_latency_array[i] = ms2 / iter * 1000;
i++;
}
print_table("Stream_Latency", "with _on_stream", "size (Bytes)", "latency", "us", '-', size_array, ons_latency_array, i);
print_table("Stream_Latency", "without _on_stream", "size (Bytes)", "latency", "us", '-', size_array, offs_latency_array, i);
CUDA_CHECK(hipEventDestroy(sev));
CUDA_CHECK(hipEventDestroy(eev));
nvshmem_barrier_all();
} else {
nvshmem_barrier_all();
}
finalize:
    if (strm) CUDA_CHECK(hipStreamDestroy(strm)); // error paths can reach finalize before the stream is created
if (data_d) nvshmem_free(data_d);
if (size_array) free(size_array);
if (ons_latency_array) free(ons_latency_array);
if (offs_latency_array) free(offs_latency_array);
if (data_d_local) nvshmem_free(data_d_local);
finalize_wrapper();
return status;
}
| f1e70b2f88686b88e77e1a45ca8fbfd11b3293b3.cu | /*
* Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
*
* NVIDIA CORPORATION and its licensors retain all intellectual property
* and proprietary rights in and to this software, related documentation
* and any modifications thereto. Any use, reproduction, disclosure or
* distribution of this software and related documentation without an express
* license agreement from NVIDIA CORPORATION is strictly prohibited.
*
* See COPYRIGHT.txt for license information
*/
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <getopt.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "utils.h"
#define DEFAULT_ITERS 10
#define DEFAULT_MIN_MSG_SIZE 1
#define DEFAULT_MAX_MSG_SIZE 128 * 1024 * 1024
typedef enum { PUSH = 0, PULL = 1 } dir_t;
__global__ void test_kernel(void *data_d_local, long long int ncycles) {
long long int sclk = clock64();
long long int cyc = 0;
while (cyc < ncycles) {
cyc = clock64() - sclk;
}
*(long long int *)data_d_local = cyc;
}
int lat(void *data_d, void *data_d_local, int sizeBytes, int pe, int iter, dir_t dir,
cudaStream_t strm, cudaEvent_t sev, cudaEvent_t eev, float *ms1, float *ms2, int ng, int nb,
long long int ncycles) {
int status = 0;
int peer = !pe;
if (dir == PUSH) {
CUDA_CHECK(cudaEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
test_kernel<<<ng, nb, 0, strm>>>(data_d_local, ncycles);
nvshmemx_putmem_on_stream((void *)data_d, (void *)data_d_local, sizeBytes, peer, strm);
}
CUDA_CHECK(cudaEventRecord(eev, strm));
CUDA_CHECK(cudaEventSynchronize(eev));
CUDA_CHECK(cudaEventElapsedTime(ms1, sev, eev));
CUDA_CHECK(cudaEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
test_kernel<<<ng, nb, 0, strm>>>(data_d_local, ncycles);
CUDA_CHECK(cudaStreamSynchronize(strm));
nvshmem_putmem((void *)data_d, (void *)data_d_local, sizeBytes, peer);
}
CUDA_CHECK(cudaEventRecord(eev, strm));
CUDA_CHECK(cudaEventSynchronize(eev));
CUDA_CHECK(cudaEventElapsedTime(ms2, sev, eev));
} else {
CUDA_CHECK(cudaEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
nvshmemx_getmem_on_stream((void *)data_d_local, (void *)data_d, sizeBytes, peer, strm);
test_kernel<<<ng, nb, 0, strm>>>(data_d_local, ncycles);
}
CUDA_CHECK(cudaEventRecord(eev, strm));
CUDA_CHECK(cudaEventSynchronize(eev));
CUDA_CHECK(cudaEventElapsedTime(ms1, sev, eev));
CUDA_CHECK(cudaEventRecord(sev, strm));
for (int i = 0; i < iter; i++) {
nvshmem_getmem((void *)data_d_local, (void *)data_d, sizeBytes,
peer); // shmem_getmem is blocking, so nvshmem_quiet is not needed
test_kernel<<<ng, nb, 0, strm>>>(data_d_local, ncycles);
}
CUDA_CHECK(cudaEventRecord(eev, strm));
CUDA_CHECK(cudaEventSynchronize(eev));
CUDA_CHECK(cudaEventElapsedTime(ms2, sev, eev));
}
return status;
}
int main(int argc, char *argv[]) {
int status = 0;
int mype, npes;
char *data_d = NULL, *data_d_local = NULL;
uint64_t *size_array = NULL;
double *offs_latency_array = NULL;
double *ons_latency_array = NULL;
int num_entries;
int i;
dir_t dir = PUSH;
int iter = DEFAULT_ITERS;
int min_msg_size = DEFAULT_MIN_MSG_SIZE;
int max_msg_size = DEFAULT_MAX_MSG_SIZE;
int nb = 1, nt = 32;
    long long int ncycles = 1;
    cudaStream_t strm = NULL; // declared (and null-initialized) before the gotos below so the finalize path can safely test it
init_wrapper(&argc, &argv);
mype = nvshmem_my_pe();
npes = nvshmem_n_pes();
if (npes != 2) {
fprintf(stderr, "This test requires exactly two processes \n");
status = -1;
goto finalize;
}
while (1) {
int c;
c = getopt(argc, argv, "s:S:n:i:d:b:t:c:h");
if (c == -1) break;
switch (c) {
case 's':
min_msg_size = strtol(optarg, NULL, 0);
break;
case 'S':
max_msg_size = strtol(optarg, NULL, 0);
break;
case 'n':
iter = strtol(optarg, NULL, 0);
break;
case 'd':
dir = (dir_t)strtol(optarg, NULL, 0);
break;
case 'b':
nb = strtol(optarg, NULL, 0);
break;
case 't':
nt = strtol(optarg, NULL, 0);
break;
case 'c':
ncycles = strtol(optarg, NULL, 0);
break;
default:
case 'h':
printf(
"-n [Iterations] -S [Max message size] -s [Min message size] -i [Put/Get issue type : ON_STREAM(0) otherwise 1] -d [Direction of copy : PUSH(0) or PULL(1)] -b [# blocks] \
-t [# threads] -c [# cycles to wait in the the kernel]\n");
goto finalize;
}
}
num_entries = floor(log2((float)max_msg_size)) - floor(log2((float)min_msg_size)) + 1;
size_array = (uint64_t *)calloc(sizeof(uint64_t), num_entries);
if (!size_array) {
status = -1;
goto finalize;
}
offs_latency_array = (double *)calloc(sizeof(double), num_entries);
if (!offs_latency_array) {
status = -1;
goto finalize;
}
ons_latency_array = (double *)calloc(sizeof(double), num_entries);
if (!ons_latency_array) {
status = -1;
goto finalize;
}
data_d = (char *)nvshmem_malloc(max_msg_size);
CUDA_CHECK(cudaMemset(data_d, 0, max_msg_size));
data_d_local = (char *)nvshmem_malloc(max_msg_size);
    CUDA_CHECK(cudaMemset(data_d_local, 0, max_msg_size)); // was zeroing data_d twice; data_d_local is the buffer allocated just above
    CUDA_CHECK(cudaStreamCreateWithFlags(&strm, cudaStreamNonBlocking));
CUDA_CHECK(cudaDeviceSynchronize());
if (mype == 0) {
float ms1, ms2;
cudaEvent_t sev, eev;
CUDA_CHECK(cudaEventCreate(&sev));
CUDA_CHECK(cudaEventCreate(&eev));
i = 0;
for (int size = min_msg_size; size <= max_msg_size; size *= 2) {
size_array[i] = size;
lat(data_d, data_d_local, size, mype, iter, dir, strm, sev, eev, &ms1, &ms2, nb, nt,
ncycles);
ons_latency_array[i] = ms1 / iter * 1000;
offs_latency_array[i] = ms2 / iter * 1000;
i++;
}
print_table("Stream_Latency", "with _on_stream", "size (Bytes)", "latency", "us", '-', size_array, ons_latency_array, i);
print_table("Stream_Latency", "without _on_stream", "size (Bytes)", "latency", "us", '-', size_array, offs_latency_array, i);
CUDA_CHECK(cudaEventDestroy(sev));
CUDA_CHECK(cudaEventDestroy(eev));
nvshmem_barrier_all();
} else {
nvshmem_barrier_all();
}
finalize:
    if (strm) CUDA_CHECK(cudaStreamDestroy(strm)); // error paths can reach finalize before the stream is created
if (data_d) nvshmem_free(data_d);
if (size_array) free(size_array);
if (ons_latency_array) free(ons_latency_array);
if (offs_latency_array) free(offs_latency_array);
if (data_d_local) nvshmem_free(data_d_local);
finalize_wrapper();
return status;
}
|
c0e472f67f5bc3c4cb1c05ca5e9b1b2eaecaecb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// add elements of x and y, store into y
__global__
void add(int n, float *x, float *y){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
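// Grid-stride loop: each thread starts at its global index and advances by the
// total number of launched threads (blockDim.x * gridDim.x). With the launch in
// main() -- N = 1<<20, blockSize = 256, numBlocks = (N + 255) / 256 = 4096 --
// there are exactly N threads, so each thread handles a single element; the loop
// form stays correct even if fewer blocks were launched.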
int main(void){
int N = 1<<20; // 1 million elements
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
//initialize x and y arrays on the host
for(int i = 0; i < N; i++){
x[i] = 1.0f;
y[i] = 2.0f;
}
//run the kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
//wait for GPU to finish before accessing on host
hipDeviceSynchronize();
//check for errors (all values should be 3.0f)
float maxError = 0.0f;
for(int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
//free memory
hipFree(x);
hipFree(y);
return 0;
} | c0e472f67f5bc3c4cb1c05ca5e9b1b2eaecaecb5.cu | #include <iostream>
#include <math.h>
// add elements of x and y, store into y
__global__
void add(int n, float *x, float *y){
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void){
int N = 1<<20; // 1 million elements
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
//initialize x and y arrays on the host
for(int i = 0; i < N; i++){
x[i] = 1.0f;
y[i] = 2.0f;
}
//run the kernel on 1M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
//wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
//check for errors (all values should be 3.0f)
float maxError = 0.0f;
for(int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
//free memory
cudaFree(x);
cudaFree(y);
return 0;
} |
97343c37b2f59232677e0029bb0128b027646e9f.hip | // !!! This is a file automatically generated by hipify!!!
// includes, system
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <fstream>
#include <vector>
#include <iterator>
#include <stdlib.h>
// includes, project
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
// includes, kernels
#include "kernel_2D.cuh"
#include "kernel_3D.cuh"
#include "reduce_hip.cuh"
#include "config.h"
#include "statistic.h"
// includes, cmdline
//#include "cmdline.h"
//#include "cmdline_types.h"
static hipError_t err;
#define cuda(f, ...) \
\
if((err = hip##f(__VA_ARGS__)) != hipSuccess) { \
fprintf(stderr, #f "() %s\n", hipGetErrorString(err)); \
exit(-1); \
\
}
#define cudaNoSync(...) cuda(__VA_ARGS__)
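// Usage sketch: cuda(Malloc, (void **)&p, bytes) token-pastes into
// hipMalloc((void **)&p, bytes) and exits with the decoded error string if the
// call does not return hipSuccess (hipify leaves the macro name as "cuda").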
using namespace std;
template <typename F = float> struct obj { F a; };
class Simulation {
Statistic *errors;
obj<float> z;
data<> *d_idata, *h_idata, *h_odata_test;
vector<double> mean;
vector<float> mean_time;
std::size_t x, dimension, grid_size;
  unsigned int *seed; // per-site RNG seeds; unsigned int to match init_gen() and both sweep paths
int _genseed;
float n, gamma, lambda;
public:
Simulation(int x, int dimension, float n, float gamma, float lambda,
int _genseed)
: x(x), dimension(dimension), n(n), gamma(gamma), lambda(lambda),
_genseed(_genseed) {
errors = new Statistic(x, n);
grid_size = (dimension == 3) ? x * x * x : x * x;
h_odata_test = (data<> *)malloc(sizeof(data<>) * grid_size);
}
void extended_phi4_metropolis_2D(int& iteration) {
data<> *d_idata;
unsigned int *d_seed;
ofstream myfile("data");
ofstream myfile2("data2");
unsigned long mem_size = (sizeof(data<>) * grid_size);
h_idata = (data<> *)malloc(mem_size);
int seed_size = sizeof(unsigned int) * 4 * grid_size;
seed = (unsigned int *)malloc(seed_size);
init_gen(_genseed, h_idata, seed);
cuda(Malloc, (void **)&d_idata, mem_size);
cuda(Malloc, (void **)&d_seed, seed_size);
cuda(Memcpy, d_idata, h_idata, mem_size, hipMemcpyHostToDevice);
cuda(Memcpy, d_seed, seed, seed_size, hipMemcpyHostToDevice);
data<> *h_odata = (data<> *)malloc(mem_size);
dim3 grid(x / 32, x / 32);
dim3 threads(8, 4);
double timer = 0;
double rt1 = 0;
int limit = 0;
dim3 seedgrid(x / 32, x / 32);
dim3 seedthreads(8, 4);
for (int i = 0; i < iteration; i++) {
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 0, 0);
hipDeviceSynchronize();
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 1, 1);
hipDeviceSynchronize();
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 0, 1);
hipDeviceSynchronize();
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 1, 0);
hipDeviceSynchronize();
if (i % 1000 == 0) {
printf("Iteration %d\n", i);
}
if (i > 1000) {
rt1 += sqrt_mag(d_idata);
limit++;
if (i % 50 == 0 && i > 0) {
// printf(" Iteration %d Totality of M^2 :=
// %4.10f\n",i*100,(rt/limit)/(x*x));
mean.push_back((rt1 / limit) / grid_size);
myfile << (rt1 / limit) / grid_size << endl;
rt1 = 0.0f;
limit = 0;
}
hipDeviceSynchronize();
}
}
double analytical_value = errors->analytical_phi_2d(lambda);
cout << "Monte Carlo output " << GPU_correlation(d_idata, 4) << endl;
cout << "Analytica value: " << analytical_value << endl;
errors->autocorr(analytical_value, mean);
    // NOTE: `timer` is never started or updated above, so this always prints 0;
    // the 3D method below sets up hipEvent-based timing instead.
double time = 0 - timer;
cout << "Time: " << time << endl;
cuda(Memcpy, h_odata, d_idata, mem_size, hipMemcpyDeviceToHost);
// create_vtkfile(h_odata,x);
// cleanup memory
free(h_idata);
free(seed);
cuda(Free, d_idata);
cuda(Free, d_seed);
hipDeviceReset();
}
void extended_phi4_metropolis_3D(int &GLOBAL_SWEEPS) {
ofstream myfile("data");
ofstream myfile2("data2");
std::size_t mem_size = (sizeof(data<>) * grid_size);
h_idata = (data<> *)malloc(mem_size);
    int seed_size = sizeof(unsigned int) * 4 * (grid_size); // 4 unsigned-int seeds per site, matching the 2D path and init_gen()
    seed = (unsigned int *)malloc(seed_size);
init_gen(_genseed, h_idata, seed);
data<> *d_idata;
    unsigned int *d_seed;
cuda(Malloc, (void **)&d_idata, mem_size);
cuda(Malloc, (void **)&d_seed, seed_size);
cuda(Memcpy, d_idata, h_idata, mem_size, hipMemcpyHostToDevice);
cuda(Memcpy, d_seed, seed, seed_size, hipMemcpyHostToDevice);
data<> *h_odata = (data<> *)malloc(mem_size);
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float tempM;
std::size_t limitM;
float rt1 = 0;
std::size_t limit = 0;
dim3 grid(x / (2 * SHMEM_CUBE_SIZE), x / (2 * SHMEM_CUBE_SIZE),
x / (2 * SHMEM_CUBE_SIZE)); //( x 4,y 4)
dim3 threads(SHMEM_CUBE_SIZE / 2, SHMEM_CUBE_SIZE / 4, SHMEM_CUBE_SIZE / 2);
hipEventRecord(start);
float last_mean = 0;
#define Phi_3D_Kernel(stride_x, stride_y, stride_z) \
Phi_3D<float> << <grid, threads>>> \
(d_idata, d_seed, x, n, gamma, lambda, stride_x, stride_y, stride_z); \
hipDeviceSynchronize();
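// The eight launches below sweep the 2^3 = 8 sub-lattices of a 3D checkerboard,
// one per (even/odd, even/odd, even/odd) stride offset, presumably so that sites
// updated concurrently within a launch are never nearest neighbours -- the usual
// trick that makes parallel Metropolis updates well defined.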
for (int i = 0; i < GLOBAL_SWEEPS; ++i) {
Phi_3D_Kernel(0, 0, 0);
Phi_3D_Kernel(1, 1, 0);
Phi_3D_Kernel(0, 1, 0);
Phi_3D_Kernel(1, 0, 0);
Phi_3D_Kernel(0, 0, 1);
Phi_3D_Kernel(1, 1, 1);
Phi_3D_Kernel(0, 1, 1);
Phi_3D_Kernel(1, 0, 1);
/*
Phi_3D<float> << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 0, 0);
hipDeviceSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 1, 0);
hipDeviceSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 1, 0);
hipDeviceSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 0, 0);
hipDeviceSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 0, 1);
hipDeviceSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 1, 1);
hipDeviceSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 1, 1);
hipDeviceSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 0, 1);
hipDeviceSynchronize();
*/
if (i > 400) {
rt1 += sqrt_mag(d_idata);
limit++;
if (i > 400) {
mean.push_back((rt1 / limit) / grid_size);
myfile << (rt1 / limit) / (x * x * x) << endl;
last_mean = (rt1 / limit) / grid_size;
printf(" Iteration %d Totality of M^2 := %4.10f\n", i,
last_mean);
rt1 = 0.0f;
limit = 0;
}
hipDeviceSynchronize();
}
}
    hipEventRecord(stop);
    hipEventSynchronize(stop); // replaces a duplicated record; stop must complete before hipEventElapsedTime
float timer;
hipEventElapsedTime(&timer, start, stop);
tempM = rt1;
limitM = limit;
cuda(Memcpy, h_odata, d_idata, mem_size, hipMemcpyDeviceToHost);
double analytical_value = errors->analytical_phi_3d(lambda);
std::cout << endl << "Monte Carlo Mean: " << last_mean
<< std::endl;
std::cout << "Analytical Mean: " << analytical_value << endl;
errors->autocorr(analytical_value, mean);
free(h_odata);
free(h_idata);
free(seed);
cuda(Free, d_idata);
cuda(Free, d_seed);
hipDeviceReset();
}
void getGPU() {
int devId = -1;
hipDeviceProp_t pdev;
hipGetDevice(&devId);
hipGetDeviceProperties(&pdev, devId);
cout << "\t"
<< "GPU properties: " << endl;
cout << "\t"
<< "name: " << pdev.name << endl;
cout << "\t"
<< "capability: " << pdev.major << "." << pdev.minor << endl;
cout << "\t"
<< "clock: " << pdev.clockRate / 1000000.0 << " GHz" << endl;
cout << "\t"
<< "processors: " << pdev.multiProcessorCount << endl;
cout << "\t"
<< "cores: " << 32 * pdev.multiProcessorCount << endl;
cout << "\t"
<< "warp: " << pdev.warpSize << endl;
cout << "\t"
<< "max thr/blk: " << pdev.maxThreadsPerBlock << endl;
cout << "\t"
<< "max blk size: " << pdev.maxThreadsDim[0] << "x"
<< pdev.maxThreadsDim[1] << "x" << pdev.maxThreadsDim[2] << endl;
cout << "\t"
<< "max grd size: " << pdev.maxGridSize[0] << "x"
<< pdev.maxGridSize[1] << endl;
}
void create_vtkfile(data<> *V, int &size) {
ofstream myfile("phi.vtk");
myfile << "# vtk DataFile Version 2.0 " << endl;
myfile << "Cuda simulation of Phi4 Model" << endl;
myfile << "ASCII" << endl;
myfile << "DATASET STRUCTURED_GRID" << endl;
myfile << "DIMENSIONS " << size << " " << size << " " << 1 << endl;
myfile << "POINTS " << (int)(x * x) << " float" << endl;
for (std::size_t i = 0; i < x; i++) {
for (std::size_t j = 0; j < x; j++) {
myfile << (float)i << " " << (float)j << " 0.0" << endl;
}
}
// data points
myfile << "POINT_DATA " << (int)(x * x) << endl;
myfile << "SCALARS data float" << endl;
myfile << "LOOKUP_TABLE default" << endl;
for (std::size_t z = 0; z < (x * x); ++z) {
myfile << V[z].vector[0] << endl;
}
}
float sqrt_mag(data<> *d_idata) {
int threads = 64;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(grid_size / threads, 1, 1);
float *d_odata = NULL;
cuda(Malloc, (void **)&d_odata, grid_size / threads * sizeof(float));
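    // Shared-memory sizing below follows the NVIDIA reduction-sample convention:
    // blocks of <= 32 threads get 2 * threads elements so an unrolled warp phase
    // that indexes past `threads` stays in bounds.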
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
reduce2<data<>> << <dimGrid, dimBlock, smemSize>>>
(d_idata, d_odata, grid_size);
float *out = NULL;
out = (float *)malloc(grid_size / threads * sizeof(float));
cuda(Memcpy, out, d_odata, grid_size / threads * sizeof(float),
hipMemcpyDeviceToHost);
float gpu_result = 0;
for (std::size_t i = 0; i < (grid_size / threads); ++i) {
gpu_result += out[i];
}
free(out);
cuda(Free, d_odata);
return (gpu_result);
}
float sqrt_mag2(float *d_idata) {
int threads = 256;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(grid_size / threads, 1, 1);
float *d_odata = NULL;
cuda(Malloc, (void **)&d_odata, grid_size / threads * sizeof(float));
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
reduce6<float, 256, true> << <dimGrid, dimBlock, smemSize>>>
(d_idata, d_odata, x * x * x);
float *out = NULL;
out = (float *)malloc(grid_size / threads * sizeof(float));
cuda(Memcpy, out, d_odata, grid_size / threads * sizeof(float),
hipMemcpyDeviceToHost);
float gpu_result = 0;
for (std::size_t i = 0; i < (grid_size / threads); i++) {
gpu_result += out[i];
}
free(out);
cuda(Free, d_odata);
return (gpu_result);
}
void init_gen(int &genseed, data<> *h_idata, unsigned int *seed) {
    srand(static_cast<unsigned int>(genseed)); // was srand(*seed), which read just-malloc'd (indeterminate) memory
for (std::size_t i = 0; i < grid_size; ++i) {
      h_idata[i].vector[0] = static_cast<float>(rand()) / RAND_MAX; // integer division made this 0 almost always
}
for (std::size_t i = 0; i < 4 * grid_size; ++i) {
seed[i] = rand();
}
}
float GPU_correlation(data<> *d_idata, int R) {
int threads = 1024;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(grid_size / threads, 1, 1);
float *d_odata = NULL;
cuda(Malloc, (void **)&d_odata, grid_size / threads * sizeof(float));
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
correlation<data<>> << <dimGrid, dimBlock, smemSize>>>
(d_idata, d_odata, grid_size, x, R);
float *out = NULL;
out = (float *)malloc(grid_size / threads * sizeof(float));
cuda(Memcpy, out, d_odata, grid_size / threads * sizeof(float),
hipMemcpyDeviceToHost);
float gpu_result = 0;
for (std::size_t i = 0; i < (grid_size / threads); i++) {
gpu_result += out[i];
}
free(out);
cuda(Free, d_odata);
return (gpu_result) / (2 * grid_size);
}
~Simulation() {
delete errors;
free(h_odata_test);
}
};
int main(int argc, char **argv) {
int dimension = 3;
int x = 64;
float mi = 0.25f;
float gamma = 0.0f;
float lambda = 2.0f;
int iteration = 1000;
int seed = 12345;
Simulation *Phi = new Simulation(x, dimension, mi, gamma, lambda, seed);
if (dimension == 2) {
Phi->extended_phi4_metropolis_2D(iteration);
} else if (dimension == 3) {
Phi->extended_phi4_metropolis_3D(iteration);
}
delete Phi;
return 0;
}
| 97343c37b2f59232677e0029bb0128b027646e9f.cu | // includes, system
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include <fstream>
#include <vector>
#include <iterator>
#include <stdlib.h>
// includes, project
#include <cuda.h>
#include <curand_kernel.h>
// includes, kernels
#include "kernel_2D.cuh"
#include "kernel_3D.cuh"
#include "reduce.cuh"
#include "config.h"
#include "statistic.h"
// includes, cmdline
//#include "cmdline.h"
//#include "cmdline_types.h"
static cudaError err;
#define cuda(f, ...) \
\
if((err = cuda##f(__VA_ARGS__)) != cudaSuccess) { \
fprintf(stderr, #f "() %s\n", cudaGetErrorString(err)); \
exit(-1); \
\
}
#define cudaNoSync(...) cuda(__VA_ARGS__)
using namespace std;
template <typename F = float> struct obj { F a; };
class Simulation {
Statistic *errors;
obj<float> z;
data<> *d_idata, *h_idata, *h_odata_test;
vector<double> mean;
vector<float> mean_time;
std::size_t x, dimension, grid_size;
  unsigned int *seed; // per-site RNG seeds; unsigned int to match init_gen() and both sweep paths
int _genseed;
float n, gamma, lambda;
public:
Simulation(int x, int dimension, float n, float gamma, float lambda,
int _genseed)
: x(x), dimension(dimension), n(n), gamma(gamma), lambda(lambda),
_genseed(_genseed) {
errors = new Statistic(x, n);
grid_size = (dimension == 3) ? x * x * x : x * x;
h_odata_test = (data<> *)malloc(sizeof(data<>) * grid_size);
}
void extended_phi4_metropolis_2D(int& iteration) {
data<> *d_idata;
unsigned int *d_seed;
ofstream myfile("data");
ofstream myfile2("data2");
unsigned long mem_size = (sizeof(data<>) * grid_size);
h_idata = (data<> *)malloc(mem_size);
int seed_size = sizeof(unsigned int) * 4 * grid_size;
seed = (unsigned int *)malloc(seed_size);
init_gen(_genseed, h_idata, seed);
cuda(Malloc, (void **)&d_idata, mem_size);
cuda(Malloc, (void **)&d_seed, seed_size);
cuda(Memcpy, d_idata, h_idata, mem_size, cudaMemcpyHostToDevice);
cuda(Memcpy, d_seed, seed, seed_size, cudaMemcpyHostToDevice);
data<> *h_odata = (data<> *)malloc(mem_size);
dim3 grid(x / 32, x / 32);
dim3 threads(8, 4);
double timer = 0;
double rt1 = 0;
int limit = 0;
dim3 seedgrid(x / 32, x / 32);
dim3 seedthreads(8, 4);
for (int i = 0; i < iteration; i++) {
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 0, 0);
cudaThreadSynchronize();
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 1, 1);
cudaThreadSynchronize();
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 0, 1);
cudaThreadSynchronize();
Phi_2D << <grid, threads>>>
(d_idata, d_seed, iteration, x, n, gamma, lambda, 1, 0);
cudaThreadSynchronize();
if (i % 1000 == 0) {
printf("Iteration %d\n", i);
}
if (i > 1000) {
rt1 += sqrt_mag(d_idata);
limit++;
if (i % 50 == 0 && i > 0) {
// printf(" Iteration %d Totality of M^2 :=
// %4.10f\n",i*100,(rt/limit)/(x*x));
mean.push_back((rt1 / limit) / grid_size);
myfile << (rt1 / limit) / grid_size << endl;
rt1 = 0.0f;
limit = 0;
}
cudaThreadSynchronize();
}
}
double analytical_value = errors->analytical_phi_2d(lambda);
cout << "Monte Carlo output " << GPU_correlation(d_idata, 4) << endl;
cout << "Analytica value: " << analytical_value << endl;
errors->autocorr(analytical_value, mean);
// stop kernel and timer
double time = 0 - timer;
cout << "Time: " << time << endl;
cuda(Memcpy, h_odata, d_idata, mem_size, cudaMemcpyDeviceToHost);
// create_vtkfile(h_odata,x);
// cleanup memory
free(h_idata);
free(seed);
cuda(Free, d_idata);
cuda(Free, d_seed);
cudaThreadExit();
}
void extended_phi4_metropolis_3D(int &GLOBAL_SWEEPS) {
ofstream myfile("data");
ofstream myfile2("data2");
std::size_t mem_size = (sizeof(data<>) * grid_size);
h_idata = (data<> *)malloc(mem_size);
    int seed_size = sizeof(unsigned int) * 4 * (grid_size); // 4 unsigned-int seeds per site, matching the 2D path and init_gen()
    seed = (unsigned int *)malloc(seed_size);
init_gen(_genseed, h_idata, seed);
data<> *d_idata;
    unsigned int *d_seed;
cuda(Malloc, (void **)&d_idata, mem_size);
cuda(Malloc, (void **)&d_seed, seed_size);
cuda(Memcpy, d_idata, h_idata, mem_size, cudaMemcpyHostToDevice);
cuda(Memcpy, d_seed, seed, seed_size, cudaMemcpyHostToDevice);
data<> *h_odata = (data<> *)malloc(mem_size);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float tempM;
std::size_t limitM;
float rt1 = 0;
std::size_t limit = 0;
dim3 grid(x / (2 * SHMEM_CUBE_SIZE), x / (2 * SHMEM_CUBE_SIZE),
x / (2 * SHMEM_CUBE_SIZE)); //( x 4,y 4)
dim3 threads(SHMEM_CUBE_SIZE / 2, SHMEM_CUBE_SIZE / 4, SHMEM_CUBE_SIZE / 2);
cudaEventRecord(start);
float last_mean = 0;
#define Phi_3D_Kernel(stride_x, stride_y, stride_z) \
Phi_3D<float> << <grid, threads>>> \
(d_idata, d_seed, x, n, gamma, lambda, stride_x, stride_y, stride_z); \
cudaThreadSynchronize();
for (int i = 0; i < GLOBAL_SWEEPS; ++i) {
Phi_3D_Kernel(0, 0, 0);
Phi_3D_Kernel(1, 1, 0);
Phi_3D_Kernel(0, 1, 0);
Phi_3D_Kernel(1, 0, 0);
Phi_3D_Kernel(0, 0, 1);
Phi_3D_Kernel(1, 1, 1);
Phi_3D_Kernel(0, 1, 1);
Phi_3D_Kernel(1, 0, 1);
/*
Phi_3D<float> << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 0, 0);
cudaThreadSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 1, 0);
cudaThreadSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 1, 0);
cudaThreadSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 0, 0);
cudaThreadSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 0, 1);
cudaThreadSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 1, 1);
cudaThreadSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 0, 1, 1);
cudaThreadSynchronize();
Phi_3D << <grid, threads>>>
(d_idata, d_seed, x, n, gamma, lambda, 1, 0, 1);
cudaThreadSynchronize();
*/
if (i > 400) {
rt1 += sqrt_mag(d_idata);
limit++;
if (i > 400) {
mean.push_back((rt1 / limit) / grid_size);
myfile << (rt1 / limit) / (x * x * x) << endl;
last_mean = (rt1 / limit) / grid_size;
printf(" Iteration %d Totality of M^2 := %4.10f\n", i,
last_mean);
rt1 = 0.0f;
limit = 0;
}
cudaThreadSynchronize();
}
}
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // replaces a duplicated record; stop must complete before cudaEventElapsedTime
float timer;
cudaEventElapsedTime(&timer, start, stop);
tempM = rt1;
limitM = limit;
cuda(Memcpy, h_odata, d_idata, mem_size, cudaMemcpyDeviceToHost);
double analytical_value = errors->analytical_phi_3d(lambda);
std::cout << endl << "Monte Carlo Mean: " << last_mean
<< std::endl;
std::cout << "Analytical Mean: " << analytical_value << endl;
errors->autocorr(analytical_value, mean);
free(h_odata);
free(h_idata);
free(seed);
cuda(Free, d_idata);
cuda(Free, d_seed);
cudaThreadExit();
}
void getGPU() {
int devId = -1;
cudaDeviceProp pdev;
cudaGetDevice(&devId);
cudaGetDeviceProperties(&pdev, devId);
cout << "\t"
<< "GPU properties: " << endl;
cout << "\t"
<< "name: " << pdev.name << endl;
cout << "\t"
<< "capability: " << pdev.major << "." << pdev.minor << endl;
cout << "\t"
<< "clock: " << pdev.clockRate / 1000000.0 << " GHz" << endl;
cout << "\t"
<< "processors: " << pdev.multiProcessorCount << endl;
cout << "\t"
<< "cores: " << 32 * pdev.multiProcessorCount << endl;
cout << "\t"
<< "warp: " << pdev.warpSize << endl;
cout << "\t"
<< "max thr/blk: " << pdev.maxThreadsPerBlock << endl;
cout << "\t"
<< "max blk size: " << pdev.maxThreadsDim[0] << "x"
<< pdev.maxThreadsDim[1] << "x" << pdev.maxThreadsDim[2] << endl;
cout << "\t"
<< "max grd size: " << pdev.maxGridSize[0] << "x"
<< pdev.maxGridSize[1] << endl;
}
void create_vtkfile(data<> *V, int &size) {
ofstream myfile("phi.vtk");
myfile << "# vtk DataFile Version 2.0 " << endl;
myfile << "Cuda simulation of Phi4 Model" << endl;
myfile << "ASCII" << endl;
myfile << "DATASET STRUCTURED_GRID" << endl;
myfile << "DIMENSIONS " << size << " " << size << " " << 1 << endl;
myfile << "POINTS " << (int)(x * x) << " float" << endl;
for (std::size_t i = 0; i < x; i++) {
for (std::size_t j = 0; j < x; j++) {
myfile << (float)i << " " << (float)j << " 0.0" << endl;
}
}
// data points
myfile << "POINT_DATA " << (int)(x * x) << endl;
myfile << "SCALARS data float" << endl;
myfile << "LOOKUP_TABLE default" << endl;
for (std::size_t z = 0; z < (x * x); ++z) {
myfile << V[z].vector[0] << endl;
}
}
float sqrt_mag(data<> *d_idata) {
int threads = 64;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(grid_size / threads, 1, 1);
float *d_odata = NULL;
cuda(Malloc, (void **)&d_odata, grid_size / threads * sizeof(float));
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
reduce2<data<>> << <dimGrid, dimBlock, smemSize>>>
(d_idata, d_odata, grid_size);
float *out = NULL;
out = (float *)malloc(grid_size / threads * sizeof(float));
cuda(Memcpy, out, d_odata, grid_size / threads * sizeof(float),
cudaMemcpyDeviceToHost);
float gpu_result = 0;
for (std::size_t i = 0; i < (grid_size / threads); ++i) {
gpu_result += out[i];
}
free(out);
cuda(Free, d_odata);
return (gpu_result);
}
float sqrt_mag2(float *d_idata) {
int threads = 256;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(grid_size / threads, 1, 1);
float *d_odata = NULL;
cuda(Malloc, (void **)&d_odata, grid_size / threads * sizeof(float));
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
reduce6<float, 256, true> << <dimGrid, dimBlock, smemSize>>>
(d_idata, d_odata, x * x * x);
float *out = NULL;
out = (float *)malloc(grid_size / threads * sizeof(float));
cuda(Memcpy, out, d_odata, grid_size / threads * sizeof(float),
cudaMemcpyDeviceToHost);
float gpu_result = 0;
for (std::size_t i = 0; i < (grid_size / threads); i++) {
gpu_result += out[i];
}
free(out);
cuda(Free, d_odata);
return (gpu_result);
}
void init_gen(int &genseed, data<> *h_idata, unsigned int *seed) {
    srand(static_cast<unsigned int>(genseed)); // was srand(*seed), which read just-malloc'd (indeterminate) memory
for (std::size_t i = 0; i < grid_size; ++i) {
      h_idata[i].vector[0] = static_cast<float>(rand()) / RAND_MAX; // integer division made this 0 almost always
}
for (std::size_t i = 0; i < 4 * grid_size; ++i) {
seed[i] = rand();
}
}
float GPU_correlation(data<> *d_idata, int R) {
int threads = 1024;
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(grid_size / threads, 1, 1);
float *d_odata = NULL;
cuda(Malloc, (void **)&d_odata, grid_size / threads * sizeof(float));
int smemSize =
(threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
correlation<data<>> << <dimGrid, dimBlock, smemSize>>>
(d_idata, d_odata, grid_size, x, R);
float *out = NULL;
out = (float *)malloc(grid_size / threads * sizeof(float));
cuda(Memcpy, out, d_odata, grid_size / threads * sizeof(float),
cudaMemcpyDeviceToHost);
float gpu_result = 0;
for (std::size_t i = 0; i < (grid_size / threads); i++) {
gpu_result += out[i];
}
free(out);
cuda(Free, d_odata);
return (gpu_result) / (2 * grid_size);
}
~Simulation() {
delete errors;
free(h_odata_test);
}
};
int main(int argc, char **argv) {
int dimension = 3;
int x = 64;
float mi = 0.25f;
float gamma = 0.0f;
float lambda = 2.0f;
int iteration = 1000;
int seed = 12345;
Simulation *Phi = new Simulation(x, dimension, mi, gamma, lambda, seed);
if (dimension == 2) {
Phi->extended_phi4_metropolis_2D(iteration);
} else if (dimension == 3) {
Phi->extended_phi4_metropolis_3D(iteration);
}
delete Phi;
return 0;
}
|
d92e00650eb26a0fe0b68ac80af3bd958869ad52.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
#include "src/cuda/conv_bias/chanwise/kern.cuh"
#include "src/cuda/conv_bias/chanwise/kern_helper.cuh"
#include "src/cuda/conv_bias/chanwise/launch_config.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace conv_bias;
using namespace chanwise;
namespace {
enum DepthwiseConv2dDirection { DIRECTION_FORWARD, DIRECTION_BACKWARD };
// CUDA kernel to compute the depthwise convolution forward pass in NCHW format,
// tailored for small images up to 32x32. Stride and depth multiplier must be 1.
// Padding must be 'SAME', which allows to reuse the index computation. Only
// use this kernel if CanLaunchDepthwiseConv2dGPUSmall(args) returns true.
// Tiles of the input and filter tensors are loaded into shared memory before
// performing the convolution. Each thread handles two elements per iteration,
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180.
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
__global__ void
#if __CUDA_ARCH__ >= 750
__launch_bounds__(1024, 1)
#else
__launch_bounds__(1024, 2)
#endif
DepthwiseConv2dGPUKernelNCHWSmall(
const Param param, const T* input, const T* filter, T* output) {
// Holds block plus halo and filter data for blockDim.z depths.
extern __shared__ __align__(8) unsigned char shared_memory[];
static_assert(sizeof(T) <= 8, "Insufficient alignment detected");
T* const shared_data = reinterpret_cast<T*>(shared_memory);
const int num_batches = static_cast<int>(param.batch);
const int in_height = static_cast<int>(param.src_h);
const int in_width = static_cast<int>(param.src_w);
const int in_depth = static_cast<int>(param.src_chl);
const int filter_height =
kKnownFilterHeight < 0 ? static_cast<int>(param.flt_h) : kKnownFilterHeight;
const int filter_width =
kKnownFilterWidth < 0 ? static_cast<int>(param.flt_w) : kKnownFilterWidth;
const int pad_height = static_cast<int>(param.pad_h);
const int pad_width = static_cast<int>(param.pad_w);
// Fixed blockDim.z, tailored for maximum grid size for images of size
// 16x16. assert(blockDim.x == param.src_w); assert(blockDim.z ==
// kBlockDepth);
const int block_height = blockDim.y;
// These values are the same for all threads and could
// be precomputed on the CPU.
const int block_pixels = in_width * block_height;
const int block_size = block_pixels * kBlockDepth;
const int in_pixels = in_width * in_height;
const int in_increment = in_width - 1;
const int filter_pixels = filter_height * filter_width;
const int tile_width = in_width + filter_width - 1;
const int even_height = kKnownEvenHeight || (1 & ~in_height);
const int tile_height = in_height + filter_height - even_height;
const int tile_pixels = tile_width * tile_height;
const int tile_size = tile_pixels * kBlockDepth;
const int tile_offset = block_height * tile_width;
const int pad_offset = pad_height * tile_width + pad_width;
const int in_total_depth = in_depth * num_batches;
const int in_blocks = (in_total_depth + kBlockDepth - 1) / kBlockDepth;
const int thread_col = threadIdx.x;
const int thread_row = threadIdx.y;
const int thread_depth = threadIdx.z;
// Position in block.
const int thread_pix = thread_row * in_width + thread_col;
const int thread_idx = thread_depth * block_pixels + thread_pix;
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
shared_data[i] = T();
}
__syncthreads();
// Position in tensors.
const int tensor_idx = thread_depth * in_pixels + thread_pix;
// Position in (padded) shared memory.
const int data_pix = thread_row * tile_width + thread_col;
const int data_idx = thread_depth * tile_pixels + data_pix;
// Position in shared memory, offset by pad_height / pad_width.
const int tile_idx = data_idx + pad_offset;
// Filter is always in HWCK format, irrespective of the input/output format.
const int filter_pix = thread_idx / kBlockDepth;
const int filter_channel = thread_idx % kBlockDepth;
const int max_channel = in_total_depth - thread_depth;
const int filter_write_offset =
filter_pix < filter_pixels ? tile_size + thread_idx : 0;
const int filter_read_offset =
tile_size + thread_depth +
(kDirection == DIRECTION_FORWARD ? 0 : filter_pixels * kBlockDepth);
const bool skip_second =
!kKnownEvenHeight && thread_row + (in_height & 1) == block_height;
for (int b = blockIdx.x; b < in_blocks; b += gridDim.x) {
const int channel = b * kBlockDepth;
const int inout_offset = channel * in_pixels + tensor_idx;
const bool channel_in_range = channel < max_channel;
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
T* const tile_ptr = tile_idx + shared_data;
tile_ptr[0] = *in_ptr;
if (!skip_second) {
tile_ptr[tile_offset] = *(block_pixels + in_ptr);
}
}
if (filter_write_offset != 0) {
const int filter_offset =
(channel + filter_channel) % in_depth * filter_pixels + filter_pix;
shared_data[filter_write_offset] = *(filter_offset + filter);
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
if (channel_in_range) {
T2 sum = {0.0, 0.0};
int shared_offset = data_idx;
const T* filter_ptr = filter_read_offset + shared_data;
#pragma unroll
for (int r = 0; r < filter_height; ++r) {
#pragma unroll
for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
const T2 filter_value = {*filter_ptr, *filter_ptr};
const T* const tile_ptr = shared_offset + shared_data;
const T2 tile_value = {tile_ptr[0], tile_ptr[tile_offset]};
sum = fma2(filter_value, tile_value, sum);
++shared_offset;
if (kDirection == DIRECTION_FORWARD) {
filter_ptr += kBlockDepth;
}
}
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
out_ptr[0] = static_cast<T>(sum.x);
if (!skip_second) {
out_ptr[block_pixels] = static_cast<T>(sum.y);
}
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
hipStream_t stream) {
const int block_height = (param.src_h + 1) / 2;
dim3 block_dim;
int block_count;
void (*kernel)(const Param, const T*, const T*, T*);
block_dim = dim3(param.src_w, block_height, kBlockDepth);
block_count = DIVUP(param.batch * param.src_chl * param.chl_mul, kBlockDepth) *
kBlockDepth;
kernel = DepthwiseConv2dGPUKernelNCHWSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
kKnownEvenHeight>;
const int tile_width = param.src_w + param.flt_w - 1;
const int tile_height = block_height * 2 + param.flt_h - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = param.flt_h * param.flt_w;
const int shared_memory_size =
kBlockDepth * (tile_pixels + filter_pixels) * sizeof(T);
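    // Shared memory holds a padded input tile of kBlockDepth * tile_pixels
    // elements followed by the staged filter of kBlockDepth * filter_pixels
    // elements, matching tile_size and filter_read_offset in the kernel.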
const int num_outputs = param.out_h * param.out_w * block_count;
block_count = GetFixedBlockSize(
num_outputs, kernel, shared_memory_size,
block_dim.x * block_dim.y * block_dim.z);
hipLaunchKernelGGL(( kernel), dim3(block_count), dim3(block_dim), shared_memory_size, stream,
param, input, filter, output);
after_kernel_launch();
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
hipStream_t stream) {
if (param.src_h & 1) {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
false>(param, input, filter, output, stream);
} else {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
true>(param, input, filter, output, stream);
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
hipStream_t stream) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
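    // Example: a 32x32 image gives block_pixels = ((32 + 1) / 2) * 32 = 512,
    // so kBlockDepth = 2 and a block of 512 * 2 = 1024 threads; a 16x16 image
    // gives block_pixels = 8 * 16 = 128, so kBlockDepth = 8 (again 1024).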
const int block_pixels = (param.src_h + 1) / 2 * param.src_w;
if (block_pixels > 256) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 2>(
param, input, filter, output, stream);
} else if (block_pixels > 128) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 4>(
param, input, filter, output, stream);
} else {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 8>(
param, input, filter, output, stream);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace conv_bias {
namespace chanwise {
// =====================================fwd=====================================
#define LAUNCH(type, type2) \
if (param.flt_h == 3 && param.flt_w == 3) { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_FORWARD, 3, 3>( \
param, src, flt, dst, stream); \
} else { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_FORWARD, -1, -1>( \
param, src, flt, dst, stream); \
}
template <>
void run_fwd_small(
float* dst, const float* src, const float* flt, const Param& param,
hipStream_t stream) {
LAUNCH(float, float2);
}
#if TORCH_HIP_VERSION >= 9000
template <>
void run_fwd_small(
__half* dst, const __half* src, const __half* flt, const Param& param,
hipStream_t stream) {
LAUNCH(__half, __half2);
}
#endif
#undef LAUNCH
} // namespace chanwise
} // namespace conv_bias
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
| d92e00650eb26a0fe0b68ac80af3bd958869ad52.cu | #include "cuda.h"
#include "cuda_fp16.h"
#include "src/cuda/conv_bias/chanwise/kern.cuh"
#include "src/cuda/conv_bias/chanwise/kern_helper.cuh"
#include "src/cuda/conv_bias/chanwise/launch_config.cuh"
#include "src/cuda/fp16_help.cuh"
using namespace megdnn;
using namespace cuda;
using namespace conv_bias;
using namespace chanwise;
namespace {
enum DepthwiseConv2dDirection { DIRECTION_FORWARD, DIRECTION_BACKWARD };
// CUDA kernel to compute the depthwise convolution forward pass in NCHW format,
// tailored for small images up to 32x32. Stride and depth multiplier must be 1.
// Padding must be 'SAME', which allows reuse of the index computation. Only
// use this kernel if CanLaunchDepthwiseConv2dGPUSmall(args) returns true.
// Tiles of the input and filter tensors are loaded into shared memory before
// performing the convolution. Each thread handles two elements per iteration,
// one each in the lower and upper half of a tile.
// Backprop input direction is the same as forward direction with the filter
// rotated by 180°.
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
__global__ void
#if __CUDA_ARCH__ >= 750
__launch_bounds__(1024, 1)
#else
__launch_bounds__(1024, 2)
#endif
DepthwiseConv2dGPUKernelNCHWSmall(
const Param param, const T* input, const T* filter, T* output) {
// Holds block plus halo and filter data for blockDim.z depths.
extern __shared__ __align__(8) unsigned char shared_memory[];
static_assert(sizeof(T) <= 8, "Insufficient alignment detected");
T* const shared_data = reinterpret_cast<T*>(shared_memory);
const int num_batches = static_cast<int>(param.batch);
const int in_height = static_cast<int>(param.src_h);
const int in_width = static_cast<int>(param.src_w);
const int in_depth = static_cast<int>(param.src_chl);
const int filter_height =
kKnownFilterHeight < 0 ? static_cast<int>(param.flt_h) : kKnownFilterHeight;
const int filter_width =
kKnownFilterWidth < 0 ? static_cast<int>(param.flt_w) : kKnownFilterWidth;
const int pad_height = static_cast<int>(param.pad_h);
const int pad_width = static_cast<int>(param.pad_w);
    // Fixed blockDim.z, tailored for maximum grid size for images of size 16x16.
    // assert(blockDim.x == param.src_w);
    // assert(blockDim.z == kBlockDepth);
const int block_height = blockDim.y;
// These values are the same for all threads and could
// be precomputed on the CPU.
const int block_pixels = in_width * block_height;
const int block_size = block_pixels * kBlockDepth;
const int in_pixels = in_width * in_height;
const int in_increment = in_width - 1;
const int filter_pixels = filter_height * filter_width;
const int tile_width = in_width + filter_width - 1;
const int even_height = kKnownEvenHeight || (1 & ~in_height);
const int tile_height = in_height + filter_height - even_height;
const int tile_pixels = tile_width * tile_height;
const int tile_size = tile_pixels * kBlockDepth;
const int tile_offset = block_height * tile_width;
const int pad_offset = pad_height * tile_width + pad_width;
const int in_total_depth = in_depth * num_batches;
const int in_blocks = (in_total_depth + kBlockDepth - 1) / kBlockDepth;
const int thread_col = threadIdx.x;
const int thread_row = threadIdx.y;
const int thread_depth = threadIdx.z;
// Position in block.
const int thread_pix = thread_row * in_width + thread_col;
const int thread_idx = thread_depth * block_pixels + thread_pix;
// Initialize tile, in particular the padding.
for (int i = thread_idx; i < tile_size; i += block_size) {
shared_data[i] = T();
}
__syncthreads();
// Position in tensors.
const int tensor_idx = thread_depth * in_pixels + thread_pix;
// Position in (padded) shared memory.
const int data_pix = thread_row * tile_width + thread_col;
const int data_idx = thread_depth * tile_pixels + data_pix;
// Position in shared memory, offset by pad_height / pad_width.
const int tile_idx = data_idx + pad_offset;
// Filter is always in HWCK format, irrespective of the input/output format.
const int filter_pix = thread_idx / kBlockDepth;
const int filter_channel = thread_idx % kBlockDepth;
const int max_channel = in_total_depth - thread_depth;
const int filter_write_offset =
filter_pix < filter_pixels ? tile_size + thread_idx : 0;
const int filter_read_offset =
tile_size + thread_depth +
(kDirection == DIRECTION_FORWARD ? 0 : filter_pixels * kBlockDepth);
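    // Threads with filter_pix < filter_pixels each stage one filter value in
    // shared memory; filter_write_offset == 0 doubles as the "no write" flag
    // (thread_idx == 0 always writes and its offset, tile_size, is nonzero).
    // For DIRECTION_BACKWARD the read pointer starts one element past the end
    // and is pre-decremented in the inner loop, walking the filter in reverse
    // order, i.e. rotated by 180°.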
const bool skip_second =
!kKnownEvenHeight && thread_row + (in_height & 1) == block_height;
for (int b = blockIdx.x; b < in_blocks; b += gridDim.x) {
const int channel = b * kBlockDepth;
const int inout_offset = channel * in_pixels + tensor_idx;
const bool channel_in_range = channel < max_channel;
if (channel_in_range) {
const T* const in_ptr = inout_offset + input;
T* const tile_ptr = tile_idx + shared_data;
tile_ptr[0] = *in_ptr;
if (!skip_second) {
tile_ptr[tile_offset] = *(block_pixels + in_ptr);
}
}
if (filter_write_offset != 0) {
const int filter_offset =
(channel + filter_channel) % in_depth * filter_pixels + filter_pix;
shared_data[filter_write_offset] = *(filter_offset + filter);
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
if (channel_in_range) {
T2 sum = {0.0, 0.0};
int shared_offset = data_idx;
const T* filter_ptr = filter_read_offset + shared_data;
#pragma unroll
for (int r = 0; r < filter_height; ++r) {
#pragma unroll
for (int c = 0; c < filter_width; ++c) {
if (kDirection == DIRECTION_BACKWARD) {
filter_ptr -= kBlockDepth;
}
const T2 filter_value = {*filter_ptr, *filter_ptr};
const T* const tile_ptr = shared_offset + shared_data;
const T2 tile_value = {tile_ptr[0], tile_ptr[tile_offset]};
sum = fma2(filter_value, tile_value, sum);
++shared_offset;
if (kDirection == DIRECTION_FORWARD) {
filter_ptr += kBlockDepth;
}
}
shared_offset += in_increment;
}
T* const out_ptr = inout_offset + output;
out_ptr[0] = static_cast<T>(sum.x);
if (!skip_second) {
out_ptr[block_pixels] = static_cast<T>(sum.y);
}
}
// Note: the condition to reach this is uniform across the entire block.
__syncthreads();
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth,
bool kKnownEvenHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
cudaStream_t stream) {
const int block_height = (param.src_h + 1) / 2;
dim3 block_dim;
int block_count;
void (*kernel)(const Param, const T*, const T*, T*);
block_dim = dim3(param.src_w, block_height, kBlockDepth);
block_count = DIVUP(param.batch * param.src_chl * param.chl_mul, kBlockDepth) *
kBlockDepth;
kernel = DepthwiseConv2dGPUKernelNCHWSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
kKnownEvenHeight>;
const int tile_width = param.src_w + param.flt_w - 1;
const int tile_height = block_height * 2 + param.flt_h - 1;
const int tile_pixels = tile_height * tile_width;
const int filter_pixels = param.flt_h * param.flt_w;
const int shared_memory_size =
kBlockDepth * (tile_pixels + filter_pixels) * sizeof(T);
const int num_outputs = param.out_h * param.out_w * block_count;
block_count = GetFixedBlockSize(
num_outputs, kernel, shared_memory_size,
block_dim.x * block_dim.y * block_dim.z);
kernel<<<block_count, block_dim, shared_memory_size, stream>>>(
param, input, filter, output);
after_kernel_launch();
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight, int kBlockDepth>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
cudaStream_t stream) {
if (param.src_h & 1) {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
false>(param, input, filter, output, stream);
} else {
return LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, kBlockDepth,
true>(param, input, filter, output, stream);
}
}
template <
typename T, typename T2, DepthwiseConv2dDirection kDirection,
int kKnownFilterWidth, int kKnownFilterHeight>
void LaunchDepthwiseConv2dGPUSmall(
const Param& param, const T* input, const T* filter, T* output,
cudaStream_t stream) {
// Maximize (power of two) kBlockDepth while keeping a block within 1024
// threads (2 pixels per thread).
const int block_pixels = (param.src_h + 1) / 2 * param.src_w;
if (block_pixels > 256) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 2>(
param, input, filter, output, stream);
} else if (block_pixels > 128) {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 4>(
param, input, filter, output, stream);
} else {
LaunchDepthwiseConv2dGPUSmall<
T, T2, kDirection, kKnownFilterWidth, kKnownFilterHeight, 8>(
param, input, filter, output, stream);
}
}
} // anonymous namespace
namespace megdnn {
namespace cuda {
namespace conv_bias {
namespace chanwise {
// =====================================fwd=====================================
#define LAUNCH(type, type2) \
if (param.flt_h == 3 && param.flt_w == 3) { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_FORWARD, 3, 3>( \
param, src, flt, dst, stream); \
} else { \
LaunchDepthwiseConv2dGPUSmall< \
type, type2, DepthwiseConv2dDirection::DIRECTION_FORWARD, -1, -1>( \
param, src, flt, dst, stream); \
}
template <>
void run_fwd_small(
float* dst, const float* src, const float* flt, const Param& param,
cudaStream_t stream) {
LAUNCH(float, float2);
}
#if CUDA_VERSION >= 9000
template <>
void run_fwd_small(
__half* dst, const __half* src, const __half* flt, const Param& param,
cudaStream_t stream) {
LAUNCH(__half, __half2);
}
#endif
#undef LAUNCH
} // namespace chanwise
} // namespace conv_bias
} // namespace cuda
} // namespace megdnn
// vim: syntax=cuda.doxygen
|
483003e97fec9f48363a31daacb01b13c799c53e.hip | // !!! This is a file automatically generated by hipify!!!
/**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/dgemv_offset.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include "gemv_offset_core.cuh"
#if(TARGET_SM >= 30)
#define dgemvn_offset_bs (32)
#define dgemvn_offset_ty (8)
#define dgemvn_offset_by (4)
#define dgemvt_offset_bs (32)
#define dgemvt_offset_ty (8)
#define dgemvt_offset_by (4)
#else
#define dgemvn_offset_bs (64)
#define dgemvn_offset_ty (8)
#define dgemvn_offset_by (2)
#define dgemvt_offset_bs (64)
#define dgemvt_offset_ty (8)
#define dgemvt_offset_by (2)
#endif
extern "C"
int kblas_dscal_async(int n, double alpha, double *x, int incx, hipStream_t stream);
int kblas_dgemv_offset_driver( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy,
int offset_r, int offset_c,
hipStream_t stream = 0)
{
if(trans == 'n' || trans == 'N')
{
//**** Config parameters
const int thread_x = dgemvn_offset_bs;
const int thread_y = dgemvn_offset_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_n = dgemvn_offset_by;
//*************************
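        // With the SM >= 3.0 settings above (thread_x = 32, thread_y = 8), a
        // block has 32 * 8 = 256 threads and
        // elements_per_thread = 32 / (2 * 8) = 2.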
/** offset necessary calculation **/
int offset_r_ = offset_r % dgemvn_offset_bs;
int offset_c_ = offset_c % dgemvn_offset_bs;
int total_blocks_skipped_r = offset_r / dgemvn_offset_bs;
int total_blocks_skipped_c = offset_c / dgemvn_offset_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvn_offset_bs * lda;
dA += my_skipped_blocks_r * dgemvn_offset_bs;
dX += my_skipped_blocks_c * dgemvn_offset_bs * incx;
dY += my_skipped_blocks_r * dgemvn_offset_bs * incy;
rows -= my_skipped_blocks_r * dgemvn_offset_bs;
cols -= my_skipped_blocks_c * dgemvn_offset_bs;
/** end offset necessary calculation **/
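        // Example with dgemvn_offset_bs = 32: offset_r = 70 splits into
        // total_blocks_skipped_r = 2 whole 32-row blocks (absorbed by advancing
        // the pointers above) plus the in-block remainder offset_r_ = 6
        // (handled inside the kernels); the skipped column blocks are
        // additionally distributed round-robin over the ngpus devices.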
int nstripes = (cols/dgemvn_offset_bs) + ((cols%dgemvn_offset_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)hipblasSscal(rows-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_dscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream);
int cols_ = dgemvn_offset_bs * ( (cols/dgemvn_offset_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvn_offset_bs)%ngpus) cols_ += dgemvn_offset_bs;
if(new_gpu_gid == (cols/dgemvn_offset_bs)%ngpus) cols_ += cols%dgemvn_offset_bs;
int mod_r = rows % dgemvn_offset_bs;
int mod_c = cols_ % dgemvn_offset_bs;
if(mod_r == 0 && mod_c == 0)
{
// special case
int blocks = rows/dgemvn_offset_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvn_special_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread>)
, dim3(dimGrid), dim3(dimBlock), 0, stream,
rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_r_, offset_c_);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/dgemvn_offset_bs) + (mod_r != 0);
if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
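            // With elements_per_thread = 2 (the SM >= 3.0 configuration) only
            // cases 0 and 1 can occur; the higher cases keep the switch valid
            // for other choices of the configuration parameters.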
case 0:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 1:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 2:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 3:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 4:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 5:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 6:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 7:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 8:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 9:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 10:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 11:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 12:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 13:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 14:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 15:hipLaunchKernelGGL(( gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//**** Config parameters
const int thread_x = dgemvt_offset_bs;
const int thread_y = dgemvt_offset_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_t = dgemvt_offset_by;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % dgemvt_offset_bs;
int offset_c_ = offset_c % dgemvt_offset_bs;
int total_blocks_skipped_r = offset_r / dgemvt_offset_bs;
int total_blocks_skipped_c = offset_c / dgemvt_offset_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
//if(new_gpu_gid != 3){return 0;}
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvt_offset_bs * lda;
dA += my_skipped_blocks_r * dgemvt_offset_bs;
dX += my_skipped_blocks_r * dgemvt_offset_bs * incx;
dY += my_skipped_blocks_c * dgemvt_offset_bs * incy;
rows -= my_skipped_blocks_r * dgemvt_offset_bs;
cols -= my_skipped_blocks_c * dgemvt_offset_bs;
/** end offset necessary calculation **/
int nstripes = (cols/dgemvt_offset_bs) + ((cols%dgemvt_offset_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)hipblasSscal(cols-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_dscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream);
int cols_ = dgemvt_offset_bs * ( (cols/dgemvt_offset_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvt_offset_bs)%ngpus) cols_ += dgemvt_offset_bs;
if(new_gpu_gid == (cols/dgemvt_offset_bs)%ngpus) cols_ += cols%dgemvt_offset_bs;
int mod_r = rows % dgemvt_offset_bs;
int mod_c = cols_ % dgemvt_offset_bs;
if(mod_r == 0 && mod_c == 0)
{
int blocks = cols_/dgemvt_offset_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
hipLaunchKernelGGL(( gemvt_special_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_r_, offset_c_, conj);
}
else
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/dgemvt_offset_bs + (mod_c != 0);
int gpu_last = (nstripes+ngpus-1)%ngpus;
if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 0>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 1:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 1>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 2:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 2>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 3:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 3>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 4:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 4>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 5:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 5>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 6:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 6>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 7:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 7>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 8:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 8>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 9:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 9>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 10:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 10>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 11:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 11>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 12:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 12>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 13:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 13>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 14:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 14>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 15:hipLaunchKernelGGL(( gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 15>), dim3(dimGrid), dim3(dimBlock), 0, stream, rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("DGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/***********************************************************************************/
extern "C"
int kblas_dgemv_offset( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy,
int offset_r, int offset_c)
{
return kblas_dgemv_offset_driver( trans, rows, cols,
alpha, dA, lda,
dX, incx,
beta, dY, incy,
offset_r, offset_c);
}
/*************************************************************************************/
extern "C"
int kblas_dgemv_offset_async( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy,
int offset_r, int offset_c,
hipStream_t stream)
{
return kblas_dgemv_offset_driver( trans, rows, cols,
alpha, dA, lda,
dX, incx,
beta, dY, incy, offset_r, offset_c,
stream);
}
/*************************************************************************************/
| 483003e97fec9f48363a31daacb01b13c799c53e.cu | /**
* @copyright (c) 2012- King Abdullah University of Science and
* Technology (KAUST). All rights reserved.
**/
/**
* @file src/blas_l2/dgemv_offset.cu
* KBLAS is a high performance CUDA library for subset of BLAS
* and LAPACK routines optimized for NVIDIA GPUs.
* KBLAS is provided by KAUST.
*
* @version 3.0.0
* @author Ahmad Abdelfattah
* @date 2018-11-14
**/
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cublas.h>
#include "gemv_offset_core.cuh"
#if(TARGET_SM >= 30)
#define dgemvn_offset_bs (32)
#define dgemvn_offset_ty (8)
#define dgemvn_offset_by (4)
#define dgemvt_offset_bs (32)
#define dgemvt_offset_ty (8)
#define dgemvt_offset_by (4)
#else
#define dgemvn_offset_bs (64)
#define dgemvn_offset_ty (8)
#define dgemvn_offset_by (2)
#define dgemvt_offset_bs (64)
#define dgemvt_offset_ty (8)
#define dgemvt_offset_by (2)
#endif
extern "C"
int kblas_dscal_async(int n, double alpha, double *x, int incx, cudaStream_t stream);
int kblas_dgemv_offset_driver( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy,
int offset_r, int offset_c,
cudaStream_t stream = 0)
{
if(trans == 'n' || trans == 'N')
{
//**** Config parameters
const int thread_x = dgemvn_offset_bs;
const int thread_y = dgemvn_offset_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_n = dgemvn_offset_by;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % dgemvn_offset_bs;
int offset_c_ = offset_c % dgemvn_offset_bs;
int total_blocks_skipped_r = offset_r / dgemvn_offset_bs;
int total_blocks_skipped_c = offset_c / dgemvn_offset_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvn_offset_bs * lda;
dA += my_skipped_blocks_r * dgemvn_offset_bs;
dX += my_skipped_blocks_c * dgemvn_offset_bs * incx;
dY += my_skipped_blocks_r * dgemvn_offset_bs * incy;
rows -= my_skipped_blocks_r * dgemvn_offset_bs;
cols -= my_skipped_blocks_c * dgemvn_offset_bs;
/** end offset necessary calculation **/
int nstripes = (cols/dgemvn_offset_bs) + ((cols%dgemvn_offset_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)cublasSscal(rows-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_dscal_async(rows-offset_r_, beta, dY+(offset_r_*incy), incy, stream);
int cols_ = dgemvn_offset_bs * ( (cols/dgemvn_offset_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvn_offset_bs)%ngpus) cols_ += dgemvn_offset_bs;
if(new_gpu_gid == (cols/dgemvn_offset_bs)%ngpus) cols_ += cols%dgemvn_offset_bs;
int mod_r = rows % dgemvn_offset_bs;
int mod_c = cols_ % dgemvn_offset_bs;
if(mod_r == 0 && mod_c == 0)
{
// special case
int blocks = rows/dgemvn_offset_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
gemvn_special_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread>
<<<dimGrid, dimBlock, 0, stream>>>
(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_r_, offset_c_);
}
else
{
// generic case for columns only
const int irregular_cols = mod_c % elements_per_thread;
int blocks = (rows/dgemvn_offset_bs) + (mod_r != 0);
if(mod_r == 0)blocks += 1; // dummy thread block, will return immediately if mod_r == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_n);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 1: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 2: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 3: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 4: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 5: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 6: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 7: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 8: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 9: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 10: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 11: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 12: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 13: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 14: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
case 15: gemvn_generic_offset<double, dgemvn_offset_bs, dgemvn_offset_bs, dgemvn_offset_ty, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_); break;
default: printf("DGEMV-N error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
} // end of non-transpose case
else if (trans == 't' || trans == 'T' || trans == 'c' || trans == 'C')
{
int conj;
if(trans == 'c' || trans == 'C') conj = 1;
else conj = 0;
//**** Config parameters
const int thread_x = dgemvt_offset_bs;
const int thread_y = dgemvt_offset_ty;
const int elements_per_thread = thread_x/(2*thread_y);
const int grid_y_t = dgemvt_offset_by;
//*************************
/** offset necessary calculation **/
int offset_r_ = offset_r % dgemvt_offset_bs;
int offset_c_ = offset_c % dgemvt_offset_bs;
int total_blocks_skipped_r = offset_r / dgemvt_offset_bs;
int total_blocks_skipped_c = offset_c / dgemvt_offset_bs;
int my_skipped_blocks_r = total_blocks_skipped_r;
int my_skipped_blocks_c = total_blocks_skipped_c/ngpus;
if(gpu_gid < (total_blocks_skipped_c%ngpus)) my_skipped_blocks_c += 1;
int ref_gpu = total_blocks_skipped_c%ngpus;
int new_gpu_gid = (gpu_gid - ref_gpu + ngpus) % ngpus;
//if(new_gpu_gid != 3){return 0;}
// Advance pointers accordingly
dA += my_skipped_blocks_c * dgemvt_offset_bs * lda;
dA += my_skipped_blocks_r * dgemvt_offset_bs;
dX += my_skipped_blocks_r * dgemvt_offset_bs * incx;
dY += my_skipped_blocks_c * dgemvt_offset_bs * incy;
rows -= my_skipped_blocks_r * dgemvt_offset_bs;
cols -= my_skipped_blocks_c * dgemvt_offset_bs;
/** end offset necessary calculation **/
int nstripes = (cols/dgemvt_offset_bs) + ((cols%dgemvt_offset_bs) != 0);
// scaling with beta
//if(gpu_gid == 0)cublasSscal(cols-offset_, beta, dY+(offset_*incy), incy);
if(gpu_gid == 0)kblas_dscal_async(cols-offset_c_, beta, dY+(offset_c_*incy), incy, stream);
int cols_ = dgemvt_offset_bs * ( (cols/dgemvt_offset_bs)/ngpus );
if(new_gpu_gid < (cols/dgemvt_offset_bs)%ngpus) cols_ += dgemvt_offset_bs;
if(new_gpu_gid == (cols/dgemvt_offset_bs)%ngpus) cols_ += cols%dgemvt_offset_bs;
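        // Column blocks are dealt round-robin across GPUs: each GPU gets
        // floor((cols / bs) / ngpus) full blocks, GPUs with an index below the
        // remainder get one extra block, and the GPU whose index equals that
        // remainder also takes the partial tail. E.g. cols = 100, bs = 32,
        // ngpus = 2: GPU 0 gets 32 + 32 = 64 columns and GPU 1 gets 32 + 4 = 36.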
int mod_r = rows % dgemvt_offset_bs;
int mod_c = cols_ % dgemvt_offset_bs;
if(mod_r == 0 && mod_c == 0)
{
int blocks = cols_/dgemvt_offset_bs;
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
gemvt_special_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, nstripes, offset_r_, offset_c_, conj);
}
else
{
const int irregular_cols = mod_c % elements_per_thread;
int blocks = cols_/dgemvt_offset_bs + (mod_c != 0);
int gpu_last = (nstripes+ngpus-1)%ngpus;
if(mod_c == 0 && new_gpu_gid == gpu_last) blocks += 1; // dummy thread block, will return if mod_c == 0
dim3 dimBlock(thread_x, thread_y);
dim3 dimGrid(blocks, grid_y_t);
if(blocks == 0) return 0;
switch(irregular_cols)
{
/**
* The kernel for irregular dimensions has an extra template parameter.
* This parameter must be among the values listed in the switch-case statement below.
* The possible values are in the range 0 - (elements_per_thread-1)
* Make sure these values are updated whenever you change the configuration parameters.
**/
case 0: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 0><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 1: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 1><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 2: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 2><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 3: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 3><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 4: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 4><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 5: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 5><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 6: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 6><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 7: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 7><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 8: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 8><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 9: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 9><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 10: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 10><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 11: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 11><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 12: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 12><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 13: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 13><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 14: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 14><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
case 15: gemvt_generic_offset<double, dgemvt_offset_bs, thread_x, thread_y, elements_per_thread, 15><<<dimGrid, dimBlock, 0, stream>>>(rows, cols_, alpha, dA, lda, dX, incx, beta, dY, incy, mod_r, mod_c, nstripes, offset_r_, offset_c_, conj); break;
default: printf("DGEMV-T error: improper template parameter. Please read the inline documentation for this function. \n"); return -1;
}
}
}
else
{
printf("DGEMV error: Unrecognized transpose mode %c \n", trans);
return -1;
}
return 0;
}
/***********************************************************************************/
extern "C"
int kblas_dgemv_offset( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy,
int offset_r, int offset_c)
{
return kblas_dgemv_offset_driver( trans, rows, cols,
alpha, dA, lda,
dX, incx,
beta, dY, incy,
offset_r, offset_c);
}
/*************************************************************************************/
extern "C"
int kblas_dgemv_offset_async( char trans, int rows, int cols,
double alpha, double *dA, int lda,
double *dX, int incx,
double beta, double *dY, int incy,
int offset_r, int offset_c,
cudaStream_t stream)
{
return kblas_dgemv_offset_driver( trans, rows, cols,
alpha, dA, lda,
dX, incx,
beta, dY, incy, offset_r, offset_c,
stream);
}
/*************************************************************************************/
|
9ac2c4b5f73ffdb765e21af8d4bd731295553b0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "SceNodes.h"
__constant__ double sceInterPara[5];
__constant__ double sceIntraPara[4];
__constant__ double sceInterDiffPara[5];
__constant__ double sceProfilePara[7];
__constant__ double sceECMPara[5];
__constant__ double sceDiffPara[5];
double sceInterParaCPU[5];
double sceIntraParaCPU[4];
double sceInterDiffParaCPU[5];
double sceProfileParaCPU[7];
double sceECMParaCPU[5];
double sceDiffParaCPU[5];
__constant__ uint ProfilebeginPos;
__constant__ uint ECMbeginPos;
__constant__ uint cellNodeBeginPos;
__constant__ uint nodeCountPerECM;
__constant__ uint nodeCountPerCell;
// This template method expands an input sequence by
// replicating each element a variable number of times. For example,
//
// expand([2,2,2],[A,B,C]) -> [A,A,B,B,C,C]
// expand([3,0,1],[A,B,C]) -> [A,A,A,C]
// expand([1,3,2],[A,B,C]) -> [A,B,B,B,C,C]
//
// The element counts are assumed to be non-negative integers
template<typename InputIterator1, typename InputIterator2,
typename OutputIterator>
OutputIterator expand(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator output) {
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0),
thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(),
output_indices.begin(), thrust::maximum<difference_type>());
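	// Worked example for expand([2,0,3], [A,B,C]):
	//   output_offsets = exclusive_scan([2,0,3]) = [0,2,2]
	//   scatter_if writes i at output_offsets[i] for nonzero counts, giving
	//   output_indices = [0,0,2,0,0]; the max-scan fills the holes to
	//   [0,0,2,2,2], and gathering from first2 yields [A,A,C,C,C].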
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output;
thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2,
output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
SceNodes::SceNodes(uint totalBdryNodeCount, uint maxProfileNodeCount,
uint maxTotalECMCount, uint maxNodeInECM, uint maxTotalCellCount,
uint maxNodeInCell) {
std::cout << "start creating SceNodes object" << std::endl;
maxCellCount = maxTotalCellCount;
maxNodeOfOneCell = maxNodeInCell;
maxNodePerECM = maxNodeInECM;
maxECMCount = maxTotalECMCount;
this->maxProfileNodeCount = maxProfileNodeCount;
currentActiveProfileNodeCount = 0;
BdryNodeCount = totalBdryNodeCount;
currentActiveCellCount = 0;
maxTotalECMNodeCount = maxECMCount * maxNodePerECM;
currentActiveECM = 0;
// will need to change this value after we have more detail about ECM
maxTotalCellNodeCount = maxTotalCellCount * maxNodeOfOneCell;
//cellRanks.resize(maxTotalNodeCount);
//nodeRanks.resize(maxTotalNodeCount);
//std::cout << "before resizing vectors" << std::endl;
uint maxTotalNodeCount = totalBdryNodeCount + maxProfileNodeCount
+ maxTotalECMNodeCount + maxTotalCellNodeCount;
//std::cout << "maxTotalNodeCount = " << maxTotalNodeCount << std::endl;
//thrust::host_vector<bool> nodeIsActiveHost
nodeLocX.resize(maxTotalNodeCount);
nodeLocY.resize(maxTotalNodeCount);
nodeLocZ.resize(maxTotalNodeCount);
nodeVelX.resize(maxTotalNodeCount);
nodeVelY.resize(maxTotalNodeCount);
nodeVelZ.resize(maxTotalNodeCount);
nodeCellType.resize(maxTotalNodeCount);
nodeCellRank.resize(maxTotalNodeCount);
nodeIsActive.resize(maxTotalNodeCount);
startPosProfile = totalBdryNodeCount;
startPosECM = startPosProfile + maxProfileNodeCount;
startPosCells = startPosECM + maxTotalECMNodeCount;
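	// Global node array layout, fixed at construction:
	//   [0, startPosProfile)            boundary nodes
	//   [startPosProfile, startPosECM)  profile nodes
	//   [startPosECM, startPosCells)    ECM nodes
	//   [startPosCells, end)            cell nodes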
thrust::host_vector<CellType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
thrust::host_vector<int> hostTmpVector3(maxTotalNodeCount);
for (int i = 0; i < maxTotalNodeCount; i++) {
if (i < startPosProfile) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else if (i < startPosECM) {
hostTmpVector[i] = Profile;
hostTmpVector3[i] = 0;
} else if (i < startPosCells) {
hostTmpVector[i] = ECM;
hostTmpVector3[i] = (i - startPosECM) / maxNodeInECM;
} else {
// all initialized as FNM
hostTmpVector[i] = FNM;
hostTmpVector3[i] = (i - startPosCells) / maxNodeOfOneCell;
}
nodeIsActive[i] = false;
}
nodeCellType = hostTmpVector;
nodeIsActive = hostTmpVector2;
nodeCellRank = hostTmpVector3;
copyParaToGPUConstMem();
}
void SceNodes::copyParaToGPUConstMem() {
static const double U0 =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_U0_DivFactor").toDouble();
static const double V0 =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_V0_DivFactor").toDouble();
static const double k1 =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k1_DivFactor").toDouble();
static const double k2 =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k2_DivFactor").toDouble();
static const double interLinkEffectiveRange =
globalConfigVars.getConfigValue("InterCellLinkBreakRange").toDouble();
sceInterParaCPU[0] = U0;
sceInterParaCPU[1] = V0;
sceInterParaCPU[2] = k1;
sceInterParaCPU[3] = k2;
sceInterParaCPU[4] = interLinkEffectiveRange;
static const double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_U0_DivFactor").toDouble();
static const double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_V0_DivFactor").toDouble();
static const double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k1_DivFactor").toDouble();
static const double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k2_DivFactor").toDouble();
sceIntraParaCPU[0] = U0_Intra;
sceIntraParaCPU[1] = V0_Intra;
sceIntraParaCPU[2] = k1_Intra;
sceIntraParaCPU[3] = k2_Intra;
//std::cout << "in SceNodes, before cuda memory copy to symbol:" << std::endl;
hipMemcpyToSymbol(sceInterPara, sceInterParaCPU, 5 * sizeof(double));
hipMemcpyToSymbol(sceIntraPara, sceIntraParaCPU, 4 * sizeof(double));
hipMemcpyToSymbol(ProfilebeginPos, &startPosProfile, sizeof(uint));
hipMemcpyToSymbol(ECMbeginPos, &startPosECM, sizeof(uint));
hipMemcpyToSymbol(cellNodeBeginPos, &startPosCells, sizeof(uint));
hipMemcpyToSymbol(nodeCountPerECM, &maxNodePerECM, sizeof(uint));
hipMemcpyToSymbol(nodeCountPerCell, &maxNodeOfOneCell, sizeof(uint));
static const double U0_Diff =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_U0_DivFactor").toDouble();
static const double V0_Diff =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_V0_DivFactor").toDouble();
static const double k1_Diff =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_k1_DivFactor").toDouble();
static const double k2_Diff =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_k2_DivFactor").toDouble();
sceInterDiffParaCPU[0] = U0_Diff;
sceInterDiffParaCPU[1] = V0_Diff;
sceInterDiffParaCPU[2] = k1_Diff;
sceInterDiffParaCPU[3] = k2_Diff;
sceInterDiffParaCPU[4] = interLinkEffectiveRange;
static const double U0_Bdry =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_U0_DivFactor").toDouble();
static const double V0_Bdry =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_V0_DivFactor").toDouble();
static const double k1_Bdry =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_k1_DivFactor").toDouble();
static const double k2_Bdry =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_k2_DivFactor").toDouble();
// 1.8 comes from standard
static const double neutralLength = globalConfigVars.getConfigValue(
"Bdry_base_neutral_dist").toDouble() / k2_Bdry
* globalConfigVars.getConfigValue("baseline_k_value").toDouble();
static const double linearParameter = globalConfigVars.getConfigValue(
"Profile_linear_parameter").toDouble();
sceProfileParaCPU[0] = U0_Bdry;
sceProfileParaCPU[1] = V0_Bdry;
sceProfileParaCPU[2] = k1_Bdry;
sceProfileParaCPU[3] = k2_Bdry;
sceProfileParaCPU[4] = interLinkEffectiveRange;
sceProfileParaCPU[5] = linearParameter;
sceProfileParaCPU[6] = neutralLength;
static const double U0_ECM =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_U0_DivFactor").toDouble();
static const double V0_ECM =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_V0_DivFactor").toDouble();
static const double k1_ECM =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_k1_DivFactor").toDouble();
static const double k2_ECM =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_k2_DivFactor").toDouble();
sceECMParaCPU[0] = U0_ECM;
sceECMParaCPU[1] = V0_ECM;
sceECMParaCPU[2] = k1_ECM;
sceECMParaCPU[3] = k2_ECM;
sceECMParaCPU[4] = interLinkEffectiveRange;
hipMemcpyToSymbol(sceProfilePara, sceProfileParaCPU, 7 * sizeof(double));
hipMemcpyToSymbol(sceInterDiffPara, sceInterDiffParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceECMPara, sceECMParaCPU, 5 * sizeof(double));
//std::cout << "finished SceNodes:" << std::endl;
}
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell) {
uint shiftSize = nodeLocXNewCell.size();
assert(shiftSize % maxNodeOfOneCell == 0);
uint addCellCount = shiftSize / maxNodeOfOneCell;
uint shiftStartPos = startPosCells
+ currentActiveCellCount * maxNodeOfOneCell;
uint shiftEndPos = shiftStartPos + currentActiveECM * maxNodePerECM;
uint ECMStartPos = shiftStartPos + shiftSize;
	// the reason for staging through these tmp vectors is that GPU copying
	// does not guarantee ordering; copying directly between overlapping
	// ranges would cause undefined behavior.
//std::cout << "shift start position = " << shiftStartPos << ", end pos = "
// << shiftEndPos << std::endl;
thrust::device_vector<double> tmpPosXECM(nodeLocX.begin() + shiftStartPos,
nodeLocX.begin() + shiftEndPos);
thrust::device_vector<double> tmpPosYECM(nodeLocY.begin() + shiftStartPos,
nodeLocY.begin() + shiftEndPos);
thrust::device_vector<double> tmpPosZECM(nodeLocZ.begin() + shiftStartPos,
nodeLocZ.begin() + shiftEndPos);
thrust::device_vector<bool> tmpIsActive(
nodeIsActive.begin() + shiftStartPos,
nodeIsActive.begin() + shiftEndPos);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin()))
+ shiftStartPos);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(tmpPosXECM.begin(), tmpPosYECM.begin(),
tmpPosZECM.begin(), tmpIsActive.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(tmpPosXECM.end(), tmpPosYECM.end(),
tmpPosZECM.end(), tmpIsActive.end())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin()))
+ ECMStartPos);
currentActiveCellCount = currentActiveCellCount + addCellCount;
}
void SceNodes::initDimension(double domainMinX, double domainMaxX,
double domainMinY, double domainMaxY, double domainBucketSize) {
minX = domainMinX;
maxX = domainMaxX;
minY = domainMinY;
maxY = domainMaxY;
bucketSize = domainBucketSize;
numOfBucketsInXDim = (maxX - minX) / bucketSize + 1;
numOfBucketsInYDim = (maxY - minY) / bucketSize + 1;
totalBucketCount = numOfBucketsInXDim * numOfBucketsInYDim;
keyBegin.resize(totalBucketCount);
keyEnd.resize(totalBucketCount);
/*
std::cout << "after initialization, values:" << std::endl;
std::cout << "minX = " << minX << ", maxX = " << maxX << std::endl;
std::cout << "minX = " << minX << ", maxX = " << maxX << std::endl;
std::cout << "numOfBucketsInXDim = " << numOfBucketsInXDim
<< ", numOfBucketsInYDim = " << numOfBucketsInYDim << std::endl;
std::cout << "totalBucketCount= " << totalBucketCount << std::endl;
*/
//int jj;
//std::cin >> jj;
}
std::vector<std::pair<uint, uint> > SceNodes::obtainNeighborPairs() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = keyBegin;
thrust::host_vector<uint> keyEndCPU = keyEnd;
thrust::host_vector<uint> bucketKeysCPU = bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
bucketValuesIncludingNeighbor;
int size = bucketKeysCPU.size();
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
//std::cout << "pair node 1: " << bucketValues[i] << ",pair node2: "
// << bucketValuesIncludingNeighbor[j] << std::endl;
result.push_back(
std::make_pair<uint, uint>(bucketValues[i],
bucketValuesIncludingNeighbor[j]));
}
}
return result;
}
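
// Hedged usage sketch (not part of the original interface): the pair list
// returned above lives on the CPU, so it can be consumed directly for
// debugging or visualization; `nodes` below is an illustrative instance.
//   std::vector<std::pair<uint, uint> > pairs = nodes.obtainNeighborPairs();
//   for (size_t k = 0; k < pairs.size(); k++) {
//       std::cout << pairs[k].first << " <-> " << pairs[k].second << std::endl;
//   }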
void SceNodes::initValues(std::vector<double>& initBdryCellNodePosX,
std::vector<double>& initBdryCellNodePosY,
std::vector<double>& initProfileNodePosX,
std::vector<double>& initProfileNodePosY,
std::vector<double>& initECMNodePosX,
std::vector<double>& initECMNodePosY,
std::vector<double>& initFNMCellNodePosX,
std::vector<double>& initFNMCellNodePosY,
std::vector<double>& initMXCellNodePosX,
std::vector<double>& initMXCellNodePosY) {
uint FNMNodeCountX = initFNMCellNodePosX.size();
uint MXNodeCountX = initMXCellNodePosX.size();
uint beginAddressOfProfile = startPosProfile;
	// find the beginning position of ECM.
	uint beginAddressOfECM = startPosECM;
	// find the beginning position of FNM cells.
	uint beginAddressOfFNM = startPosCells;
	// find the beginning position of MX cells.
uint beginAddressOfMX = beginAddressOfFNM + FNMNodeCountX;
//std::cerr << "before copying arrays" << endl;
thrust::copy(initBdryCellNodePosX.begin(), initBdryCellNodePosX.end(),
nodeLocX.begin());
thrust::copy(initBdryCellNodePosY.begin(), initBdryCellNodePosY.end(),
nodeLocY.begin());
//std::cerr << "copy 1" << endl;
// copy x and y position of nodes of Profile to actual node position.
thrust::copy(initProfileNodePosX.begin(), initProfileNodePosX.end(),
nodeLocX.begin() + beginAddressOfProfile);
thrust::copy(initProfileNodePosY.begin(), initProfileNodePosY.end(),
nodeLocY.begin() + beginAddressOfProfile);
//std::cerr << "copy 2" << endl;
// copy x and y position of nodes of ECM to actual node position.
thrust::copy(initECMNodePosX.begin(), initECMNodePosX.end(),
nodeLocX.begin() + beginAddressOfECM);
thrust::copy(initECMNodePosY.begin(), initECMNodePosY.end(),
nodeLocY.begin() + beginAddressOfECM);
// debug
for (int i = 0; i < initECMNodePosX.size(); i++) {
assert(nodeLocX[i + beginAddressOfECM] == initECMNodePosX[i]);
assert(!isnan(initECMNodePosX[i]));
}
// std::cerr << "copy 3" << endl;
// copy x and y position of nodes of FNM cells to actual node position.
thrust::copy(initFNMCellNodePosX.begin(), initFNMCellNodePosX.end(),
nodeLocX.begin() + beginAddressOfFNM);
thrust::copy(initFNMCellNodePosY.begin(), initFNMCellNodePosY.end(),
nodeLocY.begin() + beginAddressOfFNM);
// std::cerr << "copy 4" << endl;
thrust::fill(nodeCellType.begin() + beginAddressOfFNM,
nodeCellType.begin() + beginAddressOfMX, FNM);
// copy x and y position of nodes of MX cells to actual node position.
thrust::copy(initMXCellNodePosX.begin(), initMXCellNodePosX.end(),
nodeLocX.begin() + beginAddressOfMX);
thrust::copy(initMXCellNodePosY.begin(), initMXCellNodePosY.end(),
nodeLocY.begin() + beginAddressOfMX);
//std::cerr << "after copying arrays" << endl;
thrust::fill(nodeCellType.begin() + beginAddressOfMX,
nodeCellType.begin() + beginAddressOfMX + MXNodeCountX, MX);
//std::cout << "initial MX cell numbers: " << mxQuotient << std::endl;
}
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell,
thrust::device_vector<CellType> &nodeCellTypeNewCell) {
// data validation
uint nodesSize = nodeLocXNewCell.size();
assert(nodesSize % maxNodeOfOneCell == 0);
uint addCellCount = nodesSize / maxNodeOfOneCell;
// position that we will add newly divided cells.
uint shiftStartPosNewCell = startPosCells
+ currentActiveCellCount * maxNodeOfOneCell;
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin(),
nodeCellTypeNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end(),
nodeCellTypeNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin(),
nodeCellType.begin())) + shiftStartPosNewCell);
// total number of cells has increased.
currentActiveCellCount = currentActiveCellCount + addCellCount;
}
void SceNodes::buildBuckets2D() {
int totalActiveNodes = startPosCells
+ currentActiveCellCount * maxNodeOfOneCell;
bucketKeys.resize(totalActiveNodes);
bucketValues.resize(totalActiveNodes);
thrust::counting_iterator<uint> countingIterBegin(0);
thrust::counting_iterator<uint> countingIterEnd(totalActiveNodes);
	// takes the coordinates plus a counting iterator and returns a tuple of
	// (bucket key, node index), i.e. transforms each point to its bucket index
thrust::transform(
make_zip_iterator(
make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin(),
countingIterBegin)),
make_zip_iterator(
make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin(),
countingIterBegin)) + totalActiveNodes,
make_zip_iterator(
make_tuple(bucketKeys.begin(), bucketValues.begin())),
pointToBucketIndex2D(minX, maxX, minY, maxY, bucketSize));
// sort the points by their bucket index
thrust::sort_by_key(bucketKeys.begin(), bucketKeys.end(),
bucketValues.begin());
// for those nodes that are inactive, key value of UINT_MAX will be returned.
	// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(bucketKeys.begin(), bucketKeys.end(),
UINT_MAX);
bucketKeys.erase(bucketKeys.end() - numberOfOutOfRange, bucketKeys.end());
bucketValues.erase(bucketValues.end() - numberOfOutOfRange,
bucketValues.end());
}
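
// Note on the pattern above: sorting the (key, value) pairs by bucket key
// and erasing the UINT_MAX tail leaves bucketKeys/bucketValues as a compact
// list of active nodes grouped by bucket. extendBuckets2D applies the same
// sort-and-prune pattern to a 9-fold expanded copy, over which
// applySceForces later computes per-bucket [keyBegin, keyEnd) ranges.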
__device__
double computeDist(double &xPos, double &yPos, double &zPos, double &xPos2,
double &yPos2, double &zPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)
+ (zPos - zPos2) * (zPos - zPos2));
}
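
// The calculateAndAdd*Force routines below all evaluate the same
// Morse-style pair force,
//     F(r) = -U0 / k1 * exp(-r / k1) + V0 / k2 * exp(-r / k2),
// with (U0, V0, k1, k2) drawn from the parameter set named in each routine
// (sceECMPara, sceInterPara, sceIntraPara, ...). A positive F attracts
// node 1 toward node 2 (damped by the 0.3 factor where applied); a negative
// F repels. The force is resolved along the unit vector from node 1 to node 2.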
__device__
void calculateAndAddECMForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceECMPara[4]) {
forceValue = 0;
} else {
forceValue = -sceECMPara[0] / sceECMPara[2]
* exp(-linkLength / sceECMPara[2])
+ sceECMPara[1] / sceECMPara[3]
* exp(-linkLength / sceECMPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddProfileForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
forceValue = -sceProfilePara[5] * (linkLength - sceProfilePara[6]);
/*
if (linkLength > sceProfilePara[4]) {
forceValue = 0;
} else {
forceValue = -sceProfilePara[0] / sceProfilePara[2]
* exp(-linkLength / sceProfilePara[2])
+ sceProfilePara[1] / sceProfilePara[3]
* exp(-linkLength / sceProfilePara[3]);
// positive value means force is attraction
if (linkLength > sceProfilePara[6]) {
forceValue = sceProfilePara[5] * (linkLength - sceProfilePara[6]);
//if (forceValue < 0) {
// forceValue = 0;
//}
}
}
*/
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddInterForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddDiffInterCellForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterDiffPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterDiffPara[0] / sceInterDiffPara[2]
* exp(-linkLength / sceInterDiffPara[2])
+ sceInterDiffPara[1] / sceInterDiffPara[3]
* exp(-linkLength / sceInterDiffPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddInterForceDiffType(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddIntraForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__ bool bothNodesCellNode(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint cellNodesThreshold) {
if (nodeGlobalRank1 < cellNodesThreshold
&& nodeGlobalRank2 < cellNodesThreshold) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint nodeCountPerCell) {
if (nodeGlobalRank1 / nodeCountPerCell
== nodeGlobalRank2 / nodeCountPerCell) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - cellNodeBeginPos) / nodeCountPerCell
== (nodeGlobalRank2 - cellNodeBeginPos) / nodeCountPerCell) {
return true;
} else {
return false;
}
}
__device__ bool isSameECM(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
return true;
} else {
return false;
}
}
__device__ bool isNeighborECMNodes(uint nodeGlobalRank1, uint nodeGlobalRank2) {
// this means that two nodes are from the same ECM
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
		// this means that the two nodes are adjacent along the ECM;
		// the two branches avoid unsigned underflow when subtracting ranks.
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
}
return false;
}
__device__ bool isNeighborProfileNodes(uint nodeGlobalRank1,
uint nodeGlobalRank2) {
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
return false;
}
__device__ bool ofSameType(uint cellType1, uint cellType2) {
if (cellType1 == cellType2) {
return true;
} else {
return false;
}
}
__device__ bool bothCellNodes(CellType &type1, CellType &type2) {
if ((type1 == MX || type1 == FNM) && (type2 == MX || type2 == FNM)) {
return true;
} else {
return false;
}
}
__device__
void handleForceBetweenNodes(uint &nodeRank1, CellType &type1, uint &nodeRank2,
CellType &type2, double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes, double* _nodeLocXAddress, double* _nodeLocYAddress,
double* _nodeLocZAddress, uint beginPosOfCells) {
// this means that both nodes come from cells
if (bothCellNodes(type1, type2)) {
// this means that nodes come from different type of cell, apply differential adhesion
if (type1 != type2) {
// TODO: apply differential adhesion here.
// It should be a different type of inter force.
calculateAndAddDiffInterCellForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2], _nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
		} else {
			// nodeCountPerCell and cellNodeBeginPos are already stored in
			// constant memory, so use the two-argument overload of
			// isSameCell, which offsets global ranks by cellNodeBeginPos
			// before comparing cell indices.
			if (isSameCell(nodeRank1, nodeRank2)) {
calculateAndAddIntraForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
} else {
calculateAndAddInterForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
}
}
}
// this means that both nodes come from ECM and from same ECM
else if (type1 == ECM && type2 == ECM && isSameECM(nodeRank1, nodeRank2)) {
if (isNeighborECMNodes(nodeRank1, nodeRank2)) {
// TODO: need to create another two vectors that holds the neighbor information for ECM.
// TODO: alternatively, try to store ECM begin address and number of node per ECM in constant memory.
// TODO: implement this function.
calculateAndAddECMForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2], _nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
}
// if both nodes belong to same ECM but are not neighbors they shouldn't interact.
}
	// this means that both nodes come from the profile (epithelium layer).
else if (type1 == Profile && type2 == Profile) {
if (isNeighborProfileNodes(nodeRank1, nodeRank2)) {
// TODO: need a set of parameters for calculating linking force between profile nodes
calculateAndAddProfileForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2], _nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
}
// if both nodes belong to Profile but are not neighbors they shouldn't interact.
} else {
		// for now, we assume that interactions between all other node pairs follow the inter-cell force.
calculateAndAddInterForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
}
}
void SceNodes::extendBuckets2D() {
static const uint extensionFactor2D = 9;
uint valuesCount = bucketValues.size();
bucketKeysExpanded.resize(valuesCount * extensionFactor2D);
bucketValuesIncludingNeighbor.resize(valuesCount * extensionFactor2D);
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(extensionFactor2D);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not value.
	 * e.g. if the movement is 5 and the iterator was initialized with 9,
	 * the resulting sequence is [9,9,9,9,9];
*/
thrust::constant_iterator<uint> last = first + valuesCount;
expand(first, last,
make_zip_iterator(
make_tuple(bucketKeys.begin(), bucketValues.begin())),
make_zip_iterator(
make_tuple(bucketKeysExpanded.begin(),
bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd = countingBegin
+ valuesCount * extensionFactor2D;
//std::cout << "number of values for array holding extended value= "
// << valuesCount * extensionFactor2D << std::endl;
//thrust::for_each(
// thrust::make_zip_iterator(
// make_tuple(bucketKeysExpanded.begin(), countingBegin)),
// thrust::make_zip_iterator(
// make_tuple(bucketKeysExpanded.end(), countingEnd)),
// NeighborFunctor2D(numOfBucketsInXDim, numOfBucketsInYDim));
thrust::transform(
make_zip_iterator(
make_tuple(bucketKeysExpanded.begin(), countingBegin)),
make_zip_iterator(
make_tuple(bucketKeysExpanded.end(), countingEnd)),
make_zip_iterator(
make_tuple(bucketKeysExpanded.begin(), countingBegin)),
NeighborFunctor2D(numOfBucketsInXDim, numOfBucketsInYDim));
int numberOfOutOfRange = thrust::count(bucketKeysExpanded.begin(),
bucketKeysExpanded.end(), UINT_MAX);
//std::cout << "number out of range = " << numberOfOutOfRange << std::endl;
int sizeBeforeShrink = bucketKeysExpanded.size();
int numberInsideRange = sizeBeforeShrink - numberOfOutOfRange;
thrust::sort_by_key(bucketKeysExpanded.begin(), bucketKeysExpanded.end(),
bucketValuesIncludingNeighbor.begin());
bucketKeysExpanded.erase(bucketKeysExpanded.begin() + numberInsideRange,
bucketKeysExpanded.end());
bucketValuesIncludingNeighbor.erase(
bucketValuesIncludingNeighbor.begin() + numberInsideRange,
bucketValuesIncludingNeighbor.end());
}
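
// A sketch of what extendBuckets2D produces, inferred from
// extensionFactor2D == 9 and NeighborFunctor2D: every (key, value) pair is
// replicated nine times, and replica i is re-keyed to one bucket of the 3x3
// neighborhood around the original bucket. Replicas whose neighbor bucket
// falls outside the grid get the key UINT_MAX and are pruned after the
// sort, so a later lookup of bucket b yields all nodes in b and its (up to)
// eight neighbors.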
void SceNodes::applySceForces() {
std::cout << "begin apply sce forces" << std::endl;
std::cout << "size of lower = " << keyBegin.size() << std::endl;
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(bucketKeysExpanded.begin(), bucketKeysExpanded.end(),
search_begin, search_begin + totalBucketCount, keyBegin.begin());
thrust::upper_bound(bucketKeysExpanded.begin(), bucketKeysExpanded.end(),
search_begin, search_begin + totalBucketCount, keyEnd.begin());
thrust::host_vector<uint> lowerCPU = keyBegin;
std::cout << "finished finding bounds" << std::endl;
	// begin/end offsets of the first bucket; test3/test4 below cover the last
	int test1 = lowerCPU[0];
	int test2 = keyEnd[0];
std::cout << "test 1 =" << test1 << ", test 2 = " << test2 << std::endl;
std::cout.flush();
int test3 = keyBegin[totalBucketCount - 1];
int test4 = keyEnd[totalBucketCount - 1];
std::cout << "test 3 =" << test3 << ", test 4 = " << test4 << std::endl;
uint* valueAddress = thrust::raw_pointer_cast(
&bucketValuesIncludingNeighbor[0]);
std::cout << "begin pointer casting" << std::endl;
double* nodeLocXAddress = thrust::raw_pointer_cast(&nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&nodeLocZ[0]);
uint* nodeRankAddress = thrust::raw_pointer_cast(&nodeCellRank[0]);
CellType* nodeTypeAddress = thrust::raw_pointer_cast(&nodeCellType[0]);
std::cout << "begin transformation" << std::endl;
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(keyBegin.begin(),
bucketKeys.begin()),
make_permutation_iterator(keyEnd.begin(),
bucketKeys.begin()), bucketValues.begin(),
make_permutation_iterator(nodeLocX.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeLocY.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeLocZ.begin(),
bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(keyBegin.begin(),
bucketKeys.end()),
make_permutation_iterator(keyEnd.begin(),
bucketKeys.end()), bucketValues.end(),
make_permutation_iterator(nodeLocX.begin(),
bucketValues.end()),
make_permutation_iterator(nodeLocY.begin(),
bucketValues.end()),
make_permutation_iterator(nodeLocZ.begin(),
bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(nodeVelX.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeVelY.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeVelZ.begin(),
bucketValues.begin()))),
AddSceForce(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress, nodeRankAddress, nodeTypeAddress,
maxTotalCellNodeCount, startPosCells, maxNodeOfOneCell,
maxNodePerECM));
std::cout << "after transformation" << std::endl;
}
void SceNodes::calculateAndApplySceForces() {
//const int numberOfBucketsInXDim = (maxX - minX) / bucketSize + 1;
//const int numberOfBucketsInYDim = (maxY - minY) / bucketSize + 1;
std::cout << "in SceNodes, before build buckets 2D:" << std::endl;
buildBuckets2D();
std::cout << "in SceNodes, before extend buckets 2D:" << std::endl;
extendBuckets2D();
std::cout << "in SceNodes, before apply sce forces:" << std::endl;
applySceForces();
std::cout << "in SceNodes, finished apply sce forces:" << std::endl;
}
| 9ac2c4b5f73ffdb765e21af8d4bd731295553b0b.cu | #include "SceNodes.h"
__constant__ double sceInterPara[5];
__constant__ double sceIntraPara[4];
__constant__ double sceInterDiffPara[5];
__constant__ double sceProfilePara[7];
__constant__ double sceECMPara[5];
__constant__ double sceDiffPara[5];
double sceInterParaCPU[5];
double sceIntraParaCPU[4];
double sceInterDiffParaCPU[5];
double sceProfileParaCPU[7];
double sceECMParaCPU[5];
double sceDiffParaCPU[5];
__constant__ uint ProfilebeginPos;
__constant__ uint ECMbeginPos;
__constant__ uint cellNodeBeginPos;
__constant__ uint nodeCountPerECM;
__constant__ uint nodeCountPerCell;
// This template method expands an input sequence by
// replicating each element a variable number of times. For example,
//
// expand([2,2,2],[A,B,C]) -> [A,A,B,B,C,C]
// expand([3,0,1],[A,B,C]) -> [A,A,A,C]
// expand([1,3,2],[A,B,C]) -> [A,B,B,B,C,C]
//
// The element counts are assumed to be non-negative integers
template<typename InputIterator1, typename InputIterator2,
typename OutputIterator>
OutputIterator expand(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator output) {
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0),
thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(),
output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output;
thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2,
output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
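
// A minimal, self-contained sketch of how expand() behaves, matching the
// examples in the comment above; demoExpand is illustrative only and is not
// called anywhere in SceNodes.
static void demoExpand() {
	thrust::device_vector<int> counts(3);
	counts[0] = 2; counts[1] = 1; counts[2] = 3;
	thrust::device_vector<char> values(3);
	values[0] = 'A'; values[1] = 'B'; values[2] = 'C';
	// output size must equal the sum of counts (2 + 1 + 3 = 6)
	thrust::device_vector<char> output(6);
	expand(counts.begin(), counts.end(), values.begin(), output.begin());
	// output now holds [A, A, B, C, C, C]
}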
SceNodes::SceNodes(uint totalBdryNodeCount, uint maxProfileNodeCount,
uint maxTotalECMCount, uint maxNodeInECM, uint maxTotalCellCount,
uint maxNodeInCell) {
std::cout << "start creating SceNodes object" << std::endl;
maxCellCount = maxTotalCellCount;
maxNodeOfOneCell = maxNodeInCell;
maxNodePerECM = maxNodeInECM;
maxECMCount = maxTotalECMCount;
this->maxProfileNodeCount = maxProfileNodeCount;
currentActiveProfileNodeCount = 0;
BdryNodeCount = totalBdryNodeCount;
currentActiveCellCount = 0;
maxTotalECMNodeCount = maxECMCount * maxNodePerECM;
currentActiveECM = 0;
// will need to change this value after we have more detail about ECM
maxTotalCellNodeCount = maxTotalCellCount * maxNodeOfOneCell;
//cellRanks.resize(maxTotalNodeCount);
//nodeRanks.resize(maxTotalNodeCount);
//std::cout << "before resizing vectors" << std::endl;
uint maxTotalNodeCount = totalBdryNodeCount + maxProfileNodeCount
+ maxTotalECMNodeCount + maxTotalCellNodeCount;
//std::cout << "maxTotalNodeCount = " << maxTotalNodeCount << std::endl;
//thrust::host_vector<bool> nodeIsActiveHost
nodeLocX.resize(maxTotalNodeCount);
nodeLocY.resize(maxTotalNodeCount);
nodeLocZ.resize(maxTotalNodeCount);
nodeVelX.resize(maxTotalNodeCount);
nodeVelY.resize(maxTotalNodeCount);
nodeVelZ.resize(maxTotalNodeCount);
nodeCellType.resize(maxTotalNodeCount);
nodeCellRank.resize(maxTotalNodeCount);
nodeIsActive.resize(maxTotalNodeCount);
startPosProfile = totalBdryNodeCount;
startPosECM = startPosProfile + maxProfileNodeCount;
startPosCells = startPosECM + maxTotalECMNodeCount;
thrust::host_vector<CellType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
thrust::host_vector<int> hostTmpVector3(maxTotalNodeCount);
for (int i = 0; i < maxTotalNodeCount; i++) {
if (i < startPosProfile) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else if (i < startPosECM) {
hostTmpVector[i] = Profile;
hostTmpVector3[i] = 0;
} else if (i < startPosCells) {
hostTmpVector[i] = ECM;
hostTmpVector3[i] = (i - startPosECM) / maxNodeInECM;
} else {
// all initialized as FNM
hostTmpVector[i] = FNM;
hostTmpVector3[i] = (i - startPosCells) / maxNodeOfOneCell;
}
		// fill the host staging vector; it is copied into nodeIsActive below
		hostTmpVector2[i] = false;
}
nodeCellType = hostTmpVector;
nodeIsActive = hostTmpVector2;
nodeCellRank = hostTmpVector3;
copyParaToGPUConstMem();
}
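
// Global node layout implied by the start positions computed above (a
// reading aid, not new behavior):
//
//   [0, startPosProfile)               boundary nodes
//   [startPosProfile, startPosECM)     profile (epithelium) nodes
//   [startPosECM, startPosCells)       ECM nodes, maxNodePerECM per ECM
//   [startPosCells, maxTotalNodeCount) cell nodes, maxNodeOfOneCell per cell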
void SceNodes::copyParaToGPUConstMem() {
static const double U0 =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_U0_DivFactor").toDouble();
static const double V0 =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_V0_DivFactor").toDouble();
static const double k1 =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k1_DivFactor").toDouble();
static const double k2 =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k2_DivFactor").toDouble();
static const double interLinkEffectiveRange =
globalConfigVars.getConfigValue("InterCellLinkBreakRange").toDouble();
sceInterParaCPU[0] = U0;
sceInterParaCPU[1] = V0;
sceInterParaCPU[2] = k1;
sceInterParaCPU[3] = k2;
sceInterParaCPU[4] = interLinkEffectiveRange;
static const double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_U0_DivFactor").toDouble();
static const double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_V0_DivFactor").toDouble();
static const double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k1_DivFactor").toDouble();
static const double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k2_DivFactor").toDouble();
sceIntraParaCPU[0] = U0_Intra;
sceIntraParaCPU[1] = V0_Intra;
sceIntraParaCPU[2] = k1_Intra;
sceIntraParaCPU[3] = k2_Intra;
//std::cout << "in SceNodes, before cuda memory copy to symbol:" << std::endl;
cudaMemcpyToSymbol(sceInterPara, sceInterParaCPU, 5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraPara, sceIntraParaCPU, 4 * sizeof(double));
cudaMemcpyToSymbol(ProfilebeginPos, &startPosProfile, sizeof(uint));
cudaMemcpyToSymbol(ECMbeginPos, &startPosECM, sizeof(uint));
cudaMemcpyToSymbol(cellNodeBeginPos, &startPosCells, sizeof(uint));
cudaMemcpyToSymbol(nodeCountPerECM, &maxNodePerECM, sizeof(uint));
cudaMemcpyToSymbol(nodeCountPerCell, &maxNodeOfOneCell, sizeof(uint));
static const double U0_Diff =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_U0_DivFactor").toDouble();
static const double V0_Diff =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_V0_DivFactor").toDouble();
static const double k1_Diff =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_k1_DivFactor").toDouble();
static const double k2_Diff =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Diff_k2_DivFactor").toDouble();
sceInterDiffParaCPU[0] = U0_Diff;
sceInterDiffParaCPU[1] = V0_Diff;
sceInterDiffParaCPU[2] = k1_Diff;
sceInterDiffParaCPU[3] = k2_Diff;
sceInterDiffParaCPU[4] = interLinkEffectiveRange;
static const double U0_Bdry =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_U0_DivFactor").toDouble();
static const double V0_Bdry =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_V0_DivFactor").toDouble();
static const double k1_Bdry =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_k1_DivFactor").toDouble();
static const double k2_Bdry =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_Bdry_k2_DivFactor").toDouble();
	// the base distance (1.8) comes from the standard parameter set; it is
	// rescaled by baseline_k_value / k2_Bdry in neutralLength below
static const double neutralLength = globalConfigVars.getConfigValue(
"Bdry_base_neutral_dist").toDouble() / k2_Bdry
* globalConfigVars.getConfigValue("baseline_k_value").toDouble();
static const double linearParameter = globalConfigVars.getConfigValue(
"Profile_linear_parameter").toDouble();
sceProfileParaCPU[0] = U0_Bdry;
sceProfileParaCPU[1] = V0_Bdry;
sceProfileParaCPU[2] = k1_Bdry;
sceProfileParaCPU[3] = k2_Bdry;
sceProfileParaCPU[4] = interLinkEffectiveRange;
sceProfileParaCPU[5] = linearParameter;
sceProfileParaCPU[6] = neutralLength;
static const double U0_ECM =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_U0_DivFactor").toDouble();
static const double V0_ECM =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_V0_DivFactor").toDouble();
static const double k1_ECM =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_k1_DivFactor").toDouble();
static const double k2_ECM =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"InterCell_ECM_k2_DivFactor").toDouble();
sceECMParaCPU[0] = U0_ECM;
sceECMParaCPU[1] = V0_ECM;
sceECMParaCPU[2] = k1_ECM;
sceECMParaCPU[3] = k2_ECM;
sceECMParaCPU[4] = interLinkEffectiveRange;
cudaMemcpyToSymbol(sceProfilePara, sceProfileParaCPU, 7 * sizeof(double));
cudaMemcpyToSymbol(sceInterDiffPara, sceInterDiffParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceECMPara, sceECMParaCPU, 5 * sizeof(double));
//std::cout << "finished SceNodes:" << std::endl;
}
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell) {
uint shiftSize = nodeLocXNewCell.size();
assert(shiftSize % maxNodeOfOneCell == 0);
uint addCellCount = shiftSize / maxNodeOfOneCell;
uint shiftStartPos = startPosCells
+ currentActiveCellCount * maxNodeOfOneCell;
uint shiftEndPos = shiftStartPos + currentActiveECM * maxNodePerECM;
uint ECMStartPos = shiftStartPos + shiftSize;
	// the reason for staging through these tmp vectors is that GPU copying
	// does not guarantee ordering; copying directly between overlapping
	// ranges would cause undefined behavior.
//std::cout << "shift start position = " << shiftStartPos << ", end pos = "
// << shiftEndPos << std::endl;
thrust::device_vector<double> tmpPosXECM(nodeLocX.begin() + shiftStartPos,
nodeLocX.begin() + shiftEndPos);
thrust::device_vector<double> tmpPosYECM(nodeLocY.begin() + shiftStartPos,
nodeLocY.begin() + shiftEndPos);
thrust::device_vector<double> tmpPosZECM(nodeLocZ.begin() + shiftStartPos,
nodeLocZ.begin() + shiftEndPos);
thrust::device_vector<bool> tmpIsActive(
nodeIsActive.begin() + shiftStartPos,
nodeIsActive.begin() + shiftEndPos);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin()))
+ shiftStartPos);
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(tmpPosXECM.begin(), tmpPosYECM.begin(),
tmpPosZECM.begin(), tmpIsActive.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(tmpPosXECM.end(), tmpPosYECM.end(),
tmpPosZECM.end(), tmpIsActive.end())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin()))
+ ECMStartPos);
currentActiveCellCount = currentActiveCellCount + addCellCount;
}
void SceNodes::initDimension(double domainMinX, double domainMaxX,
double domainMinY, double domainMaxY, double domainBucketSize) {
minX = domainMinX;
maxX = domainMaxX;
minY = domainMinY;
maxY = domainMaxY;
bucketSize = domainBucketSize;
numOfBucketsInXDim = (maxX - minX) / bucketSize + 1;
numOfBucketsInYDim = (maxY - minY) / bucketSize + 1;
totalBucketCount = numOfBucketsInXDim * numOfBucketsInYDim;
keyBegin.resize(totalBucketCount);
keyEnd.resize(totalBucketCount);
/*
std::cout << "after initialization, values:" << std::endl;
std::cout << "minX = " << minX << ", maxX = " << maxX << std::endl;
std::cout << "minX = " << minX << ", maxX = " << maxX << std::endl;
std::cout << "numOfBucketsInXDim = " << numOfBucketsInXDim
<< ", numOfBucketsInYDim = " << numOfBucketsInYDim << std::endl;
std::cout << "totalBucketCount= " << totalBucketCount << std::endl;
*/
//int jj;
//std::cin >> jj;
}
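
// Hedged sketch of the bucket indexing convention used with this grid.
// pointToBucketIndex2D is defined elsewhere; the formula below is an
// assumption consistent with the row-major numOfBucketsInXDim-by-
// numOfBucketsInYDim grid sized above:
//   uint bx = (uint) ((x - minX) / bucketSize);
//   uint by = (uint) ((y - minY) / bucketSize);
//   uint bucketIndex = by * numOfBucketsInXDim + bx;  // in [0, totalBucketCount)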
std::vector<std::pair<uint, uint> > SceNodes::obtainNeighborPairs() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = keyBegin;
thrust::host_vector<uint> keyEndCPU = keyEnd;
thrust::host_vector<uint> bucketKeysCPU = bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
bucketValuesIncludingNeighbor;
int size = bucketKeysCPU.size();
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
//std::cout << "pair node 1: " << bucketValues[i] << ",pair node2: "
// << bucketValuesIncludingNeighbor[j] << std::endl;
result.push_back(
std::make_pair<uint, uint>(bucketValues[i],
bucketValuesIncludingNeighbor[j]));
}
}
return result;
}
void SceNodes::initValues(std::vector<double>& initBdryCellNodePosX,
std::vector<double>& initBdryCellNodePosY,
std::vector<double>& initProfileNodePosX,
std::vector<double>& initProfileNodePosY,
std::vector<double>& initECMNodePosX,
std::vector<double>& initECMNodePosY,
std::vector<double>& initFNMCellNodePosX,
std::vector<double>& initFNMCellNodePosY,
std::vector<double>& initMXCellNodePosX,
std::vector<double>& initMXCellNodePosY) {
uint FNMNodeCountX = initFNMCellNodePosX.size();
uint MXNodeCountX = initMXCellNodePosX.size();
uint beginAddressOfProfile = startPosProfile;
	// find the beginning position of ECM.
	uint beginAddressOfECM = startPosECM;
	// find the beginning position of FNM cells.
	uint beginAddressOfFNM = startPosCells;
	// find the beginning position of MX cells.
uint beginAddressOfMX = beginAddressOfFNM + FNMNodeCountX;
//std::cerr << "before copying arrays" << endl;
thrust::copy(initBdryCellNodePosX.begin(), initBdryCellNodePosX.end(),
nodeLocX.begin());
thrust::copy(initBdryCellNodePosY.begin(), initBdryCellNodePosY.end(),
nodeLocY.begin());
//std::cerr << "copy 1" << endl;
// copy x and y position of nodes of Profile to actual node position.
thrust::copy(initProfileNodePosX.begin(), initProfileNodePosX.end(),
nodeLocX.begin() + beginAddressOfProfile);
thrust::copy(initProfileNodePosY.begin(), initProfileNodePosY.end(),
nodeLocY.begin() + beginAddressOfProfile);
//std::cerr << "copy 2" << endl;
// copy x and y position of nodes of ECM to actual node position.
thrust::copy(initECMNodePosX.begin(), initECMNodePosX.end(),
nodeLocX.begin() + beginAddressOfECM);
thrust::copy(initECMNodePosY.begin(), initECMNodePosY.end(),
nodeLocY.begin() + beginAddressOfECM);
// debug
for (int i = 0; i < initECMNodePosX.size(); i++) {
assert(nodeLocX[i + beginAddressOfECM] == initECMNodePosX[i]);
assert(!isnan(initECMNodePosX[i]));
}
// std::cerr << "copy 3" << endl;
// copy x and y position of nodes of FNM cells to actual node position.
thrust::copy(initFNMCellNodePosX.begin(), initFNMCellNodePosX.end(),
nodeLocX.begin() + beginAddressOfFNM);
thrust::copy(initFNMCellNodePosY.begin(), initFNMCellNodePosY.end(),
nodeLocY.begin() + beginAddressOfFNM);
// std::cerr << "copy 4" << endl;
thrust::fill(nodeCellType.begin() + beginAddressOfFNM,
nodeCellType.begin() + beginAddressOfMX, FNM);
// copy x and y position of nodes of MX cells to actual node position.
thrust::copy(initMXCellNodePosX.begin(), initMXCellNodePosX.end(),
nodeLocX.begin() + beginAddressOfMX);
thrust::copy(initMXCellNodePosY.begin(), initMXCellNodePosY.end(),
nodeLocY.begin() + beginAddressOfMX);
//std::cerr << "after copying arrays" << endl;
thrust::fill(nodeCellType.begin() + beginAddressOfMX,
nodeCellType.begin() + beginAddressOfMX + MXNodeCountX, MX);
//std::cout << "initial MX cell numbers: " << mxQuotient << std::endl;
}
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell,
thrust::device_vector<CellType> &nodeCellTypeNewCell) {
// data validation
uint nodesSize = nodeLocXNewCell.size();
assert(nodesSize % maxNodeOfOneCell == 0);
uint addCellCount = nodesSize / maxNodeOfOneCell;
// position that we will add newly divided cells.
uint shiftStartPosNewCell = startPosCells
+ currentActiveCellCount * maxNodeOfOneCell;
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin(),
nodeCellTypeNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end(),
nodeCellTypeNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin(),
nodeCellType.begin())) + shiftStartPosNewCell);
// total number of cells has increased.
currentActiveCellCount = currentActiveCellCount + addCellCount;
}
void SceNodes::buildBuckets2D() {
int totalActiveNodes = startPosCells
+ currentActiveCellCount * maxNodeOfOneCell;
bucketKeys.resize(totalActiveNodes);
bucketValues.resize(totalActiveNodes);
thrust::counting_iterator<uint> countingIterBegin(0);
thrust::counting_iterator<uint> countingIterEnd(totalActiveNodes);
	// takes the coordinates plus a counting iterator and returns a tuple of
	// (bucket key, node index), i.e. transforms each point to its bucket index
thrust::transform(
make_zip_iterator(
make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin(),
countingIterBegin)),
make_zip_iterator(
make_tuple(nodeLocX.begin(), nodeLocY.begin(),
nodeLocZ.begin(), nodeIsActive.begin(),
countingIterBegin)) + totalActiveNodes,
make_zip_iterator(
make_tuple(bucketKeys.begin(), bucketValues.begin())),
pointToBucketIndex2D(minX, maxX, minY, maxY, bucketSize));
// sort the points by their bucket index
thrust::sort_by_key(bucketKeys.begin(), bucketKeys.end(),
bucketValues.begin());
// for those nodes that are inactive, key value of UINT_MAX will be returned.
	// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(bucketKeys.begin(), bucketKeys.end(),
UINT_MAX);
bucketKeys.erase(bucketKeys.end() - numberOfOutOfRange, bucketKeys.end());
bucketValues.erase(bucketValues.end() - numberOfOutOfRange,
bucketValues.end());
}
__device__
double computeDist(double &xPos, double &yPos, double &zPos, double &xPos2,
double &yPos2, double &zPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)
+ (zPos - zPos2) * (zPos - zPos2));
}
__device__
void calculateAndAddECMForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceECMPara[4]) {
forceValue = 0;
} else {
forceValue = -sceECMPara[0] / sceECMPara[2]
* exp(-linkLength / sceECMPara[2])
+ sceECMPara[1] / sceECMPara[3]
* exp(-linkLength / sceECMPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddProfileForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
forceValue = -sceProfilePara[5] * (linkLength - sceProfilePara[6]);
/*
if (linkLength > sceProfilePara[4]) {
forceValue = 0;
} else {
forceValue = -sceProfilePara[0] / sceProfilePara[2]
* exp(-linkLength / sceProfilePara[2])
+ sceProfilePara[1] / sceProfilePara[3]
* exp(-linkLength / sceProfilePara[3]);
// positive value means force is attraction
if (linkLength > sceProfilePara[6]) {
forceValue = sceProfilePara[5] * (linkLength - sceProfilePara[6]);
//if (forceValue < 0) {
// forceValue = 0;
//}
}
}
*/
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddInterForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddDiffInterCellForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterDiffPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterDiffPara[0] / sceInterDiffPara[2]
* exp(-linkLength / sceInterDiffPara[2])
+ sceInterDiffPara[1] / sceInterDiffPara[3]
* exp(-linkLength / sceInterDiffPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddInterForceDiffType(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddIntraForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__ bool bothNodesCellNode(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint cellNodesThreshold) {
if (nodeGlobalRank1 < cellNodesThreshold
&& nodeGlobalRank2 < cellNodesThreshold) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint nodeCountPerCell) {
if (nodeGlobalRank1 / nodeCountPerCell
== nodeGlobalRank2 / nodeCountPerCell) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - cellNodeBeginPos) / nodeCountPerCell
== (nodeGlobalRank2 - cellNodeBeginPos) / nodeCountPerCell) {
return true;
} else {
return false;
}
}
__device__ bool isSameECM(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
return true;
} else {
return false;
}
}
__device__ bool isNeighborECMNodes(uint nodeGlobalRank1, uint nodeGlobalRank2) {
// this means that two nodes are from the same ECM
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
		// this means that the two nodes are adjacent along the ECM;
		// the two branches avoid unsigned underflow when subtracting ranks.
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
}
return false;
}
__device__ bool isNeighborProfileNodes(uint nodeGlobalRank1,
uint nodeGlobalRank2) {
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
return false;
}
__device__ bool ofSameType(uint cellType1, uint cellType2) {
if (cellType1 == cellType2) {
return true;
} else {
return false;
}
}
__device__ bool bothCellNodes(CellType &type1, CellType &type2) {
if ((type1 == MX || type1 == FNM) && (type2 == MX || type2 == FNM)) {
return true;
} else {
return false;
}
}
__device__
void handleForceBetweenNodes(uint &nodeRank1, CellType &type1, uint &nodeRank2,
CellType &type2, double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes, double* _nodeLocXAddress, double* _nodeLocYAddress,
double* _nodeLocZAddress, uint beginPosOfCells) {
// this means that both nodes come from cells
if (bothCellNodes(type1, type2)) {
// this means that nodes come from different type of cell, apply differential adhesion
if (type1 != type2) {
// TODO: apply differential adhesion here.
// It should be a different type of inter force.
calculateAndAddDiffInterCellForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2], _nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
		} else {
			// nodeCountPerCell and cellNodeBeginPos are already stored in
			// constant memory, so use the two-argument overload of
			// isSameCell, which offsets global ranks by cellNodeBeginPos
			// before comparing cell indices.
			if (isSameCell(nodeRank1, nodeRank2)) {
calculateAndAddIntraForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
} else {
calculateAndAddInterForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
}
}
}
// this means that both nodes come from ECM and from same ECM
else if (type1 == ECM && type2 == ECM && isSameECM(nodeRank1, nodeRank2)) {
if (isNeighborECMNodes(nodeRank1, nodeRank2)) {
// TODO: need to create another two vectors that holds the neighbor information for ECM.
// TODO: alternatively, try to store ECM begin address and number of node per ECM in constant memory.
// TODO: implement this function.
calculateAndAddECMForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2], _nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
}
// if both nodes belong to same ECM but are not neighbors they shouldn't interact.
}
	// this means that both nodes come from the profile (epithelium layer).
else if (type1 == Profile && type2 == Profile) {
if (isNeighborProfileNodes(nodeRank1, nodeRank2)) {
// TODO: need a set of parameters for calculating linking force between profile nodes
calculateAndAddProfileForce(xPos, yPos, zPos,
_nodeLocXAddress[nodeRank2], _nodeLocYAddress[nodeRank2],
_nodeLocZAddress[nodeRank2], xRes, yRes, zRes);
}
// if both nodes belong to Profile but are not neighbors they shouldn't interact.
} else {
		// for now, we assume that interactions between all other node pairs follow the inter-cell force.
calculateAndAddInterForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
}
}
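
// Dispatch summary for handleForceBetweenNodes, derived from the branches
// above:
//   cell/cell, different cell types        -> calculateAndAddDiffInterCellForce
//   cell/cell, same cell                   -> calculateAndAddIntraForce
//   cell/cell, same type, different cells  -> calculateAndAddInterForce
//   ECM/ECM, same ECM, neighboring nodes   -> calculateAndAddECMForce
//   ECM/ECM, same ECM, non-neighbors       -> no force
//   Profile/Profile, neighboring nodes     -> calculateAndAddProfileForce
//   Profile/Profile, non-neighbors         -> no force
//   any other combination                  -> calculateAndAddInterForce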
void SceNodes::extendBuckets2D() {
static const uint extensionFactor2D = 9;
uint valuesCount = bucketValues.size();
bucketKeysExpanded.resize(valuesCount * extensionFactor2D);
bucketValuesIncludingNeighbor.resize(valuesCount * extensionFactor2D);
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(extensionFactor2D);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not value.
	 * e.g. if the movement is 5 and the iterator was initialized with 9,
	 * the resulting sequence is [9,9,9,9,9];
*/
thrust::constant_iterator<uint> last = first + valuesCount;
expand(first, last,
make_zip_iterator(
make_tuple(bucketKeys.begin(), bucketValues.begin())),
make_zip_iterator(
make_tuple(bucketKeysExpanded.begin(),
bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd = countingBegin
+ valuesCount * extensionFactor2D;
//std::cout << "number of values for array holding extended value= "
// << valuesCount * extensionFactor2D << std::endl;
//thrust::for_each(
// thrust::make_zip_iterator(
// make_tuple(bucketKeysExpanded.begin(), countingBegin)),
// thrust::make_zip_iterator(
// make_tuple(bucketKeysExpanded.end(), countingEnd)),
// NeighborFunctor2D(numOfBucketsInXDim, numOfBucketsInYDim));
thrust::transform(
make_zip_iterator(
make_tuple(bucketKeysExpanded.begin(), countingBegin)),
make_zip_iterator(
make_tuple(bucketKeysExpanded.end(), countingEnd)),
make_zip_iterator(
make_tuple(bucketKeysExpanded.begin(), countingBegin)),
NeighborFunctor2D(numOfBucketsInXDim, numOfBucketsInYDim));
int numberOfOutOfRange = thrust::count(bucketKeysExpanded.begin(),
bucketKeysExpanded.end(), UINT_MAX);
//std::cout << "number out of range = " << numberOfOutOfRange << std::endl;
int sizeBeforeShrink = bucketKeysExpanded.size();
int numberInsideRange = sizeBeforeShrink - numberOfOutOfRange;
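// Sorting pushes the UINT_MAX (out-of-range) keys to the end of the array;
// the erase calls below then shrink both arrays to the valid entries only.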
thrust::sort_by_key(bucketKeysExpanded.begin(), bucketKeysExpanded.end(),
bucketValuesIncludingNeighbor.begin());
bucketKeysExpanded.erase(bucketKeysExpanded.begin() + numberInsideRange,
bucketKeysExpanded.end());
bucketValuesIncludingNeighbor.erase(
bucketValuesIncludingNeighbor.begin() + numberInsideRange,
bucketValuesIncludingNeighbor.end());
}
void SceNodes::applySceForces() {
std::cout << "begin apply sce forces" << std::endl;
std::cout << "size of lower = " << keyBegin.size() << std::endl;
thrust::counting_iterator<unsigned int> search_begin(0);
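// For every bucket id in [0, totalBucketCount), locate the [begin, end) range
// of its entries within the sorted, neighbor-extended key array.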
thrust::lower_bound(bucketKeysExpanded.begin(), bucketKeysExpanded.end(),
search_begin, search_begin + totalBucketCount, keyBegin.begin());
thrust::upper_bound(bucketKeysExpanded.begin(), bucketKeysExpanded.end(),
search_begin, search_begin + totalBucketCount, keyEnd.begin());
thrust::host_vector<uint> lowerCPU = keyBegin;
std::cout << "finished finding bounds" << std::endl;
int test1 = lowerCPU[0];
int test2 = lowerCPU[0];
std::cout << "test 1 =" << test1 << ", test 2 = " << test2 << std::endl;
std::cout.flush();
int test3 = keyBegin[totalBucketCount - 1];
int test4 = keyEnd[totalBucketCount - 1];
std::cout << "test 3 =" << test3 << ", test 4 = " << test4 << std::endl;
uint* valueAddress = thrust::raw_pointer_cast(
&bucketValuesIncludingNeighbor[0]);
std::cout << "begin pointer casting" << std::endl;
double* nodeLocXAddress = thrust::raw_pointer_cast(&nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&nodeLocZ[0]);
uint* nodeRankAddress = thrust::raw_pointer_cast(&nodeCellRank[0]);
CellType* nodeTypeAddress = thrust::raw_pointer_cast(&nodeCellType[0]);
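// Raw device pointers are extracted here because the AddSceForce functor runs
// on the device and cannot dereference thrust::device_vector members directly.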
std::cout << "begin transformation" << std::endl;
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(keyBegin.begin(),
bucketKeys.begin()),
make_permutation_iterator(keyEnd.begin(),
bucketKeys.begin()), bucketValues.begin(),
make_permutation_iterator(nodeLocX.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeLocY.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeLocZ.begin(),
bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(keyBegin.begin(),
bucketKeys.end()),
make_permutation_iterator(keyEnd.begin(),
bucketKeys.end()), bucketValues.end(),
make_permutation_iterator(nodeLocX.begin(),
bucketValues.end()),
make_permutation_iterator(nodeLocY.begin(),
bucketValues.end()),
make_permutation_iterator(nodeLocZ.begin(),
bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(nodeVelX.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeVelY.begin(),
bucketValues.begin()),
make_permutation_iterator(nodeVelZ.begin(),
bucketValues.begin()))),
AddSceForce(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress, nodeRankAddress, nodeTypeAddress,
maxTotalCellNodeCount, startPosCells, maxNodeOfOneCell,
maxNodePerECM));
std::cout << "after transformation" << std::endl;
}
void SceNodes::calculateAndApplySceForces() {
//const int numberOfBucketsInXDim = (maxX - minX) / bucketSize + 1;
//const int numberOfBucketsInYDim = (maxY - minY) / bucketSize + 1;
std::cout << "in SceNodes, before build buckets 2D:" << std::endl;
buildBuckets2D();
std::cout << "in SceNodes, before extend buckets 2D:" << std::endl;
extendBuckets2D();
std::cout << "in SceNodes, before apply sce forces:" << std::endl;
applySceForces();
std::cout << "in SceNodes, finished apply sce forces:" << std::endl;
}
|
e4a8378f96dadc727b353e9a35e9908bbf557434.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 * Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/time/timer.h"
namespace Saiga
{
namespace CUDA
{
template <typename T>
HD inline T recFact(T a)
{
if (a == T(0))
return 1;
else
return a * recFact(a - 1);
}
template <typename T>
HD inline T recFib(T a)
{
if (a == T(0))
return 0;
else if (a == T(1))
return 1;
else
return recFib(a - 1) + recFib(a - 2);
}
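// Note: recFib is naive double recursion, so its call count grows
// exponentially (roughly O(phi^n)); it exists only to exercise device-side
// recursion for small n.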
template <typename T, unsigned int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE) __global__ static void recurseFact(ArrayView<T> data)
{
CUDA::ThreadInfo<BLOCK_SIZE> ti;
// grid stride loop
for (auto id = ti.thread_id; id < data.size(); id += ti.grid_size)
{
data[id] = recFact(id);
}
}
// This produces the following warning:
// ptxas warning : Stack size for entry function '_ZN4CUDA10recurseFibIiLj128EEEv10ArrayViewIT_E' cannot be statically
// determined
template <typename T, unsigned int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE) __global__ static void recurseFib(ArrayView<T> data)
{
CUDA::ThreadInfo<BLOCK_SIZE> ti;
// grid stride loop
for (auto id = ti.thread_id; id < data.size(); id += ti.grid_size)
{
// data[id] = recFib(id);
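// (presumably left disabled because the unbounded recursion depth makes the
// runtime stack requirement unpredictable; see the ptxas warning above)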
}
}
// nvcc $CPPFLAGS -I ~/Master/libs/data/include/eigen3/ -ptx -lineinfo -src-in-ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr inverse_test.cu
// nvcc $CPPFLAGS -I ~/Master/libs/data/include/eigen3/ -ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr recursion_test.cu
void recursionTest()
{
CUDA_SYNC_CHECK_ERROR();
using ElementType = int;
int N = 30;
thrust::host_vector<ElementType> data(N, 0);
thrust::device_vector<ElementType> d_data(data);
thrust::host_vector<ElementType> ref(N, 0);
for (int i = 0; i < N; ++i)
{
ref[i] = recFact(i);
}
{
const int BLOCK_SIZE = 128;
d_data = data;
{
CUDA::CudaScopedTimerPrint t("recurseFact");
hipLaunchKernelGGL(( recurseFact<ElementType, BLOCK_SIZE>), dim3(CUDA::getBlockCount(N, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data);
}
CUDA_SYNC_CHECK_ERROR();
}
SAIGA_ASSERT(ref == d_data);
for (int i = 0; i < N; ++i)
{
ref[i] = recFib(i);
}
{
const int BLOCK_SIZE = 128;
d_data = data;
{
CUDA::CudaScopedTimerPrint t("recurseFib");
hipLaunchKernelGGL(( recurseFib<ElementType, BLOCK_SIZE>), dim3(CUDA::getBlockCount(N, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, d_data);
}
CUDA_SYNC_CHECK_ERROR();
}
SAIGA_ASSERT(ref == d_data);
std::cout << "Recursion test success!" << std::endl;
CUDA_SYNC_CHECK_ERROR();
}
} // namespace CUDA
} // namespace Saiga
| e4a8378f96dadc727b353e9a35e9908bbf557434.cu | /**
* Copyright (c) 2017 Darius Rückert
* Licensed under the MIT License.
* See LICENSE file for more information.
*/
#include "saiga/cuda/cudaHelper.h"
#include "saiga/cuda/tests/test.h"
#include "saiga/cuda/tests/test_helper.h"
#include "saiga/cuda/thread_info.h"
#include "saiga/time/timer.h"
namespace Saiga
{
namespace CUDA
{
template <typename T>
HD inline T recFact(T a)
{
if (a == T(0))
return 1;
else
return a * recFact(a - 1);
}
template <typename T>
HD inline T recFib(T a)
{
if (a == T(0))
return 0;
else if (a == T(1))
return 1;
else
return recFib(a - 1) + recFib(a - 2);
}
template <typename T, unsigned int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE) __global__ static void recurseFact(ArrayView<T> data)
{
CUDA::ThreadInfo<BLOCK_SIZE> ti;
// grid stride loop
for (auto id = ti.thread_id; id < data.size(); id += ti.grid_size)
{
data[id] = recFact(id);
}
}
// This produces the following warning:
// ptxas warning : Stack size for entry function '_ZN4CUDA10recurseFibIiLj128EEEv10ArrayViewIT_E' cannot be statically
// determined
template <typename T, unsigned int BLOCK_SIZE>
__launch_bounds__(BLOCK_SIZE) __global__ static void recurseFib(ArrayView<T> data)
{
CUDA::ThreadInfo<BLOCK_SIZE> ti;
// grid stride loop
for (auto id = ti.thread_id; id < data.size(); id += ti.grid_size)
{
// data[id] = recFib(id);
}
}
// nvcc $CPPFLAGS -I ~/Master/libs/data/include/eigen3/ -ptx -lineinfo -src-in-ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr inverse_test.cu
// nvcc $CPPFLAGS -I ~/Master/libs/data/include/eigen3/ -ptx -gencode=arch=compute_52,code=compute_52 -g -std=c++11 --expt-relaxed-constexpr recursion_test.cu
void recursionTest()
{
CUDA_SYNC_CHECK_ERROR();
using ElementType = int;
int N = 30;
thrust::host_vector<ElementType> data(N, 0);
thrust::device_vector<ElementType> d_data(data);
thrust::host_vector<ElementType> ref(N, 0);
for (int i = 0; i < N; ++i)
{
ref[i] = recFact(i);
}
{
const int BLOCK_SIZE = 128;
d_data = data;
{
CUDA::CudaScopedTimerPrint t("recurseFact");
recurseFact<ElementType, BLOCK_SIZE><<<CUDA::getBlockCount(N, BLOCK_SIZE), BLOCK_SIZE>>>(d_data);
}
CUDA_SYNC_CHECK_ERROR();
}
SAIGA_ASSERT(ref == d_data);
for (int i = 0; i < N; ++i)
{
ref[i] = recFib(i);
}
{
const int BLOCK_SIZE = 128;
d_data = data;
{
CUDA::CudaScopedTimerPrint t("recurseFib");
recurseFib<ElementType, BLOCK_SIZE><<<CUDA::getBlockCount(N, BLOCK_SIZE), BLOCK_SIZE>>>(d_data);
}
CUDA_SYNC_CHECK_ERROR();
}
SAIGA_ASSERT(ref == d_data);
std::cout << "Recursion test success!" << std::endl;
CUDA_SYNC_CHECK_ERROR();
}
} // namespace CUDA
} // namespace Saiga
|
e5bc7315493a87e7073e374fa7afea2167584c89.hip | // !!! This is a file automatically generated by hipify!!!
#include "px2od.h"
px2OD::px2OD(dwContextHandle_t dwContext, hipStream_t* cudaStreamPtr)
{
mContext = dwContext;
SetCudaStream(cudaStreamPtr);
}
px2OD::~px2OD()
{
}
void px2OD::Init()
{
CHECK_DW_ERROR(dwDriveNet_initDefaultParams(&mDriveNetParams));
mDriveNetParams.maxClustersPerClass = mMaxClustersPerClass;
mDriveNetParams.maxProposalsPerClass = mMaxProposalsPerClass;
mDriveNetParams.networkModel = DW_DRIVENET_MODEL_FRONT;
mDriveNetParams.batchSize = DW_DRIVENET_BATCH_SIZE_1;
mDriveNetParams.networkPrecision = DW_PRECISION_FP32;
CHECK_DW_ERROR(dwDriveNet_initialize(&mDriveNet, &mObjectClusteringHandles,
&mDriveNetClasses,
&mNumDriveNetClasses,
&mDriveNetParams, mContext));
// Initialize Object Detector from DriveNet
CHECK_DW_ERROR(dwObjectDetector_initDefaultParams(&mDetectorParams));
mDetectorParams.enableFuseObjects = false;
mDetectorParams.maxNumImages = 1;
CHECK_DW_ERROR(dwObjectDetector_initializeFromDriveNet(&mDriveNetDetector, &mDetectorParams,
mDriveNet, mContext));
CHECK_DW_ERROR(dwObjectDetector_setCUDAStream(*mCudaStreamPtr, mDriveNetDetector));
float32_t driveNetInputAR = 1.0f;
dwBlobSize driveNetInputBlob;
CHECK_DW_ERROR(dwDriveNet_getInputBlobsize(&driveNetInputBlob, mDriveNet));
driveNetInputAR = static_cast<float32_t>(driveNetInputBlob.height) / static_cast<float32_t>(driveNetInputBlob.width);
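// The ROI spans the full image width; its height is derived from the
// network's input aspect ratio so the crop matches the DriveNet input shape.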
dwRect driveNetROI;
driveNetROI = {0, 0, static_cast<int32_t>(CAM_IMG_WIDTH), static_cast<int32_t>(CAM_IMG_WIDTH*driveNetInputAR)};
dwTransformation2D driveNetROITrans ={{1.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 1.0f}};
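// Identity 2D transformation: the ROI content is handed to the detector
// without additional scaling or rotation.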
CHECK_DW_ERROR(dwObjectDetector_setROI(0, &driveNetROI, &driveNetROITrans, mDriveNetDetector));
CHECK_DW_ERROR(dwObjectDetector_getROI(&mDetectorParams.ROIs[0], &mDetectorParams.transformations[0], 0, mDriveNetDetector));
mDetectorROI.x = mDetectorParams.ROIs[0].x;
mDetectorROI.y = mDetectorParams.ROIs[0].y;
mDetectorROI.width = mDetectorParams.ROIs[0].width;
mDetectorROI.height = mDetectorParams.ROIs[0].height;
CHECK_DW_ERROR(dwObjectDetector_bindInput(&mODInputImg, 1, mDriveNetDetector));
for(uint32_t classIdx = 0; classIdx < mNumDriveNetClasses; ++classIdx)
{
mDetectorOutputObjects[classIdx].reset(new dwObjectHandle_t[MAX_OBJECT_OUTPUT_COUNT]);
mClustererOutputObjects[classIdx].reset(new dwObjectHandle_t[MAX_OBJECT_OUTPUT_COUNT]);
// Initialize each object handle
for (uint32_t objIdx = 0U; objIdx < MAX_OBJECT_OUTPUT_COUNT; ++objIdx)
{
dwObjectData objectData{};
dwObjectDataCamera objectDataCamera{};
CHECK_DW_ERROR(dwObject_createCamera(&mDetectorOutputObjects[classIdx][objIdx], &objectData, &objectDataCamera));
CHECK_DW_ERROR(dwObject_createCamera(&mClustererOutputObjects[classIdx][objIdx], &objectData, &objectDataCamera));
}
mDetectorOutput[classIdx].count = 0;
mDetectorOutput[classIdx].objects = mDetectorOutputObjects[classIdx].get();
mDetectorOutput[classIdx].maxCount = MAX_OBJECT_OUTPUT_COUNT;
mClustererOutput[classIdx].count = 0;
mClustererOutput[classIdx].objects = mClustererOutputObjects[classIdx].get();
mClustererOutput[classIdx].maxCount = MAX_OBJECT_OUTPUT_COUNT;
CHECK_DW_ERROR(dwObjectDetector_bindOutput(&mDetectorOutput[classIdx], 0, classIdx, mDriveNetDetector));
CHECK_DW_ERROR(dwObjectClustering_bindInput(&mDetectorOutput[classIdx], mObjectClusteringHandles[classIdx]));
CHECK_DW_ERROR(dwObjectClustering_bindOutput(&mClustererOutput[classIdx], mObjectClusteringHandles[classIdx]));
}
// Initialize box list
mDnnBoxList.resize(mNumDriveNetClasses);
mDnnLabelList.resize(mNumDriveNetClasses);
mDnnLabelListPtr.resize(mNumDriveNetClasses);
mDnnConfidence.resize(mNumDriveNetClasses);
mDnnObjectID.resize(mNumDriveNetClasses);
// Get the label name for each class id
mClassLabels.resize(mNumDriveNetClasses);
for(uint32_t classIdx = 0U; classIdx < mNumDriveNetClasses; ++classIdx)
{
const char* classLabel;
CHECK_DW_ERROR(dwDriveNet_getClassLabel(&classLabel, classIdx, mDriveNet));
mClassLabels[classIdx] = classLabel;
// Reserve label and box lists
mDnnBoxList[classIdx].reserve(mMaxClustersPerClass);
mDnnLabelList[classIdx].reserve(mMaxClustersPerClass);
mDnnLabelListPtr[classIdx].reserve(mMaxClustersPerClass);
mDnnConfidence[classIdx].reserve(mMaxClustersPerClass);
mDnnObjectID[classIdx].reserve(mMaxClustersPerClass);
}
}
void px2OD::DetectObjects(dwImageCUDA* dwODInputImg,
vector<vector<dwRectf> >& outputODRectPerClass,
vector<const float32_t*>& outputODRectColorPerClass,
vector<vector<const char*> >& outputODLabelPerClass,
vector<vector<float32_t> >& outputODConfidencePerClass,
vector<vector<int> >& outputODIDPerClass)
{
mODInputImg = dwODInputImg;
CHECK_DW_ERROR(dwObjectDetector_processDeviceAsync(mDriveNetDetector));
CHECK_DW_ERROR(dwObjectDetector_processHost(mDriveNetDetector));
for (uint32_t classIdx = 0U; classIdx < mClassLabels.size(); ++classIdx)
{
CHECK_DW_ERROR(dwObjectClustering_process(mObjectClusteringHandles[classIdx]));
// Get outputs of object clustering
mDnnLabelListPtr[classIdx].clear();
mDnnLabelList[classIdx].clear();
mDnnBoxList[classIdx].clear();
mDnnConfidence[classIdx].clear();
mDnnObjectID[classIdx].clear();
dwObjectHandleList clusters = mClustererOutput[classIdx];
for (uint32_t objIdx = 0U; objIdx < clusters.count; ++objIdx)
{
dwObjectHandle_t obj = clusters.objects[objIdx];
dwObjectDataCamera objCameraData{};
dwObject_getDataCamera(&objCameraData, 0, obj);
mDnnBoxList[classIdx].push_back(objCameraData.box2D);
mDnnConfidence[classIdx].push_back(objCameraData.classConfidence);
dwObjectData objData{};
dwObject_getData(&objData, 0, obj);
mDnnObjectID[classIdx].push_back(objData.id);
string boxAnnot = mClassLabels[classIdx];
mDnnLabelList[classIdx].push_back(boxAnnot);
mDnnLabelListPtr[classIdx].push_back(mDnnLabelList[classIdx].back().c_str());
}
}
outputODRectPerClass = mDnnBoxList;
outputODRectColorPerClass = vector<const float*>(mOdBoxColorList, mOdBoxColorList + sizeof mOdBoxColorList/ sizeof mOdBoxColorList[0]);
outputODLabelPerClass = mDnnLabelListPtr;
outputODConfidencePerClass = mDnnConfidence;
outputODIDPerClass = mDnnObjectID;
}
| e5bc7315493a87e7073e374fa7afea2167584c89.cu | #include "px2od.h"
px2OD::px2OD(dwContextHandle_t dwContext, cudaStream_t* cudaStreamPtr)
{
mContext = dwContext;
SetCudaStream(cudaStreamPtr);
}
px2OD::~px2OD()
{
}
void px2OD::Init()
{
CHECK_DW_ERROR(dwDriveNet_initDefaultParams(&mDriveNetParams));
mDriveNetParams.maxClustersPerClass = mMaxClustersPerClass;
mDriveNetParams.maxProposalsPerClass = mMaxProposalsPerClass;
mDriveNetParams.networkModel = DW_DRIVENET_MODEL_FRONT;
mDriveNetParams.batchSize = DW_DRIVENET_BATCH_SIZE_1;
mDriveNetParams.networkPrecision = DW_PRECISION_FP32;
CHECK_DW_ERROR(dwDriveNet_initialize(&mDriveNet, &mObjectClusteringHandles,
&mDriveNetClasses,
&mNumDriveNetClasses,
&mDriveNetParams, mContext));
// Initialize Object Detector from DriveNet
CHECK_DW_ERROR(dwObjectDetector_initDefaultParams(&mDetectorParams));
mDetectorParams.enableFuseObjects = false;
mDetectorParams.maxNumImages = 1;
CHECK_DW_ERROR(dwObjectDetector_initializeFromDriveNet(&mDriveNetDetector, &mDetectorParams,
mDriveNet, mContext));
CHECK_DW_ERROR(dwObjectDetector_setCUDAStream(*mCudaStreamPtr, mDriveNetDetector));
float32_t driveNetInputAR = 1.0f;
dwBlobSize driveNetInputBlob;
CHECK_DW_ERROR(dwDriveNet_getInputBlobsize(&driveNetInputBlob, mDriveNet));
driveNetInputAR = static_cast<float32_t>(driveNetInputBlob.height) / static_cast<float32_t>(driveNetInputBlob.width);
dwRect driveNetROI;
driveNetROI = {0, 0, static_cast<int32_t>(CAM_IMG_WIDTH), static_cast<int32_t>(CAM_IMG_WIDTH*driveNetInputAR)};
dwTransformation2D driveNetROITrans ={{1.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 1.0f}};
CHECK_DW_ERROR(dwObjectDetector_setROI(0, &driveNetROI, &driveNetROITrans, mDriveNetDetector));
CHECK_DW_ERROR(dwObjectDetector_getROI(&mDetectorParams.ROIs[0], &mDetectorParams.transformations[0], 0, mDriveNetDetector));
mDetectorROI.x = mDetectorParams.ROIs[0].x;
mDetectorROI.y = mDetectorParams.ROIs[0].y;
mDetectorROI.width = mDetectorParams.ROIs[0].width;
mDetectorROI.height = mDetectorParams.ROIs[0].height;
CHECK_DW_ERROR(dwObjectDetector_bindInput(&mODInputImg, 1, mDriveNetDetector));
for(uint32_t classIdx = 0; classIdx < mNumDriveNetClasses; ++classIdx)
{
mDetectorOutputObjects[classIdx].reset(new dwObjectHandle_t[MAX_OBJECT_OUTPUT_COUNT]);
mClustererOutputObjects[classIdx].reset(new dwObjectHandle_t[MAX_OBJECT_OUTPUT_COUNT]);
// Initialize each object handle
for (uint32_t objIdx = 0U; objIdx < MAX_OBJECT_OUTPUT_COUNT; ++objIdx)
{
dwObjectData objectData{};
dwObjectDataCamera objectDataCamera{};
CHECK_DW_ERROR(dwObject_createCamera(&mDetectorOutputObjects[classIdx][objIdx], &objectData, &objectDataCamera));
CHECK_DW_ERROR(dwObject_createCamera(&mClustererOutputObjects[classIdx][objIdx], &objectData, &objectDataCamera));
}
mDetectorOutput[classIdx].count = 0;
mDetectorOutput[classIdx].objects = mDetectorOutputObjects[classIdx].get();
mDetectorOutput[classIdx].maxCount = MAX_OBJECT_OUTPUT_COUNT;
mClustererOutput[classIdx].count = 0;
mClustererOutput[classIdx].objects = mClustererOutputObjects[classIdx].get();
mClustererOutput[classIdx].maxCount = MAX_OBJECT_OUTPUT_COUNT;
CHECK_DW_ERROR(dwObjectDetector_bindOutput(&mDetectorOutput[classIdx], 0, classIdx, mDriveNetDetector));
CHECK_DW_ERROR(dwObjectClustering_bindInput(&mDetectorOutput[classIdx], mObjectClusteringHandles[classIdx]));
CHECK_DW_ERROR(dwObjectClustering_bindOutput(&mClustererOutput[classIdx], mObjectClusteringHandles[classIdx]));
}
// Initialize box list
mDnnBoxList.resize(mNumDriveNetClasses);
mDnnLabelList.resize(mNumDriveNetClasses);
mDnnLabelListPtr.resize(mNumDriveNetClasses);
mDnnConfidence.resize(mNumDriveNetClasses);
mDnnObjectID.resize(mNumDriveNetClasses);
// Get the label name for each class id
mClassLabels.resize(mNumDriveNetClasses);
for(uint32_t classIdx = 0U; classIdx < mNumDriveNetClasses; ++classIdx)
{
const char* classLabel;
CHECK_DW_ERROR(dwDriveNet_getClassLabel(&classLabel, classIdx, mDriveNet));
mClassLabels[classIdx] = classLabel;
// Reserve label and box lists
mDnnBoxList[classIdx].reserve(mMaxClustersPerClass);
mDnnLabelList[classIdx].reserve(mMaxClustersPerClass);
mDnnLabelListPtr[classIdx].reserve(mMaxClustersPerClass);
mDnnConfidence[classIdx].reserve(mMaxClustersPerClass);
mDnnObjectID[classIdx].reserve(mMaxClustersPerClass);
}
}
void px2OD::DetectObjects(dwImageCUDA* dwODInputImg,
vector<vector<dwRectf> >& outputODRectPerClass,
vector<const float32_t*>& outputODRectColorPerClass,
vector<vector<const char*> >& outputODLabelPerClass,
vector<vector<float32_t> >& outputODConfidencePerClass,
vector<vector<int> >& outputODIDPerClass)
{
mODInputImg = dwODInputImg;
CHECK_DW_ERROR(dwObjectDetector_processDeviceAsync(mDriveNetDetector));
CHECK_DW_ERROR(dwObjectDetector_processHost(mDriveNetDetector));
for (uint32_t classIdx = 0U; classIdx < mClassLabels.size(); ++classIdx)
{
CHECK_DW_ERROR(dwObjectClustering_process(mObjectClusteringHandles[classIdx]));
// Get outputs of object clustering
mDnnLabelListPtr[classIdx].clear();
mDnnLabelList[classIdx].clear();
mDnnBoxList[classIdx].clear();
mDnnConfidence[classIdx].clear();
mDnnObjectID[classIdx].clear();
dwObjectHandleList clusters = mClustererOutput[classIdx];
for (uint32_t objIdx = 0U; objIdx < clusters.count; ++objIdx)
{
dwObjectHandle_t obj = clusters.objects[objIdx];
dwObjectDataCamera objCameraData{};
dwObject_getDataCamera(&objCameraData, 0, obj);
mDnnBoxList[classIdx].push_back(objCameraData.box2D);
mDnnConfidence[classIdx].push_back(objCameraData.classConfidence);
dwObjectData objData{};
dwObject_getData(&objData, 0, obj);
mDnnObjectID[classIdx].push_back(objData.id);
string boxAnnot = mClassLabels[classIdx];
mDnnLabelList[classIdx].push_back(boxAnnot);
mDnnLabelListPtr[classIdx].push_back(mDnnLabelList[classIdx].back().c_str());
}
}
outputODRectPerClass = mDnnBoxList;
outputODRectColorPerClass = vector<const float*>(mOdBoxColorList, mOdBoxColorList + sizeof mOdBoxColorList/ sizeof mOdBoxColorList[0]);
outputODLabelPerClass = mDnnLabelListPtr;
outputODConfidencePerClass = mDnnConfidence;
outputODIDPerClass = mDnnObjectID;
}
|
c249653d2aa8ae1ca3e093feb7bdd38a8429914f.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <THH.h>
#include <THHGeneral.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <iostream>
#define EPSILON 1e-6
#define WARP_SIZE 32
#define BLOCK_SIZE 512
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != hipSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << hipGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
namespace {
template <typename scalar_t>
__global__ void bn_cuda_forward_kernel(
const scalar_t* __restrict__ input,
scalar_t* __restrict__ output,
const scalar_t* __restrict__ mean,
const scalar_t* __restrict__ inv_std,
const scalar_t* __restrict__ gamma,
const scalar_t* __restrict__ beta,
const size_t b,
const size_t h){
int c = gridDim.x;
int c_idx = blockIdx.x;
scalar_t temp_mean = mean[c_idx];
scalar_t temp_inv_std = inv_std[c_idx];
scalar_t temp_gamma = gamma[c_idx];
scalar_t temp_beta = beta[c_idx];
int mch = c * h;
int mcidh = c_idx * h;
for(int i=0; i<b; ++i){
for(int j=threadIdx.x; j<h; j+=BLOCK_SIZE){
output[i*mch + mcidh + j] = \
(input[i*mch + mcidh + j] - temp_mean) * temp_inv_std * temp_gamma + temp_beta;
}
}
}
template <typename scalar_t>
__global__ void bn_cuda_backward_kernel(
const scalar_t* __restrict__ grad_out,
const scalar_t* __restrict__ input,
scalar_t* __restrict__ grad_in,
scalar_t* __restrict__ grad_mean,
scalar_t* __restrict__ grad_inv_std,
scalar_t* __restrict__ grad_gamma,
scalar_t* __restrict__ grad_beta,
const scalar_t* __restrict__ mean,
const scalar_t* __restrict__ inv_std,
const scalar_t* __restrict__ gamma,
const scalar_t* __restrict__ beta,
const size_t b,
const size_t h,
bool train){
__shared__ scalar_t values[BLOCK_SIZE];
__shared__ scalar_t valuess[BLOCK_SIZE];
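// Two-stage block reduction: per-thread partial sums land in shared memory,
// one thread per warp then folds its warp's slice, and finally thread 0 folds
// the per-warp results into values_sum[0] / values_ssum[0].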
//__syncthreads();
int c_idx = blockIdx.x;
int c = gridDim.x;
int mch = c * h;
int mcidh = c_idx * h;
scalar_t temp_s = 0, temp_ss = 0, temp;
for(int i=0; i<b; ++i){
for(int j=threadIdx.x; j<h; j+=BLOCK_SIZE){
temp = grad_out[i*mch + mcidh + j];
temp_s += temp;
temp_ss += temp * input[i*mch + mcidh + j];
}
}
values[threadIdx.x] = temp_s;
valuess[threadIdx.x] = temp_ss;
__syncthreads();
// sum over warp
__shared__ scalar_t values_sum[WARP_SIZE];
__shared__ scalar_t values_ssum[WARP_SIZE];
if(threadIdx.x % WARP_SIZE == 0){
int temp_index = threadIdx.x / WARP_SIZE;
temp_s = 0;
temp_ss = 0;
int temp_max = threadIdx.x + WARP_SIZE;
for(int i=threadIdx.x; i<temp_max; ++i){
temp_s += values[i];
temp_ss += valuess[i];
}
values_sum[temp_index] = temp_s;
values_ssum[temp_index] = temp_ss;
}
__syncthreads();
// reduce the per-warp partial sums
if(threadIdx.x == 0){
int max = (BLOCK_SIZE-1) / WARP_SIZE + 1;
temp_s = 0;
temp_ss = 0;
for(int i=0; i<max; ++i){
temp_s += values_sum[i];
temp_ss += values_ssum[i];
}
values_sum[0] = temp_s;
values_ssum[0] = temp_ss;
}
// the reduction is done above; next we assign the results
if(threadIdx.x == 0){
if(train){
grad_mean[c_idx] = - gamma[c_idx] * inv_std[c_idx] * values_sum[0];
grad_inv_std[c_idx] = gamma[c_idx] * (values_ssum[0] - mean[c_idx] * values_sum[0]);
}
grad_gamma[c_idx] = inv_std[c_idx] * (values_ssum[0] - mean[c_idx] * values_sum[0]);
grad_beta[c_idx] = values_sum[0];
}
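// Note: grad_gamma above equals sum(grad_out * x_hat) with
// x_hat = (x - mean) * inv_std, expanded as inv_std * (sum(g*x) - mean * sum(g)).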
scalar_t scale = gamma[c_idx] * inv_std[c_idx];
for(int i=0; i<b; ++i){
for(int j=threadIdx.x; j<h; j+=BLOCK_SIZE){
grad_in[i*mch + mcidh + j] = grad_out[i*mch + mcidh + j] * scale;
}
}
}
} // namespace
std::vector<at::Tensor> bn_cuda_forward(
at::Tensor input,
at::Tensor mean,
at::Tensor inv_std,
at::Tensor gamma,
at::Tensor beta){
//hipDeviceSynchronize();
//THCudaCheck(hipGetLastError());
const auto b = input.size(0);
const auto c = input.size(1);
const auto h = input.size(2);
auto output = at::zeros_like(input);
const int threads = BLOCK_SIZE;
const int blocks = c;
AT_DISPATCH_FLOATING_TYPES(input.type(), "bn_forward_cuda", ([&] {
hipLaunchKernelGGL(( bn_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
input.data<scalar_t>(),
output.data<scalar_t>(),
mean.data<scalar_t>(),
inv_std.data<scalar_t>(),
gamma.data<scalar_t>(),
beta.data<scalar_t>(),
b, h);
}));
//hipDeviceSynchronize();
//THCudaCheck(hipGetLastError());
return {output};
}
std::vector<at::Tensor> bn_cuda_backward(
at::Tensor grad_out,
at::Tensor input,
at::Tensor mean,
at::Tensor inv_std,
at::Tensor gamma,
at::Tensor beta,
bool train){
//hipDeviceSynchronize();
//THCudaCheck(hipGetLastError());
const auto b = grad_out.size(0);
const auto c = grad_out.size(1);
const auto h = grad_out.size(2);
auto grad_in = at::zeros_like(grad_out);
auto grad_mean = at::zeros_like(mean);
auto grad_inv_std = at::zeros_like(inv_std);
auto grad_gamma = at::zeros_like(gamma);
auto grad_beta = at::zeros_like(beta);
const int threads = BLOCK_SIZE;
const int blocks = c;
//hipDeviceSynchronize();
//THCudaCheck(hipGetLastError());
AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "bn_backward_cuda", ([&] {
hipLaunchKernelGGL(( bn_cuda_backward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
grad_out.data<scalar_t>(),
input.data<scalar_t>(),
grad_in.data<scalar_t>(),
grad_mean.data<scalar_t>(),
grad_inv_std.data<scalar_t>(),
grad_gamma.data<scalar_t>(),
grad_beta.data<scalar_t>(),
mean.data<scalar_t>(),
inv_std.data<scalar_t>(),
gamma.data<scalar_t>(),
beta.data<scalar_t>(),
b,
h,
train);
}));
//hipDeviceSynchronize();
//THCudaCheck(hipGetLastError());
return {grad_in, grad_mean, grad_inv_std, grad_gamma, grad_beta};
}
| c249653d2aa8ae1ca3e093feb7bdd38a8429914f.cu | #include <ATen/ATen.h>
#include <THC.h>
#include <THCGeneral.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <iostream>
#define EPSILON 1e-6
#define WARP_SIZE 32
#define BLOCK_SIZE 512
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
namespace {
template <typename scalar_t>
__global__ void bn_cuda_forward_kernel(
const scalar_t* __restrict__ input,
scalar_t* __restrict__ output,
const scalar_t* __restrict__ mean,
const scalar_t* __restrict__ inv_std,
const scalar_t* __restrict__ gamma,
const scalar_t* __restrict__ beta,
const size_t b,
const size_t h){
int c = gridDim.x;
int c_idx = blockIdx.x;
scalar_t temp_mean = mean[c_idx];
scalar_t temp_inv_std = inv_std[c_idx];
scalar_t temp_gamma = gamma[c_idx];
scalar_t temp_beta = beta[c_idx];
int mch = c * h;
int mcidh = c_idx * h;
for(int i=0; i<b; ++i){
for(int j=threadIdx.x; j<h; j+=BLOCK_SIZE){
output[i*mch + mcidh + j] = \
(input[i*mch + mcidh + j] - temp_mean) * temp_inv_std * temp_gamma + temp_beta;
}
}
}
template <typename scalar_t>
__global__ void bn_cuda_backward_kernel(
const scalar_t* __restrict__ grad_out,
const scalar_t* __restrict__ input,
scalar_t* __restrict__ grad_in,
scalar_t* __restrict__ grad_mean,
scalar_t* __restrict__ grad_inv_std,
scalar_t* __restrict__ grad_gamma,
scalar_t* __restrict__ grad_beta,
const scalar_t* __restrict__ mean,
const scalar_t* __restrict__ inv_std,
const scalar_t* __restrict__ gamma,
const scalar_t* __restrict__ beta,
const size_t b,
const size_t h,
bool train){
__shared__ scalar_t values[BLOCK_SIZE];
__shared__ scalar_t valuess[BLOCK_SIZE];
//__syncthreads();
int c_idx = blockIdx.x;
int c = gridDim.x;
int mch = c * h;
int mcidh = c_idx * h;
scalar_t temp_s = 0, temp_ss = 0, temp;
for(int i=0; i<b; ++i){
for(int j=threadIdx.x; j<h; j+=BLOCK_SIZE){
temp = grad_out[i*mch + mcidh + j];
temp_s += temp;
temp_ss += temp * input[i*mch + mcidh + j];
}
}
values[threadIdx.x] = temp_s;
valuess[threadIdx.x] = temp_ss;
__syncthreads();
// sum over warp
__shared__ scalar_t values_sum[WARP_SIZE];
__shared__ scalar_t values_ssum[WARP_SIZE];
if(threadIdx.x % WARP_SIZE == 0){
int temp_index = threadIdx.x / WARP_SIZE;
temp_s = 0;
temp_ss = 0;
int temp_max = threadIdx.x + WARP_SIZE;
for(int i=threadIdx.x; i<temp_max; ++i){
temp_s += values[i];
temp_ss += valuess[i];
}
values_sum[temp_index] = temp_s;
values_ssum[temp_index] = temp_ss;
}
__syncthreads();
// reduce the per-warp partial sums
if(threadIdx.x == 0){
int max = (BLOCK_SIZE-1) / WARP_SIZE + 1;
temp_s = 0;
temp_ss = 0;
for(int i=0; i<max; ++i){
temp_s += values_sum[i];
temp_ss += values_ssum[i];
}
values_sum[0] = temp_s;
values_ssum[0] = temp_ss;
}
// the reduction is done above; next we assign the results
if(threadIdx.x == 0){
if(train){
grad_mean[c_idx] = - gamma[c_idx] * inv_std[c_idx] * values_sum[0];
grad_inv_std[c_idx] = gamma[c_idx] * (values_ssum[0] - mean[c_idx] * values_sum[0]);
}
grad_gamma[c_idx] = inv_std[c_idx] * (values_ssum[0] - mean[c_idx] * values_sum[0]);
grad_beta[c_idx] = values_sum[0];
}
scalar_t scale = gamma[c_idx] * inv_std[c_idx];
for(int i=0; i<b; ++i){
for(int j=threadIdx.x; j<h; j+=BLOCK_SIZE){
grad_in[i*mch + mcidh + j] = grad_out[i*mch + mcidh + j] * scale;
}
}
}
} // namespace
std::vector<at::Tensor> bn_cuda_forward(
at::Tensor input,
at::Tensor mean,
at::Tensor inv_std,
at::Tensor gamma,
at::Tensor beta){
//cudaDeviceSynchronize();
//THCudaCheck(cudaGetLastError());
const auto b = input.size(0);
const auto c = input.size(1);
const auto h = input.size(2);
auto output = at::zeros_like(input);
const int threads = BLOCK_SIZE;
const int blocks = c;
AT_DISPATCH_FLOATING_TYPES(input.type(), "bn_forward_cuda", ([&] {
bn_cuda_forward_kernel<scalar_t><<<blocks, threads>>>(
input.data<scalar_t>(),
output.data<scalar_t>(),
mean.data<scalar_t>(),
inv_std.data<scalar_t>(),
gamma.data<scalar_t>(),
beta.data<scalar_t>(),
b, h);
}));
//cudaDeviceSynchronize();
//THCudaCheck(cudaGetLastError());
return {output};
}
std::vector<at::Tensor> bn_cuda_backward(
at::Tensor grad_out,
at::Tensor input,
at::Tensor mean,
at::Tensor inv_std,
at::Tensor gamma,
at::Tensor beta,
bool train){
//cudaDeviceSynchronize();
//THCudaCheck(cudaGetLastError());
const auto b = grad_out.size(0);
const auto c = grad_out.size(1);
const auto h = grad_out.size(2);
auto grad_in = at::zeros_like(grad_out);
auto grad_mean = at::zeros_like(mean);
auto grad_inv_std = at::zeros_like(inv_std);
auto grad_gamma = at::zeros_like(gamma);
auto grad_beta = at::zeros_like(beta);
const int threads = BLOCK_SIZE;
const int blocks = c;
//cudaDeviceSynchronize();
//THCudaCheck(cudaGetLastError());
AT_DISPATCH_FLOATING_TYPES(grad_out.type(), "bn_backward_cuda", ([&] {
bn_cuda_backward_kernel<scalar_t><<<blocks, threads>>>(
grad_out.data<scalar_t>(),
input.data<scalar_t>(),
grad_in.data<scalar_t>(),
grad_mean.data<scalar_t>(),
grad_inv_std.data<scalar_t>(),
grad_gamma.data<scalar_t>(),
grad_beta.data<scalar_t>(),
mean.data<scalar_t>(),
inv_std.data<scalar_t>(),
gamma.data<scalar_t>(),
beta.data<scalar_t>(),
b,
h,
train);
}));
//cudaDeviceSynchronize();
//THCudaCheck(cudaGetLastError());
return {grad_in, grad_mean, grad_inv_std, grad_gamma, grad_beta};
}
|
93e7dedfda0d09c92fb1d13d565bbce098ad23b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/detail/KernelUtils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#endif
namespace at::native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
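  // Scatter: each input element is written to the output position that was
  // recorded in `indices` by the preceding max_pool operation.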
int64_t outputImageSize = outputHeight * outputWidth;
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
CUDA_KERNEL_ASSERT(maxind >= 0 && maxind < outputImageSize);
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor64<T, 4> input,
PackedTensorAccessor64<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
int64_t outputImageSize = oT * oH * oW;
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
CUDA_KERNEL_ASSERT(index >= 0 && index < outputImageSize);
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor64<int64_t, 4> indices,
PackedTensorAccessor64<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling2d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices_.scalar_type());
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
for (int64_t i = 1; i < self_.ndimension(); ++i) {
TORCH_CHECK(self_.size(i) > 0, "max_unpooling2d_forward_out_cuda(): ",
"Expected input to have non-zero size for non-batch dimensions, but got ",
self_.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, but got tensor with dimension: ", self_.ndimension());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size, but got ", output_size.size(), " elements.");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
if (count != 0) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_forward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.numel(),
self.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}));
}
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling2d_forward_out_cuda(self, indices, output_size, output);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
const char *fn_name) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices.scalar_type());
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with dim ", input.ndimension());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size, but got ", output_size.size(), " elements.");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride, but got: ", stride.size(), " elements.");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding, but got: ", padding.size(), " elements.");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Expected shape of indices to be: ", input.sizes(), " but got: ", indices.sizes());
for (int64_t i = 1; i < input.ndimension(); ++i) {
TORCH_CHECK(input.size(i) > 0, fn_name,
": Expected input to have non-zero size for non-batch dimensions, but got ",
input.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling3d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding, "max_unpooling3d_forward_out_cuda()");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (self.numel() == 0) {
return output;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
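  // gridDim.z is capped at 65535, so the batch * slices * time planes are
  // processed in chunks of at most 65535 per kernel launch.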
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_forward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
self.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
output.data_ptr<scalar_t>(),
oT,
oH,
oW,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling3d_forward_out_cuda(
self, indices, output_size, stride, padding, output);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& grad_input) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got type: ", indices_.scalar_type());
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements, got size: ", output_size.size());
int64_t nInputCols, nInputRows, nInputPlane;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int64_t count = self.numel();
if (count == 0) {
return grad_input;
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
hipLaunchKernelGGL(( max_unpooling2d_backward_kernel),
dim3(GET_BLOCKS(count)),
dim3(CUDA_NUM_THREADS),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
count,
grad_output.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.data_ptr<scalar_t>());
C10_HIP_KERNEL_LAUNCH_CHECK();
}));
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling2d_backward_out_cuda(
grad_output, self, indices, output_size, grad_input);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& grad_input) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding, "max_unpooling3d_backward_out_cuda()");
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (grad_input.numel() == 0) {
return grad_input;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
hipLaunchKernelGGL(( max_unpooling3d_backward_kernel),
dim3(grid),
dim3(block),
0,
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output.data_ptr<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor64<int64_t, 4>(),
grad_input_reshaped.packed_accessor64<scalar_t, 4>(),
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling3d_backward_out_cuda(
grad_output, self, indices, output_size, stride, padding, grad_input);
return grad_input;
}
} // namespace at::native
| 93e7dedfda0d09c92fb1d13d565bbce098ad23b4.cu | #define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/detail/KernelUtils.h>
#include <c10/util/Exception.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#include <ATen/NativeFunctions.h>
#else
#include <ATen/ops/max_unpool2d_native.h>
#include <ATen/ops/max_unpool3d_native.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/empty_like.h>
#endif
namespace at::native {
using namespace at::cuda::detail;
template <typename T>
__host__ __device__ __forceinline__ T ceilDiv(T a, T b) {
return (a + b - 1) / b;
}
template <typename T>
__global__ void max_unpooling2d_forward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
int64_t outputImageSize = outputHeight * outputWidth;
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
output += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
CUDA_KERNEL_ASSERT(maxind >= 0 && maxind < outputImageSize);
output[maxind] = input[linearIndex];
}
}
template <typename T>
__global__ void max_unpooling3d_forward_kernel(
PackedTensorAccessor64<T, 4> input,
PackedTensorAccessor64<int64_t, 4> indices,
T* output,
const int64_t oT,
const int64_t oH,
const int64_t oW,
const int64_t offsetZ) {
int64_t iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int64_t iRow = blockIdx.y * blockDim.y + threadIdx.y;
int64_t iFrame = (blockIdx.z + offsetZ) % input.size(1); // input frame/time
int64_t slice = (blockIdx.z + offsetZ) / input.size(1); // input slice/feature
int64_t outputImageSize = oT * oH * oW;
if (iRow < input.size(2) && iColumn < input.size(3)) {
T val = input[slice][iFrame][iRow][iColumn];
int64_t index = indices[slice][iFrame][iRow][iColumn];
CUDA_KERNEL_ASSERT(index >= 0 && index < outputImageSize);
output[slice * oT * oH * oW + index] = val;
}
}
template <typename T>
__global__ void max_unpooling2d_backward_kernel(
const int64_t numInputElements,
const T* input,
const int64_t* indices,
const int64_t numChannels,
const int64_t inputHeight,
const int64_t inputWidth,
const int64_t outputHeight,
const int64_t outputWidth,
T* output) {
CUDA_KERNEL_LOOP(linearIndex, numInputElements) {
int c = (linearIndex / inputWidth / inputHeight) % numChannels;
int n = linearIndex / inputWidth / inputHeight / numChannels;
input += (n * numChannels + c) * outputHeight * outputWidth;
int maxind = indices[linearIndex];
output[linearIndex] = input[maxind];
}
}
template <typename T>
__global__ void max_unpooling3d_backward_kernel(
T* gradOutputData,
int64_t oT,
int64_t oH,
int64_t oW,
PackedTensorAccessor64<int64_t, 4> indices,
PackedTensorAccessor64<T, 4> gradInput,
int offsetZ) {
int iColumn = blockIdx.x * blockDim.x + threadIdx.x;
int iRow = blockIdx.y * blockDim.y + threadIdx.y;
int iFrame = (blockIdx.z + offsetZ) % gradInput.size(1); // output frame/time
int slice =
(blockIdx.z + offsetZ) / gradInput.size(1); // output slice/feature
if (iRow < gradInput.size(2) && iColumn < gradInput.size(3)) {
int64_t index = indices[slice][iFrame][iRow][iColumn];
T grad_val = gradOutputData[slice * oT * oH * oW + index];
gradInput[slice][iFrame][iRow][iColumn] = grad_val;
}
}
Tensor& max_unpooling2d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling2d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices_.scalar_type());
auto oheight = output_size[0];
auto owidth = output_size[1];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling2d_forward_out_cuda", {output_arg, self_arg, indices_arg});
for (int64_t i = 1; i < self_.ndimension(); ++i) {
TORCH_CHECK(self_.size(i) > 0, "max_unpooling2d_forward_out_cuda(): ",
"Expected input to have non-zero size for non-batch dimensions, but got ",
self_.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, but got tensor with dimension: ", self_.ndimension());
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(
output_size.size() == 2,
"There should be exactly two elements (width, height) in output_size, but got ", output_size.size(), " elements.");
int64_t dimw = 2;
int64_t dimh = 1;
int64_t numBatch = 1;
int64_t numChannels;
int64_t inputHeight;
int64_t inputWidth;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
if (self.ndimension() == 4) {
numBatch = self.size(0);
dimw++;
dimh++;
}
numChannels = self.size(dimh - 1);
inputHeight = self.size(dimh);
inputWidth = self.size(dimw);
output.resize_({numBatch, numChannels, oheight, owidth});
output.zero_();
auto count = self.numel();
if (count != 0) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_forward_kernel", ([&] {
max_unpooling2d_forward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.numel(),
self.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
numChannels,
inputHeight,
inputWidth,
oheight,
owidth,
output.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}));
}
if (self.ndimension() == 3) {
output.resize_({numChannels, oheight, owidth});
}
return output;
}
Tensor max_unpooling2d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling2d_forward_out_cuda(self, indices, output_size, output);
return output;
}
static void max_unpooling3d_shape_check(
const Tensor& input,
const Tensor& gradOutput,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
const char *fn_name) {
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TORCH_CHECK(
indices.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got: ", indices.scalar_type());
TORCH_CHECK(
(input.ndimension() == 4 || input.ndimension() == 5),
"Input to max_unpooling3d should be a 4d or 5d Tensor, but got a tensor with dim ", input.ndimension());
TORCH_CHECK(
output_size.size() == 3,
"There should be exactly three elements (depth, height, width) in output_size, but got ", output_size.size(), " elements.");
TORCH_CHECK(
stride.size() == 3,
"There should be exactly three elements (depth, height, width) in stride, but got: ", stride.size(), " elements.");
TORCH_CHECK(
padding.size() == 3,
"There should be exactly three elements (depth, height, width) in padding, but got: ", padding.size(), " elements.");
TORCH_CHECK(
input.sizes() == indices.sizes(),
"Expected shape of indices to be: ", input.sizes(), " but got: ", indices.sizes());
for (int64_t i = 1; i < input.ndimension(); ++i) {
TORCH_CHECK(input.size(i) > 0, fn_name,
": Expected input to have non-zero size for non-batch dimensions, but got ",
input.sizes(), " with dimension ", i , " being empty.");
}
TORCH_CHECK(
stride[0] > 0 && stride[1] > 0 && stride[2] > 0,
"strides should be greater than zero, but got stride: ",
stride);
int dimw = 3;
int dimh = 2;
int dimt = 1;
int dimn = 0;
if (input.ndimension() == 5) {
dimw++;
dimh++;
dimt++;
dimn++;
}
int nslices = input.size(dimn);
if (gradOutput.defined()) {
if (oT != gradOutput.size(dimt) || oH != gradOutput.size(dimh) ||
oW != gradOutput.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. oT= ",
oT,
", oH= ",
oH,
", oW= ",
oW,
". gradOutput: ",
gradOutput.size(dimt),
"x",
gradOutput.size(dimh),
"x",
gradOutput.size(dimw));
}
TORCH_CHECK(
gradOutput.ndimension() == input.ndimension() &&
gradOutput.size(dimn) == nslices,
"gradOutput and input Tensors should have same number of dimensions and also the same number of channels/slices");
}
}
Tensor& max_unpooling3d_forward_out_cuda(const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& output) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic with duplicate indices
at::globalContext().alertNotDeterministic("max_unpooling3d_forward_out");
TORCH_CHECK(output.is_contiguous(), "output must be contiguous");
max_unpooling3d_shape_check(
self_, Tensor(), indices_, output_size, stride, padding, "max_unpooling3d_forward_out_cuda()");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
TensorArg output_arg{output, "output", 1}, self_arg{self_, "self_", 2},
indices_arg{indices_, "indices_", 3};
checkAllSameGPU(
"max_unpooling3d_forward_out_cuda", {output_arg, self_arg, indices_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
int64_t batchSize;
int64_t inputSlices;
int64_t inputTime;
int64_t inputHeight;
int64_t inputWidth;
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
output.resize_({inputSlices, oT, oH, oW});
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
output.resize_({batchSize, inputSlices, oT, oH, oW});
}
output.zero_();
// Collapse batch and feature dimensions if needed
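  // (the forward kernel indexes 4-d packed accessors, so a 5-d input is
  // viewed as [N*C, T, H, W])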
if (self.ndimension() == 5) {
self = self.reshape({self.size(0) * self.size(1),
self.size(2),
self.size(3),
self.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (self.numel() == 0) {
return output;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
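  // gridDim.z is capped at 65535 blocks, so the z-slices are processed in
  // chunks of 65535, with offsetZ advancing between launches.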
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_forward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_forward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
self.packed_accessor64<scalar_t, 4>(),
indices.packed_accessor64<int64_t, 4>(),
output.data_ptr<scalar_t>(),
oT,
oH,
oW,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return output;
}
Tensor max_unpooling3d_forward_cuda(
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto output = at::empty({0}, self.options());
at::native::max_unpooling3d_forward_out_cuda(
self, indices, output_size, stride, padding, output);
return output;
}
at::Tensor& max_unpooling2d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
Tensor& grad_input) {
int64_t oheight = output_size[0];
int64_t owidth = output_size[1];
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
TORCH_CHECK(
indices_.scalar_type() == at::ScalarType::Long,
"elements in indices should be type int64 but got type: ", indices_.scalar_type());
TensorArg grad_input_arg{grad_input, "grad_input", 1},
grad_output_arg{grad_output_, "grad_output_", 2},
self_arg{self_, "self_", 3}, indices_arg{indices_, "indices_", 4};
checkAllSameGPU(
"max_unpooling2d_backward_out_cuda",
{grad_input_arg, grad_output_arg, self_arg, indices_arg});
TORCH_CHECK(
(self_.ndimension() == 3 || self_.ndimension() == 4),
"Input to max_unpooling2d should be a 3d or 4d Tensor, instead got: ",
self_);
TORCH_CHECK(
self_.sizes() == indices_.sizes(),
"Expected shape of indices to be: ", self_.sizes(), " but got: ", indices_.sizes());
TORCH_CHECK(output_size.size() == 2, "output_size must have two elements, got size: ", output_size.size());
int64_t nInputCols, nInputRows, nInputPlane;
int dimw = 2;
int dimh = 1;
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 3) {
nInputPlane = self.size(0);
} else {
++dimw;
++dimh;
nInputPlane = self.size(1);
}
nInputCols = self.size(dimw);
nInputRows = self.size(dimh);
if (oheight != grad_output.size(dimh) || owidth != grad_output.size(dimw)) {
AT_ERROR(
"Inconsistent gradOutput size. output height: ",
oheight,
", output width= ",
owidth,
", gradOutput: ",
grad_output.size(dimh),
"x",
grad_output.size(dimw));
}
grad_input.resize_as_(self);
grad_input.zero_();
int64_t count = self.numel();
if (count == 0) {
return grad_input;
}
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling2d_backward_kernel", ([&] {
max_unpooling2d_backward_kernel<<<
GET_BLOCKS(count),
CUDA_NUM_THREADS,
0,
at::cuda::getCurrentCUDAStream()>>>(
count,
grad_output.data_ptr<scalar_t>(),
indices.data_ptr<int64_t>(),
nInputPlane,
nInputRows,
nInputCols,
oheight,
owidth,
grad_input.data_ptr<scalar_t>());
C10_CUDA_KERNEL_LAUNCH_CHECK();
}));
return grad_input;
}
at::Tensor max_unpooling2d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling2d_backward_out_cuda(
grad_output, self, indices, output_size, grad_input);
return grad_input;
}
at::Tensor& max_unpooling3d_backward_out_cuda(const Tensor& grad_output_,
const Tensor& self_,
const Tensor& indices_,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding,
Tensor& grad_input) {
TORCH_CHECK(grad_input.is_contiguous(), "grad_input must be contiguous");
int64_t oT = output_size[0];
int64_t oH = output_size[1];
int64_t oW = output_size[2];
max_unpooling3d_shape_check(
self_, grad_output_, indices_, output_size, stride, padding, "max_unpooling3d_backward_out_cuda()");
int batchSize = 0;
int inputSlices = 0;
int inputTime = 0;
int64_t inputHeight = 0;
int64_t inputWidth = 0;
TensorArg self_arg{self_, "self_", 1}, indices_arg{indices_, "indices_", 2},
grad_output_arg{grad_output_, "grad_output_", 3},
grad_input_arg{grad_input, "grad_input", 4};
checkAllSameGPU(
"max_unpooling3d_backward_out_cuda",
{self_arg, indices_arg, grad_output_arg, grad_input_arg});
auto self = self_.contiguous();
auto indices = indices_.contiguous();
auto grad_output = grad_output_.contiguous();
if (self.ndimension() == 4) {
batchSize = 1;
inputSlices = self.size(0);
inputTime = self.size(1);
inputHeight = self.size(2);
inputWidth = self.size(3);
} else {
batchSize = self.size(0);
inputSlices = self.size(1);
inputTime = self.size(2);
inputHeight = self.size(3);
inputWidth = self.size(4);
}
grad_input.resize_as_(self);
grad_input.zero_();
// Collapse batch and feature dimensions if needed
auto grad_input_reshaped = grad_input;
if (grad_input.ndimension() == 5) {
grad_input_reshaped =
grad_input.reshape({grad_input.size(0) * grad_input.size(1),
grad_input.size(2),
grad_input.size(3),
grad_input.size(4)});
indices = indices.reshape({indices.size(0) * indices.size(1),
indices.size(2),
indices.size(3),
indices.size(4)});
}
if (grad_input.numel() == 0) {
return grad_input;
}
int totalZ = inputTime * inputSlices * batchSize;
int offsetZ = 0;
dim3 block(32, 8);
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half,
self.scalar_type(), "max_unpooling3d_backward_kernel", ([&] {
while (totalZ > 0) {
dim3 grid(
ceilDiv(inputWidth, static_cast<int64_t>(block.x)),
ceilDiv(inputHeight, static_cast<int64_t>(block.y)),
totalZ > 65535 ? 65535 : totalZ);
max_unpooling3d_backward_kernel<<<
grid,
block,
0,
at::cuda::getCurrentCUDAStream()>>>(
grad_output.data_ptr<scalar_t>(),
oT,
oH,
oW,
indices.packed_accessor64<int64_t, 4>(),
grad_input_reshaped.packed_accessor64<scalar_t, 4>(),
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}));
return grad_input;
}
at::Tensor max_unpooling3d_backward_cuda(
const Tensor& grad_output,
const Tensor& self,
const Tensor& indices,
IntArrayRef output_size,
IntArrayRef stride,
IntArrayRef padding) {
auto grad_input = at::empty_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
at::native::max_unpooling3d_backward_out_cuda(
grad_output, self, indices, output_size, stride, padding, grad_input);
return grad_input;
}
} // namespace at::native
|
b330df58356ef5fc4eebbf84fb7b52979e11ac02.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zmergebicgstab3.cu, normal z -> d, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from dmergebicgstab into one
// The difference to dmergebicgstab2 is that the SpMV is not merged into the
// kernels. This results in higher flexibility at the price of lower performance
// (a usage sketch at the end of this file shows how the pieces fit together).
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge1_kernel(
int n,
double * skp,
double * v,
double * r,
double * p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double beta=skp[1];
double omega=skp[2];
if ( i<n ) {
p[i] = r[i] + beta * ( p[i] - omega * v[i] );
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
p = beta*p
p = p-omega*beta*v
p = p+r
-> p = r + beta * ( p - omega * v )
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDouble_ptr
set of scalar parameters
@param[in]
v magmaDouble_ptr
input vector v
@param[in]
r magmaDouble_ptr
input vector r
@param[in,out]
p magmaDouble_ptr
input/output vector p
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge1(
magma_int_t n,
magmaDouble_ptr skp,
magmaDouble_ptr v,
magmaDouble_ptr r,
magmaDouble_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dbicgmerge1_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, v, r, p );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge2_kernel(
int n,
double * skp,
double * r,
double * v,
double * s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double alpha=skp[0];
if ( i < n ) {
s[i] = r[i] - alpha * v[i];
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
s=r
s=s-alpha*v
-> s = r - alpha * v
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDouble_ptr
set of scalar parameters
@param[in]
r magmaDouble_ptr
input vector r
@param[in]
v magmaDouble_ptr
input vector v
@param[out]
s magmaDouble_ptr
output vector s
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge2(
magma_int_t n,
magmaDouble_ptr skp,
magmaDouble_ptr r,
magmaDouble_ptr v,
magmaDouble_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dbicgmerge2_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, r, v, s );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge3_kernel(
int n,
double * skp,
double * p,
double * se,
double * t,
double * x,
double * r
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double alpha=skp[0];
double omega=skp[2];
if ( i<n ) {
double s;
s = se[i];
x[i] = x[i] + alpha * p[i] + omega * s;
r[i] = s - omega * t[i];
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x=x+alpha*p
x=x+omega*s
r=s
r=r-omega*t
-> x = x + alpha * p + omega * s
-> r = s - omega * t
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDouble_ptr
set of scalar parameters
@param[in]
p magmaDouble_ptr
input p
@param[in]
s magmaDouble_ptr
input s
@param[in]
t magmaDouble_ptr
input t
@param[in,out]
x magmaDouble_ptr
input/output x
@param[in,out]
r magmaDouble_ptr
input/output r
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge3(
magma_int_t n,
magmaDouble_ptr skp,
magmaDouble_ptr p,
magmaDouble_ptr s,
magmaDouble_ptr t,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
hipLaunchKernelGGL(( magma_dbicgmerge3_kernel), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , n, skp, p, s, t, x, r );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
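// Layout of the scalar workspace skp as used below (inferred from the kernel
// bodies; the exact convention is fixed by the caller):
//   skp[0] = alpha (input: <r_hat,v>),  skp[1] = beta,  skp[2] = omega,
//   skp[3] = rho_old,  skp[4] = rho_new,  skp[6]/skp[7] = <t,s> and <t,t>.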
__global__ void
magma_dbicgmerge4_kernel_1(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
double tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
__global__ void
magma_dbicgmerge4_kernel_2(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
__global__ void
magma_dbicgmerge4_kernel_3(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
double tmp1 = skp[4]/skp[3];
double tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
//skp[1] = skp[4]/skp[3] * skp[0] / skp[2];
}
}
/**
Purpose
-------
    Performs the scalar parameter updates for the merged BiCGSTAB on the GPU.
Arguments
---------
@param[in]
type int
                kernel type: 1 updates alpha, 2 updates omega and rho_old, 3 updates beta
@param[in,out]
skp magmaDouble_ptr
vector with parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge4(
magma_int_t type,
magmaDouble_ptr skp,
magma_queue_t queue )
{
dim3 Bs( 1 );
dim3 Gs( 1 );
if ( type == 1 )
hipLaunchKernelGGL(( magma_dbicgmerge4_kernel_1), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else if ( type == 2 )
hipLaunchKernelGGL(( magma_dbicgmerge4_kernel_2), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else if ( type == 3 )
hipLaunchKernelGGL(( magma_dbicgmerge4_kernel_3), dim3(Gs), dim3(Bs), 0, queue->cuda_stream() , skp );
else
printf("error: no kernel called\n");
return MAGMA_SUCCESS;
}
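/* --------------------------------------------------------------------------
   Illustrative usage sketch (not part of MAGMA, guarded out of the build):
   one BiCGSTAB iteration assembled from the merged kernels above. `user_spmv`
   is a hypothetical caller-supplied sparse matrix-vector product, which is
   exactly the flexibility described at the top of this file. The dot products
   feeding skp[0], skp[4], skp[6], skp[7] are assumed to be computed by the
   caller between the steps and are omitted for brevity. */
#if 0
static magma_int_t
example_dbicgstab_iteration(
    magma_int_t n,
    magmaDouble_ptr skp,
    magmaDouble_ptr r, magmaDouble_ptr p, magmaDouble_ptr v,
    magmaDouble_ptr s, magmaDouble_ptr t, magmaDouble_ptr x,
    magma_int_t (*user_spmv)(magmaDouble_ptr in, magmaDouble_ptr out,
                             magma_queue_t queue),
    magma_queue_t queue )
{
    magma_dbicgmerge1( n, skp, v, r, p, queue );       // p = r + beta*(p - omega*v)
    user_spmv( p, v, queue );                          // v = A*p
    magma_dbicgmerge4( 1, skp, queue );                // alpha = rho / <r_hat,v>
    magma_dbicgmerge2( n, skp, r, v, s, queue );       // s = r - alpha*v
    user_spmv( s, t, queue );                          // t = A*s
    magma_dbicgmerge4( 2, skp, queue );                // omega = <t,s> / <t,t>
    magma_dbicgmerge3( n, skp, p, s, t, x, r, queue ); // x += alpha*p + omega*s; r = s - omega*t
    magma_dbicgmerge4( 3, skp, queue );                // beta for the next iteration
    return MAGMA_SUCCESS;
}
#endif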
| b330df58356ef5fc4eebbf84fb7b52979e11ac02.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from sparse/blas/zmergebicgstab3.cu, normal z -> d, Wed Jan 2 14:18:53 2019
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 512
#define PRECISION_d
// These routines merge multiple kernels from dmergebicgstab into one
// The difference to dmergedbicgstab2 is that the SpMV is not merged into the
// kernes. This results in higher flexibility at the price of lower performance.
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge1_kernel(
int n,
double * skp,
double * v,
double * r,
double * p )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double beta=skp[1];
double omega=skp[2];
if ( i<n ) {
p[i] = r[i] + beta * ( p[i] - omega * v[i] );
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
p = beta*p
p = p-omega*beta*v
p = p+r
-> p = r + beta * ( p - omega * v )
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDouble_ptr
set of scalar parameters
@param[in]
v magmaDouble_ptr
input vector v
@param[in]
r magmaDouble_ptr
input vector r
@param[in,out]
p magmaDouble_ptr
input/output vector p
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge1(
magma_int_t n,
magmaDouble_ptr skp,
magmaDouble_ptr v,
magmaDouble_ptr r,
magmaDouble_ptr p,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_dbicgmerge1_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, v, r, p );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge2_kernel(
int n,
double * skp,
double * r,
double * v,
double * s )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double alpha=skp[0];
if ( i < n ) {
s[i] = r[i] - alpha * v[i];
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
s=r
s=s-alpha*v
-> s = r - alpha * v
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDouble_ptr
set of scalar parameters
@param[in]
r magmaDouble_ptr
input vector r
@param[in]
v magmaDouble_ptr
input vector v
@param[out]
s magmaDouble_ptr
output vector s
@param[in]
queue magma_queue_t
queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge2(
magma_int_t n,
magmaDouble_ptr skp,
magmaDouble_ptr r,
magmaDouble_ptr v,
magmaDouble_ptr s,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_dbicgmerge2_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, r, v, s );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge3_kernel(
int n,
double * skp,
double * p,
double * se,
double * t,
double * x,
double * r
)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
double alpha=skp[0];
double omega=skp[2];
if ( i<n ) {
double s;
s = se[i];
x[i] = x[i] + alpha * p[i] + omega * s;
r[i] = s - omega * t[i];
}
}
/**
Purpose
-------
    Merges multiple operations into one kernel:
x=x+alpha*p
x=x+omega*s
r=s
r=r-omega*t
-> x = x + alpha * p + omega * s
-> r = s - omega * t
Arguments
---------
@param[in]
n int
dimension n
@param[in]
skp magmaDouble_ptr
set of scalar parameters
@param[in]
p magmaDouble_ptr
input p
@param[in]
s magmaDouble_ptr
input s
@param[in]
t magmaDouble_ptr
input t
@param[in,out]
x magmaDouble_ptr
input/output x
@param[in,out]
r magmaDouble_ptr
input/output r
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge3(
magma_int_t n,
magmaDouble_ptr skp,
magmaDouble_ptr p,
magmaDouble_ptr s,
magmaDouble_ptr t,
magmaDouble_ptr x,
magmaDouble_ptr r,
magma_queue_t queue )
{
dim3 Bs( BLOCK_SIZE );
dim3 Gs( magma_ceildiv( n, BLOCK_SIZE ) );
magma_dbicgmerge3_kernel<<< Gs, Bs, 0, queue->cuda_stream() >>>( n, skp, p, s, t, x, r );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_dbicgmerge4_kernel_1(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
double tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
__global__ void
magma_dbicgmerge4_kernel_2(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
__global__ void
magma_dbicgmerge4_kernel_3(
double * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if ( i==0 ) {
double tmp1 = skp[4]/skp[3];
double tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
//skp[1] = skp[4]/skp[3] * skp[0] / skp[2];
}
}
/**
Purpose
-------
    Performs the scalar parameter updates for the merged BiCGSTAB on the GPU.
Arguments
---------
@param[in]
type int
                kernel type: 1 updates alpha, 2 updates omega and rho_old, 3 updates beta
@param[in,out]
skp magmaDouble_ptr
vector with parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_dgegpuk
********************************************************************/
extern "C" magma_int_t
magma_dbicgmerge4(
magma_int_t type,
magmaDouble_ptr skp,
magma_queue_t queue )
{
dim3 Bs( 1 );
dim3 Gs( 1 );
if ( type == 1 )
magma_dbicgmerge4_kernel_1<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else if ( type == 2 )
magma_dbicgmerge4_kernel_2<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else if ( type == 3 )
magma_dbicgmerge4_kernel_3<<< Gs, Bs, 0, queue->cuda_stream() >>>( skp );
else
printf("error: no kernel called\n");
return MAGMA_SUCCESS;
}
|
cf59a2fa78aafc8bbd13d7b337631b1e317d4485.hip | // !!! This is a file automatically generated by hipify!!!
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <iostream>
using namespace std;
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
int main() {
int N = 6;
thrust::host_vector<int> A(N);
for(int i=0;i<N;++i)A[i]=i*i;
thrust::device_vector<int> B = A;
thrust::inclusive_scan(B.begin(), B.end(), B.begin());
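    // inclusive_scan replaces B with its running prefix sums in place: the
    // squares 0,1,4,9,16,25 become 0,1,5,14,30,55.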
thrust::host_vector<int> C = B;
for(int i = 0; i<A.size();++i) cout << A[i] << " "; cout << endl;
for(int i=0;i<C.size();++i) cout << C[i] << " "; cout << endl;
thrust::device_vector<double> D(N);
double* D_ptr = thrust::raw_pointer_cast(D.data());
hiprandGenerator_t gen;
hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);
hiprandGenerateUniformDouble(gen, D_ptr, N);
thrust::host_vector<double> E(N);
thrust::copy(D.begin(), D.end(), E.begin());
cout << "Random Gaussian : " << endl;
for(int i = 0; i < E.size(); i++) cout << " >> " << E[i] << endl;
    hiprandDestroyGenerator(gen); // release the generator's resources
}
| cf59a2fa78aafc8bbd13d7b337631b1e317d4485.cu | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sort.h>
#include <thrust/scan.h>
#include <iostream>
using namespace std;
#include <cuda.h>
#include <curand.h>
int main() {
int N = 6;
thrust::host_vector<int> A(N);
for(int i=0;i<N;++i)A[i]=i*i;
thrust::device_vector<int> B = A;
thrust::inclusive_scan(B.begin(), B.end(), B.begin());
thrust::host_vector<int> C = B;
for(int i = 0; i<A.size();++i) cout << A[i] << " "; cout << endl;
for(int i=0;i<C.size();++i) cout << C[i] << " "; cout << endl;
thrust::device_vector<double> D(N);
double* D_ptr = thrust::raw_pointer_cast(D.data());
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandGenerateUniformDouble(gen, D_ptr, N);
thrust::host_vector<double> E(N);
thrust::copy(D.begin(), D.end(), E.begin());
cout << "Random Gaussian : " << endl;
for(int i = 0; i < E.size(); i++) cout << " >> " << E[i] << endl;
    curandDestroyGenerator(gen); // release the generator's resources
}
|
9072284ff18935ffc69c92e0f5cfc88c1cdf64fe.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <stdlib.h>
#include <time.h>
#define DIMX 512
#define cudaCheck(e) do { \
if (hipSuccess != (e)) { \
fprintf(stderr, "Cuda runtime error in line %d of file %s \
: %s \n", __LINE__, __FILE__, hipGetErrorString(hipGetLastError()) ); \
exit(EXIT_FAILURE); \
} \
} while(0)
template <typename DType>
__global__ void reduceGmem(DType* out, DType* in, size_t n) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx >= n) return;
int tid = threadIdx.x;
DType* idata = in + blockIdx.x * blockDim.x;
if(blockDim.x >= 1024 && tid < 512 ) idata[tid] += idata[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) idata[tid] += idata[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) idata[tid] += idata[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) idata[tid] += idata[tid + 64];
__syncthreads();
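    // Last warp: on pre-Volta GPUs the 32 threads of a warp execute in
    // lockstep, so no __syncthreads() is needed below; volatile forces each
    // partial sum back to shared memory instead of a register. (On Volta and
    // later, independent thread scheduling makes __syncwarp() necessary for
    // this pattern to stay correct.)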
if(tid < 32) {
volatile DType* vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = idata[0];
//printf("ID:%d, sum:%5f\n", blockIdx.x, idata[0]);
}
}
template <typename DType>
__global__ void reduceSmem(DType* out, DType* in, size_t n) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ DType smem[DIMX];
int tid = threadIdx.x;
DType* idata = in + blockIdx.x * blockDim.x;
/// global mem. -> shared mem.
if(idx < n) smem[tid] = idata[tid];
else smem[tid] = 0;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
//printf("ID:%d, sum:%5f\n", blockIdx.x, idata[0]);
}
}
template <typename DType>
__global__ void reduceSmemUnroll(DType* out, DType* in, size_t n) {
__shared__ DType smem[DIMX];
int tid = threadIdx.x;
size_t idx = threadIdx.x + blockIdx.x * blockDim.x * 4;
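    // Unrolling by 4: each block covers four consecutive tiles of blockDim.x
    // elements (the caller launches num_blocks / 4 blocks), trading block
    // count for more independent loads, and thus more instruction-level
    // parallelism, per thread.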
/// global mem. -> shared mem.
DType tmp_sum = 0;
if(idx + 3 * blockDim.x < n) {
DType a1 = in[idx];
DType a2 = in[idx + blockDim.x];
DType a3 = in[idx + blockDim.x*2];
DType a4 = in[idx + blockDim.x*3];
tmp_sum = a1 + a2 + a3 + a4;
}
smem[tid] = tmp_sum;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
}
}
template <typename DType>
__global__ void reduceSmemUnrollDynamic(DType* out, DType* in, size_t n) {
extern __shared__ DType smem[]; //! dynamic shared memory
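    // Caveat: an extern __shared__ array of a template-dependent type works
    // here only because the kernel is instantiated with a single DType; with
    // several instantiations the usual workaround is a raw byte buffer
    // reinterpret_cast to DType*.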
int tid = threadIdx.x;
size_t idx = threadIdx.x + blockIdx.x * blockDim.x * 4;
/// global mem. -> shared mem.
DType tmp_sum = 0;
if(idx + 3 * blockDim.x < n) {
DType a1 = in[idx];
DType a2 = in[idx + blockDim.x];
DType a3 = in[idx + blockDim.x*2];
DType a4 = in[idx + blockDim.x*3];
tmp_sum = a1 + a2 + a3 + a4;
}
smem[tid] = tmp_sum;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
}
}
int main(int argc, char* agv[]) {
srand(time(NULL));
hipStream_t stream[2];
cudaCheck(hipSetDevice(0)); //! CUDA Streams
for(int i = 0; i < 2; ++i) cudaCheck(hipStreamCreate(&stream[i]));
hipProfilerStart();
void * buffers[7];
const size_t N = 1 << 24;
float * pdata = new float[N];
float res = 0;
double res_check = 0;
for(size_t i = 0; i < N; ++i) {
//pdata[i] = 1;
pdata[i] = rand() / double(RAND_MAX) * 0.5;
res_check += pdata[i];
}
const int threads_per_block = DIMX;
const int num_blocks = (N + threads_per_block - 1) / threads_per_block;
const int num_blocks2 = (num_blocks + threads_per_block - 1) / threads_per_block;
printf("threads_per_block:%d, num_blocks:%d, %d\n", threads_per_block, num_blocks, num_blocks2);
/// allocate gpu mem.
cudaCheck(hipMalloc(&buffers[0], sizeof(float)*num_blocks*threads_per_block));
cudaCheck(hipMalloc(&buffers[1], sizeof(float)*num_blocks2 * threads_per_block));
cudaCheck(hipMalloc(&buffers[2], sizeof(float)*num_blocks2));
cudaCheck(hipMalloc(&buffers[3], sizeof(float)*num_blocks*threads_per_block));
cudaCheck(hipMalloc(&buffers[4], sizeof(float)*num_blocks2*threads_per_block));
cudaCheck(hipMalloc(&buffers[5], sizeof(float)*num_blocks2));
cudaCheck(hipMalloc(&buffers[6], sizeof(float)*4));
/// pinned memory
float * c_buffer;
cudaCheck(hipHostMalloc(&c_buffer, sizeof(float)*N));
double cpu_res = 0.;
for(size_t i = 0 ; i < N; ++i) {
c_buffer[i] = rand() / double(RAND_MAX) * 0.1;
cpu_res += c_buffer[i];
}
printf("Starting reduction ...");
/// cpu mem. -> gpu mem.
cudaCheck(hipMemcpyAsync(buffers[0], pdata, sizeof(float)*N, hipMemcpyHostToDevice, stream[0]));
/// reduceGmem
hipLaunchKernelGGL(( reduceGmem<float>), dim3(num_blocks), dim3(threads_per_block), 0, stream[0], (float*)buffers[1], (float*)buffers[0], N);
hipLaunchKernelGGL(( reduceGmem<float>), dim3(num_blocks2), dim3(threads_per_block), 0, stream[0], (float*)buffers[2], (float*)buffers[1], num_blocks);
hipLaunchKernelGGL(( reduceGmem<float>), dim3(1), dim3(threads_per_block), 0, stream[0], (float*)buffers[6], (float*)buffers[2], num_blocks2);
cudaCheck(hipMemsetAsync(buffers[1], 0, sizeof(float)*num_blocks2*threads_per_block, stream[0]));
cudaCheck(hipMemsetAsync(buffers[2], 0, sizeof(float)*num_blocks2, stream[0]));
cudaCheck(hipMemcpyAsync(buffers[0], pdata, sizeof(float)*N, hipMemcpyHostToDevice, stream[0]));
/// reduceSmem
hipLaunchKernelGGL(( reduceSmem<float>), dim3(num_blocks), dim3(threads_per_block), 0, stream[0], (float*)buffers[1], (float*)buffers[0], N);
hipLaunchKernelGGL(( reduceSmem<float>), dim3(num_blocks2), dim3(threads_per_block), 0, stream[0], (float*)buffers[2], (float*)buffers[1], num_blocks);
hipLaunchKernelGGL(( reduceSmem<float>), dim3(1), dim3(threads_per_block), 0, stream[0], (float*)buffers[6]+1, (float*)buffers[2], num_blocks2);
/// stream[1]
cudaCheck(hipMemcpyAsync(buffers[3], c_buffer, sizeof(float)*N, hipMemcpyHostToDevice, stream[1]));
/// reduceSmemUnroll
hipLaunchKernelGGL(( reduceSmemUnroll<float>), dim3(num_blocks / 4), dim3(threads_per_block), 0, stream[1], (float*)buffers[4], (float*)buffers[3], N);
hipLaunchKernelGGL(( reduceSmemUnroll<float>), dim3(num_blocks2 / 16), dim3(threads_per_block), 0, stream[1], (float*)buffers[5], (float*)buffers[4], num_blocks / 4);
hipLaunchKernelGGL(( reduceSmem<float>), dim3(1), dim3(threads_per_block), 0, stream[1], (float*)buffers[6]+2, (float*)buffers[5], num_blocks2 / 16);
/// reduceSmemUnrollDynamic
cudaCheck(hipMemsetAsync(buffers[4], 0, sizeof(float)*num_blocks2*threads_per_block, stream[1]));
cudaCheck(hipMemsetAsync(buffers[5], 0, sizeof(float)*num_blocks2, stream[1]));
cudaCheck(hipMemcpyAsync(buffers[3], c_buffer, sizeof(float)*N, hipMemcpyHostToDevice, stream[1]));
hipLaunchKernelGGL(( reduceSmemUnrollDynamic<float>), dim3(num_blocks / 4), dim3(threads_per_block), sizeof(float)*threads_per_block, stream[1], (float*)buffers[4], (float*)buffers[3], N);
hipLaunchKernelGGL(( reduceSmemUnrollDynamic<float>), dim3(num_blocks2 / 16), dim3(threads_per_block), sizeof(float)*threads_per_block, stream[1], (float*)buffers[5], (float*)buffers[4], num_blocks / 4);
hipLaunchKernelGGL(( reduceSmem<float>), dim3(1), dim3(threads_per_block), 0, stream[1], (float*)buffers[6]+3, (float*)buffers[5], num_blocks2 / 16);
/// compare results
cudaCheck(hipMemcpy(&res, buffers[6], sizeof(float), hipMemcpyDeviceToHost));
printf("Global memory GPU Sum:%5f CPU Sum:%5f\n", res, res_check);
cudaCheck(hipMemcpy(&res, (float*)buffers[6]+1, sizeof(float), hipMemcpyDeviceToHost));
printf("Shared memory GPU Sum:%5f CPU Sum:%5f\n", res, res_check);
cudaCheck(hipMemcpy(&res, (float*)buffers[6]+2, sizeof(float), hipMemcpyDeviceToHost));
printf("Shared memory Unroll GPU Sum:%5f CPU Sum:%5f\n", res, cpu_res);
cudaCheck(hipMemcpy(&res, (float*)buffers[6]+3, sizeof(float), hipMemcpyDeviceToHost));
printf("Shared memory Unroll Dynamic GPU Sum:%5f CPU Sum:%5f\n", res, cpu_res);
/// free
hipDeviceSynchronize();
hipProfilerStop();
for(auto & e: buffers) cudaCheck(hipFree(e));
cudaCheck(hipHostFree(c_buffer));
delete [] pdata;
return 0;
}
| 9072284ff18935ffc69c92e0f5cfc88c1cdf64fe.cu | #include <stdio.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <time.h>
#define DIMX 512
#define cudaCheck(e) do { \
if (cudaSuccess != (e)) { \
fprintf(stderr, "Cuda runtime error in line %d of file %s \
: %s \n", __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError()) ); \
exit(EXIT_FAILURE); \
} \
} while(0)
template <typename DType>
__global__ void reduceGmem(DType* out, DType* in, size_t n) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx >= n) return;
int tid = threadIdx.x;
DType* idata = in + blockIdx.x * blockDim.x;
if(blockDim.x >= 1024 && tid < 512 ) idata[tid] += idata[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) idata[tid] += idata[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) idata[tid] += idata[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) idata[tid] += idata[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = idata[0];
//printf("ID:%d, sum:%5f\n", blockIdx.x, idata[0]);
}
}
template <typename DType>
__global__ void reduceSmem(DType* out, DType* in, size_t n) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ DType smem[DIMX];
int tid = threadIdx.x;
DType* idata = in + blockIdx.x * blockDim.x;
/// global mem. -> shared mem.
if(idx < n) smem[tid] = idata[tid];
else smem[tid] = 0;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
//printf("ID:%d, sum:%5f\n", blockIdx.x, idata[0]);
}
}
template <typename DType>
__global__ void reduceSmemUnroll(DType* out, DType* in, size_t n) {
__shared__ DType smem[DIMX];
int tid = threadIdx.x;
size_t idx = threadIdx.x + blockIdx.x * blockDim.x * 4;
/// global mem. -> shared mem.
DType tmp_sum = 0;
if(idx + 3 * blockDim.x < n) {
DType a1 = in[idx];
DType a2 = in[idx + blockDim.x];
DType a3 = in[idx + blockDim.x*2];
DType a4 = in[idx + blockDim.x*3];
tmp_sum = a1 + a2 + a3 + a4;
}
smem[tid] = tmp_sum;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
}
}
template <typename DType>
__global__ void reduceSmemUnrollDynamic(DType* out, DType* in, size_t n) {
extern __shared__ DType smem[]; //! dynamic shared memory
int tid = threadIdx.x;
size_t idx = threadIdx.x + blockIdx.x * blockDim.x * 4;
/// global mem. -> shared mem.
DType tmp_sum = 0;
if(idx + 3 * blockDim.x < n) {
DType a1 = in[idx];
DType a2 = in[idx + blockDim.x];
DType a3 = in[idx + blockDim.x*2];
DType a4 = in[idx + blockDim.x*3];
tmp_sum = a1 + a2 + a3 + a4;
}
smem[tid] = tmp_sum;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
}
}
int main(int argc, char* agv[]) {
srand(time(NULL));
cudaStream_t stream[2];
cudaCheck(cudaSetDevice(0)); //! CUDA Streams
for(int i = 0; i < 2; ++i) cudaCheck(cudaStreamCreate(&stream[i]));
cudaProfilerStart();
void * buffers[7];
const size_t N = 1 << 24;
float * pdata = new float[N];
float res = 0;
double res_check = 0;
for(size_t i = 0; i < N; ++i) {
//pdata[i] = 1;
pdata[i] = rand() / double(RAND_MAX) * 0.5;
res_check += pdata[i];
}
const int threads_per_block = DIMX;
const int num_blocks = (N + threads_per_block - 1) / threads_per_block;
const int num_blocks2 = (num_blocks + threads_per_block - 1) / threads_per_block;
printf("threads_per_block:%d, num_blocks:%d, %d\n", threads_per_block, num_blocks, num_blocks2);
/// allocate gpu mem.
cudaCheck(cudaMalloc(&buffers[0], sizeof(float)*num_blocks*threads_per_block));
cudaCheck(cudaMalloc(&buffers[1], sizeof(float)*num_blocks2 * threads_per_block));
cudaCheck(cudaMalloc(&buffers[2], sizeof(float)*num_blocks2));
cudaCheck(cudaMalloc(&buffers[3], sizeof(float)*num_blocks*threads_per_block));
cudaCheck(cudaMalloc(&buffers[4], sizeof(float)*num_blocks2*threads_per_block));
cudaCheck(cudaMalloc(&buffers[5], sizeof(float)*num_blocks2));
cudaCheck(cudaMalloc(&buffers[6], sizeof(float)*4));
/// pinned memory
float * c_buffer;
cudaCheck(cudaMallocHost(&c_buffer, sizeof(float)*N));
double cpu_res = 0.;
for(size_t i = 0 ; i < N; ++i) {
c_buffer[i] = rand() / double(RAND_MAX) * 0.1;
cpu_res += c_buffer[i];
}
printf("Starting reduction ...");
/// cpu mem. -> gpu mem.
cudaCheck(cudaMemcpyAsync(buffers[0], pdata, sizeof(float)*N, cudaMemcpyHostToDevice, stream[0]));
/// reduceGmem
reduceGmem<float><<<num_blocks, threads_per_block, 0, stream[0]>>>((float*)buffers[1], (float*)buffers[0], N);
reduceGmem<float><<<num_blocks2, threads_per_block, 0, stream[0]>>>((float*)buffers[2], (float*)buffers[1], num_blocks);
reduceGmem<float><<<1, threads_per_block, 0, stream[0]>>>((float*)buffers[6], (float*)buffers[2], num_blocks2);
cudaCheck(cudaMemsetAsync(buffers[1], 0, sizeof(float)*num_blocks2*threads_per_block, stream[0]));
cudaCheck(cudaMemsetAsync(buffers[2], 0, sizeof(float)*num_blocks2, stream[0]));
cudaCheck(cudaMemcpyAsync(buffers[0], pdata, sizeof(float)*N, cudaMemcpyHostToDevice, stream[0]));
/// reduceSmem
reduceSmem<float><<<num_blocks, threads_per_block, 0, stream[0]>>>((float*)buffers[1], (float*)buffers[0], N);
reduceSmem<float><<<num_blocks2, threads_per_block, 0, stream[0]>>>((float*)buffers[2], (float*)buffers[1], num_blocks);
reduceSmem<float><<<1, threads_per_block, 0, stream[0]>>>((float*)buffers[6]+1, (float*)buffers[2], num_blocks2);
/// stream[1]
cudaCheck(cudaMemcpyAsync(buffers[3], c_buffer, sizeof(float)*N, cudaMemcpyHostToDevice, stream[1]));
/// reduceSmemUnroll
reduceSmemUnroll<float><<<num_blocks / 4, threads_per_block, 0, stream[1]>>>((float*)buffers[4], (float*)buffers[3], N);
reduceSmemUnroll<float><<<num_blocks2 / 16, threads_per_block, 0, stream[1]>>>((float*)buffers[5], (float*)buffers[4], num_blocks / 4);
reduceSmem<float><<<1, threads_per_block, 0, stream[1]>>>((float*)buffers[6]+2, (float*)buffers[5], num_blocks2 / 16);
/// reduceSmemUnrollDynamic
cudaCheck(cudaMemsetAsync(buffers[4], 0, sizeof(float)*num_blocks2*threads_per_block, stream[1]));
cudaCheck(cudaMemsetAsync(buffers[5], 0, sizeof(float)*num_blocks2, stream[1]));
cudaCheck(cudaMemcpyAsync(buffers[3], c_buffer, sizeof(float)*N, cudaMemcpyHostToDevice, stream[1]));
reduceSmemUnrollDynamic<float><<<num_blocks / 4, threads_per_block, sizeof(float)*threads_per_block, stream[1]>>>((float*)buffers[4], (float*)buffers[3], N);
reduceSmemUnrollDynamic<float><<<num_blocks2 / 16, threads_per_block, sizeof(float)*threads_per_block, stream[1]>>>((float*)buffers[5], (float*)buffers[4], num_blocks / 4);
reduceSmem<float><<<1, threads_per_block, 0, stream[1]>>>((float*)buffers[6]+3, (float*)buffers[5], num_blocks2 / 16);
/// compare results
cudaCheck(cudaMemcpy(&res, buffers[6], sizeof(float), cudaMemcpyDeviceToHost));
printf("Global memory GPU Sum:%5f CPU Sum:%5f\n", res, res_check);
cudaCheck(cudaMemcpy(&res, (float*)buffers[6]+1, sizeof(float), cudaMemcpyDeviceToHost));
printf("Shared memory GPU Sum:%5f CPU Sum:%5f\n", res, res_check);
cudaCheck(cudaMemcpy(&res, (float*)buffers[6]+2, sizeof(float), cudaMemcpyDeviceToHost));
printf("Shared memory Unroll GPU Sum:%5f CPU Sum:%5f\n", res, cpu_res);
cudaCheck(cudaMemcpy(&res, (float*)buffers[6]+3, sizeof(float), cudaMemcpyDeviceToHost));
printf("Shared memory Unroll Dynamic GPU Sum:%5f CPU Sum:%5f\n", res, cpu_res);
/// free
cudaDeviceSynchronize();
cudaProfilerStop();
for(auto & e: buffers) cudaCheck(cudaFree(e));
cudaCheck(cudaFreeHost(c_buffer));
delete [] pdata;
return 0;
}
|
9811b1cac02f9b9ef59ecc4a814eb4c60c516fa4.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "normArray.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((normArray), dim3(gridBlock), dim3(threadBlock), 0, 0, n, a);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
normArray), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
normArray), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a);
}
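// Note: kernel launches are asynchronous, so steady_clock::now() below can
// return before the queued kernels have finished; a hipDeviceSynchronize()
// before reading the clock would make this measure execution time rather
// than mostly launch/enqueue overhead.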
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 9811b1cac02f9b9ef59ecc4a814eb4c60c516fa4.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "normArray.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int n = 1;
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
normArray<<<gridBlock,threadBlock>>>(n,a);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
normArray<<<gridBlock,threadBlock>>>(n,a);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
normArray<<<gridBlock,threadBlock>>>(n,a);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
79476721820dbdb6a79ec116a899680196b01706.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void mandelKernel(float stepX, float stepY, float lowerX, float lowerY, int* img_result, int maxIterations) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int idx = (blockIdx.y * blockDim.y + threadIdx.y) * (gridDim.x * blockDim.x) + (blockIdx.x * blockDim.x + threadIdx.x);
float x = lowerX + (blockIdx.x * blockDim.x + threadIdx.x) * stepX;
float y = lowerY + (blockIdx.y * blockDim.y + threadIdx.y) * stepY;
float z_re = x, z_im = y;
int i;
for (i = 0; i < maxIterations; ++i) {
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = x + new_re;
z_im = y + new_im;
}
img_result[idx] = i;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// Declare the host memory
int *h_result = (int *)malloc(resX * resY * sizeof(int));
// Declare the cuda memory
int *c_result;
hipMalloc(&c_result, resX * resY * sizeof(int));
dim3 blockSize(16, 16);
dim3 numBlock(resX / 16, resY / 16);
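    // Note: this grid assumes resX and resY are multiples of 16; for other
    // sizes the grid truncates and the rightmost/bottom pixels would never
    // be computed.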
hipLaunchKernelGGL(( mandelKernel), dim3(numBlock), dim3(blockSize), 0, 0, stepX, stepY, lowerX, lowerY, c_result, maxIterations);
    // Wait until all GPU threads have finished
hipDeviceSynchronize();
    // Copy the result from the device back to the host
hipMemcpy(h_result, c_result, resX * resY * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < resX * resY; ++i) {
*(img+i) = *(h_result+i);
}
// free memory
free(h_result);
hipFree(c_result);
}
| 79476721820dbdb6a79ec116a899680196b01706.cu | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void mandelKernel(float stepX, float stepY, float lowerX, float lowerY, int* img_result, int maxIterations) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int idx = (blockIdx.y * blockDim.y + threadIdx.y) * (gridDim.x * blockDim.x) + (blockIdx.x * blockDim.x + threadIdx.x);
float x = lowerX + (blockIdx.x * blockDim.x + threadIdx.x) * stepX;
float y = lowerY + (blockIdx.y * blockDim.y + threadIdx.y) * stepY;
float z_re = x, z_im = y;
int i;
for (i = 0; i < maxIterations; ++i) {
if (z_re * z_re + z_im * z_im > 4.f)
break;
float new_re = z_re * z_re - z_im * z_im;
float new_im = 2.f * z_re * z_im;
z_re = x + new_re;
z_im = y + new_im;
}
img_result[idx] = i;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
// Declare the host memory
int *h_result = (int *)malloc(resX * resY * sizeof(int));
// Declare the cuda memory
int *c_result;
cudaMalloc(&c_result, resX * resY * sizeof(int));
dim3 blockSize(16, 16);
dim3 numBlock(resX / 16, resY / 16);
mandelKernel<<<numBlock, blockSize>>>(stepX, stepY, lowerX, lowerY, c_result, maxIterations);
    // Wait until all GPU threads have finished
cudaDeviceSynchronize();
    // Copy the data from the device back to the host
cudaMemcpy(h_result, c_result, resX * resY * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < resX * resY; ++i) {
*(img+i) = *(h_result+i);
}
// free memory
free(h_result);
cudaFree(c_result);
}
|
f49ffb206e14ed86f74a034ba5a13fc4d825ce77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <wmma_extension/tcec/tcec.hpp>
#include "utils.hpp"
constexpr unsigned warp_size = 32;
template <unsigned N, class T, class Policy>
__global__ void test_elementwise_kernel(float* const ptr) {
__shared__ float smem[N * N];
mtk::wmma::tcec::fragment<nvcuda::wmma::accumulator, N, N, N, T, void, Policy> frag;
mtk::wmma::tcec::fill_fragment(frag, 0.0f);
for (unsigned i = 0; i < frag.num_elements; i++) {
frag.x(i) = threadIdx.x * 100 + i;
}
mtk::wmma::tcec::store_matrix_sync(smem, frag, N, nvcuda::wmma::mem_col_major);
for (unsigned i = 0; i < N * N; i += warp_size) {
const auto index = i + threadIdx.x;
ptr[index] = smem[index];
}
}
template <unsigned N, class T, class Policy>
void test_elementwise() {
std::printf("[%s, N = %u, T = %s, Policy = <%7s,%9s,%2u,%2u,%2u>]\n",
__func__,
N,
mtk::test_utils::to_string<T>().c_str(),
mtk::test_utils::to_string<typename Policy::op>().c_str(),
std::is_same<typename Policy::error_correction, mtk::wmma::tcec::with_ec>::value ? "{w/ ec}" : "{w/o ec}",
Policy::m,
Policy::n,
Policy::k
);
float* hC;
hipHostMalloc(&hC, N * N * sizeof(float));
hipLaunchKernelGGL(( test_elementwise_kernel<N, T, Policy>), dim3(1), dim3(warp_size), 0, 0, hC);
hipDeviceSynchronize();
for (unsigned i = 0; i < N; i++) {
for (unsigned j = 0; j < N; j++) {
std::printf("%e ", hC[i + j * N]);
}
std::printf("\n");
}
}
int main() {
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_mma >::type>();
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_mma >::type>();
test_elementwise<32, float , typename mtk::wmma::tcec::detail::default_policy<float , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_simt>::type>();
#ifdef TEST_SIMT
test_elementwise<32, float , typename mtk::wmma::tcec::detail::default_policy<float , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_simt>::type>();
#endif
#ifdef TEST_TF32
test_elementwise<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_elementwise<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
#endif
}
| f49ffb206e14ed86f74a034ba5a13fc4d825ce77.cu | #include <iostream>
#include <wmma_extension/tcec/tcec.hpp>
#include "utils.hpp"
constexpr unsigned warp_size = 32;
template <unsigned N, class T, class Policy>
__global__ void test_elementwise_kernel(float* const ptr) {
__shared__ float smem[N * N];
mtk::wmma::tcec::fragment<nvcuda::wmma::accumulator, N, N, N, T, void, Policy> frag;
mtk::wmma::tcec::fill_fragment(frag, 0.0f);
for (unsigned i = 0; i < frag.num_elements; i++) {
frag.x(i) = threadIdx.x * 100 + i;
}
mtk::wmma::tcec::store_matrix_sync(smem, frag, N, nvcuda::wmma::mem_col_major);
for (unsigned i = 0; i < N * N; i += warp_size) {
const auto index = i + threadIdx.x;
ptr[index] = smem[index];
}
}
template <unsigned N, class T, class Policy>
void test_elementwise() {
std::printf("[%s, N = %u, T = %s, Policy = <%7s,%9s,%2u,%2u,%2u>]\n",
__func__,
N,
mtk::test_utils::to_string<T>().c_str(),
mtk::test_utils::to_string<typename Policy::op>().c_str(),
std::is_same<typename Policy::error_correction, mtk::wmma::tcec::with_ec>::value ? "{w/ ec}" : "{w/o ec}",
Policy::m,
Policy::n,
Policy::k
);
float* hC;
cudaMallocHost(&hC, N * N * sizeof(float));
test_elementwise_kernel<N, T, Policy><<<1, warp_size>>>(hC);
cudaDeviceSynchronize();
for (unsigned i = 0; i < N; i++) {
for (unsigned j = 0; j < N; j++) {
std::printf("%e ", hC[i + j * N]);
}
std::printf("\n");
}
}
int main() {
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_mma >::type>();
test_elementwise<32, half , typename mtk::wmma::tcec::detail::default_policy<half , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_mma >::type>();
test_elementwise<32, float , typename mtk::wmma::tcec::detail::default_policy<float , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_simt>::type>();
#ifdef TEST_SIMT
test_elementwise<32, float , typename mtk::wmma::tcec::detail::default_policy<float , mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_simt>::type>();
#endif
#ifdef TEST_TF32
test_elementwise<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::with_ec , mtk::wmma::tcec::op_wmma>::type>();
test_elementwise<32, nvcuda::wmma::precision::tf32, typename mtk::wmma::tcec::detail::default_policy<nvcuda::wmma::precision::tf32, mtk::wmma::tcec::without_ec, mtk::wmma::tcec::op_wmma>::type>();
#endif
}
|
730c4443caf913de694d6f8deb6a3944ef21fabf.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "log_tonemap_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *imageIn = NULL;
hipMalloc(&imageIn, XSIZE*YSIZE);
float *imageOut = NULL;
hipMalloc(&imageOut, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int channels = 1;
float k = 1;
float q = 1;
float *max = NULL;
hipMalloc(&max, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((log_tonemap_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, imageIn, imageOut, width, height, channels, k, q, max);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
log_tonemap_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, imageIn,imageOut,width,height,channels,k,q,max);
}
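// Timed region: 1000 launches, measured with steady_clock and reported in microseconds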
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((log_tonemap_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, imageIn, imageOut, width, height, channels, k, q, max);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 730c4443caf913de694d6f8deb6a3944ef21fabf.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "log_tonemap_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *imageIn = NULL;
cudaMalloc(&imageIn, XSIZE*YSIZE);
float *imageOut = NULL;
cudaMalloc(&imageOut, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int channels = 1;
float k = 1;
float q = 1;
float *max = NULL;
cudaMalloc(&max, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
log_tonemap_kernel<<<gridBlock,threadBlock>>>(imageIn,imageOut,width,height,channels,k,q,max);
cudaDeviceSynchronize();
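// Warm-up launches so the timed loop below does not include one-time initialization overhead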
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
log_tonemap_kernel<<<gridBlock,threadBlock>>>(imageIn,imageOut,width,height,channels,k,q,max);
}
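// Timed region: 1000 launches, measured with steady_clock and reported in microseconds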
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
log_tonemap_kernel<<<gridBlock,threadBlock>>>(imageIn,imageOut,width,height,channels,k,q,max);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
dc54b92070b2e54413d2bd8d1da71ab9f331baf4.hip | // !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<cuda_runtime.h>
#include<math.h>
using namespace std;
using namespace cv;
void sumaEulerCPU1(int *a, Mat img, int *c, int N, int M)
{
// Compute the values without considering the border, for a 3x3 kernel.
for (int i=1; i<N-1; i++)
for (int j=1; j<M-1; j++)
c[i*M+j] = ( img.at<uchar>(i*M+j) * a[4] + img.at<uchar>((i-1)*M+j-1) * a[0] + img.at<uchar>((i-1)*M+j) * a[1] + img.at<uchar>((i-1)*M+j+1) * a[2] + img.at<uchar>(i*M+j-1) * a[3] + img.at<uchar>(i*M+j+1) * a[5] + img.at<uchar>((i+1)*M+j-1) * a[6] + img.at<uchar>((i+1)*M+j) * a[7] + img.at<uchar>((i+1)*M+j+1) * a[8] ) / 9;
// Center, top-left corner, top, top-right corner, left, right, bottom-left corner, bottom, bottom-right corner
}
float convolucionCPU1(int *a, Mat img, int *c, int N, int M)
{
hipEvent_t cpuI, cpuF;
float cpuT;
hipEventCreate( &cpuI );
hipEventCreate( &cpuF );
hipEventRecord( cpuI, 0 );
sumaEulerCPU1(a, img, c, N, M);
hipEventRecord( cpuF, 0 );
hipEventSynchronize( cpuF );
hipEventElapsedTime( &cpuT, cpuI, cpuF);
return cpuT;
}
/* Function to combine the two matrices X and Y */
float combinar(int *a, int *b, Mat &res)
{
hipEvent_t cpuI, cpuF;
float cpuT;
hipEventCreate( &cpuI );
hipEventCreate( &cpuF );
hipEventRecord( cpuI, 0 );
int sum;
/* Take the two matrices and join them to form the final image, following the Prewitt algorithm */
for(int y = 1; y < res.rows - 1; y++){
for(int x = 1; x < res.cols - 1; x++){
sum = abs(a[y*res.cols+x]) + abs(b[y*res.cols+x]);
sum = sum > 255 ? 255:sum;
sum = sum < 0 ? 0 : sum;
res.at<uchar>(y,x) = b[y*res.cols+x];
//res.at<uchar>(y,x) = sum; // This is what should work, but for some reason it produces a black screen
}
}
hipEventRecord( cpuF, 0 );
hipEventSynchronize( cpuF );
hipEventElapsedTime( &cpuT, cpuI, cpuF);
return cpuT;
}
int main(int argc, char *argv[])
{
string imagePath;
if(argc < 2)
imagePath = "megaPixel.jpg";
//imagePath = "space-wallpaper_2880x1800.jpg";
else
imagePath = argv[1];
//Read input image from the disk in greyscale
Mat input = imread(imagePath, CV_LOAD_IMAGE_GRAYSCALE);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
/* Initialize the matrix pointers */
int *resX, *resY;
resX = (int*) malloc(input.rows*input.cols*sizeof(int));
resY = (int*) malloc(input.rows*input.cols*sizeof(int));
/* Matrices used for Prewitt */
int arregloX[9] = {-1,0,1,-1,0,1,-1,0,1};
int arregloY[9] = {-1,-1,-1,0,0,0,1,1,1};
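/* 3x3 Prewitt kernels, stored row-major: arregloX estimates the horizontal gradient, arregloY the vertical one */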
float tiempo, tiempo2, tiempo3;
tiempo= convolucionCPU1( arregloX, input , resX, input.rows, input.cols );
tiempo2= convolucionCPU1( arregloY, input , resY, input.rows, input.cols );
Mat final;
final = input.clone(); // make a clone
for(int y = 0; y < input.rows; y++) // loop over the rows
for(int x = 0; x < input.cols; x++) // loop over the columns
final.at<uchar>(y,x) = 0.0; // starting point
tiempo3 = combinar(resX, resY, final);
printf("Tiempo: %f \n", tiempo + tiempo2 + tiempo3);
//Show the input and output
namedWindow("Input", WINDOW_NORMAL);
imshow("Input", input);
namedWindow("Output", WINDOW_NORMAL);
imshow("Output", final);
//Wait for key press
waitKey();
return 0;
}
| dc54b92070b2e54413d2bd8d1da71ab9f331baf4.cu | #include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include<opencv2/imgproc/imgproc.hpp>
#include<opencv2/highgui/highgui.hpp>
#include<cuda_runtime.h>
#include<math.h>
using namespace std;
using namespace cv;
void sumaEulerCPU1(int *a, Mat img, int *c, int N, int M)
{
// Compute the values without considering the border, for a 3x3 kernel.
for (int i=1; i<N-1; i++)
for (int j=1; j<M-1; j++)
c[i*M+j] = ( img.at<uchar>(i*M+j) * a[4] + img.at<uchar>((i-1)*M+j-1) * a[0] + img.at<uchar>((i-1)*M+j) * a[1] + img.at<uchar>((i-1)*M+j+1) * a[2] + img.at<uchar>(i*M+j-1) * a[3] + img.at<uchar>(i*M+j+1) * a[5] + img.at<uchar>((i+1)*M+j-1) * a[6] + img.at<uchar>((i+1)*M+j) * a[7] + img.at<uchar>((i+1)*M+j+1) * a[8] ) / 9;
// Center, top-left corner, top, top-right corner, left, right, bottom-left corner, bottom, bottom-right corner
}
float convolucionCPU1(int *a, Mat img, int *c, int N, int M)
{
cudaEvent_t cpuI, cpuF;
float cpuT;
cudaEventCreate( &cpuI );
cudaEventCreate( &cpuF );
cudaEventRecord( cpuI, 0 );
sumaEulerCPU1(a, img, c, N, M);
cudaEventRecord( cpuF, 0 );
cudaEventSynchronize( cpuF );
cudaEventElapsedTime( &cpuT, cpuI, cpuF);
return cpuT;
}
/* Function to combine the two matrices X and Y */
float combinar(int *a, int *b, Mat &res)
{
cudaEvent_t cpuI, cpuF;
float cpuT;
cudaEventCreate( &cpuI );
cudaEventCreate( &cpuF );
cudaEventRecord( cpuI, 0 );
int sum;
/* Take the two matrices and join them to form the final image, following the Prewitt algorithm */
for(int y = 1; y < res.rows - 1; y++){
for(int x = 1; x < res.cols - 1; x++){
sum = abs(a[y*res.cols+x]) + abs(b[y*res.cols+x]);
sum = sum > 255 ? 255:sum;
sum = sum < 0 ? 0 : sum;
res.at<uchar>(y,x) = b[y*res.cols+x];
//res.at<uchar>(y,x) = sum; // This is what should work, but for some reason it produces a black screen
}
}
cudaEventRecord( cpuF, 0 );
cudaEventSynchronize( cpuF );
cudaEventElapsedTime( &cpuT, cpuI, cpuF);
return cpuT;
}
int main(int argc, char *argv[])
{
string imagePath;
if(argc < 2)
imagePath = "megaPixel.jpg";
//imagePath = "space-wallpaper_2880x1800.jpg";
else
imagePath = argv[1];
//Read input image from the disk in greyscale
Mat input = imread(imagePath, CV_LOAD_IMAGE_GRAYSCALE);
if (input.empty())
{
cout << "Image Not Found!" << std::endl;
cin.get();
return -1;
}
/* Initialize the matrix pointers */
int *resX, *resY;
resX = (int*) malloc(input.rows*input.cols*sizeof(int));
resY = (int*) malloc(input.rows*input.cols*sizeof(int));
/* Matrices used for Prewitt */
int arregloX[9] = {-1,0,1,-1,0,1,-1,0,1};
int arregloY[9] = {-1,-1,-1,0,0,0,1,1,1};
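/* 3x3 Prewitt kernels, stored row-major: arregloX estimates the horizontal gradient, arregloY the vertical one */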
float tiempo, tiempo2, tiempo3;
tiempo= convolucionCPU1( arregloX, input , resX, input.rows, input.cols );
tiempo2= convolucionCPU1( arregloY, input , resY, input.rows, input.cols );
Mat final;
final = input.clone(); // make a clone
for(int y = 0; y < input.rows; y++) // loop over the rows
for(int x = 0; x < input.cols; x++) // loop over the columns
final.at<uchar>(y,x) = 0.0; // starting point
tiempo3 = combinar(resX, resY, final);
printf("Tiempo: %f \n", tiempo + tiempo2 + tiempo3);
//Show the input and output
namedWindow("Input", WINDOW_NORMAL);
imshow("Input", input);
namedWindow("Output", WINDOW_NORMAL);
imshow("Output", final);
//Wait for key press
waitKey();
return 0;
}
|
98d505933059af359ed3eaf1b3460afc19434b26.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MatrixAddition.h"
#include "GPUErrors.h"
//GPU kernel with 2D Grid and 1D block Arrangement
__global__ void MatrixAddition2DG1DB(float* g_A, float* g_B, float* g_C, const int ny, const int nx)
{
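// 2D grid of 1D blocks: threads tile a row along x, and blockIdx.y selects the row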
int ix = threadIdx.x + (blockIdx.x * blockDim.x);
int iy = blockIdx.y; // with 1D blocks, the grid's y dimension indexes the matrix rows
int idx = iy * nx + ix;
if (ix < nx && iy < ny)
{
g_C[idx] = g_A[idx] + g_B[idx];
}
}
//GPU Host Function
__host__ void MatrixAdditionOnGPU2DG1DB(float* h_A, float* h_B, float* h_C, float* ref, const int ny, const int nx)
{
float* d_A, * d_B, * d_C;
const int MatrixSizeInBytes = ny * nx * sizeof(float);
hipEvent_t kernel_start;
hipEvent_t kernel_stop;
float fElapsedTime;
HandleCUDAError(hipEventCreate(&kernel_start));
HandleCUDAError(hipEventCreate(&kernel_stop));
//Allocate device memory on the global memory
HandleCUDAError(hipMalloc((void**)&d_A, MatrixSizeInBytes));
HandleCUDAError(hipMalloc((void**)&d_B, MatrixSizeInBytes));
HandleCUDAError(hipMalloc((void**)&d_C, MatrixSizeInBytes));
//transfer data from CPU Memory to GPU Memory
chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
HandleCUDAError(hipMemcpy(d_A, h_A, MatrixSizeInBytes, hipMemcpyHostToDevice));
HandleCUDAError(hipMemcpy(d_B, h_B, MatrixSizeInBytes, hipMemcpyHostToDevice));
end = std::chrono::system_clock::now();
std::chrono::duration<double> elasped_seconds = end - start;
cout << "Memory Copy - HostToDevice: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
// Kernel Invoke Parameters - 2D Grid and 1D Blocks
int dimx = 256;
dim3 block(dimx);
dim3 grid((nx + block.x - 1) / block.x, ny);
cout << "2D Grid Dimension" << endl;
cout << "\tNumber of Block along X dimension: " << grid.x << endl;
cout << "\tNumber of Block along Y dimension: " << grid.y << endl;
cout << "1D Block Dimension" << endl;
cout << "\tNumber of threads along X dimension: " << block.x << endl;
cout << "\tNumber of threads along Y dimension: " << block.y << endl;
HandleCUDAError(hipEventRecord(kernel_start));
MatrixAddition2DG1DB << <grid, block >> > (d_A, d_B, d_C, ny, nx);
HandleCUDAError(hipEventRecord(kernel_stop));
HandleCUDAError(hipEventSynchronize(kernel_stop));
GetCUDARunTimeError();
HandleCUDAError(hipEventElapsedTime(&fElapsedTime, kernel_start, kernel_stop));
start = std::chrono::system_clock::now();
HandleCUDAError(hipMemcpy(h_C, d_C, MatrixSizeInBytes, hipMemcpyDeviceToHost));
end = std::chrono::system_clock::now();
elasped_seconds = end - start;
MatrixAdditionVerification(ref, h_C, ny, nx);
cout << "Memory Copy - DeviceToHost: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
cout << "2DG1DB Elapsed Time (GPU) = " << fElapsedTime << " msecs" << endl;
HandleCUDAError(hipFree(d_A));
HandleCUDAError(hipFree(d_B));
HandleCUDAError(hipFree(d_C));
HandleCUDAError(hipEventDestroy(kernel_start));
HandleCUDAError(hipEventDestroy(kernel_stop));
HandleCUDAError(hipDeviceReset());
} | 98d505933059af359ed3eaf1b3460afc19434b26.cu | #include "MatrixAddition.h"
#include "GPUErrors.h"
//GPU kernel with 2D Grid and 1D block Arrangement
__global__ void MatrixAddition2DG1DB(float* g_A, float* g_B, float* g_C, const int ny, const int nx)
{
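// 2D grid of 1D blocks: threads tile a row along x, and blockIdx.y selects the row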
int ix = threadIdx.x + (blockIdx.x * blockDim.x);
int iy = blockIdx.y; // with 1D blocks, the grid's y dimension indexes the matrix rows
int idx = iy * nx + ix;
if (ix < nx && iy < ny)
{
g_C[idx] = g_A[idx] + g_B[idx];
}
}
//GPU Host Function
__host__ void MatrixAdditionOnGPU2DG1DB(float* h_A, float* h_B, float* h_C, float* ref, const int ny, const int nx)
{
float* d_A, * d_B, * d_C;
const int MatrixSizeInBytes = ny * nx * sizeof(float);
cudaEvent_t kernel_start;
cudaEvent_t kernel_stop;
float fElapsedTime;
HandleCUDAError(cudaEventCreate(&kernel_start));
HandleCUDAError(cudaEventCreate(&kernel_stop));
//Allocate device memory on the global memory
HandleCUDAError(cudaMalloc((void**)&d_A, MatrixSizeInBytes));
HandleCUDAError(cudaMalloc((void**)&d_B, MatrixSizeInBytes));
HandleCUDAError(cudaMalloc((void**)&d_C, MatrixSizeInBytes));
//transfer data from CPU Memory to GPU Memory
chrono::time_point<std::chrono::system_clock> start, end;
start = std::chrono::system_clock::now();
HandleCUDAError(cudaMemcpy(d_A, h_A, MatrixSizeInBytes, cudaMemcpyHostToDevice));
HandleCUDAError(cudaMemcpy(d_B, h_B, MatrixSizeInBytes, cudaMemcpyHostToDevice));
end = std::chrono::system_clock::now();
std::chrono::duration<double> elasped_seconds = end - start;
cout << "Memory Copy - HostToDevice: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
// Kernel Invoke Parameters - 2D Grid and 1D Blocks
int dimx = 256;
dim3 block(dimx);
dim3 grid((nx + block.x - 1) / block.x, ny);
cout << "2D Grid Dimension" << endl;
cout << "\tNumber of Block along X dimension: " << grid.x << endl;
cout << "\tNumber of Block along Y dimension: " << grid.y << endl;
cout << "1D Block Dimension" << endl;
cout << "\tNumber of threads along X dimension: " << block.x << endl;
cout << "\tNumber of threads along Y dimension: " << block.y << endl;
HandleCUDAError(cudaEventRecord(kernel_start));
MatrixAddition2DG1DB << <grid, block >> > (d_A, d_B, d_C, ny, nx);
HandleCUDAError(cudaEventRecord(kernel_stop));
HandleCUDAError(cudaEventSynchronize(kernel_stop));
GetCUDARunTimeError();
HandleCUDAError(cudaEventElapsedTime(&fElapsedTime, kernel_start, kernel_stop));
start = std::chrono::system_clock::now();
HandleCUDAError(cudaMemcpy(h_C, d_C, MatrixSizeInBytes, cudaMemcpyDeviceToHost));
end = std::chrono::system_clock::now();
elasped_seconds = end - start;
MatrixAdditionVerification(ref, h_C, ny, nx);
cout << "Memory Copy - DeviceToHost: " << (elasped_seconds.count() * 1000.0f) << " msecs" << endl;
cout << "2DG1DB Elapsed Time (GPU) = " << fElapsedTime << " msecs" << endl;
HandleCUDAError(cudaFree(d_A));
HandleCUDAError(cudaFree(d_B));
HandleCUDAError(cudaFree(d_C));
HandleCUDAError(cudaEventDestroy(kernel_start));
HandleCUDAError(cudaEventDestroy(kernel_stop));
HandleCUDAError(cudaDeviceReset());
} |
6c1b4ab44ca8fd4257140946480eb92dc0fcabd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Version 20180514-01: Eliminated flaton and AD damping. Eliminated option to turn off back-reaction.
// Libraries
#include <fstream>
#include <string>
#include <iostream>
#include <chrono>
#include <common/select_GPU.cu>
#include <common/complex.cu>
#include <1D/parameters.hpp>
using namespace std;
// field macros
#define phi d_Phi[0][x]
#define hu d_Phi[1][x]
#define hd d_Phi[2][x]
#define l d_Phi[3][x]
#define d d_Phi[4][x]
////////////////////////////////////////////////////////////////////////////////////////////////////
// Functions
////////////////////////////////////////////////////////////////////////////////////////////////////
// read initial data from input files
void Read(Complex<double> Phi[F][X], Complex<double> PhiDot[F][X], string inputPhiStr, string inputPhiDotStr, double ndt) {
// Variables
float n;
uint32_t x;
Complex<float> tmp[F];
// read Phi
ifstream inputPhi(inputPhiStr, std::ios::binary);
while (inputPhi.read( reinterpret_cast<char*>(&n), sizeof(n))) {
if (n != ndt) {
inputPhi.seekg((sizeof(x) + sizeof(tmp)), std::ios::cur);
continue;
}
inputPhi.read( reinterpret_cast<char*>(&x), sizeof(x));
inputPhi.read( reinterpret_cast<char*>(&tmp), sizeof(tmp));
for (int f = 0; f < F; f++)
Phi[f][x] = Complex<double>{tmp[f].x, tmp[f].y};
}
inputPhi.close();
// read PhiDot
ifstream inputPhiDot(inputPhiDotStr, std::ios::binary);
while (inputPhiDot.read( reinterpret_cast<char*>(&n), sizeof(n))) {
if (n != ndt) {
inputPhiDot.seekg( (sizeof(x) + sizeof(tmp)), std::ios::cur);
continue;
}
inputPhiDot.read( reinterpret_cast<char*>(&x), sizeof(x));
inputPhiDot.read( reinterpret_cast<char*>(&tmp), sizeof(tmp));
for (int f = 0; f < F; f++)
PhiDot[f][x] = Complex<double>{tmp[f].x, tmp[f].y};
}
inputPhiDot.close();
}
// Calculate a single timestep of Phi
__global__ void StepPhi(Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X], double dt)
{
const uint2 index = {blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y};
const uint2 stride = {blockDim.x * gridDim.x, blockDim.y * gridDim.y};
for (int x = index.x; x<X; x += stride.x) {
for (int f = index.y; f<F; f += stride.y) {
d_Phi[f][x] = d_Phi[f][x] + dt * d_PhiDot[f][x];
}
}
}
// Calculate a single timestep of PhiDot
__global__ void StepPhiDot(Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X], int n, double dt)
{
const uint index = {blockIdx.x * blockDim.x + threadIdx.x};
const uint stride = {blockDim.x * gridDim.x};
// charges
const double Q[13] = {0,1,-1,-1,0.5,0,0,0,0,0,0,0,0}; // phi,h_u,h_d,l,d,8xg
// dependent parameters
const Complex<double> Amu = Complex<double>{ modAmu*cosf(argAmu), modAmu*sinf(argAmu)};
const Complex<double> Anu = Complex<double>{ modAnu*cosf(argAnu), modAnu*sinf(argAnu)};
const Complex<double> Ad = Complex<double>{ modAd*cosf(argAd), modAd*sinf(argAd)};
const double kmu = mu; // phi_0 = 1
const double knu = (( sqrt(modsq(Anu)) + sqrt( modsq(Anu) - 6.0*(mLsq+mHusq) ) ) / 6.0); // l_0 = 1
// evolution variables
const double Tphi = Tphi_i * exp(-GammaTphi*n*time_step);
Complex<double> d2Phidx2[F];
double Tsq;
Complex<double> dVdPhi[F];
double PhiQQPhi;
double PhiQQQPhi;
Complex<double> PhiQPhidot;
Complex<double> PhiQd2Phidx2;
Complex<double> PhiQdVdPhi;
Complex<double> pi[F];
double piQpi;
Complex<double> PhiQQpi;
double Pi;
for (int x=index; x<X; x+=stride) {
// evaluate Laplacian
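// (second-order central difference; the modular indexing (x+1)%X and (x+X-1)%X gives periodic boundaries)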
for (short int f=0; f<13; f++) {
d2Phidx2[f] = ( d_Phi[f][(x+1)%X] - 2*d_Phi[f][x] + d_Phi[f][(x+X-1)%X] ) / (dx*dx);
}
/* potential
V = V0 - Tphi*Tphi*Tphi*Tphi * (m0*m0/(phi0*phi0)) * exp(-conj(phi)*phi*phi0*phi0/(Tphi*Tphi*m0*m0)) + 0.5*alpha*mphisq * ( log(conj(phi)*phi+mssq) - 1 ) * conj(phi)*phi + mHusq*conj(hu)*hu + mHdsq*conj(hd)*hd + mLsq*conj(l)*l + mdsq*conj(d)*d - Amu*kmu*phi*phi*hu*hd + 0.5*Anu*knu*l*l*hu*hu - 0.5*Ad*ld*hd*d*d - conj(Amu*kmu*phi*phi*hu*hd) + conj(0.5*Anu*knu*l*l*hu*hu) - conj(0.5*Ad*ld*hd*d*d) + conj(kmu*phi*phi*hu+0.5*ld*d*d)*(kmu*phi*phi*hu+0.5*ld*d*d) + conj(kmu*phi*phi*hd-knu*l*l*hu)*(kmu*phi*phi*hd-knu*l*l*hu) + conj(knu*l*hu*hu)*(knu*l*hu*hu) + conj(ld*hd*d)*(ld*hd*d) + conj(ldg*g*d)*(ldg*g*d) + conj(g*g)*(g*g);
*/
// evaluate potential derivatives
Tsq = 0;
for (short int f=5; f<13; f++) {
Tsq += modsq(d_Phi[f][x]);
}
dVdPhi[0] = Tphi * Tphi * phi * exp(-modsq(phi)*phi0*phi0/(Tphi*Tphi*m0*m0)) + 0.5*alpha*mphisq * ( log(modsq(phi)+mssq) - mssq/(modsq(phi)+mssq) ) * phi + 2.0*(l0/phi0)*(l0/phi0) * ( 0.0 - conj(Amu*kmu*phi*hu*hd) + conj(kmu*phi*hu)*(kmu*phi*phi*hu+0.5*ld*d*d) + conj(kmu*phi*hd)*(kmu*phi*phi*hd-knu*l*l*hu) );
dVdPhi[1] = mHusq*hu - conj(Amu*kmu*phi*phi*hd) + conj(Anu*knu*l*l*hu) + conj(kmu*phi*phi)*(kmu*phi*phi*hu+0.5*ld*d*d) - conj(knu*l*l)*(kmu*phi*phi*hd-knu*l*l*hu) + 2.0*conj(knu*l*hu)*(knu*l*hu*hu);
dVdPhi[2] = mHdsq*hd - conj(Amu*kmu*phi*phi*hu) - 0.5*conj(Ad*ld*d*d) + conj(kmu*phi*phi)*(kmu*phi*phi*hd-knu*l*l*hu) + conj(ld*d)*ld*hd*d;
dVdPhi[3] = mLsq*l + conj(Anu*knu*l*hu*hu) - 2.0*conj(knu*l*hu)*(kmu*phi*phi*hd-knu*l*l*hu) + conj(knu*hu*hu)*knu*l*hu*hu;
dVdPhi[4] = mdsq*d - conj(Ad*ld*hd*d) + conj(ld*d)*(kmu*phi*phi*hu+0.5*ld*d*d) + conj(ld*hd)*ld*hd*d + ldg*ldg*Tsq*d;
for (short int f=5;f<13;f++) {
dVdPhi[f] = conj(ldg*(l0*d/m0))*ldg*(l0*d/m0)*d_Phi[f][x] + conj(d_Phi[f][x])*d_Phi[f][x]*d_Phi[f][x];
}
// evaluate evolution variables
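// The Q-weighted sums below remove the component of the update along the charged direction
// Q[f]*Phi[f] (Q[] holds the per-field charges declared above)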
PhiQQPhi = 0;
for (short int f=1; f<5; f++) {
PhiQQPhi += Q[f] * Q[f] * modsq(d_Phi[f][x]);
}
PhiQQQPhi = 0;
for (short int f=1; f<5; f++) {
PhiQQQPhi += Q[f] * Q[f] * Q[f] * modsq(d_Phi[f][x]);
}
PhiQPhidot = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQPhidot = PhiQPhidot + conj(d_Phi[f][x]) * Q[f] * d_PhiDot[f][x];
}
PhiQd2Phidx2 = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQd2Phidx2 = PhiQd2Phidx2 + conj(d_Phi[f][x]) * Q[f] * d2Phidx2[f];
}
PhiQdVdPhi = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQdVdPhi = PhiQdVdPhi + conj(d_Phi[f][x]) * Q[f] * dVdPhi[f];
}
for (short int f=1; f<5; f++) {
pi[f] = d_PhiDot[f][x] + dt * d2Phidx2[f] - dt * dVdPhi[f] - ( ( PhiQPhidot + dt * PhiQd2Phidx2 - dt * PhiQdVdPhi ) / PhiQQPhi ) * Q[f] * d_Phi[f][x];
}
piQpi = 0;
for (short int f=1; f<5; f++) {
piQpi += Q[f] * modsq(pi[f]);
}
PhiQQpi = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQQpi = PhiQQpi + conj(d_Phi[f][x]) * Q[f] * Q[f] * pi[f];
}
Pi = ( 2.0 * piQpi ) / ( PhiQQPhi + dt * PhiQQpi.x + sqrt( ( PhiQQPhi + dt * PhiQQpi.x ) * ( PhiQQPhi + dt * PhiQQpi.x ) - dt * dt * piQpi * PhiQQQPhi ) );
// step
d_PhiDot[0][x] = d_PhiDot[0][x] + dt * d2Phidx2[0] - dt * dVdPhi[0];
for (short int f=1; f<5; f++) {
d_PhiDot[f][x] = pi[f] - 0.5 * dt * Pi * Q[f] * d_Phi[f][x];
}
for (short int f=5; f<13; f++) {
d_PhiDot[f][x] = d_PhiDot[f][x] + dt * d2Phidx2[f] - dt * dVdPhi[f];
}
}
}
// write continuous data to output file
void print(Complex<double> Phi[F][X], Complex<double> PhiDot[F][X], float ndt, ofstream &PhiFile, ofstream &PhiDotFile) {
Complex<float> tmp[F];
for (uint32_t x=0; x<X; x++) {
PhiFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) );
PhiFile.write( reinterpret_cast<const char*>(&x), sizeof(x) );
for (int f=0; f<F; f++) {
tmp[f].x = Phi[f][x].x;
tmp[f].y = Phi[f][x].y;
}
PhiFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) );
}
for (uint32_t x=0; x<X; x++) {
PhiDotFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) );
PhiDotFile.write( reinterpret_cast<const char*>(&x), sizeof(x) );
for (int f=0; f<F; f++) {
tmp[f].x = PhiDot[f][x].x;
tmp[f].y = PhiDot[f][x].y;
}
PhiDotFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) );
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//Main Loop
////////////////////////////////////////////////////////////////////////////////////////////////////
Complex<double> Phi[F][X];
Complex<double> PhiDot[F][X];
int main () {
/////////////
//Variables//
/////////////
chrono::high_resolution_clock::time_point start = chrono::high_resolution_clock::now();
cout << "\n////////////////////////////////////////////////////////////////////////////////\n\n";
SelectDevice();
string inputPhi, inputPhiDot, fileName;
fileName = "Phi.bin";
ofstream PhiFile(fileName, ios::binary);
fileName = "PhiDot.bin";
ofstream PhiDotFile(fileName, ios::binary);
Complex<double> (*d_Phi)[X];
Complex<double> (*d_PhiDot)[X];
hipMalloc(&d_Phi, (F*X)*sizeof(Complex<double>));
hipMalloc(&d_PhiDot, (F*X)*sizeof(Complex<double>));
const dim3 blockL = {1024};
const dim3 gridL = {(X+blockL.x-1)/blockL.x};
const dim3 blockS = {256};
const dim3 gridS = {(X+blockS.x-1)/blockS.x};
hipError_t __err;
__err = hipGetLastError();
if (__err != hipSuccess) {
cout << "\nFailed to create variables" << endl;
cout << hipGetErrorString(__err) << endl;
return -1;
}
//////////////////
//Initialization//
//////////////////
cout << "Would you like to use " << "initialPhi.txt" << " and " << "initialPhiDot.txt" << "?\n";
cout << "(Y)es/(N)o:";
//cin >> inputPhi;
inputPhi = "y";
if (inputPhi == "Y" || inputPhi == "y" || inputPhi == "Yes" || inputPhi == "yes") {
inputPhi = "initialPhi.bin";
inputPhiDot = "initialPhiDot.bin";
} else {
cout << "\nEnter the name of the input file for Phi:";
cin >> inputPhi;
cout << "\nEnter the name of the input file for PhiDot:";
cin >> inputPhiDot;
}
cout << "\n=============================================\n";
cout << "|| ||\n";
Read(Phi, PhiDot, inputPhi, inputPhiDot, N_i*time_step);
print(Phi, PhiDot, 0*time_step, PhiFile, PhiDotFile);
cout << "|| Finished reading files ||\n";
hipMemcpy(d_Phi, Phi, (F*X)*sizeof(Complex<double>), hipMemcpyHostToDevice);
hipMemcpy(d_PhiDot, PhiDot, (F*X)*sizeof(Complex<double>), hipMemcpyHostToDevice);
__err = hipGetLastError();
if (__err != hipSuccess) {
cout << "|| ||\n";
cout << "|| Failed to initialize ||" << endl;
cout << "|| " << setw(40) << left << hipGetErrorString(__err) << "||" << endl;
cout << "=============================================\n";
return -1;
}
cout << "|| Starting Simulation ||" << endl;
///////////////
//Calculation//
///////////////
for (int n=N_i; n<N_f; n+=S) {
hipLaunchKernelGGL(( StepPhiDot), dim3(gridS), dim3(blockS), 0, 0, d_Phi, d_PhiDot, n, time_step/2);
hipDeviceSynchronize();
for (int s = 1; s < S; s++) {
hipLaunchKernelGGL(( StepPhi), dim3(gridL), dim3(blockL), 0, 0, d_Phi, d_PhiDot, time_step);
hipDeviceSynchronize();
hipLaunchKernelGGL(( StepPhiDot), dim3(gridS), dim3(blockS), 0, 0, d_Phi, d_PhiDot, n+s, time_step);
hipDeviceSynchronize();
}
hipLaunchKernelGGL(( StepPhi), dim3(gridL), dim3(blockL), 0, 0, d_Phi, d_PhiDot, time_step);
hipDeviceSynchronize();
hipLaunchKernelGGL(( StepPhiDot), dim3(gridS), dim3(blockS), 0, 0, d_Phi, d_PhiDot, n+S, time_step/2);
hipDeviceSynchronize();
// Print continuous data
hipMemcpy(Phi, d_Phi, (F*X)*sizeof(Complex<double>), hipMemcpyDeviceToHost);
hipMemcpy(PhiDot, d_PhiDot, (F*X)*sizeof(Complex<double>), hipMemcpyDeviceToHost);
print(Phi, PhiDot, (n+S)*time_step, PhiFile, PhiDotFile);
cout << "|| Completed timestep " << setw(20) << left << n+S << " ||" << endl;
}
PhiFile.close();
PhiDotFile.close();
///////////
//Cleanup//
///////////
cout << "|| ||";
hipFree(d_Phi);
hipFree(d_PhiDot);
__err = hipGetLastError();
if (__err == hipSuccess) {
cout << "\n|| Everything seems fine :D ||" << endl;
} else {
cout << "\n|| Something is wrong... :( ||" << endl;
cout << "|| " << setw(40) << left << hipGetErrorString(__err) << "||" << endl;
}
cout << "|| ||";
cout << "\n=============================================\n";
chrono::high_resolution_clock::time_point end = chrono::high_resolution_clock::now();
cout << "\nProgram took " << chrono::duration_cast<chrono::seconds>(end - start).count() << " seconds.\n";
cout << "\n\n////////////////////////////////////////////////////////////////////////////////\n" << endl;
}
| 6c1b4ab44ca8fd4257140946480eb92dc0fcabd4.cu | // Version 20180514-01: Eliminated flaton and AD damping. Eliminated option to turn off back-reaction.
// Libraries
#include <fstream>
#include <string>
#include <iostream>
#include <chrono>
#include <common/select_GPU.cu>
#include <common/complex.cu>
#include <1D/parameters.hpp>
using namespace std;
// field macros
#define phi d_Phi[0][x]
#define hu d_Phi[1][x]
#define hd d_Phi[2][x]
#define l d_Phi[3][x]
#define d d_Phi[4][x]
////////////////////////////////////////////////////////////////////////////////////////////////////
// Functions
////////////////////////////////////////////////////////////////////////////////////////////////////
// read initial data from input files
void Read(Complex<double> Phi[F][X], Complex<double> PhiDot[F][X], string inputPhiStr, string inputPhiDotStr, double ndt) {
// Variables
float n;
uint32_t x;
Complex<float> tmp[F];
// read Phi
ifstream inputPhi(inputPhiStr, std::ios::binary);
while (inputPhi.read( reinterpret_cast<char*>(&n), sizeof(n))) {
if (n != ndt) {
inputPhi.seekg((sizeof(x) + sizeof(tmp)), std::ios::cur);
continue;
}
inputPhi.read( reinterpret_cast<char*>(&x), sizeof(x));
inputPhi.read( reinterpret_cast<char*>(&tmp), sizeof(tmp));
for (int f = 0; f < F; f++)
Phi[f][x] = Complex<double>{tmp[f].x, tmp[f].y};
}
inputPhi.close();
// read PhiDot
ifstream inputPhiDot(inputPhiDotStr, std::ios::binary);
while (inputPhiDot.read( reinterpret_cast<char*>(&n), sizeof(n))) {
if (n != ndt) {
inputPhiDot.seekg( (sizeof(x) + sizeof(tmp)), std::ios::cur);
continue;
}
inputPhiDot.read( reinterpret_cast<char*>(&x), sizeof(x));
inputPhiDot.read( reinterpret_cast<char*>(&tmp), sizeof(tmp));
for (int f = 0; f < F; f++)
PhiDot[f][x] = Complex<double>{tmp[f].x, tmp[f].y};
}
inputPhiDot.close();
}
// Calculate a single timestep of Phi
__global__ void StepPhi(Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X], double dt)
{
const uint2 index = {blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y};
const uint2 stride = {blockDim.x * gridDim.x, blockDim.y * gridDim.y};
for (int x = index.x; x<X; x += stride.x) {
for (int f = index.y; f<F; f += stride.y) {
d_Phi[f][x] = d_Phi[f][x] + dt * d_PhiDot[f][x];
}
}
}
// Calculate a single timestep of PhiDot
__global__ void StepPhiDot(Complex<double> d_Phi[F][X], Complex<double> d_PhiDot[F][X], int n, double dt)
{
const uint index = {blockIdx.x * blockDim.x + threadIdx.x};
const uint stride = {blockDim.x * gridDim.x};
// charges
const double Q[13] = {0,1,-1,-1,0.5,0,0,0,0,0,0,0,0}; // phi,h_u,h_d,l,d,8xg
// dependent parameters
const Complex<double> Amu = Complex<double>{ modAmu*cosf(argAmu), modAmu*sinf(argAmu)};
const Complex<double> Anu = Complex<double>{ modAnu*cosf(argAnu), modAnu*sinf(argAnu)};
const Complex<double> Ad = Complex<double>{ modAd*cosf(argAd), modAd*sinf(argAd)};
const double kmu = mu; // phi_0 = 1
const double knu = (( sqrt(modsq(Anu)) + sqrt( modsq(Anu) - 6.0*(mLsq+mHusq) ) ) / 6.0); // l_0 = 1
// evolution variables
const double Tphi = Tphi_i * exp(-GammaTphi*n*time_step);
Complex<double> d2Phidx2[F];
double Tsq;
Complex<double> dVdPhi[F];
double PhiQQPhi;
double PhiQQQPhi;
Complex<double> PhiQPhidot;
Complex<double> PhiQd2Phidx2;
Complex<double> PhiQdVdPhi;
Complex<double> pi[F];
double piQpi;
Complex<double> PhiQQpi;
double Pi;
for (int x=index; x<X; x+=stride) {
// evaluate Laplacian
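// (second-order central difference; the modular indexing (x+1)%X and (x+X-1)%X gives periodic boundaries)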
for (short int f=0; f<13; f++) {
d2Phidx2[f] = ( d_Phi[f][(x+1)%X] - 2*d_Phi[f][x] + d_Phi[f][(x+X-1)%X] ) / (dx*dx);
}
/* potential
V = V0 - Tphi*Tphi*Tphi*Tphi * (m0*m0/(phi0*phi0)) * exp(-conj(phi)*phi*phi0*phi0/(Tphi*Tphi*m0*m0)) + 0.5*alpha*mphisq * ( log(conj(phi)*phi+mssq) - 1 ) * conj(phi)*phi + mHusq*conj(hu)*hu + mHdsq*conj(hd)*hd + mLsq*conj(l)*l + mdsq*conj(d)*d - Amu*kmu*phi*phi*hu*hd + 0.5*Anu*knu*l*l*hu*hu - 0.5*Ad*ld*hd*d*d - conj(Amu*kmu*phi*phi*hu*hd) + conj(0.5*Anu*knu*l*l*hu*hu) - conj(0.5*Ad*ld*hd*d*d) + conj(kmu*phi*phi*hu+0.5*ld*d*d)*(kmu*phi*phi*hu+0.5*ld*d*d) + conj(kmu*phi*phi*hd-knu*l*l*hu)*(kmu*phi*phi*hd-knu*l*l*hu) + conj(knu*l*hu*hu)*(knu*l*hu*hu) + conj(ld*hd*d)*(ld*hd*d) + conj(ldg*g*d)*(ldg*g*d) + conj(g*g)*(g*g);
*/
// evaluate potential derivatives
Tsq = 0;
for (short int f=5; f<13; f++) {
Tsq += modsq(d_Phi[f][x]);
}
dVdPhi[0] = Tphi * Tphi * phi * exp(-modsq(phi)*phi0*phi0/(Tphi*Tphi*m0*m0)) + 0.5*alpha*mphisq * ( log(modsq(phi)+mssq) - mssq/(modsq(phi)+mssq) ) * phi + 2.0*(l0/phi0)*(l0/phi0) * ( 0.0 - conj(Amu*kmu*phi*hu*hd) + conj(kmu*phi*hu)*(kmu*phi*phi*hu+0.5*ld*d*d) + conj(kmu*phi*hd)*(kmu*phi*phi*hd-knu*l*l*hu) );
dVdPhi[1] = mHusq*hu - conj(Amu*kmu*phi*phi*hd) + conj(Anu*knu*l*l*hu) + conj(kmu*phi*phi)*(kmu*phi*phi*hu+0.5*ld*d*d) - conj(knu*l*l)*(kmu*phi*phi*hd-knu*l*l*hu) + 2.0*conj(knu*l*hu)*(knu*l*hu*hu);
dVdPhi[2] = mHdsq*hd - conj(Amu*kmu*phi*phi*hu) - 0.5*conj(Ad*ld*d*d) + conj(kmu*phi*phi)*(kmu*phi*phi*hd-knu*l*l*hu) + conj(ld*d)*ld*hd*d;
dVdPhi[3] = mLsq*l + conj(Anu*knu*l*hu*hu) - 2.0*conj(knu*l*hu)*(kmu*phi*phi*hd-knu*l*l*hu) + conj(knu*hu*hu)*knu*l*hu*hu;
dVdPhi[4] = mdsq*d - conj(Ad*ld*hd*d) + conj(ld*d)*(kmu*phi*phi*hu+0.5*ld*d*d) + conj(ld*hd)*ld*hd*d + ldg*ldg*Tsq*d;
for (short int f=5;f<13;f++) {
dVdPhi[f] = conj(ldg*(l0*d/m0))*ldg*(l0*d/m0)*d_Phi[f][x] + conj(d_Phi[f][x])*d_Phi[f][x]*d_Phi[f][x];
}
// evaluate evolution variables
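// The Q-weighted sums below remove the component of the update along the charged direction
// Q[f]*Phi[f] (Q[] holds the per-field charges declared above)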
PhiQQPhi = 0;
for (short int f=1; f<5; f++) {
PhiQQPhi += Q[f] * Q[f] * modsq(d_Phi[f][x]);
}
PhiQQQPhi = 0;
for (short int f=1; f<5; f++) {
PhiQQQPhi += Q[f] * Q[f] * Q[f] * modsq(d_Phi[f][x]);
}
PhiQPhidot = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQPhidot = PhiQPhidot + conj(d_Phi[f][x]) * Q[f] * d_PhiDot[f][x];
}
PhiQd2Phidx2 = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQd2Phidx2 = PhiQd2Phidx2 + conj(d_Phi[f][x]) * Q[f] * d2Phidx2[f];
}
PhiQdVdPhi = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQdVdPhi = PhiQdVdPhi + conj(d_Phi[f][x]) * Q[f] * dVdPhi[f];
}
for (short int f=1; f<5; f++) {
pi[f] = d_PhiDot[f][x] + dt * d2Phidx2[f] - dt * dVdPhi[f] - ( ( PhiQPhidot + dt * PhiQd2Phidx2 - dt * PhiQdVdPhi ) / PhiQQPhi ) * Q[f] * d_Phi[f][x];
}
piQpi = 0;
for (short int f=1; f<5; f++) {
piQpi += Q[f] * modsq(pi[f]);
}
PhiQQpi = Complex<double>{0,0};
for (short int f=1; f<5; f++) {
PhiQQpi = PhiQQpi + conj(d_Phi[f][x]) * Q[f] * Q[f] * pi[f];
}
Pi = ( 2.0 * piQpi ) / ( PhiQQPhi + dt * PhiQQpi.x + sqrt( ( PhiQQPhi + dt * PhiQQpi.x ) * ( PhiQQPhi + dt * PhiQQpi.x ) - dt * dt * piQpi * PhiQQQPhi ) );
// step
d_PhiDot[0][x] = d_PhiDot[0][x] + dt * d2Phidx2[0] - dt * dVdPhi[0];
for (short int f=1; f<5; f++) {
d_PhiDot[f][x] = pi[f] - 0.5 * dt * Pi * Q[f] * d_Phi[f][x];
}
for (short int f=5; f<13; f++) {
d_PhiDot[f][x] = d_PhiDot[f][x] + dt * d2Phidx2[f] - dt * dVdPhi[f];
}
}
}
// write continuous data to output file
void print(Complex<double> Phi[F][X], Complex<double> PhiDot[F][X], float ndt, ofstream &PhiFile, ofstream &PhiDotFile) {
Complex<float> tmp[F];
for (uint32_t x=0; x<X; x++) {
PhiFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) );
PhiFile.write( reinterpret_cast<const char*>(&x), sizeof(x) );
for (int f=0; f<F; f++) {
tmp[f].x = Phi[f][x].x;
tmp[f].y = Phi[f][x].y;
}
PhiFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) );
}
for (uint32_t x=0; x<X; x++) {
PhiDotFile.write( reinterpret_cast<const char*>(&ndt), sizeof(ndt) );
PhiDotFile.write( reinterpret_cast<const char*>(&x), sizeof(x) );
for (int f=0; f<F; f++) {
tmp[f].x = PhiDot[f][x].x;
tmp[f].y = PhiDot[f][x].y;
}
PhiDotFile.write( reinterpret_cast<const char*>(&tmp), sizeof(tmp) );
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//Main Loop
////////////////////////////////////////////////////////////////////////////////////////////////////
Complex<double> Phi[F][X];
Complex<double> PhiDot[F][X];
int main () {
/////////////
//Variables//
/////////////
chrono::high_resolution_clock::time_point start = chrono::high_resolution_clock::now();
cout << "\n////////////////////////////////////////////////////////////////////////////////\n\n";
SelectDevice();
string inputPhi, inputPhiDot, fileName;
fileName = "Phi.bin";
ofstream PhiFile(fileName, ios::binary);
fileName = "PhiDot.bin";
ofstream PhiDotFile(fileName, ios::binary);
Complex<double> (*d_Phi)[X];
Complex<double> (*d_PhiDot)[X];
cudaMalloc(&d_Phi, (F*X)*sizeof(Complex<double>));
cudaMalloc(&d_PhiDot, (F*X)*sizeof(Complex<double>));
const dim3 blockL = {1024};
const dim3 gridL = {(X+blockL.x-1)/blockL.x};
const dim3 blockS = {256};
const dim3 gridS = {(X+blockS.x-1)/blockS.x};
cudaError_t __err;
__err = cudaGetLastError();
if (__err != cudaSuccess) {
cout << "\nFailed to create variables" << endl;
cout << cudaGetErrorString(__err) << endl;
return -1;
}
//////////////////
//Initialization//
//////////////////
cout << "Would you like to use " << "initialPhi.txt" << " and " << "initialPhiDot.txt" << "?\n";
cout << "(Y)es/(N)o:";
//cin >> inputPhi;
inputPhi = "y";
if (inputPhi == "Y" || inputPhi == "y" || inputPhi == "Yes" || inputPhi == "yes") {
inputPhi = "initialPhi.bin";
inputPhiDot = "initialPhiDot.bin";
} else {
cout << "\nEnter the name of the input file for Phi:";
cin >> inputPhi;
cout << "\nEnter the name of the input file for PhiDot:";
cin >> inputPhiDot;
}
cout << "\n=============================================\n";
cout << "|| ||\n";
Read(Phi, PhiDot, inputPhi, inputPhiDot, N_i*time_step);
print(Phi, PhiDot, 0*time_step, PhiFile, PhiDotFile);
cout << "|| Finished reading files ||\n";
cudaMemcpy(d_Phi, Phi, (F*X)*sizeof(Complex<double>), cudaMemcpyHostToDevice);
cudaMemcpy(d_PhiDot, PhiDot, (F*X)*sizeof(Complex<double>), cudaMemcpyHostToDevice);
__err = cudaGetLastError();
if (__err != cudaSuccess) {
cout << "|| ||\n";
cout << "|| Failed to initialize ||" << endl;
cout << "|| " << setw(40) << left << cudaGetErrorString(__err) << "||" << endl;
cout << "=============================================\n";
return -1;
}
cout << "|| Starting Simulation ||" << endl;
///////////////
//Calculation//
///////////////
for (int n=N_i; n<N_f; n+=S) {
StepPhiDot<<<gridS, blockS>>>(d_Phi, d_PhiDot, n, time_step/2);
cudaDeviceSynchronize();
for (int s = 1; s < S; s++) {
StepPhi<<<gridL, blockL>>>(d_Phi, d_PhiDot, time_step);
cudaDeviceSynchronize();
StepPhiDot<<<gridS, blockS>>>(d_Phi, d_PhiDot, n+s, time_step);
cudaDeviceSynchronize();
}
StepPhi<<<gridL, blockL>>>(d_Phi, d_PhiDot, time_step);
cudaDeviceSynchronize();
StepPhiDot<<<gridS, blockS>>>(d_Phi, d_PhiDot, n+S, time_step/2);
cudaDeviceSynchronize();
// Print continuous data
cudaMemcpy(Phi, d_Phi, (F*X)*sizeof(Complex<double>), cudaMemcpyDeviceToHost);
cudaMemcpy(PhiDot, d_PhiDot, (F*X)*sizeof(Complex<double>), cudaMemcpyDeviceToHost);
print(Phi, PhiDot, (n+S)*time_step, PhiFile, PhiDotFile);
cout << "|| Completed timestep " << setw(20) << left << n+S << " ||" << endl;
}
PhiFile.close();
PhiDotFile.close();
///////////
//Cleanup//
///////////
cout << "|| ||";
cudaFree(d_Phi);
cudaFree(d_PhiDot);
__err = cudaGetLastError();
if (__err == cudaSuccess) {
cout << "\n|| Everything seems fine :D ||" << endl;
} else {
cout << "\n|| Something is wrong... :( ||" << endl;
cout << "|| " << setw(40) << left << cudaGetErrorString(__err) << "||" << endl;
}
cout << "|| ||";
cout << "\n=============================================\n";
chrono::high_resolution_clock::time_point end = chrono::high_resolution_clock::now();
cout << "\nProgram took " << chrono::duration_cast<chrono::seconds>(end - start).count() << " seconds.\n";
cout << "\n\n////////////////////////////////////////////////////////////////////////////////\n" << endl;
}
|
e4dc9e27b0bb0001705a7fdff9cdbe1cc41db5d5.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include "hip/hip_runtime.h"
#include "rocblas.h"
#include "rocblas.h"
#include "../include/shared_functions.h"
#if defined (__DOUBLE__) && (CUARCH<20)
#define BLOCKSIZE 256
#define SHARED_SIZE_LIMIT 512
#else
#define BLOCKSIZE 512
#define SHARED_SIZE_LIMIT 1024
#endif
#define BLOCKSIZE_Q 128
#define DIMENTIONS 128
#define SINGLE_STREAM_BLOCKS 4
#define FORCE_SINGLE_STREAM -5
#ifndef MEMTEST
__device__ void Comparator(knntype& keyA, knntype& valA, knntype& keyB, knntype& valB, int dir){
knntype t;
if( (keyA > keyB) == dir ){
t = keyA; keyA = keyB; keyB = t;
t = valA; valA = valB; valB = t;
}
}
__device__ void Comparator_elim(knntype& keyA, knntype& valA, knntype& keyB, knntype& valB, int dir){
if( (keyA > keyB) == dir ){
keyA = keyB;
valA = valB;
}
}
__global__ void bitonic_shared(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, int arrayLength, int objects, int queries, uint dir, int k, int qk, int idOffset){
__shared__ knntype s_key[SHARED_SIZE_LIMIT];
__shared__ knntype s_val[SHARED_SIZE_LIMIT];
int tid = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
SrcKey += blockIdx.y*arrayLength;
SrcVal += blockIdx.y*arrayLength;
DstKey += blockIdx.y*arrayLength;
DstVal += blockIdx.y*arrayLength;
knntype* SrcKey_ptr = SrcKey;
knntype* SrcVal_ptr = SrcVal;
knntype* DstKey_ptr = DstKey;
knntype* DstVal_ptr = DstVal;
SrcKey_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
SrcVal_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
DstKey_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
DstVal_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = (tid<objects) ? SrcKey_ptr[0] : FLT_MAX;
s_val[threadIdx.x + 0] = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x + idOffset;
s_key[threadIdx.x + (SHARED_SIZE_LIMIT >> 1)] = ((tid + (SHARED_SIZE_LIMIT>>1))<objects) ? SrcKey_ptr[(SHARED_SIZE_LIMIT >> 1)] : FLT_MAX;
s_val[threadIdx.x + (SHARED_SIZE_LIMIT >> 1)] = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x + (SHARED_SIZE_LIMIT >> 1) + idOffset;
__syncthreads();
uint ddd;
uint pos;
//SHARED_SIZE_LIMIT
for(uint size = 2; size <= k; size <<= 1){
//Bitonic merge
ddd = dir^(threadIdx.x & (size >> 1)) != 0;
for(uint stride = size >> 1; stride > 0; stride >>= 1){
__syncthreads();
pos = (threadIdx.x << 1) - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd);
}
}
for(int obj = SHARED_SIZE_LIMIT >> 1; obj >= k; obj >>= 1){
__syncthreads();
// End of first part
int bi = threadIdx.x >> qk;
int li = threadIdx.x & (k-1);
int pb = (obj >> qk) + ((bi + 1) & ((obj >> qk)-1));
int prt = (pb << qk) + li;
if(threadIdx.x<obj){
Comparator_elim(s_key[threadIdx.x], s_val[threadIdx.x], s_key[prt], s_val[prt], 1);
uint size = k;
ddd = dir ^ ( (threadIdx.x & (size >> 1)) != 0 );
for(int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd);
}
}
}
__syncthreads();
if(threadIdx.x < k){
DstKey[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_key[threadIdx.x] : s_key[k-threadIdx.x-1];
DstVal[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_val[threadIdx.x] : s_val[k-threadIdx.x-1];
}
}
__global__ void bitonic_shared2(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, int arrayLength, int objects, int queries, uint dir, int k, int qk){
__shared__ knntype s_key[SHARED_SIZE_LIMIT];
__shared__ knntype s_val[SHARED_SIZE_LIMIT];
int SIZE_LIMIT = blockDim.x << 1;
int tid = blockIdx.x * SIZE_LIMIT + threadIdx.x;
SrcKey += blockIdx.y*arrayLength;
SrcVal += blockIdx.y*arrayLength;
DstKey += blockIdx.y*arrayLength;
DstVal += blockIdx.y*arrayLength;
knntype* SrcKey_ptr = SrcKey;
knntype* SrcVal_ptr = SrcVal;
knntype* DstKey_ptr = DstKey;
knntype* DstVal_ptr = DstVal;
SrcKey_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
SrcVal_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
DstKey_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
DstVal_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = (tid<objects) ? SrcKey_ptr[0] : FLT_MAX;
s_val[threadIdx.x + 0] = (tid<objects) ? SrcVal_ptr[0] : FLT_MAX;
s_key[threadIdx.x + (SIZE_LIMIT >> 1)] = ((tid+(SIZE_LIMIT>>1))<objects) ? SrcKey_ptr[(SIZE_LIMIT >> 1)] : FLT_MAX;
s_val[threadIdx.x + (SIZE_LIMIT >> 1)] = ((tid+(SIZE_LIMIT>>1))<objects) ? SrcVal_ptr[(SIZE_LIMIT >> 1)] : FLT_MAX;
__syncthreads();
uint ddd;
uint pos;
for(int obj = SIZE_LIMIT >> 1; obj >= k; obj >>= 1){
__syncthreads();
// End of first part
int bi = threadIdx.x >> qk;
int li = threadIdx.x & (k-1);
int pb = (obj >> qk) + ((bi + 1) & ((obj >> qk)-1));
int prt = (pb << qk) + li;
if(threadIdx.x<obj){
Comparator_elim(s_key[threadIdx.x], s_val[threadIdx.x], s_key[prt], s_val[prt], 1);
uint size = k;
ddd = dir ^ ( (threadIdx.x & (size >> 1)) != 0 );
for(int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd);
}
}
}
__syncthreads();
if(threadIdx.x < k){
DstKey[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_key[threadIdx.x] : s_key[k-threadIdx.x-1];
DstVal[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_val[threadIdx.x] : s_val[k-threadIdx.x-1];
}
}
__global__ void relloc(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, int objects, int k){
DstKey += blockIdx.x * k + threadIdx.x;
DstVal += blockIdx.x * k + threadIdx.x;
SrcKey += blockIdx.x * objects + threadIdx.x;
SrcVal += blockIdx.x * objects + threadIdx.x;
if(threadIdx.x < k){
DstKey[0] = SrcKey[0];
DstVal[0] = SrcVal[0];
}
}
void BitonicSelect(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, knntype *buffkey, knntype *buffval, int objects, int queries, int k, int qk, hipStream_t str, int streamId){
int numObjects = objects;
knntype *tmpkey1 = SrcKey;
knntype *tmpkey2 = buffkey;
knntype *tmpval1 = SrcVal;
knntype *tmpval2 = buffval;
knntype *tmpV;
knntype *tmpK;
dim3 threads(SHARED_SIZE_LIMIT / 2, 1);
int grd = (objects & (SHARED_SIZE_LIMIT-1)) ? objects / SHARED_SIZE_LIMIT + 1 : objects / SHARED_SIZE_LIMIT;
dim3 grid(grd, queries);
int idOffset = streamId*objects;
hipLaunchKernelGGL(( bitonic_shared), dim3(grid), dim3(threads), 0, str, buffkey, buffval, SrcKey, SrcVal, numObjects, objects, queries, 1, k, qk, idOffset);
objects = grd*k;
int robjects = objects;
objects = (objects & (SHARED_SIZE_LIMIT-1)) ? (objects / SHARED_SIZE_LIMIT + 1)*SHARED_SIZE_LIMIT : (objects / SHARED_SIZE_LIMIT)*SHARED_SIZE_LIMIT;
while(robjects > k){
int blockSize = SHARED_SIZE_LIMIT<objects ? SHARED_SIZE_LIMIT : objects;
dim3 threadsp(blockSize/2, 1);
dim3 gridp(objects / blockSize, queries);
hipLaunchKernelGGL(( bitonic_shared2), dim3(gridp), dim3(threadsp), 0, str, tmpkey1, tmpval1, tmpkey2, tmpval2, numObjects, robjects, queries, 1, k, qk);
tmpK = tmpkey1; tmpkey1 = tmpkey2; tmpkey2 = tmpK;
tmpV = tmpval1; tmpval1 = tmpval2; tmpval2 = tmpV;
objects = k*(objects / blockSize);
robjects = objects;
objects = (objects & (SHARED_SIZE_LIMIT-1)) ? (objects / SHARED_SIZE_LIMIT + 1)*SHARED_SIZE_LIMIT : (objects / SHARED_SIZE_LIMIT)*SHARED_SIZE_LIMIT;
}
dim3 threads_relloc(k, 1);
dim3 grid_relloc(queries,1);
hipLaunchKernelGGL(( relloc), dim3(grid_relloc), dim3(threads_relloc), 0, str, DstKey, DstVal, tmpkey2, tmpval2, numObjects, k);
}
__global__ void initialize_index_B(knntype* data, int objects, int numQueries){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid<objects){
#pragma unroll 2
for(int i=0; i<numQueries; i++){
data[i*objects + tid] = tid;
}
}
}
/* Test function test function currently no used */
extern "C" void cuknnsBitonic(knntype *dist, knntype *data, knntype *query, knntype *index, knntype *dotp, knntype *d_dotB, knntype *distbuff, knntype *idxbuff, int objects, int attributes, int numQueries, int k, hipblasHandle_t handle, hipStream_t str, knntimes* times, int strId){
float tmp_time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int qk = (int)(log((float)k) / log(2.0));
dim3 inThreads(BLOCKSIZE, 1);
int block = (objects & (BLOCKSIZE-1)) ? objects / BLOCKSIZE + 1 : objects / BLOCKSIZE;
dim3 inGrid(block, 1);
hipEventRecord(start, 0);
hipLaunchKernelGGL(( initialize_index_B), dim3(inGrid), dim3(inThreads), 0, str, index, objects, numQueries);
pdist_N(dist, data, query, dotp, objects, attributes, numQueries, handle, str);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_time, start, stop);
times->dst_time += tmp_time;
hipEventRecord(start, 0);
BitonicSelect(dist, index, dist, index, distbuff, idxbuff, objects, numQueries, k, qk, str, strId);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_time, start, stop);
times->srch_time += tmp_time;
switch(attributes){
case 50:
//dot4_50<<<numQueries, 50, 0, str>>>(d_dotB, query);
case 128:
//dot4<<<numQueries, DIMENTIONS, 0, str>>>(d_dotB, query);
break;
case 1024:
//dot4_1024<<<numQueries, 512, 0, str>>>(d_dotB, query);
break;
case 2048:
#if defined (CUARCH) && (CUARCH>=20)
//dot4_2048<<<numQueries, 1024, 0, str>>>(d_dotB, query);
#endif
#if defined(CUARCH) && (CUARCH<20)
//dot4_2048<<<numQueries, 512, 0, str>>>(d_dotB, query);
#endif
break;
}
dim3 threads2(k, 1);
dim3 grid2(numQueries, 1);
hipLaunchKernelGGL(( dot3), dim3(grid2), dim3(threads2), 0, str, dist, d_dotB);
hipEventDestroy(start);
hipEventDestroy(stop);
}
/* KNNS using TBiS */
extern "C" void cuknnsBitonicSTR(knntype *dist, knntype *data, knntype *query, knntype *index, knntype *dotp, knntype *d_dotB, knntype *distbuff, knntype *idxbuff, int objects, int attributes, int numQueries, int k, hipblasHandle_t handle, hipStream_t str, knntimes* times, int strId, distFunctParam *distFunc){
float tmp_time;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int qk = (int)(log((float)k) / log(2.0));
dim3 inThreads(BLOCKSIZE, 1);
int block = (objects & (BLOCKSIZE-1)) ? objects / BLOCKSIZE + 1 : objects / BLOCKSIZE;
dim3 inGrid(block, 1);
hipEventRecord(start, str);
distFunc->distF(dist, data, query, dotp, objects, attributes, numQueries, handle, str, &distFunc->dotP);
hipEventRecord(stop, str);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_time, start, stop);
times->dst_time = tmp_time;
hipEventRecord(start, str);
BitonicSelect(dist, index, dist, index, distbuff, idxbuff, objects, numQueries, k, qk, str, strId);
hipEventRecord(stop, str);
hipEventSynchronize(stop);
hipEventElapsedTime(&tmp_time, start, stop);
times->srch_time += tmp_time;
times->srch_time = tmp_time;
dim3 threads2(k, 1);
dim3 grid2(numQueries, 1);
hipLaunchKernelGGL(( dot3), dim3(grid2), dim3(threads2), 0, str, dist, d_dotB);
hipEventDestroy(start);
hipEventDestroy(stop);
}
void mergeResBitonic(knntype *data, knntype *idx, int k, int Q, int numStreams){
for(int s=0; s<Q; s++){
knntype cmax = -FLT_MAX;
int maxid = 0;
for(int i=0; i<k; i++){
knntype tmp = data[s*k+i];
if(tmp>cmax){
cmax = tmp;
maxid = i;
}
}
for(int i=1; i<numStreams; i++){
for(int j=0; j<k; j++){
knntype tmp = data[s*k + i*Q*k + j];
if(tmp<cmax){
data[s*k + maxid] = tmp;
idx[s*k + maxid] = idx[s*k + i*Q*k + j];
//max = data[s*k];
cmax = -FLT_MAX;
for(int p=0; p<k; p++){
knntype tmp2 = data[s*k + p];
if(tmp2>cmax){
cmax = tmp2;
maxid = p;
}
}
}
}
}
}
}
#endif
#ifndef MEMTEST
extern "C" double gpuknnsBitonic(knntype *query, knntype *data, knntype *values, knntype *indices, int objects, int numQueries, int attributes, int k, int numStreams){
#else
extern "C" double gpuknnsBitonicMemTest(knntype *query, knntype *data, knntype *values, knntype *indices, int objects, int numQueries, int attributes, int k, int numStreams){
#endif
knntype *d_data, *d_query;
knntype *d_dotp, *d_dist, *d_labels;
size_t memory_free, memory_total;
double TimeOut;
knntimes TimesOut;
cuMemGetInfo(&memory_free, &memory_total);
TimesOut.dst_time = 0;
TimesOut.srch_time = 0;
TimesOut.knn_time = 0;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipblasHandle_t handle;
hipblasCreate(&handle);
hipMalloc((void**)&d_query, numQueries*attributes*sizeof(knntype));
/* initialize distance functions */
distFunctParam dstfunc;
dstfunc.distF = pdist_NT;
switch(attributes){
case 50:
dstfunc.dotP.dotQ = &dot4_50;
dstfunc.dotP.nthreadsQ = 32;
dstfunc.dotP.dotCrp = &crpdot2_50;
dstfunc.dotP.nthreadsCrp = 32;
dstfunc.dotP.externShared = 0;
break;
case 128:
dstfunc.dotP.dotQ = &dot4;
dstfunc.dotP.nthreadsQ = 128;
dstfunc.dotP.dotCrp = &crpdot2_128;
dstfunc.dotP.nthreadsCrp = 128;
dstfunc.dotP.externShared = 0;
break;
case 1024:
dstfunc.dotP.dotQ = &dot4_1024;
dstfunc.dotP.nthreadsQ = 512;
dstfunc.dotP.dotCrp = &crpdot2_1024;
dstfunc.dotP.nthreadsCrp = 512;
dstfunc.dotP.externShared = 0;
break;
case 2048:
dstfunc.dotP.dotQ = &dot4_2048;
dstfunc.dotP.nthreadsQ = 512;
dstfunc.dotP.dotCrp = &crpdot2_2048;
dstfunc.dotP.nthreadsCrp = 512;
dstfunc.dotP.externShared = 0;
break;
default:
dstfunc.dotP.dotQ = &dot4_gen;
dstfunc.dotP.nthreadsQ = ceil(log2((float)attributes));
dstfunc.dotP.dotCrp = &crpdot2_gen;
dstfunc.dotP.nthreadsCrp = dstfunc.dotP.nthreadsQ;
dstfunc.dotP.externShared = dstfunc.dotP.nthreadsQ;
}
/* calculate the number of streams */
int memory_req = (4*numQueries + attributes)*sizeof(knntype);
int maxObjects = (int)ceil((float)memory_free*0.9 / (float)memory_req);
maxObjects = (1 << (int)floor(log((double)maxObjects)/log(2)));
#ifndef MEMTEST
int reqStreams = (maxObjects < objects) ? 2 : 1;
#endif
#ifdef MEMTEST
int reqStreams = 1;
#endif
int CorpusBlocks = (int)ceil((float)objects/(float)maxObjects);
int blocksPstream = (numStreams == FORCE_SINGLE_STREAM) ? SINGLE_STREAM_BLOCKS : 1;
numStreams = (numStreams == FORCE_SINGLE_STREAM) ? 1 : max(numStreams, reqStreams);
maxObjects = min(maxObjects, objects);
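// With more than one stream, the host-to-device copy of one corpus slice can overlap the
// distance/selection kernels running on another slice.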
/*Initialize Streams */
hipStream_t *stream = (hipStream_t*)malloc(numStreams*sizeof(hipStream_t));
for(int i=0; i<numStreams; i++){
hipStreamCreate__(&stream[i], 0);
}
/* Initialize memory */
knntype *outbuffDist, *outbuffIdx;
hipHostMalloc((void**)&outbuffDist, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
hipHostMalloc((void**)&outbuffIdx, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
hipMalloc((void**)&d_data, maxObjects*attributes*sizeof(knntype));
hipMalloc((void**)&d_dotp, maxObjects*sizeof(knntype));
hipMalloc((void**)&d_dist, maxObjects*numQueries*sizeof(knntype));
hipMalloc((void**)&d_labels, maxObjects*numQueries*sizeof(knntype));
knntype *d_dotB, *distbuff, *idxbuff;
hipMalloc((void**)&d_dotB, numQueries*sizeof(knntype));
hipMalloc((void**)&distbuff, numQueries*maxObjects*sizeof(knntype));
hipMalloc((void**)&idxbuff, numQueries*maxObjects*sizeof(knntype));
hipMemcpyAsync(d_query, query, numQueries*attributes*sizeof(knntype), hipMemcpyHostToDevice, stream[0]);
/*compute the dot product of the queries*/
hipLaunchKernelGGL((dstfunc.dotP.dotQ), dim3(numQueries), dim3(dstfunc.dotP.nthreadsQ), dstfunc.dotP.externShared, stream[0], d_dotB, d_query, attributes);
int fail = 0;
//float TotalDstTime = 0;
//float TotalSeachTime = 0;
//float TotalCompTime = 0;
hipEventRecord(start, 0);
for(int ii=0, c = 0; ii<objects; ii+=maxObjects, c++){
int CorpusBlockSize = min(maxObjects, objects-ii);
int StreamSize = CorpusBlockSize / numStreams;
for(int jj=0; jj<numStreams; jj++){
hipMemcpyAsync(d_data + jj*StreamSize*attributes, data + ii*attributes + jj*StreamSize*attributes, StreamSize*attributes*sizeof(knntype), hipMemcpyHostToDevice, stream[jj]);
}
#ifndef MEMTEST
for(int jj=0; jj<numStreams; jj++){
cuknnsBitonicSTR(d_dist + jj*StreamSize*numQueries, d_data + jj*StreamSize*attributes, d_query, d_labels + jj*StreamSize*numQueries, d_dotp+jj*StreamSize, d_dotB, distbuff + jj*StreamSize*numQueries, idxbuff + jj*StreamSize*numQueries, StreamSize, attributes, numQueries, k, handle, stream[jj], &TimesOut, c*numStreams + jj, &dstfunc);
}
#endif
for(int jj=0; jj<numStreams; jj++){
hipMemcpyAsync(outbuffDist + jj*k*numQueries + c*numStreams*k*numQueries, d_dist + jj*StreamSize*numQueries, k*numQueries*sizeof(knntype), hipMemcpyDeviceToHost, stream[jj]);
hipMemcpyAsync(outbuffIdx + jj*k*numQueries + c*numStreams*k*numQueries, d_labels + jj*StreamSize*numQueries, k*numQueries*sizeof(knntype), hipMemcpyDeviceToHost, stream[jj]);
}
}
hipCtxSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
TimesOut.knn_time = (fail==0) ? elapsedTime / 1000 : FLT_MAX;
TimeOut = TimesOut.knn_time;
//printf("Time Elapsed: %f\n", TimeOut);
int ss = numStreams;
if(ss*CorpusBlocks>1){
mergeResBitonic(outbuffDist, outbuffIdx, k, numQueries, ss*CorpusBlocks);
}
memcpy(values, outbuffDist, k*numQueries*sizeof(knntype));
memcpy(indices, outbuffIdx, k*numQueries*sizeof(knntype));
for(int i=0; i<numStreams; i++){
hipStreamDestroy(stream[i]);
}
hipFree(d_dotB);
hipFree(distbuff);
hipFree(idxbuff);
hipHostFree(outbuffDist);
hipHostFree(outbuffIdx);
hipblasDestroy(handle);
hipFree(d_data);
hipFree(d_query);
hipFree(d_dotp);
hipFree(d_dist);
hipFree(d_labels);
hipEventDestroy(start);
hipEventDestroy(stop);
free(stream);
return(TimeOut);
}
/* Under development */
#ifndef MEMTEST
extern "C" float gpuknnLSH(knntype *query, knntype *data, knntype *values, knntype *indices, knntype *dp, int objects, int numQueries, int attributes, int k, int numStreams, int *bucketSize, int *query_offsets, int numClusters, int *query_sizes){
#else
extern "C" float gpuknnLSHmemtest(knntype *query, knntype *data, knntype *values, knntype *indices, knntype *dp, int objects, int numQueries, int attributes, int k, int numStreams, int *bucketSize, int *query_offsets, int numClusters, int *query_sizes){
#endif
knntype *d_data, *d_query;
knntype *d_dotp, *d_dist, *d_labels;
size_t memory_free, memory_total;
double TimeOut;
knntimes TimesOut;
cuMemGetInfo(&memory_free, &memory_total);
TimesOut.dst_time = 0;
TimesOut.srch_time = 0;
TimesOut.knn_time = 0;
float elapsedTime;
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipblasHandle_t handle;
hipblasCreate(&handle);
hipMalloc((void**)&d_query, numQueries*attributes*sizeof(knntype));
/* calculate the number of streams */
int memory_req = (4*numQueries + attributes)*sizeof(knntype);
//int memory_req = (4*numStreams + attributes)*sizeof(knntype);
int maxObjects = (int)ceil((float)memory_free*0.9 / (float)memory_req);
maxObjects = (1 << (int)floor(log((double)maxObjects)/log(2)));
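/* Worked example (illustrative figures, assuming an 8-byte knntype): with
   numQueries = 1024, attributes = 128 and ~8 GB free, memory_req =
   (4*1024 + 128)*8 = 33792 bytes per corpus point, so maxObjects =
   ceil(0.9*8e9 / 33792) = ~213k, which the power-of-two rounding above
   turns into 2^17 = 131072 corpus points resident at a time. */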
/*
#ifndef MEMTEST
int reqStreams = (maxObjects < objects) ? 2 : 1;
#endif
#ifdef MEMTEST
int reqStreams = 1;
#endif
*/
int CorpusBlocks = (int)ceil((float)objects/(float)maxObjects);
//int StreamingEnable = 1;
int blocksPstream = (numStreams == FORCE_SINGLE_STREAM) ? SINGLE_STREAM_BLOCKS : 1;
numStreams = (numStreams == FORCE_SINGLE_STREAM) ? 1 : numStreams;
//printf("General inter:\n");
//printf("Free memory: %d\n", memory_free);
//printf("Objects : %d\n", objects);
//printf("max Corpus size Size: %d\n", maxObjects);
//printf("Blocks of the Corpus: %d\n", CorpusBlocks);
//printf("numStreams = %d\n", numStreams);
//printf("Blocks Per Stream: %d\n", blocksPstream);
//maxObjects = min(maxObjects, objects);
//maxObjects = min(maxObjects, (int)ceil(objects/1024)*1024);
printf("New maxObjets: %d\n", maxObjects);
/*Initialize Streams */
hipStream_t *stream = (hipStream_t*)malloc(numStreams*sizeof(hipStream_t));
for(int i=0; i<numStreams; i++){
hipStreamCreateWithFlags(&stream[i], 0);
}
/* Initialize memory */
knntype *outbuffDist, *outbuffIdx;
hipHostMalloc((void**)&outbuffDist, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
hipHostMalloc((void**)&outbuffIdx, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
//hipHostMalloc((void**)&outbuffDist, numQueries*k*sizeof(knntype));
//hipHostMalloc((void**)&outbuffIdx, numQueries*k*sizeof(knntype));
hipMalloc((void**)&d_data, maxObjects*attributes*sizeof(knntype));
hipMalloc((void**)&d_dotp, maxObjects*sizeof(knntype));
hipMalloc((void**)&d_dist, maxObjects*numQueries*sizeof(knntype));
hipMalloc((void**)&d_labels, maxObjects*numQueries*sizeof(knntype));
//hipMalloc((void**)&d_dist, maxObjects*numStreams*sizeof(knntype));
//hipMalloc((void**)&d_labels, maxObjects*numStreams*sizeof(knntype));
knntype *d_dotB, *distbuff, *idxbuff;
hipMalloc((void**)&d_dotB, numQueries*sizeof(knntype));
hipMalloc((void**)&distbuff, numQueries*maxObjects*sizeof(knntype));
hipMalloc((void**)&idxbuff, numQueries*maxObjects*sizeof(knntype));
//hipMalloc((void**)&distbuff, numStreams*maxObjects*sizeof(knntype));
//hipMalloc((void**)&idxbuff, numStreams*maxObjects*sizeof(knntype));
hipMemcpyAsync(d_query, query, numQueries*attributes*sizeof(knntype), hipMemcpyHostToDevice, stream[0]);
/*compute the dot product of the queries*/
switch(attributes){
case 128:
//dot4<<<numQueries, DIMENTIONS, 0, stream[0]>>>(d_dotB, d_query);
break;
case 1024:
//dot4_1024<<<numQueries, 512, 0, stream[0]>>>(d_dotB, d_query);
break;
case 2048:
#if defined (CUARCH) && (CUARCH>=20)
//dot4_2048<<<numQueries, 1024, 0, stream[0]>>>(d_dotB, d_query);
#endif
#if defined(CUARCH) && (CUARCH<20)
//dot4_2048<<<numQueries, 512, 0, stream[0]>>>(d_dotB, d_query);
#endif
break;
}
//printf("Starting Streaming\n");
int fail = 0;
//float TotalDstTime = 0;
//float TotalSeachTime = 0;
//float TotalCompTime = 0;
hipEventRecord(start, 0);
int offset = 0;
for(int ii=0, c = 0; ii<numClusters; ii+=numStreams, c++){
//int CorpusBlockSize = min(maxObjects, objects-ii);
int StreamSize = bucketSize[ii];
//int offset = redSizes[ii];
int pasedStreams = 0;
int mOffset = (offset & ((objects>>1)-1));
//for(int jj=0; jj<numStreams, pasedStreams<maxObjects; jj++){
for(int jj=0; jj<numStreams; jj++){
StreamSize = bucketSize[ii+jj];
//printf("Query: %d, stream: %d, pasedStreams:%d, offset: %d, moffset: %d\n", ii+jj, jj, pasedStreams, offset+pasedStreams, mOffset);
hipMemcpyAsync(d_data + pasedStreams*attributes, data + mOffset*attributes + pasedStreams*attributes, StreamSize*attributes*sizeof(knntype), hipMemcpyHostToDevice, stream[jj]);
hipMemcpyAsync(d_dotp + pasedStreams, dp + mOffset + pasedStreams, StreamSize*sizeof(knntype), hipMemcpyHostToDevice, stream[jj]);
pasedStreams += StreamSize;
}
#ifndef MEMTEST
pasedStreams = 0;
//for(int jj=0; jj<numStreams, pasedStreams<maxObjects; jj++){
for(int jj=0; jj<numStreams; jj++){
StreamSize = bucketSize[ii+jj];
//cuknnsBitonicSTR(d_dist + pasedStreams*query_offsets[ii+jj], d_data + pasedStreams*attributes, d_query+query_offsets[ii+jj]*attributes, d_labels + pasedStreams*query_offsets[ii+jj], d_dotp+pasedStreams, d_dotB + query_offsets[ii+jj], distbuff + pasedStreams*query_offsets[ii+jj], idxbuff + pasedStreams*query_offsets[ii+jj], StreamSize, attributes, query_sizes[ii+jj], k, handle, stream[jj], &TimesOut, jj);
pasedStreams += StreamSize;
}
#endif
pasedStreams = 0;
//for(int jj=0; jj<numStreams, pasedStreams<maxObjects; jj++){
for(int jj=0; jj<numStreams; jj++){
StreamSize = bucketSize[ii+jj];
hipMemcpyAsync(outbuffDist + k*query_offsets[ii+jj], d_dist + pasedStreams*query_offsets[ii+jj], query_sizes[ii+jj]*k*sizeof(knntype), hipMemcpyDeviceToHost, stream[jj]);
hipMemcpyAsync(outbuffIdx + k*query_offsets[ii+jj], d_labels + pasedStreams*query_offsets[ii+jj], query_sizes[ii+jj]*k*sizeof(knntype), hipMemcpyDeviceToHost, stream[jj]);
pasedStreams += StreamSize;
}
offset += pasedStreams;
}
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
printf("Exiting streaming... \n");
hipEventElapsedTime(&elapsedTime, start, stop);
TimesOut.knn_time = (fail==0) ? elapsedTime / 1000 : FLT_MAX;
TimeOut = TimesOut.knn_time;
#ifndef MEMTEST
printf("Bitonic Search: N: %d, Q: %d, streams: %d, time : %f\n", objects, numQueries, numStreams, TimeOut);
//printf("Computation Time: %f\n", (TimesOut.dst_time + TimesOut.srch_time) / 1000);
printf("Time elapsed knns with Bitonic Search: %f\n", TimeOut);
#else
printf("Data transfer: N: %d, Q: %d, time elapsed: %f\n", objects, numQueries, TimeOut);
#endif
/*
#ifndef MEMTEST
int ss = numStreams;
printf("ss; %d\n", ss);
if(ss*CorpusBlocks>1){
mergeResBitonic(outbuffDist, outbuffIdx, k, numQueries, ss*CorpusBlocks);
}
#endif
*/
memcpy(values, outbuffDist, k*numQueries*sizeof(knntype));
memcpy(indices, outbuffIdx, k*numQueries*sizeof(knntype));
for(int i=0; i<numStreams; i++){
hipStreamDestroy(stream[i]);
}
hipFree(d_dotB);
hipFree(distbuff);
hipFree(idxbuff);
hipHostFree(outbuffDist);
hipHostFree(outbuffIdx);
hipblasDestroy(handle);
hipFree(d_data);
hipFree(d_query);
hipFree(d_dotp);
hipFree(d_dist);
hipFree(d_labels);
hipEventDestroy(start);
hipEventDestroy(stop);
free(stream);
//hipDeviceReset();
return(TimeOut);
}
| e4dc9e27b0bb0001705a7fdff9cdbe1cc41db5d5.cu |
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include "cuda.h"
#include "cublas.h"
#include "cublas_v2.h"
#include "../include/shared_functions.h"
#if defined (__DOUBLE__) && (CUARCH<20)
#define BLOCKSIZE 256
#define SHARED_SIZE_LIMIT 512
#else
#define BLOCKSIZE 512
#define SHARED_SIZE_LIMIT 1024
#endif
#define BLOCKSIZE_Q 128
#define DIMENTIONS 128
#define SINGLE_STREAM_BLOCKS 4
#define FORCE_SINGLE_STREAM -5
#ifndef MEMTEST
__device__ void Comparator(knntype& keyA, knntype& valA, knntype& keyB, knntype& valB, int dir){
knntype t;
if( (keyA > keyB) == dir ){
t = keyA; keyA = keyB; keyB = t;
t = valA; valA = valB; valB = t;
}
}
__device__ void Comparator_elim(knntype& keyA, knntype& valA, knntype& keyB, knntype& valB, int dir){
if( (keyA > keyB) == dir ){
keyA = keyB;
valA = valB;
}
}
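/* bitonic_shared: each block loads one SHARED_SIZE_LIMIT-wide tile of its
   query's distance row into shared memory (padding past the end with
   FLT_MAX), first builds bitonic runs of length k, then repeatedly halves
   the tile with Comparator_elim followed by a length-k bitonic clean-up,
   and finally emits the tile's k smallest distances. The output direction
   alternates with blockIdx.x parity so consecutive k-blocks form bitonic
   sequences for the next reduction pass. */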
__global__ void bitonic_shared(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, int arrayLength, int objects, int queries, uint dir, int k, int qk, int idOffset){
__shared__ knntype s_key[SHARED_SIZE_LIMIT];
__shared__ knntype s_val[SHARED_SIZE_LIMIT];
int tid = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x;
SrcKey += blockIdx.y*arrayLength;
SrcVal += blockIdx.y*arrayLength;
DstKey += blockIdx.y*arrayLength;
DstVal += blockIdx.y*arrayLength;
knntype* SrcKey_ptr = SrcKey;
knntype* SrcVal_ptr = SrcVal;
knntype* DstKey_ptr = DstKey;
knntype* DstVal_ptr = DstVal;
SrcKey_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
SrcVal_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
DstKey_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
DstVal_ptr += blockIdx.x*SHARED_SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = (tid<objects) ? SrcKey_ptr[0] : FLT_MAX;
s_val[threadIdx.x + 0] = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x + idOffset;
s_key[threadIdx.x + (SHARED_SIZE_LIMIT >> 1)] = ((tid + (SHARED_SIZE_LIMIT>>1))<objects) ? SrcKey_ptr[(SHARED_SIZE_LIMIT >> 1)] : FLT_MAX;
s_val[threadIdx.x + (SHARED_SIZE_LIMIT >> 1)] = blockIdx.x * SHARED_SIZE_LIMIT + threadIdx.x + (SHARED_SIZE_LIMIT >> 1) + idOffset;
__syncthreads();
uint ddd;
uint pos;
//SHARED_SIZE_LIMIT
for(uint size = 2; size <= k; size <<= 1){
//Bitonic merge
ddd = dir ^ ((threadIdx.x & (size >> 1)) != 0);
for(uint stride = size >> 1; stride > 0; stride >>= 1){
__syncthreads();
pos = (threadIdx.x << 1) - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd);
}
}
for(int obj = SHARED_SIZE_LIMIT >> 1; obj >= k; obj >>= 1){
__syncthreads();
// End of first part
int bi = threadIdx.x >> qk;
int li = threadIdx.x & (k-1);
int pb = (obj >> qk) + ((bi + 1) & ((obj >> qk)-1));
int prt = (pb << qk) + li;
if(threadIdx.x<obj){
Comparator_elim(s_key[threadIdx.x], s_val[threadIdx.x], s_key[prt], s_val[prt], 1);
uint size = k;
ddd = dir ^ ( (threadIdx.x & (size >> 1)) != 0 );
for(int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd);
}
}
}
__syncthreads();
if(threadIdx.x < k){
DstKey[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_key[threadIdx.x] : s_key[k-threadIdx.x-1];
DstVal[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_val[threadIdx.x] : s_val[k-threadIdx.x-1];
}
}
__global__ void bitonic_shared2(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, int arrayLength, int objects, int queries, uint dir, int k, int qk){
__shared__ knntype s_key[SHARED_SIZE_LIMIT];
__shared__ knntype s_val[SHARED_SIZE_LIMIT];
int SIZE_LIMIT = blockDim.x << 1;
int tid = blockIdx.x * SIZE_LIMIT + threadIdx.x;
SrcKey += blockIdx.y*arrayLength;
SrcVal += blockIdx.y*arrayLength;
DstKey += blockIdx.y*arrayLength;
DstVal += blockIdx.y*arrayLength;
knntype* SrcKey_ptr = SrcKey;
knntype* SrcVal_ptr = SrcVal;
knntype* DstKey_ptr = DstKey;
knntype* DstVal_ptr = DstVal;
SrcKey_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
SrcVal_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
DstKey_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
DstVal_ptr += blockIdx.x*SIZE_LIMIT + threadIdx.x;
s_key[threadIdx.x + 0] = (tid<objects) ? SrcKey_ptr[0] : FLT_MAX;
s_val[threadIdx.x + 0] = (tid<objects) ? SrcVal_ptr[0] : FLT_MAX;
s_key[threadIdx.x + (SIZE_LIMIT >> 1)] = ((tid+(SIZE_LIMIT>>1))<objects) ? SrcKey_ptr[(SIZE_LIMIT >> 1)] : FLT_MAX;
s_val[threadIdx.x + (SIZE_LIMIT >> 1)] = ((tid+(SIZE_LIMIT>>1))<objects) ? SrcVal_ptr[(SIZE_LIMIT >> 1)] : FLT_MAX;
__syncthreads();
uint ddd;
uint pos;
for(int obj = SIZE_LIMIT >> 1; obj >= k; obj >>= 1){
__syncthreads();
// End of first part
int bi = threadIdx.x >> qk;
int li = threadIdx.x & (k-1);
int pb = (obj >> qk) + ((bi + 1) & ((obj >> qk)-1));
int prt = (pb << qk) + li;
if(threadIdx.x<obj){
Comparator_elim(s_key[threadIdx.x], s_val[threadIdx.x], s_key[prt], s_val[prt], 1);
uint size = k;
ddd = dir ^ ( (threadIdx.x & (size >> 1)) != 0 );
for(int stride = size / 2; stride > 0; stride >>= 1){
__syncthreads();
pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1));
Comparator(s_key[pos + 0], s_val[pos + 0], s_key[pos + stride], s_val[pos + stride], ddd);
}
}
}
__syncthreads();
if(threadIdx.x < k){
DstKey[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_key[threadIdx.x] : s_key[k-threadIdx.x-1];
DstVal[k*blockIdx.x + threadIdx.x] = (blockIdx.x & 1) == 0 ? s_val[threadIdx.x] : s_val[k-threadIdx.x-1];
}
}
__global__ void relloc(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, int objects, int k){
DstKey += blockIdx.x * k + threadIdx.x;
DstVal += blockIdx.x * k + threadIdx.x;
SrcKey += blockIdx.x * objects + threadIdx.x;
SrcVal += blockIdx.x * objects + threadIdx.x;
if(threadIdx.x < k){
DstKey[0] = SrcKey[0];
DstVal[0] = SrcVal[0];
}
}
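/* BitonicSelect: host-side driver of the truncated bitonic select. The
   first bitonic_shared pass reduces every SHARED_SIZE_LIMIT tile to its k
   best candidates; subsequent bitonic_shared2 passes ping-pong between the
   Src and buff arrays, shrinking the candidate list by a factor of
   blockSize/k per pass until only k candidates per query survive; relloc
   then compacts each query's k results into the destination arrays. */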
void BitonicSelect(knntype *DstKey, knntype *DstVal, knntype *SrcKey, knntype *SrcVal, knntype *buffkey, knntype *buffval, int objects, int queries, int k, int qk, CUstream str, int streamId){
int numObjects = objects;
knntype *tmpkey1 = SrcKey;
knntype *tmpkey2 = buffkey;
knntype *tmpval1 = SrcVal;
knntype *tmpval2 = buffval;
knntype *tmpV;
knntype *tmpK;
dim3 threads(SHARED_SIZE_LIMIT / 2, 1);
int grd = (objects & (SHARED_SIZE_LIMIT-1)) ? objects / SHARED_SIZE_LIMIT + 1 : objects / SHARED_SIZE_LIMIT;
dim3 grid(grd, queries);
int idOffset = streamId*objects;
bitonic_shared<<<grid, threads, 0, str>>>(buffkey, buffval, SrcKey, SrcVal, numObjects, objects, queries, 1, k, qk, idOffset);
objects = grd*k;
int robjects = objects;
objects = (objects & (SHARED_SIZE_LIMIT-1)) ? (objects / SHARED_SIZE_LIMIT + 1)*SHARED_SIZE_LIMIT : (objects / SHARED_SIZE_LIMIT)*SHARED_SIZE_LIMIT;
while(robjects > k){
int blockSize = SHARED_SIZE_LIMIT<objects ? SHARED_SIZE_LIMIT : objects;
dim3 threadsp(blockSize/2, 1);
dim3 gridp(objects / blockSize, queries);
bitonic_shared2<<<gridp, threadsp, 0, str>>>(tmpkey1, tmpval1, tmpkey2, tmpval2, numObjects, robjects, queries, 1, k, qk);
tmpK = tmpkey1; tmpkey1 = tmpkey2; tmpkey2 = tmpK;
tmpV = tmpval1; tmpval1 = tmpval2; tmpval2 = tmpV;
objects = k*(objects / blockSize);
robjects = objects;
objects = (objects & (SHARED_SIZE_LIMIT-1)) ? (objects / SHARED_SIZE_LIMIT + 1)*SHARED_SIZE_LIMIT : (objects / SHARED_SIZE_LIMIT)*SHARED_SIZE_LIMIT;
}
dim3 threads_relloc(k, 1);
dim3 grid_relloc(queries,1);
relloc<<<grid_relloc, threads_relloc, 0, str>>>(DstKey, DstVal, tmpkey2, tmpval2, numObjects, k);
}
__global__ void initialize_index_B(knntype* data, int objects, int numQueries){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid<objects){
#pragma unroll 2
for(int i=0; i<numQueries; i++){
data[i*objects + tid] = tid;
}
}
}
/* Test function test function currently no used */
extern "C" void cuknnsBitonic(knntype *dist, knntype *data, knntype *query, knntype *index, knntype *dotp, knntype *d_dotB, knntype *distbuff, knntype *idxbuff, int objects, int attributes, int numQueries, int k, cublasHandle_t handle, CUstream str, knntimes* times, int strId){
float tmp_time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int qk = (int)(log((float)k) / log(2.0));
dim3 inThreads(BLOCKSIZE, 1);
int block = (objects & (BLOCKSIZE-1)) ? objects / BLOCKSIZE + 1 : objects / BLOCKSIZE;
dim3 inGrid(block, 1);
cudaEventRecord(start, 0);
initialize_index_B<<<inGrid, inThreads, 0, str>>>(index, objects, numQueries);
pdist_N(dist, data, query, dotp, objects, attributes, numQueries, handle, str);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_time, start, stop);
times->dst_time += tmp_time;
cudaEventRecord(start, 0);
BitonicSelect(dist, index, dist, index, distbuff, idxbuff, objects, numQueries, k, qk, str, strId);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_time, start, stop);
times->srch_time += tmp_time;
switch(attributes){
case 50:
//dot4_50<<<numQueries, 50, 0, str>>>(d_dotB, query);
break;
case 128:
//dot4<<<numQueries, DIMENTIONS, 0, str>>>(d_dotB, query);
break;
case 1024:
//dot4_1024<<<numQueries, 512, 0, str>>>(d_dotB, query);
break;
case 2048:
#if defined (CUARCH) && (CUARCH>=20)
//dot4_2048<<<numQueries, 1024, 0, str>>>(d_dotB, query);
#endif
#if defined(CUARCH) && (CUARCH<20)
//dot4_2048<<<numQueries, 512, 0, str>>>(d_dotB, query);
#endif
break;
}
dim3 threads2(k, 1);
dim3 grid2(numQueries, 1);
dot3<<<grid2, threads2, 0, str>>>(dist, d_dotB);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
/* KNNS using TBiS */
extern "C" void cuknnsBitonicSTR(knntype *dist, knntype *data, knntype *query, knntype *index, knntype *dotp, knntype *d_dotB, knntype *distbuff, knntype *idxbuff, int objects, int attributes, int numQueries, int k, cublasHandle_t handle, CUstream str, knntimes* times, int strId, distFunctParam *distFunc){
float tmp_time;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int qk = (int)(log((float)k) / log(2.0));
dim3 inThreads(BLOCKSIZE, 1);
int block = (objects & (BLOCKSIZE-1)) ? objects / BLOCKSIZE + 1 : objects / BLOCKSIZE;
dim3 inGrid(block, 1);
cudaEventRecord(start, str);
distFunc->distF(dist, data, query, dotp, objects, attributes, numQueries, handle, str, &distFunc->dotP);
cudaEventRecord(stop, str);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_time, start, stop);
times->dst_time = tmp_time;
cudaEventRecord(start, str);
BitonicSelect(dist, index, dist, index, distbuff, idxbuff, objects, numQueries, k, qk, str, strId);
cudaEventRecord(stop, str);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&tmp_time, start, stop);
times->srch_time = tmp_time;
dim3 threads2(k, 1);
dim3 grid2(numQueries, 1);
dot3<<<grid2, threads2, 0, str>>>(dist, d_dotB);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
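/* mergeResBitonic: CPU-side merge of the per-stream / per-corpus-block
   top-k lists. For every query it tracks the current maximum of the first
   k-list and scans the remaining lists, replacing that maximum whenever a
   smaller distance is found and rescanning for the new maximum -- a simple
   O(numStreams * k^2) selection per query, cheap for small k. */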
void mergeResBitonic(knntype *data, knntype *idx, int k, int Q, int numStreams){
for(int s=0; s<Q; s++){
knntype cmax = -FLT_MAX;
int maxid = 0;
for(int i=0; i<k; i++){
knntype tmp = data[s*k+i];
if(tmp>cmax){
cmax = tmp;
maxid = i;
}
}
for(int i=1; i<numStreams; i++){
for(int j=0; j<k; j++){
knntype tmp = data[s*k + i*Q*k + j];
if(tmp<cmax){
data[s*k + maxid] = tmp;
idx[s*k + maxid] = idx[s*k + i*Q*k + j];
//max = data[s*k];
cmax = -FLT_MAX;
for(int p=0; p<k; p++){
knntype tmp2 = data[s*k + p];
if(tmp2>cmax){
cmax = tmp2;
maxid = p;
}
}
}
}
}
}
}
#endif
#ifndef MEMTEST
extern "C" double gpuknnsBitonic(knntype *query, knntype *data, knntype *values, knntype *indices, int objects, int numQueries, int attributes, int k, int numStreams){
#else
extern "C" double gpuknnsBitonicMemTest(knntype *query, knntype *data, knntype *values, knntype *indices, int objects, int numQueries, int attributes, int k, int numStreams){
#endif
knntype *d_data, *d_query;
knntype *d_dotp, *d_dist, *d_labels;
size_t memory_free, memory_total;
double TimeOut;
knntimes TimesOut;
cuMemGetInfo(&memory_free, &memory_total);
TimesOut.dst_time = 0;
TimesOut.srch_time = 0;
TimesOut.knn_time = 0;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cublasHandle_t handle;
cublasCreate(&handle);
cudaMalloc((void**)&d_query, numQueries*attributes*sizeof(knntype));
/* initialize distance functions */
distFunctParam dstfunc;
dstfunc.distF = pdist_NT;
switch(attributes){
case 50:
dstfunc.dotP.dotQ = &dot4_50;
dstfunc.dotP.nthreadsQ = 32;
dstfunc.dotP.dotCrp = &crpdot2_50;
dstfunc.dotP.nthreadsCrp = 32;
dstfunc.dotP.externShared = 0;
break;
case 128:
dstfunc.dotP.dotQ = &dot4;
dstfunc.dotP.nthreadsQ = 128;
dstfunc.dotP.dotCrp = &crpdot2_128;
dstfunc.dotP.nthreadsCrp = 128;
dstfunc.dotP.externShared = 0;
break;
case 1024:
dstfunc.dotP.dotQ = &dot4_1024;
dstfunc.dotP.nthreadsQ = 512;
dstfunc.dotP.dotCrp = &crpdot2_1024;
dstfunc.dotP.nthreadsCrp = 512;
dstfunc.dotP.externShared = 0;
break;
case 2048:
dstfunc.dotP.dotQ = &dot4_2048;
dstfunc.dotP.nthreadsQ = 512;
dstfunc.dotP.dotCrp = &crpdot2_2048;
dstfunc.dotP.nthreadsCrp = 512;
dstfunc.dotP.externShared = 0;
break;
default:
dstfunc.dotP.dotQ = &dot4_gen;
dstfunc.dotP.nthreadsQ = ceil(log2((float)attributes));
dstfunc.dotP.dotCrp = &crpdot2_gen;
dstfunc.dotP.nthreadsCrp = dstfunc.dotP.nthreadsQ;
dstfunc.dotP.externShared = dstfunc.dotP.nthreadsQ;
}
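/* The dispatch above binds dimension-specialized dot-product kernels for
   the common attribute counts (50/128/1024/2048); any other dimensionality
   falls back to the generic variants, whose thread count is derived from
   log2(attributes) and which use dynamic shared memory (externShared). */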
/* calculate the number of streams */
int memory_req = (4*numQueries + attributes)*sizeof(knntype);
int maxObjects = (int)ceil((float)memory_free*0.9 / (float)memory_req);
maxObjects = (1 << (int)floor(log((double)maxObjects)/log(2)));
#ifndef MEMTEST
int reqStreams = (maxObjects < objects) ? 2 : 1;
#endif
#ifdef MEMTEST
int reqStreams = 1;
#endif
int CorpusBlocks = (int)ceil((float)objects/(float)maxObjects);
int blocksPstream = (numStreams == FORCE_SINGLE_STREAM) ? SINGLE_STREAM_BLOCKS : 1;
numStreams = (numStreams == FORCE_SINGLE_STREAM) ? 1 : max(numStreams, reqStreams);
maxObjects = min(maxObjects, objects);
/*Initialize Streams */
CUstream *stream = (CUstream*)malloc(numStreams*sizeof(CUstream));
for(int i=0; i<numStreams; i++){
cuStreamCreate(&stream[i], 0);
}
/* Initialize memory */
knntype *outbuffDist, *outbuffIdx;
cudaMallocHost((void**)&outbuffDist, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
cudaMallocHost((void**)&outbuffIdx, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
cudaMalloc((void**)&d_data, maxObjects*attributes*sizeof(knntype));
cudaMalloc((void**)&d_dotp, maxObjects*sizeof(knntype));
cudaMalloc((void**)&d_dist, maxObjects*numQueries*sizeof(knntype));
cudaMalloc((void**)&d_labels, maxObjects*numQueries*sizeof(knntype));
knntype *d_dotB, *distbuff, *idxbuff;
cudaMalloc((void**)&d_dotB, numQueries*sizeof(knntype));
cudaMalloc((void**)&distbuff, numQueries*maxObjects*sizeof(knntype));
cudaMalloc((void**)&idxbuff, numQueries*maxObjects*sizeof(knntype));
cudaMemcpyAsync(d_query, query, numQueries*attributes*sizeof(knntype), cudaMemcpyHostToDevice, stream[0]);
/*compute the dot product of the queries*/
dstfunc.dotP.dotQ<<<numQueries, dstfunc.dotP.nthreadsQ, dstfunc.dotP.externShared, stream[0]>>>(d_dotB, d_query, attributes);
int fail = 0;
//float TotalDstTime = 0;
//float TotalSeachTime = 0;
//float TotalCompTime = 0;
cudaEventRecord(start, 0);
for(int ii=0, c = 0; ii<objects; ii+=maxObjects, c++){
int CorpusBlockSize = min(maxObjects, objects-ii);
int StreamSize = CorpusBlockSize / numStreams;
for(int jj=0; jj<numStreams; jj++){
cudaMemcpyAsync(d_data + jj*StreamSize*attributes, data + ii*attributes + jj*StreamSize*attributes, StreamSize*attributes*sizeof(knntype), cudaMemcpyHostToDevice, stream[jj]);
}
#ifndef MEMTEST
for(int jj=0; jj<numStreams; jj++){
cuknnsBitonicSTR(d_dist + jj*StreamSize*numQueries, d_data + jj*StreamSize*attributes, d_query, d_labels + jj*StreamSize*numQueries, d_dotp+jj*StreamSize, d_dotB, distbuff + jj*StreamSize*numQueries, idxbuff + jj*StreamSize*numQueries, StreamSize, attributes, numQueries, k, handle, stream[jj], &TimesOut, c*numStreams + jj, &dstfunc);
}
#endif
for(int jj=0; jj<numStreams; jj++){
cudaMemcpyAsync(outbuffDist + jj*k*numQueries + c*numStreams*k*numQueries, d_dist + jj*StreamSize*numQueries, k*numQueries*sizeof(knntype), cudaMemcpyDeviceToHost, stream[jj]);
cudaMemcpyAsync(outbuffIdx + jj*k*numQueries + c*numStreams*k*numQueries, d_labels + jj*StreamSize*numQueries, k*numQueries*sizeof(knntype), cudaMemcpyDeviceToHost, stream[jj]);
}
}
cuCtxSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
TimesOut.knn_time = (fail==0) ? elapsedTime / 1000 : FLT_MAX;
TimeOut = TimesOut.knn_time;
//printf("Time Elapsed: %f\n", TimeOut);
int ss = numStreams;
if(ss*CorpusBlocks>1){
mergeResBitonic(outbuffDist, outbuffIdx, k, numQueries, ss*CorpusBlocks);
}
memcpy(values, outbuffDist, k*numQueries*sizeof(knntype));
memcpy(indices, outbuffIdx, k*numQueries*sizeof(knntype));
for(int i=0; i<numStreams; i++){
cuStreamDestroy(stream[i]);
}
cudaFree(d_dotB);
cudaFree(distbuff);
cudaFree(idxbuff);
cudaFreeHost(outbuffDist);
cudaFreeHost(outbuffIdx);
cublasDestroy(handle);
cudaFree(d_data);
cudaFree(d_query);
cudaFree(d_dotp);
cudaFree(d_dist);
cudaFree(d_labels);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(stream);
return(TimeOut);
}
/* Under development */
#ifndef MEMTEST
extern "C" float gpuknnLSH(knntype *query, knntype *data, knntype *values, knntype *indices, knntype *dp, int objects, int numQueries, int attributes, int k, int numStreams, int *bucketSize, int *query_offsets, int numClusters, int *query_sizes){
#else
extern "C" float gpuknnLSHmemtest(knntype *query, knntype *data, knntype *values, knntype *indices, knntype *dp, int objects, int numQueries, int attributes, int k, int numStreams, int *bucketSize, int *query_offsets, int numClusters, int *query_sizes){
#endif
knntype *d_data, *d_query;
knntype *d_dotp, *d_dist, *d_labels;
size_t memory_free, memory_total;
double TimeOut;
knntimes TimesOut;
cuMemGetInfo(&memory_free, &memory_total);
TimesOut.dst_time = 0;
TimesOut.srch_time = 0;
TimesOut.knn_time = 0;
float elapsedTime;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cublasHandle_t handle;
cublasCreate(&handle);
cudaMalloc((void**)&d_query, numQueries*attributes*sizeof(knntype));
/* calculate the number of streams */
int memory_req = (4*numQueries + attributes)*sizeof(knntype);
//int memory_req = (4*numStreams + attributes)*sizeof(knntype);
int maxObjects = (int)ceil((float)memory_free*0.9 / (float)memory_req);
maxObjects = (1 << (int)floor(log((double)maxObjects)/log(2)));
/*
#ifndef MEMTEST
int reqStreams = (maxObjects < objects) ? 2 : 1;
#endif
#ifdef MEMTEST
int reqStreams = 1;
#endif
*/
int CorpusBlocks = (int)ceil((float)objects/(float)maxObjects);
//int StreamingEnable = 1;
int blocksPstream = (numStreams == FORCE_SINGLE_STREAM) ? SINGLE_STREAM_BLOCKS : 1;
numStreams = (numStreams == FORCE_SINGLE_STREAM) ? 1 : numStreams;
//printf("General inter:\n");
//printf("Free memory: %d\n", memory_free);
//printf("Objects : %d\n", objects);
//printf("max Corpus size Size: %d\n", maxObjects);
//printf("Blocks of the Corpus: %d\n", CorpusBlocks);
//printf("numStreams = %d\n", numStreams);
//printf("Blocks Per Stream: %d\n", blocksPstream);
//maxObjects = min(maxObjects, objects);
//maxObjects = min(maxObjects, (int)ceil(objects/1024)*1024);
printf("New maxObjets: %d\n", maxObjects);
/*Initialize Streams */
CUstream *stream = (CUstream*)malloc(numStreams*sizeof(CUstream));
for(int i=0; i<numStreams; i++){
cuStreamCreate(&stream[i], 0);
}
/* Initialize memory */
knntype *outbuffDist, *outbuffIdx;
cudaMallocHost((void**)&outbuffDist, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
cudaMallocHost((void**)&outbuffIdx, blocksPstream*numStreams*CorpusBlocks*numQueries*k*sizeof(knntype));
//cudaMallocHost((void**)&outbuffDist, numQueries*k*sizeof(knntype));
//cudaMallocHost((void**)&outbuffIdx, numQueries*k*sizeof(knntype));
cudaMalloc((void**)&d_data, maxObjects*attributes*sizeof(knntype));
cudaMalloc((void**)&d_dotp, maxObjects*sizeof(knntype));
cudaMalloc((void**)&d_dist, maxObjects*numQueries*sizeof(knntype));
cudaMalloc((void**)&d_labels, maxObjects*numQueries*sizeof(knntype));
//cudaMalloc((void**)&d_dist, maxObjects*numStreams*sizeof(knntype));
//cudaMalloc((void**)&d_labels, maxObjects*numStreams*sizeof(knntype));
knntype *d_dotB, *distbuff, *idxbuff;
cudaMalloc((void**)&d_dotB, numQueries*sizeof(knntype));
cudaMalloc((void**)&distbuff, numQueries*maxObjects*sizeof(knntype));
cudaMalloc((void**)&idxbuff, numQueries*maxObjects*sizeof(knntype));
//cudaMalloc((void**)&distbuff, numStreams*maxObjects*sizeof(knntype));
//cudaMalloc((void**)&idxbuff, numStreams*maxObjects*sizeof(knntype));
cudaMemcpyAsync(d_query, query, numQueries*attributes*sizeof(knntype), cudaMemcpyHostToDevice, stream[0]);
/*compute the dot product of the queries*/
switch(attributes){
case 128:
//dot4<<<numQueries, DIMENTIONS, 0, stream[0]>>>(d_dotB, d_query);
break;
case 1024:
//dot4_1024<<<numQueries, 512, 0, stream[0]>>>(d_dotB, d_query);
break;
case 2048:
#if defined (CUARCH) && (CUARCH>=20)
//dot4_2048<<<numQueries, 1024, 0, stream[0]>>>(d_dotB, d_query);
#endif
#if defined(CUARCH) && (CUARCH<20)
//dot4_2048<<<numQueries, 512, 0, stream[0]>>>(d_dotB, d_query);
#endif
break;
}
//printf("Starting Streaming\n");
int fail = 0;
//float TotalDstTime = 0;
//float TotalSeachTime = 0;
//float TotalCompTime = 0;
cudaEventRecord(start, 0);
int offset = 0;
for(int ii=0, c = 0; ii<numClusters; ii+=numStreams, c++){
//int CorpusBlockSize = min(maxObjects, objects-ii);
int StreamSize = bucketSize[ii];
//int offset = redSizes[ii];
int pasedStreams = 0;
int mOffset = (offset & ((objects>>1)-1));
//for(int jj=0; jj<numStreams, pasedStreams<maxObjects; jj++){
for(int jj=0; jj<numStreams; jj++){
StreamSize = bucketSize[ii+jj];
//printf("Query: %d, stream: %d, pasedStreams:%d, offset: %d, moffset: %d\n", ii+jj, jj, pasedStreams, offset+pasedStreams, mOffset);
cudaMemcpyAsync(d_data + pasedStreams*attributes, data + mOffset*attributes + pasedStreams*attributes, StreamSize*attributes*sizeof(knntype), cudaMemcpyHostToDevice, stream[jj]);
cudaMemcpyAsync(d_dotp + pasedStreams, dp + mOffset + pasedStreams, StreamSize*sizeof(knntype), cudaMemcpyHostToDevice, stream[jj]);
pasedStreams += StreamSize;
}
#ifndef MEMTEST
pasedStreams = 0;
//for(int jj=0; jj<numStreams, pasedStreams<maxObjects; jj++){
for(int jj=0; jj<numStreams; jj++){
StreamSize = bucketSize[ii+jj];
//cuknnsBitonicSTR(d_dist + pasedStreams*query_offsets[ii+jj], d_data + pasedStreams*attributes, d_query+query_offsets[ii+jj]*attributes, d_labels + pasedStreams*query_offsets[ii+jj], d_dotp+pasedStreams, d_dotB + query_offsets[ii+jj], distbuff + pasedStreams*query_offsets[ii+jj], idxbuff + pasedStreams*query_offsets[ii+jj], StreamSize, attributes, query_sizes[ii+jj], k, handle, stream[jj], &TimesOut, jj);
pasedStreams += StreamSize;
}
#endif
pasedStreams = 0;
//for(int jj=0; jj<numStreams, pasedStreams<maxObjects; jj++){
for(int jj=0; jj<numStreams; jj++){
StreamSize = bucketSize[ii+jj];
cudaMemcpyAsync(outbuffDist + k*query_offsets[ii+jj], d_dist + pasedStreams*query_offsets[ii+jj], query_sizes[ii+jj]*k*sizeof(knntype), cudaMemcpyDeviceToHost, stream[jj]);
cudaMemcpyAsync(outbuffIdx + k*query_offsets[ii+jj], d_labels + pasedStreams*query_offsets[ii+jj], query_sizes[ii+jj]*k*sizeof(knntype), cudaMemcpyDeviceToHost, stream[jj]);
pasedStreams += StreamSize;
}
offset += pasedStreams;
}
cuCtxSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
printf("Exiting streaming... \n");
cudaEventElapsedTime(&elapsedTime, start, stop);
TimesOut.knn_time = (fail==0) ? elapsedTime / 1000 : FLT_MAX;
TimeOut = TimesOut.knn_time;
#ifndef MEMTEST
printf("Bitonic Search: N: %d, Q: %d, streams: %d, time : %f\n", objects, numQueries, numStreams, TimeOut);
//printf("Computation Time: %f\n", (TimesOut.dst_time + TimesOut.srch_time) / 1000);
printf("Time elapsed knns with Bitonic Search: %f\n", TimeOut);
#else
printf("Data transfer: N: %d, Q: %d, time elapsed: %f\n", objects, numQueries, TimeOut);
#endif
/*
#ifndef MEMTEST
int ss = numStreams;
printf("ss; %d\n", ss);
if(ss*CorpusBlocks>1){
mergeResBitonic(outbuffDist, outbuffIdx, k, numQueries, ss*CorpusBlocks);
}
#endif
*/
memcpy(values, outbuffDist, k*numQueries*sizeof(knntype));
memcpy(indices, outbuffIdx, k*numQueries*sizeof(knntype));
for(int i=0; i<numStreams; i++){
cuStreamDestroy(stream[i]);
}
cudaFree(d_dotB);
cudaFree(distbuff);
cudaFree(idxbuff);
cudaFreeHost(outbuffDist);
cudaFreeHost(outbuffIdx);
cublasDestroy(handle);
cudaFree(d_data);
cudaFree(d_query);
cudaFree(d_dotp);
cudaFree(d_dist);
cudaFree(d_labels);
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(stream);
//cudaDeviceReset();
return(TimeOut);
}
|
8016edf2792574c2b7c2b1c4dfb3800c5b8554c8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* GridTools
*
* Copyright (c) 2014-2023, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <gridtools/storage/builder.hpp>
#include <gridtools/storage/gpu.hpp>
#include <multiplet.hpp>
using namespace gridtools;
constexpr int c_x = 3 /* < 32 for this test */, c_y = 5, c_z = 7;
template <class View>
__global__ void mul2(View s) {
auto &&lengths = s.lengths();
bool expected_dims = lengths[0] == c_x && lengths[1] == c_y && lengths[2] == c_z;
bool expected_size = s.length() <= 32 * c_y * c_z && s.length() >= c_x * c_y * c_z;
s(0, 0, 0) *= 2 * expected_dims * expected_size;
s(1, 0, 0) *= 2 * expected_dims * expected_size;
}
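// The device-side checks are folded into the data on purpose: expected_dims
// and expected_size are 0/1 bools, so a failing check multiplies the stored
// values by zero and the host-side EXPECT_EQ(100/120, ...) below fails
// without needing device asserts.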
TEST(DataViewTest, Simple) {
// create and allocate a data_store
auto ds = storage::builder<storage::gpu>.type<double>().layout<2, 1, 0>().dimensions(c_x, c_y, c_z)();
// create a rw view and fill with some data
auto dv = ds->host_view();
dv(0, 0, 0) = 50;
dv(1, 0, 0) = 60;
// check if interface works
EXPECT_TRUE(ds->lengths() == dv.lengths());
// check if data is there
EXPECT_EQ(50, dv(0, 0, 0));
EXPECT_EQ(60, dv(1, 0, 0));
// create a ro view
auto dvro = ds->const_host_view();
// check if data is the same
EXPECT_EQ(50, dvro(0, 0, 0));
EXPECT_EQ(60, dvro(1, 0, 0));
hipLaunchKernelGGL(mul2, dim3(1), dim3(1), 0, 0, ds->target_view());
dvro = ds->const_host_view();
// check if data is the same
EXPECT_EQ(100, dvro(0, 0, 0));
EXPECT_EQ(120, dvro(1, 0, 0));
}
| 8016edf2792574c2b7c2b1c4dfb3800c5b8554c8.cu | /*
* GridTools
*
* Copyright (c) 2014-2023, ETH Zurich
* All rights reserved.
*
* Please, refer to the LICENSE file in the root directory.
* SPDX-License-Identifier: BSD-3-Clause
*/
#include <gtest/gtest.h>
#include <gridtools/storage/builder.hpp>
#include <gridtools/storage/gpu.hpp>
#include <multiplet.hpp>
using namespace gridtools;
constexpr int c_x = 3 /* < 32 for this test */, c_y = 5, c_z = 7;
template <class View>
__global__ void mul2(View s) {
auto &&lengths = s.lengths();
bool expected_dims = lengths[0] == c_x && lengths[1] == c_y && lengths[2] == c_z;
bool expected_size = s.length() <= 32 * c_y * c_z && s.length() >= c_x * c_y * c_z;
s(0, 0, 0) *= 2 * expected_dims * expected_size;
s(1, 0, 0) *= 2 * expected_dims * expected_size;
}
TEST(DataViewTest, Simple) {
// create and allocate a data_store
auto ds = storage::builder<storage::gpu>.type<double>().layout<2, 1, 0>().dimensions(c_x, c_y, c_z)();
// create a rw view and fill with some data
auto dv = ds->host_view();
dv(0, 0, 0) = 50;
dv(1, 0, 0) = 60;
// check if interface works
EXPECT_TRUE(ds->lengths() == dv.lengths());
// check if data is there
EXPECT_EQ(50, dv(0, 0, 0));
EXPECT_EQ(60, dv(1, 0, 0));
// create a ro view
auto dvro = ds->const_host_view();
// check if data is the same
EXPECT_EQ(50, dvro(0, 0, 0));
EXPECT_EQ(60, dvro(1, 0, 0));
mul2<<<1, 1>>>(ds->target_view());
dvro = ds->const_host_view();
// check if data is the same
EXPECT_EQ(100, dvro(0, 0, 0));
EXPECT_EQ(120, dvro(1, 0, 0));
}
|
dfe9f5eea6119d5547d46a7524ea9718f3765313.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ppm.h"
const int BLUR_SIZE=10;
//used to blur a 2d color ppm image
__global__
void blurKernel(int* in, int* out,int w,int h){
int curpix=blockIdx.x*blockDim.x+threadIdx.x;
int row=curpix / w;
int col=curpix % w;
if ( row>=h ) return;
int pixr=0;
int pixg=0;
int pixb=0;
int pixels=0;
for (int br=-BLUR_SIZE; br<=BLUR_SIZE;br++){
for (int bc=-BLUR_SIZE; bc<=BLUR_SIZE;bc++){
int currow=row+br;
int curcol=col+bc;
if(currow>=0 && currow<h && curcol>=0 && curcol <w ){
pixels++;
int pdex= 3*(currow*w+curcol);
pixr+=in[pdex];
pixg+=in[pdex+1];
pixb+=in[pdex+2];
}
}
}
int dex=3*(row*w+col);
out[dex]= round((float)pixr/pixels);
out[dex+1]= round((float)pixg/pixels);
out[dex+2]= round((float)pixb/pixels);
}
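// The kernel computes a (2*BLUR_SIZE+1)^2 box average per channel; `pixels`
// counts only the in-bounds neighbours, so border pixels are normalized by
// the actual window size instead of the full box.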
int main(){
ppm football("football.ppm");
int numpixels=football.height*football.width;
int size=3*numpixels;
int arsize=sizeof(int)*size;
std::cout <<"Size is: "<< size;
int* d_football_data;
int* d_bfootball_data;
hipMalloc((void**)&d_football_data,arsize);
hipMalloc((void**)&d_bfootball_data,arsize );
hipMemcpy(d_football_data,football.data,arsize,hipMemcpyHostToDevice);
hipMemcpy(d_bfootball_data,football.data,arsize,hipMemcpyHostToDevice);
// integer ceil-division: ceil(numpixels/256) was integer division, dropping the last partial block
hipLaunchKernelGGL(blurKernel, dim3((numpixels + 255) / 256), dim3(256), 0, 0, d_football_data, d_bfootball_data, football.width, football.height);
ppm bfootball(football);
hipMemcpy(bfootball.data,d_bfootball_data,arsize,hipMemcpyDeviceToHost);
bfootball.write("bfootball.ppm");
hipFree(d_football_data);
hipFree(d_bfootball_data);
}
dfe9f5eea6119d5547d46a7524ea9718f3765313.cu | #include "ppm.h"
#include <iostream>
const int BLUR_SIZE=10;
//used to blur a 2d color ppm image
__global__
void blurKernel(int* in, int* out,int w,int h){
int curpix=blockIdx.x*blockDim.x+threadIdx.x;
int row=curpix / w;
int col=curpix % w;
if ( row>=h ) return;
int pixr=0;
int pixg=0;
int pixb=0;
int pixels=0;
for (int br=-BLUR_SIZE; br<=BLUR_SIZE;br++){
for (int bc=-BLUR_SIZE; bc<=BLUR_SIZE;bc++){
int currow=row+br;
int curcol=col+bc;
if(currow>=0 && currow<h && curcol>=0 && curcol <w ){
pixels++;
int pdex= 3*(currow*w+curcol);
pixr+=in[pdex];
pixg+=in[pdex+1];
pixb+=in[pdex+2];
}
}
}
int dex=3*(row*w+col);
out[dex]= round((float)pixr/pixels);
out[dex+1]= round((float)pixg/pixels);
out[dex+2]= round((float)pixb/pixels);
}
int main(){
ppm football("football.ppm");
int numpixels=football.height*football.width;
int size=3*numpixels;
int arsize=sizeof(int)*size;
std::cout << "Size is: " << size << "\n";
int* d_football_data;
int* d_bfootball_data;
cudaMalloc((void**)&d_football_data,arsize);
cudaMalloc((void**)&d_bfootball_data,arsize );
cudaMemcpy(d_football_data,football.data,arsize,cudaMemcpyHostToDevice);
cudaMemcpy(d_bfootball_data,football.data,arsize,cudaMemcpyHostToDevice);
// integer ceil-division: ceil(numpixels/256) was integer division, dropping the last partial block
blurKernel<<<(numpixels + 255) / 256, 256>>>(d_football_data, d_bfootball_data, football.width, football.height);
ppm bfootball(football);
cudaMemcpy(bfootball.data,d_bfootball_data,arsize,cudaMemcpyDeviceToHost);
bfootball.write("bfootball.ppm");
cudaFree(d_football_data);
cudaFree(d_bfootball_data);
}
|