Sarim-Hash committed on
Commit 37293b2 · verified · 1 Parent(s): d3fab4b

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitignore +1 -0
  2. cuda_code/04matrix_transposed.cu +77 -0
  3. cuda_code/11-convolution.cu +50 -0
  4. cuda_code/1d1.cu +105 -0
  5. cuda_code/28-pyrup-pyrdown.cu +90 -0
  6. cuda_code/2d_xyWENOADV_p.cu +169 -0
  7. cuda_code/2mm.cu +244 -0
  8. cuda_code/ACNetHDNL3.cu +1653 -0
  9. cuda_code/Activation_27.cu +587 -0
  10. cuda_code/AimetOpUtilsGpu_4.cu +76 -0
  11. cuda_code/ArrayManip_1.cu +70 -0
  12. cuda_code/BE_L1D_HIT.cu +179 -0
  13. cuda_code/BE_L1D_MISS_L2D_HIT_13.cu +189 -0
  14. cuda_code/BE_L1D_MISS_L2D_HIT_19.cu +171 -0
  15. cuda_code/BE_MEM_SHRD_Acss.cu +174 -0
  16. cuda_code/BE_SP_FP_DIV_1.cu +220 -0
  17. cuda_code/BE_SP_INT_ADD_l32_1.cu +184 -0
  18. cuda_code/BE_SP_INT_MUL_3.cu +225 -0
  19. cuda_code/BK5_1.cu +812 -0
  20. cuda_code/BatchNormalization_8.cu +99 -0
  21. cuda_code/BlockSelectFloat_1.cu +122 -0
  22. cuda_code/BounceBackNVEGPU_6.cu +103 -0
  23. cuda_code/COOtoCSR_2.cu +74 -0
  24. cuda_code/CPU_GPU_time.cu +110 -0
  25. cuda_code/CUAPI_Asyn_PoissonGravitySolver_8.cu +473 -0
  26. cuda_code/CUFLU_Shared_FullStepUpdate_1.cu +188 -0
  27. cuda_code/Col2Im_9.cu +207 -0
  28. cuda_code/CommunicatorGridGPU_1.cu +191 -0
  29. cuda_code/CompareEQKernel.cu +28 -0
  30. cuda_code/CompareGEKernel_3.cu +29 -0
  31. cuda_code/CompareGTKernel.cu +28 -0
  32. cuda_code/CompressKernel_8.cu +2022 -0
  33. cuda_code/ComputeProductKernel.cu +90 -0
  34. cuda_code/ConstraintEllipsoidGPU_1.cu +103 -0
  35. cuda_code/CopySurface.cu +52 -0
  36. cuda_code/Copy_15.cu +225 -0
  37. cuda_code/CudaAllocator_2.cu +343 -0
  38. cuda_code/CudaKernel.cu +12 -0
  39. cuda_code/CudaKernel_11.cu +105 -0
  40. cuda_code/CudnnMaxPool_2.cu +210 -0
  41. cuda_code/CustomPi.cu +42 -0
  42. cuda_code/DReductor.cu +130 -0
  43. cuda_code/DepthmapDenoiseWeightedHuber.cu +942 -0
  44. cuda_code/DepthwiseGEMM_64.cu +38 -0
  45. cuda_code/DeviceMemory_2.cu +650 -0
  46. cuda_code/Distance_14.cu +472 -0
  47. cuda_code/DistributionExponentialKernel.cu +37 -0
  48. cuda_code/DistributionExponentialKernel_1.cu +15 -0
  49. cuda_code/DnDHgels.cu +67 -0
  50. cuda_code/DnSXgels.cu +67 -0
.gitignore ADDED
@@ -0,0 +1 @@
+ push.sh
cuda_code/04matrix_transposed.cu ADDED
@@ -0,0 +1,77 @@
+ #include <assert.h>
+ #include <stdio.h>
+ #include "Error.h"
+
+ #define N 4
+
+ __global__ void transposedMatrixKernel(int* d_a, int* d_b) {
+     int i = threadIdx.x + blockDim.x * blockIdx.x;
+     int j = threadIdx.y + blockDim.y * blockIdx.y;
+
+     d_b[i * N + j] = d_a[j * N + i];
+ }
+
+ void onDevice(int h_a[][N], int h_b[][N]) {
+     // declare GPU memory pointers
+     int *d_a, *d_b;
+
+     const int ARRAY_BYTES = N * N * sizeof(int);
+
+     // allocate memory on the GPU
+     HANDLER_ERROR_ERR(cudaMalloc((void**)&d_a, ARRAY_BYTES));
+     HANDLER_ERROR_ERR(cudaMalloc((void**)&d_b, ARRAY_BYTES));
+
+     // copy data from the CPU to the GPU
+     HANDLER_ERROR_ERR(
+         cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice));
+     HANDLER_ERROR_ERR(
+         cudaMemcpy(d_b, h_b, ARRAY_BYTES, cudaMemcpyHostToDevice));
+
+     // execution configuration
+     dim3 GridBlocks(1, 1);
+     dim3 ThreadsBlocks(4, 4);
+
+     // run the kernel
+     transposedMatrixKernel<<<GridBlocks, ThreadsBlocks>>>(d_a, d_b);
+     HANDLER_ERROR_MSG("kernel panic!!!");
+
+     // copy data back from the GPU to the CPU
+     HANDLER_ERROR_ERR(
+         cudaMemcpy(h_b, d_b, ARRAY_BYTES, cudaMemcpyDeviceToHost));
+
+     // free GPU memory
+     HANDLER_ERROR_ERR(cudaFree(d_a));
+     HANDLER_ERROR_ERR(cudaFree(d_b));
+ }
+
+ void test(int h_a[][N], int h_b[][N]) {
+     // test result
+     for (int i = 0; i < N; i++) {
+         for (int j = 0; j < N; j++) {
+             assert(h_a[j][i] == h_b[i][j]);
+         }
+     }
+
+     printf("-: successful execution :-\n");
+ }
+
+ void onHost() {
+     int i, j, k = 0;
+     int h_a[N][N], h_b[N][N];
+
+     for (i = 0; i < N; i++) {
+         for (j = 0; j < N; j++) {
+             h_a[i][j] = k;
+             h_b[i][j] = 0;
+             k++;
+         }
+     }
+
+     // call device configuration
+     onDevice(h_a, h_b);
+     test(h_a, h_b);
+ }
+
+ int main() {
+     onHost();
+ }
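The file above hard-codes a single 4x4 block, so the transpose only covers N = 4. An illustrative, self-contained variant (not part of this commit) showing how the grid would be derived for a larger N, with a bounds check for ragged edge blocks; the names N, TILE, transposeKernel and launchTranspose are placeholders chosen here, not from the upload:

// Illustrative variant: larger N, grid derived from N and a fixed tile size.
#include <cuda_runtime.h>

#define N 1000      // deliberately not a multiple of TILE
#define TILE 16

__global__ void transposeKernel(const int* d_a, int* d_b) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if (i < N && j < N)              // guard the partial blocks at the edges
        d_b[i * N + j] = d_a[j * N + i];
}

void launchTranspose(const int* d_a, int* d_b) {
    dim3 threads(TILE, TILE);
    dim3 blocks((N + TILE - 1) / TILE, (N + TILE - 1) / TILE);  // round up
    transposeKernel<<<blocks, threads>>>(d_a, d_b);
}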
cuda_code/11-convolution.cu ADDED
@@ -0,0 +1,50 @@
+ extern "C" {
+ //#define Mask_width 5
+ //#define Mask_radius Mask_width/2
+ #define O_TILE_WIDTH 12
+ #define BLOCK_WIDTH (O_TILE_WIDTH+4)
+ #define clamp(x, start, end) min(max(x, start), end)
+ __global__ void convolution_2D_kernel(float* P, float* N, int height, int width, int channels, const float* __restrict__ M, int Mask_width) {
+     int Mask_radius = Mask_width / 2;
+     __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH * 3];
+     int tx = threadIdx.x;
+     int ty = threadIdx.y;
+     int row_o = blockIdx.y * O_TILE_WIDTH + ty;
+     int col_o = blockIdx.x * O_TILE_WIDTH + tx;
+
+     int row_i = row_o - 2;
+     int col_i = col_o - 2;
+
+     int i = 0;
+     int j = 0;
+     int k = 0;
+     if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width)) {
+         for (k = 0; k < channels; ++k) {
+             Ns[ty][tx * channels + k] = P[(row_i * width + col_i) * channels + k];
+         }
+     } else {
+         for (k = 0; k < channels; ++k) {
+             Ns[ty][tx * channels + k] = 0.0f;
+         }
+     }
+
+     __syncthreads();
+     float output = 0.0f;
+     if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
+         for (k = 0; k < channels; ++k) {
+             output = 0.0f;
+             for (i = 0; i < Mask_width; ++i) {
+                 for (j = 0; j < Mask_width; ++j) {
+                     output += M[i * Mask_width + j] * Ns[i + ty][(j + tx) * channels + k];
+                 }
+             }
+             if (row_o < height && col_o < width) {
+                 N[(row_o * width + col_o) * channels + k] = output;
+             }
+         }
+     }
+ }
+
+
+
+ }
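This file only defines the kernel: each block of BLOCK_WIDTH x BLOCK_WIDTH threads loads a halo tile but writes just an O_TILE_WIDTH x O_TILE_WIDTH patch of output, so a host launch has to size the grid from the output tile, not from the block. A hypothetical host-side sketch (not in the commit), assuming the kernel above is linked in and that the hard-coded halo of 2 corresponds to a 5x5 mask; launchConvolution and its parameter names are placeholders:

// Hypothetical launch helper for convolution_2D_kernel.
#include <cuda_runtime.h>

#define O_TILE_WIDTH 12
#define BLOCK_WIDTH (O_TILE_WIDTH + 4)

extern "C" __global__ void convolution_2D_kernel(float* P, float* N, int height,
    int width, int channels, const float* __restrict__ M, int Mask_width);

void launchConvolution(float* d_in, float* d_out, const float* d_mask,
                       int height, int width, int channels) {
    dim3 block(BLOCK_WIDTH, BLOCK_WIDTH);
    // grid is sized from the output tile each block produces
    dim3 grid((width + O_TILE_WIDTH - 1) / O_TILE_WIDTH,
              (height + O_TILE_WIDTH - 1) / O_TILE_WIDTH);
    // halo of 2 inside the kernel assumes Mask_width == 5
    convolution_2D_kernel<<<grid, block>>>(d_in, d_out, height, width,
                                           channels, d_mask, 5);
}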
cuda_code/1d1.cu ADDED
@@ -0,0 +1,105 @@
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <math.h>
+ #include <time.h>
+ #include <sys/time.h>
+ #include <cuda_runtime.h>
+ #define N 1024 * 1024 * 128
+ #define KN 9
+ #define THREADSPERBLOCK 1024
+ #define BLOCKSPERGRID 1
+
+ float data[N];
+ float kernel[KN];
+ float output[N-KN+1];
+ float output_from_device[N-KN+1];
+
+ __global__ void conv( float *data_cuda, float *kernel, float *output ){
+     int tid = threadIdx.x;
+     while (tid < N - KN + 1) {
+         // accumulate in a register so the uninitialised device buffer is never read
+         float sum = 0.0f;
+         for(int i = 0; i < KN; i++) {
+             sum += data_cuda[tid + i] * kernel[i];
+         }
+         output[tid] = sum;
+         tid += blockDim.x;
+     }
+ }
+
+ int main(){
+     int cpu = true;
+     int pass = 1;
+     cudaError_t cuError = cudaSuccess;
+
+     double elapsedTimeCPU;
+     struct timespec t_start, t_end;
+
+     float elapsedTime;
+     cudaEvent_t start, stop;
+     cudaEventCreate(&start);
+     cudaEventCreate(&stop);
+
+     // generate dummy data
+     srand(time(NULL));
+     for (int i = 0; i < KN; i++) {
+         kernel[i] = rand() / (float)RAND_MAX;
+     }
+
+     srand(time(NULL));
+     for (int i = 0; i < N; i++) {
+         data[i] = rand() / (float)RAND_MAX;
+     }
+
+     // CPU
+     if (cpu) {
+         clock_gettime( CLOCK_REALTIME, &t_start);
+         for (int i = 0; i < N-KN+1; i++) {
+             output[i] = 0;
+             for (int j = 0; j < KN; j++) {
+                 output[i] += kernel[j] * data[i+j];
+             }
+         }
+         clock_gettime( CLOCK_REALTIME, &t_end);
+         elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
+         elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
+         printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);
+     }
+
+     // GPU
+     float *d_kernel, *d_data, *d_output;
+     if (cudaMalloc( (void**)&d_kernel, KN * sizeof(float) ) != cudaSuccess) return 1;
+     if (cudaMalloc( (void**)&d_data, N * sizeof(float) ) != cudaSuccess) return 1;
+     if (cudaMalloc( (void**)&d_output, (N-KN+1) * sizeof(float) ) != cudaSuccess) return 1;
+     if (cudaMemcpy( d_kernel, kernel, KN * sizeof(float), cudaMemcpyHostToDevice ) != cudaSuccess) return 1;
+     if (cudaMemcpy( d_data, data, N * sizeof(float), cudaMemcpyHostToDevice ) != cudaSuccess) return 1;
+
+     cudaEventRecord(start, 0);
+     conv<<<BLOCKSPERGRID, THREADSPERBLOCK>>>(d_data, d_kernel, d_output);
+     cudaEventRecord(stop, 0);
+     cudaEventSynchronize(stop);
+     cudaEventElapsedTime(&elapsedTime, start, stop);
+     printf("GPU time: %13f msec\n", elapsedTime);
+
+     cudaMemcpy( output_from_device, d_output, (N-KN+1) * sizeof(float), cudaMemcpyDeviceToHost );
+     cudaEventDestroy(start);
+     cudaEventDestroy(stop);
+
+     // capture the last error from the kernel launch before testing it
+     cuError = cudaGetLastError();
+     if (cuError != cudaSuccess)
+     {
+         printf("Failed in kernel launch and reason is %s\n", cudaGetErrorString(cuError));
+         return 1;
+     }
+
+     // check correctness
+     if (cpu) {
+         for (int i = 0; i < N-KN+1; i++){
+             if (fabsf(output_from_device[i] - output[i]) > 0.001f){
+                 printf("CPU:%lf GPU:%lf\n", output[i], output_from_device[i]);
+                 pass = 0;
+                 break;
+             }
+         }
+         if (pass == 1) {
+             printf("Test pass!\n");
+             printf("GPU / CPU = %f\n", elapsedTimeCPU / elapsedTime);
+         }
+         else
+             printf("Test fail!\n");
+     }
+ }
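The kernel above is launched with BLOCKSPERGRID = 1, so a single block strides over the whole output with tid += blockDim.x. A minimal sketch (not in the commit) of the same 1-D convolution written as a grid-stride loop, so it can use many blocks; conv_grid_stride and n_out are illustrative names introduced here:

// Grid-stride variant of the 1-D convolution; KN matches the #define above.
#define KN 9

__global__ void conv_grid_stride(const float* data, const float* kernel,
                                 float* output, int n_out) {
    for (int tid = blockIdx.x * blockDim.x + threadIdx.x;
         tid < n_out;
         tid += gridDim.x * blockDim.x) {
        float sum = 0.0f;
        for (int i = 0; i < KN; i++) {
            sum += data[tid + i] * kernel[i];
        }
        output[tid] = sum;
    }
}

Launched, for example, as conv_grid_stride<<<(n_out + 1023) / 1024, 1024>>>(d_data, d_kernel, d_output, n_out), it covers the same range as the single-block version while keeping every SM busy.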
cuda_code/28-pyrup-pyrdown.cu ADDED
@@ -0,0 +1,90 @@
+ extern "C" {
+
+ __global__ void pyrup_rgb_kernel(unsigned char *d_in, unsigned char *d_out, int colorWidthStep, int aabhas, int height, int width)
+ {
+     const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
+     const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
+     const int color_tid = (xIndex) * aabhas + (3 * (yIndex));
+     const int color_tid1 = (xIndex / 2) * colorWidthStep + (3 * (yIndex / 2));
+     if (yIndex >= width || xIndex >= height)
+     {
+         return;
+     }
+
+     if (yIndex % 2 == 0 && xIndex % 2 == 0)
+     {
+         d_out[color_tid] = d_in[color_tid1];
+         d_out[color_tid + 1] = d_in[color_tid1 + 1];
+         d_out[color_tid + 2] = d_in[color_tid1 + 2];
+     }
+     else
+     {
+         d_out[color_tid] = 0;
+         d_out[color_tid + 1] = 0; //d_in[color_tid1+1];
+         d_out[color_tid + 2] = 0; //d_in[color_tid1+2];
+     }
+ }
+
+ __global__ void pyrup_gray_kernel(unsigned char *d_in, unsigned char *d_out, int colorWidthStep, int aabhas, int height, int width)
+ {
+     const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
+     const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
+     const int color_tid = (xIndex) * aabhas + yIndex;
+     const int color_tid1 = (xIndex / 2) * colorWidthStep + yIndex / 2;
+     if (yIndex >= width || xIndex >= height)
+     {
+         return;
+     }
+
+     if (yIndex % 2 == 0 && xIndex % 2 == 0)
+     {
+         d_out[color_tid] = d_in[color_tid1];
+         //d_out[color_tid+1]=d_in[color_tid1+1];
+         //d_out[color_tid+2]=d_in[color_tid1+2];
+     }
+     else
+     {
+         d_out[color_tid] = 255;
+         //d_out[color_tid+1]=0;//d_in[color_tid1+1];
+         //d_out[color_tid+2]=0;//d_in[color_tid1+2];
+     }
+ }
+
+
+ __global__ void pyrdown_rgb_kernel(unsigned char *d_in, unsigned char *d_out, int colorWidthStep, int aabhas, int height, int width)
+ {
+     const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
+     const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
+     const int color_tid = (xIndex) * aabhas + (3 * (yIndex));
+     const int color_tid1 = (2 * xIndex) * colorWidthStep + (3 * (2 * yIndex));
+     if (yIndex >= width || xIndex >= height)
+     {
+         return;
+     }
+
+     d_out[color_tid] = d_in[color_tid1];
+     d_out[color_tid + 1] = d_in[color_tid1 + 1];
+     d_out[color_tid + 2] = d_in[color_tid1 + 2];
+ }
+
+ __global__ void pyrdown_gray_kernel(unsigned char *d_in, unsigned char *d_out, int colorWidthStep, int aabhas, int height, int width)
+ {
+     const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
+     const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
+     const int color_tid = (xIndex) * aabhas + yIndex;
+     const int color_tid1 = (2 * xIndex) * colorWidthStep + 2 * yIndex;
+     if (yIndex >= width || xIndex >= height)
+     {
+         return;
+     }
+
+     d_out[color_tid] = d_in[color_tid1];
+     //d_out[color_tid+1]=d_in[color_tid1+1];
+     //d_out[color_tid+2]=d_in[color_tid1+2];
+ }
+
+ }
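In these kernels the x thread dimension walks rows (xIndex is checked against height) and the y dimension walks columns, which is swapped relative to the usual image-kernel convention, and colorWidthStep / aabhas are the input and output row strides. A hypothetical host-side sketch (not in the commit) of launching pyrdown_rgb_kernel, sized for the downsampled output image; launchPyrDownRGB and its parameter names are placeholders:

// Hypothetical launch for pyrdown_rgb_kernel: grid.x covers output rows,
// grid.y covers output columns, matching the kernel's index convention.
#include <cuda_runtime.h>

extern "C" __global__ void pyrdown_rgb_kernel(unsigned char* d_in, unsigned char* d_out,
                                              int colorWidthStep, int aabhas,
                                              int height, int width);

void launchPyrDownRGB(unsigned char* d_in, int inStep,
                      unsigned char* d_out, int outStep,
                      int outHeight, int outWidth) {
    dim3 block(16, 16);
    dim3 grid((outHeight + block.x - 1) / block.x,   // rows via xIndex
              (outWidth + block.y - 1) / block.y);   // columns via yIndex
    pyrdown_rgb_kernel<<<grid, block>>>(d_in, d_out, inStep, outStep,
                                        outHeight, outWidth);
}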
cuda_code/2d_xyWENOADV_p.cu ADDED
@@ -0,0 +1,169 @@
+ // Andrew Gloster
+ // November 2018
+ // Example of advection in 2D with upwinding WENO
+
+ // Copyright 2018 Andrew Gloster
+
+ // Licensed under the Apache License, Version 2.0 (the "License");
+ // you may not use this file except in compliance with the License.
+ // You may obtain a copy of the License at
+
+ //     http://www.apache.org/licenses/LICENSE-2.0
+
+ // Unless required by applicable law or agreed to in writing, software
+ // distributed under the License is distributed on an "AS IS" BASIS,
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+
+
+ // ---------------------------------------------------------------------
+ // Standard Libraries and Headers
+ // ---------------------------------------------------------------------
+
+ #include <cmath>
+ #include <iostream>
+ #include <cstdio>
+ #include "cuda.h"
+
+ // ---------------------------------------------------------------------
+ // cuSten - Note the file position is relative
+ // ---------------------------------------------------------------------
+
+ #include "../../cuSten/cuSten.h"
+
+ // ---------------------------------------------------------------------
+ // MACROS
+ // ---------------------------------------------------------------------
+
+ #define BLOCK_X 32
+ #define BLOCK_Y 32
+
+ // ---------------------------------------------------------------------
+ // Main Program
+ // ---------------------------------------------------------------------
+
+ int main()
+ {
+     // Set the device number
+     int deviceNum = 0;
+
+     // Declare Domain Size
+     int nx = 8192;
+     int ny = 8192;
+
+     double lx = 2 * M_PI;
+     double ly = 2 * M_PI;
+
+     // Domain spacings
+     double dx = lx / (double) (nx);
+     double dy = ly / (double) (ny);
+
+     // Set the number of tiles per device
+     int numTiles = 4;
+
+     // Initial Conditions
+     double* dataInput;
+     double* dataOutput;
+     double* u;
+     double* v;
+
+     // -----------------------------
+     // Allocate the memory
+     // -----------------------------
+
+     cudaMallocManaged(&dataInput, nx * ny * sizeof(double));
+     cudaMallocManaged(&dataOutput, nx * ny * sizeof(double));
+
+     cudaMallocManaged(&u, nx * ny * sizeof(double));
+     cudaMallocManaged(&v, nx * ny * sizeof(double));
+
+     // -----------------------------
+     // Set the initial conditions
+     // -----------------------------
+
+     // Indexing
+     int temp;
+     int index;
+
+     for (int j = 0; j < ny; j++)
+     {
+         temp = j * nx;
+
+         for (int i = 0; i < nx; i++)
+         {
+             index = temp + i;
+
+             dataInput[index] = cos(i * dx) * sin(j * dy);
+             dataOutput[index] = 0.0;
+
+             u[index] = sin(j * dy);
+             v[index] = - sin(i * dx);
+         }
+     }
+
+     // Ensure all the above is completed
+     cudaDeviceSynchronize();
+
+     // -----------------------------
+     // Set up device
+     // -----------------------------
+
+     // Set up the compute device structs
+     cuSten_t xyWENOCompute;
+
+     // Initialise the instance of the stencil
+     cuStenCreate2DXYWENOADVp(
+         &xyWENOCompute,
+         deviceNum,
+         numTiles,
+         nx,
+         ny,
+         BLOCK_X,
+         BLOCK_Y,
+         dx,
+         dy,
+         u,
+         v,
+         dataOutput,
+         dataInput
+     );
+
+     // Synchronise to ensure everything initialised
+     cudaDeviceSynchronize();
+
+     // -----------------------------
+     // Compute
+     // -----------------------------
+
+     // Run the computation
+     cuStenCompute2DXYWENOADVp(&xyWENOCompute, HOST);
+
+     // Synchronise at the end to ensure everything is complete
+     cudaDeviceSynchronize();
+
+     // -----------------------------
+     // Destroy struct and free memory
+     // -----------------------------
+
+     // Destroy struct
+     cuStenDestroy2DXYWENOADVp(&xyWENOCompute);
+
+     // Free memory at the end
+     cudaFree(dataInput);
+     cudaFree(dataOutput);
+
+     cudaFree(u);
+     cudaFree(v);
+
+     // Return 0 when the program completes
+     return 0;
+ }
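The cuSten calls are library-specific, but the surrounding pattern is plain unified memory: allocate with cudaMallocManaged, initialise on the host, hand the pointers to device work, and synchronise before the host touches the results again. A minimal self-contained sketch of that pattern (not in the commit), using a toy kernel invented here for illustration:

// Minimal managed-memory round trip: host init -> kernel -> synchronise -> host read.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void scaleKernel(double* data, int n, double factor) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) data[idx] *= factor;
}

int main() {
    const int n = 1 << 20;
    double* data;
    cudaMallocManaged(&data, n * sizeof(double));   // visible to host and device
    for (int i = 0; i < n; i++) data[i] = 1.0;      // host-side initial condition
    scaleKernel<<<(n + 255) / 256, 256>>>(data, n, 2.0);
    cudaDeviceSynchronize();                        // wait before the host reads
    printf("data[0] = %f\n", data[0]);
    cudaFree(data);
    return 0;
}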
cuda_code/2mm.cu ADDED
@@ -0,0 +1,244 @@
+ /**
+  * 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
+  *
+  *
+  * Contact: Scott Grauer-Gray <[email protected]>
+  * Louis-Noel Pouchet <[email protected]>
+  * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
+  */
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <math.h>
+ #include <assert.h>
+ #include <unistd.h>
+ #include <sys/time.h>
+ #include <cuda.h>
+
+ #include "../../../common/polybenchUtilFuncts.h"
+
+ // define the error threshold for the results "not matching"
+ #define PERCENT_DIFF_ERROR_THRESHOLD 0.05
+
+ #define GPU_DEVICE 0
+
+ /* Problem size. */
+ # define NI 2048 * 4
+ # define NJ 2048 * 4
+ # define NK 2048 * 4
+ # define NL 2048 * 4
+
+ /* Thread block dimensions */
+ #define DIM_THREAD_BLOCK_X 32
+ #define DIM_THREAD_BLOCK_Y 8
+
+ /* Can switch DATA_TYPE between float and double */
+ typedef float DATA_TYPE;
+
+
+ void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu)
+ {
+     int i, j;
+
+     for (i = 0; i < NI; i++)
+     {
+         for (j = 0; j < NK; j++)
+         {
+             A[i*NI + j] = ((DATA_TYPE) i*j) / NI;
+             A_gpu[i*NI + j] = ((DATA_TYPE) i*j) / NI;
+         }
+     }
+
+     for (i = 0; i < NK; i++)
+     {
+         for (j = 0; j < NJ; j++)
+         {
+             B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
+             B_gpu[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
+         }
+     }
+
+     for (i = 0; i < NL; i++)
+     {
+         for (j = 0; j < NJ; j++)
+         {
+             C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
+             C_gpu[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
+         }
+     }
+
+     for (i = 0; i < NI; i++)
+     {
+         for (j = 0; j < NL; j++)
+         {
+             D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
+             D_gpu[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
+         }
+     }
+ }
+
+
+ void compareResults(DATA_TYPE *E, DATA_TYPE *E_outputFromGpu)
+ {
+     int i, j, fail;
+     fail = 0;
+
+     for (i=0; i < NL; i++)
+     {
+         for (j=0; j < NI; j++)
+         {
+             if (percentDiff(E[i*NI + j], E_outputFromGpu[i*NI + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
+             {
+                 fail++;
+             }
+         }
+     }
+
+     // print results
+     printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
+ }
+
+
+ void GPU_argv_init()
+ {
+     cudaDeviceProp deviceProp;
+     cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
+     printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
+     cudaSetDevice( GPU_DEVICE );
+ }
+
+
+ __global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
+ {
+     int j = blockIdx.x * blockDim.x + threadIdx.x;
+     int i = blockIdx.y * blockDim.y + threadIdx.y;
+
+     if ((i < NI) && (j < NJ))
+     {
+         int k;
+         for (k = 0; k < NK; k++)
+         {
+             C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
+         }
+     }
+ }
+
+
+ __global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E)
+ {
+     int j = blockIdx.x * blockDim.x + threadIdx.x;
+     int i = blockIdx.y * blockDim.y + threadIdx.y;
+
+     if ((i < NI) && (j < NL))
+     {
+         int k;
+         for (k = 0; k < NJ; k++)
+         {
+             E[i * NL + j] += C[i * NJ + k] * D[k * NL + j];
+         }
+     }
+ }
+
+
+ void mm2_cpu(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
+ {
+     int i, j, k;
+
+     for (i = 0; i < NI; i++)
+     {
+         for (j = 0; j < NJ; j++)
+         {
+             C[i*NJ + j] = 0.0;
+             for (k = 0; k < NK; ++k)
+             {
+                 C[i*NJ + j] += A[i*NK + k] * B[k*NJ + j];
+             }
+         }
+     }
+
+     for (i = 0; i < NI; i++)
+     {
+         for (j = 0; j < NL; j++)
+         {
+             E[i*NL + j] = 0.0;
+             for (k = 0; k < NJ; ++k)
+             {
+                 E[i*NL + j] += C[i*NJ + k] * D[k*NL + j];
+             }
+         }
+     }
+ }
+
+
+ void mm2Cuda(DATA_TYPE* A_gpu, DATA_TYPE* B_gpu, DATA_TYPE* C_gpu, DATA_TYPE* D_gpu, DATA_TYPE* E_gpu)
+ {
+     double t_start, t_end;
+     dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
+     dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
+     dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
+     t_start = rtclock();
+     mm2_kernel1<<<grid1,block>>>(A_gpu, B_gpu, C_gpu);
+     cudaDeviceSynchronize();
+     mm2_kernel2<<<grid2,block>>>(C_gpu, D_gpu, E_gpu);
+     cudaDeviceSynchronize();
+     t_end = rtclock();
+     fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
+ }
+
+
+ int main(int argc, char** argv)
+ {
+     double t_start, t_end;
+
+     DATA_TYPE* C;
+     DATA_TYPE* A;
+     DATA_TYPE* B;
+     DATA_TYPE* D;
+     DATA_TYPE* E;
+
+     DATA_TYPE *A_gpu;
+     DATA_TYPE *B_gpu;
+     DATA_TYPE *C_gpu;
+     DATA_TYPE *D_gpu;
+     DATA_TYPE *E_gpu;
+
+     C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
+     A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
+     B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
+     D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE));
+     E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
+
+     cudaMallocManaged(&A_gpu, sizeof(DATA_TYPE) * NI * NK);
+     cudaMallocManaged(&B_gpu, sizeof(DATA_TYPE) * NK * NJ);
+     cudaMallocManaged(&C_gpu, sizeof(DATA_TYPE) * NI * NJ);
+     cudaMallocManaged(&D_gpu, sizeof(DATA_TYPE) * NJ * NL);
+     cudaMallocManaged(&E_gpu, sizeof(DATA_TYPE) * NI * NL);
+
+     init_array(A, B, C, D, A_gpu, B_gpu, C_gpu, D_gpu);
+     GPU_argv_init();
+
+     mm2Cuda(A_gpu, B_gpu, C_gpu, D_gpu, E_gpu);
+
+     t_start = rtclock();
+     mm2_cpu(A, B, C, D, E);
+     t_end = rtclock();
+     fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
+
+     compareResults(E, E_gpu);
+
+     free(C);
+     free(A);
+     free(B);
+     free(D);
+     free(E);
+     cudaFree(A_gpu);
+     cudaFree(B_gpu);
+     cudaFree(C_gpu);
+     cudaFree(D_gpu);
+     cudaFree(E_gpu);
+     return 0;
+ }
+
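Both kernels accumulate with +=, so whatever C_gpu and E_gpu hold before the launch ends up in the device result, whereas mm2_cpu resets C and E to zero itself. A minimal sketch (not in the commit) of zeroing the device-side accumulators before mm2Cuda, if one wants both paths to start from the same state; resetDeviceAccumulators is a placeholder helper introduced here:

// Clear the device accumulators that mm2_kernel1/mm2_kernel2 add into.
#include <cuda_runtime.h>

typedef float DATA_TYPE;

void resetDeviceAccumulators(DATA_TYPE* C_gpu, DATA_TYPE* E_gpu,
                             size_t ni, size_t nj, size_t nl) {
    cudaMemset(C_gpu, 0, sizeof(DATA_TYPE) * ni * nj);   // C = A * B accumulator
    cudaMemset(E_gpu, 0, sizeof(DATA_TYPE) * ni * nl);   // E = C * D accumulator
}

Called as resetDeviceAccumulators(C_gpu, E_gpu, NI, NJ, NL) right before mm2Cuda(...), it leaves the rest of the benchmark unchanged.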
cuda_code/ACNetHDNL3.cu ADDED
@@ -0,0 +1,1653 @@
1
+ #include "CudaHelper.cuh"
2
+ #include "CudaInterface.hpp"
3
+
4
+ __device__ __constant__ static const float kernelsL1[9 * 8] =
5
+ {
6
+ -0.0461f, 0.1274f, 0.2976f,
7
+ -0.0393f, -0.1251f, 0.2527f,
8
+ 0.0791f, 0.0600f, -0.0303f,
9
+ -0.0520f, -0.5039f, -0.3305f,
10
+ -0.0115f, 0.0456f, 0.4370f,
11
+ 0.0601f, 0.0780f, 0.3106f,
12
+ -0.0017f, -0.0018f, -0.0017f,
13
+ -0.0017f, -0.0018f, -0.0018f,
14
+ -0.0017f, -0.0017f, -0.0017f,
15
+ 0.2666f, 0.1687f, 0.2303f,
16
+ -0.1901f, 0.3825f, 0.3024f,
17
+ 0.1811f, 0.0581f, 0.2080f,
18
+ -0.1246f, 0.0155f, -0.4075f,
19
+ 0.1156f, 0.5929f, 0.1449f,
20
+ -0.1080f, -0.0171f, -0.0516f,
21
+ -0.0817f, 0.2247f, 0.0472f,
22
+ 0.0394f, 0.1085f, 0.1435f,
23
+ -0.0480f, -0.0135f, -0.0606f,
24
+ -0.0083f, 0.2045f, 0.1056f,
25
+ -0.2239f, 0.2823f, -0.1926f,
26
+ 0.2581f, 0.1362f, -0.1914f,
27
+ -0.0833f, 0.0702f, 0.0234f,
28
+ 0.3616f, 0.3789f, -0.1840f,
29
+ 0.0128f, 0.1347f, -0.0187f
30
+ };
31
+ __device__ __constant__ static const float biasL1[8] =
32
+ {
33
+ -0.1329f, -0.0431f, -0.0031f, -0.0129f, 0.2294f, -0.2595f, -0.2370f, -0.0499f
34
+ };
35
+ __device__ __constant__ static const float kernelsL[8][9 * 8 * 8] =
36
+ {
37
+ {
38
+ 1.4090e-01f, -1.8985e-02f, -6.8589e-02f,
39
+ 6.6491e-02f, 1.4360e-02f, 8.5223e-02f,
40
+ 1.8782e-01f, 9.8042e-02f, -3.4558e-02f,
41
+ 2.5606e-01f, 2.2027e-01f, 2.7603e-01f,
42
+ 1.9424e-01f, 3.4537e-02f, 9.5975e-02f,
43
+ 1.1223e-02f, -4.3377e-01f, -1.4760e-01f,
44
+ -3.4293e-40f, -5.5421e-40f, -4.4763e-41f,
45
+ -6.3322e-40f, -3.1495e-40f, -7.8264e-41f,
46
+ -1.5375e-40f, -3.3656e-40f, 5.2441e-40f,
47
+ 1.2413e-01f, 1.5682e-01f, 1.1465e-01f,
48
+ 1.6683e-02f, 7.8382e-02f, 1.0110e-01f,
49
+ 1.4902e-01f, 1.3608e-01f, 1.1674e-01f,
50
+ -6.5160e-02f, 7.7748e-02f, 2.1773e-02f,
51
+ 2.0652e-02f, 2.7245e-01f, 1.0297e-01f,
52
+ -2.0953e-02f, 6.1685e-02f, 4.4128e-02f,
53
+ 6.1538e-02f, -1.9746e-02f, -1.2785e-02f,
54
+ 2.5931e-02f, 1.2740e-01f, 9.0033e-02f,
55
+ 8.6448e-02f, 2.0684e-01f, 9.8063e-02f,
56
+ -7.8384e-03f, 6.3277e-02f, 7.6751e-03f,
57
+ 3.5956e-02f, 1.0555e-01f, 4.2728e-02f,
58
+ 7.1578e-02f, 1.3253e-01f, 1.1171e-01f,
59
+ -2.7538e-02f, 1.5836e-01f, 1.0014e-01f,
60
+ -4.9113e-02f, 1.6911e-01f, 2.7329e-01f,
61
+ 7.9170e-03f, 9.5440e-02f, 1.3922e-01f,
62
+ 8.0151e-02f, 4.3438e-02f, 5.5314e-02f,
63
+ 3.4896e-02f, 1.6816e-01f, -4.5783e-03f,
64
+ -1.4579e-03f, 2.0493e-01f, 2.6238e-02f,
65
+ 2.6499e-02f, 3.9490e-01f, -1.1582e-02f,
66
+ 3.5790e-01f, 1.4317e-01f, -2.1775e-01f,
67
+ 4.1794e-03f, -3.2513e-01f, -1.6729e-01f,
68
+ 3.4040e-41f, -6.2960e-42f, -1.0067e-40f,
69
+ 5.5978e-41f, -1.2353e-40f, -1.1347e-40f,
70
+ 5.4572e-40f, -6.4384e-40f, -4.1234e-40f,
71
+ -9.3690e-02f, 1.7765e-01f, 1.1275e-01f,
72
+ 9.1159e-03f, 1.7375e-01f, 1.1427e-01f,
73
+ -7.8385e-02f, 1.5658e-01f, -3.8399e-02f,
74
+ -1.0756e-01f, 5.9943e-02f, -6.7273e-02f,
75
+ -1.1117e-01f, 1.5267e-01f, 1.1563e-01f,
76
+ -1.2964e-01f, -3.8604e-02f, -2.4532e-02f,
77
+ 1.6324e-02f, 1.3112e-01f, 6.1679e-03f,
78
+ -7.7703e-03f, 2.6311e-01f, 8.9427e-02f,
79
+ -2.8948e-02f, 1.9341e-01f, 4.4339e-02f,
80
+ 6.4559e-03f, -6.8885e-02f, 1.1481e-01f,
81
+ -1.0665e-01f, 3.8613e-02f, 7.0410e-02f,
82
+ -6.1680e-02f, -1.7374e-02f, 9.5475e-03f,
83
+ -4.0081e-02f, -3.1549e-02f, 2.8311e-01f,
84
+ -1.2178e-01f, -1.3848e-01f, 1.7416e-01f,
85
+ -8.1756e-02f, -1.7718e-01f, 7.9533e-02f,
86
+ -3.1299e-03f, -3.2305e-03f, -3.2094e-03f,
87
+ -3.1548e-03f, -3.2553e-03f, -3.2453e-03f,
88
+ -3.1459e-03f, -3.2278e-03f, -3.2076e-03f,
89
+ -3.6554e-05f, -3.6715e-05f, -3.1284e-05f,
90
+ -1.4927e-05f, -1.4357e-05f, -1.2185e-05f,
91
+ -1.5771e-09f, -1.1439e-09f, -6.4952e-10f,
92
+ 3.7723e-40f, 4.9166e-40f, -2.1946e-40f,
93
+ -4.7599e-40f, -4.3356e-40f, -8.3928e-41f,
94
+ 2.6127e-40f, 4.8634e-40f, 2.7720e-40f,
95
+ -5.4972e-03f, -5.6409e-03f, -5.6919e-03f,
96
+ -5.5818e-03f, -5.7079e-03f, -5.7542e-03f,
97
+ -5.6338e-03f, -5.7437e-03f, -5.7600e-03f,
98
+ -3.7940e-03f, -3.8853e-03f, -3.8693e-03f,
99
+ -3.8995e-03f, -3.9616e-03f, -3.8945e-03f,
100
+ -3.8438e-03f, -3.9156e-03f, -3.8269e-03f,
101
+ -7.2342e-05f, -7.8682e-05f, -4.7701e-05f,
102
+ -1.1126e-04f, -1.1918e-04f, -7.8931e-05f,
103
+ -1.1644e-04f, -1.2418e-04f, -8.2350e-05f,
104
+ -2.3881e-04f, -3.7971e-04f, -3.9448e-04f,
105
+ -2.4112e-04f, -3.8395e-04f, -4.0189e-04f,
106
+ -2.3451e-04f, -3.7525e-04f, -3.9222e-04f,
107
+ -3.9853e-03f, -4.0748e-03f, -4.1134e-03f,
108
+ -4.0685e-03f, -4.1456e-03f, -4.1548e-03f,
109
+ -4.0547e-03f, -4.1388e-03f, -4.1357e-03f,
110
+ 5.3008e-02f, 2.2252e-02f, -7.1158e-02f,
111
+ -6.6411e-02f, -3.0015e-02f, -2.2526e-02f,
112
+ 1.2259e-01f, -6.2488e-02f, 5.6190e-02f,
113
+ 1.5981e-02f, -7.6832e-02f, 1.7908e-02f,
114
+ 2.7618e-01f, 5.4054e-02f, 8.7282e-02f,
115
+ 1.5212e-02f, -1.1097e-01f, -2.2265e-02f,
116
+ -6.8532e-41f, -6.0539e-40f, 4.6269e-40f,
117
+ -2.9221e-40f, -3.8468e-40f, -4.6656e-40f,
118
+ 6.4572e-40f, -6.1625e-40f, 6.4545e-40f,
119
+ 3.5920e-02f, 9.0955e-02f, -1.7626e-02f,
120
+ 4.7826e-02f, 1.8832e-01f, -4.4043e-02f,
121
+ -3.8405e-02f, 5.9176e-02f, 6.8182e-02f,
122
+ 3.7657e-03f, 2.6441e-02f, -2.5585e-01f,
123
+ 1.0969e-01f, 2.3914e-01f, 3.5120e-02f,
124
+ -1.6252e-01f, 3.4371e-02f, -2.7501e-01f,
125
+ 4.9289e-02f, 2.2088e-02f, -1.4588e-02f,
126
+ 1.6384e-01f, -8.1421e-03f, -6.9613e-02f,
127
+ 1.0820e-01f, 1.1137e-01f, 7.2648e-03f,
128
+ 1.5243e-01f, 1.3659e-01f, 2.7553e-02f,
129
+ 1.3966e-01f, 1.1019e-01f, 1.9817e-02f,
130
+ 1.1420e-01f, -5.1386e-03f, 6.8617e-03f,
131
+ -1.3264e-02f, 2.1508e-01f, 4.8430e-02f,
132
+ 5.1149e-02f, 2.9165e-01f, 2.8077e-01f,
133
+ 2.9288e-03f, 9.0611e-02f, 8.1538e-02f,
134
+ -1.1812e-01f, 1.5603e-02f, 1.1571e-01f,
135
+ -3.4958e-02f, -1.6688e-03f, -4.6619e-02f,
136
+ -1.0417e-02f, -3.1802e-02f, 1.8357e-02f,
137
+ 1.1064e-01f, 1.8397e-01f, 4.8449e-02f,
138
+ -8.3336e-03f, 1.6029e-01f, 3.9490e-02f,
139
+ -4.0959e-01f, -2.6134e-01f, 2.0766e-02f,
140
+ 6.6073e-41f, -6.7490e-40f, -5.1131e-41f,
141
+ -4.3320e-41f, -3.7194e-40f, 2.0674e-40f,
142
+ -5.2359e-40f, -3.4006e-40f, -4.9257e-40f,
143
+ -4.7260e-02f, 2.8518e-03f, -2.7764e-01f,
144
+ 6.9182e-03f, 1.3938e-01f, -1.3162e-01f,
145
+ -6.0901e-03f, 1.0339e-01f, 6.0419e-02f,
146
+ -1.4449e-01f, -3.2043e-02f, -9.1466e-02f,
147
+ -1.4022e-02f, 3.1703e-01f, 5.8166e-02f,
148
+ -1.5243e-02f, 1.4521e-01f, 2.0790e-04f,
149
+ -1.0255e-01f, -7.8766e-02f, -1.2395e-01f,
150
+ 7.9894e-03f, 3.7079e-03f, -3.2134e-02f,
151
+ 1.1663e-01f, 1.4808e-01f, 2.0431e-01f,
152
+ 7.4026e-02f, 6.9632e-02f, 1.7156e-01f,
153
+ -3.0385e-02f, 2.3218e-01f, 7.3855e-02f,
154
+ -8.8530e-02f, -5.9224e-02f, 2.3431e-02f,
155
+ 1.4596e-02f, 3.2442e-02f, -1.1308e-01f,
156
+ -6.3734e-02f, 2.5270e-01f, 7.8081e-02f,
157
+ 1.0468e-02f, 1.5473e-01f, 3.8676e-02f,
158
+ -1.0842e-01f, 8.6778e-03f, 1.4985e-01f,
159
+ 8.1757e-03f, -8.2109e-02f, 8.5471e-02f,
160
+ -2.1437e-01f, -6.1173e-02f, 4.8163e-02f,
161
+ 2.8965e-01f, 1.9748e-01f, 4.2651e-02f,
162
+ 1.8196e-01f, 3.3932e-01f, 3.9594e-01f,
163
+ 3.9657e-01f, 4.2167e-01f, 2.9290e-01f,
164
+ 7.4011e-41f, 6.5220e-40f, -5.9885e-40f,
165
+ 7.4011e-41f, 6.2047e-40f, -7.1533e-40f,
166
+ 4.1950e-40f, -1.1886e-40f, -5.9922e-40f,
167
+ 1.9662e-01f, 2.1402e-01f, 3.1041e-02f,
168
+ -1.1079e-01f, 1.3361e-01f, -2.1608e-01f,
169
+ -1.7962e-01f, -8.0576e-02f, -3.1277e-01f,
170
+ 1.0620e-02f, 2.4024e-01f, 1.0657e-01f,
171
+ -7.9906e-05f, 2.8760e-01f, 4.1231e-02f,
172
+ -1.3261e-02f, -1.0868e-01f, -1.1267e-01f,
173
+ -1.0659e-02f, -2.6051e-02f, -4.5389e-02f,
174
+ 5.8261e-02f, 4.0288e-02f, 6.7050e-02f,
175
+ -2.6462e-01f, -1.7846e-01f, -1.0002e-01f,
176
+ -6.2904e-02f, 1.5275e-01f, 4.4282e-03f,
177
+ 1.4446e-01f, 1.1814e-01f, -8.0349e-02f,
178
+ 2.0331e-02f, 3.3014e-02f, 1.2710e-01f,
179
+ 1.6084e-01f, 3.8819e-01f, 1.0854e-01f,
180
+ -6.8126e-03f, 3.5673e-01f, 1.8938e-01f,
181
+ -1.1660e-01f, -5.7694e-02f, -2.9194e-01f,
182
+ 1.2775e-02f, -3.2769e-02f, 1.7228e-02f,
183
+ 1.8324e-01f, 1.1983e-01f, -1.6944e-02f,
184
+ 1.0593e-01f, 1.3451e-01f, 5.2536e-02f,
185
+ 1.9147e-01f, 1.3875e-01f, 1.0298e-01f,
186
+ -2.0871e-01f, -1.7197e-01f, 1.1342e-01f,
187
+ -1.7581e-01f, 4.0972e-02f, 2.9796e-01f,
188
+ 3.2588e-40f, -4.3663e-40f, -2.6518e-40f,
189
+ 3.2588e-40f, -4.3663e-40f, -2.6518e-40f,
190
+ 4.1600e-40f, -4.4350e-40f, -4.8744e-41f,
191
+ 3.7289e-02f, 8.1769e-03f, 1.7059e-02f,
192
+ 3.7735e-02f, 6.6571e-02f, -6.6137e-02f,
193
+ -5.8890e-02f, -7.7019e-03f, -6.2128e-02f,
194
+ -4.0751e-02f, 1.1710e-01f, -1.1586e-01f,
195
+ -1.2999e-01f, -1.6384e-02f, -2.1858e-01f,
196
+ -2.8028e-01f, -6.0443e-02f, -1.1880e-01f,
197
+ 1.8152e-01f, 1.5364e-01f, 1.1781e-01f,
198
+ 2.9010e-01f, 2.4612e-01f, 1.3170e-01f,
199
+ 1.9022e-01f, 1.8117e-01f, 1.6483e-01f,
200
+ 9.3342e-02f, 2.6607e-01f, 1.4679e-01f,
201
+ 1.6729e-01f, 2.5374e-01f, 1.1954e-01f,
202
+ 6.3258e-02f, 1.0557e-01f, 6.7221e-02f,
203
+ -5.2017e-02f, 1.9628e-01f, 1.7243e-01f,
204
+ -3.2667e-02f, 1.5756e-01f, 1.9347e-01f,
205
+ -9.5252e-02f, -3.7525e-02f, -3.4543e-04f,
206
+ -4.9759e-02f, 4.0383e-02f, -2.0231e-02f,
207
+ -1.1776e-01f, 3.4182e-02f, 3.6720e-02f,
208
+ -1.4822e-02f, -4.1658e-02f, -1.3729e-02f,
209
+ -1.9215e-02f, 2.4427e-02f, -9.0638e-02f,
210
+ -1.4438e-01f, -2.1785e-01f, -5.1789e-02f,
211
+ -2.0279e-01f, -3.3918e-01f, -1.6871e-01f,
212
+ 6.1262e-41f, 2.4066e-40f, 6.6851e-40f,
213
+ 5.3430e-40f, -3.2335e-40f, -3.7400e-40f,
214
+ -6.3256e-40f, -4.7491e-40f, 2.2854e-40f,
215
+ -6.8701e-03f, -1.4849e-02f, 8.6332e-02f,
216
+ 1.1686e-01f, 1.8346e-01f, 1.8797e-01f,
217
+ -2.3251e-02f, 7.3973e-02f, 1.0532e-01f,
218
+ -6.1838e-02f, 5.6667e-02f, 8.1584e-02f,
219
+ -3.8900e-02f, 7.0927e-02f, 9.5606e-02f,
220
+ -4.5098e-02f, -1.0829e-01f, -1.2224e-01f,
221
+ 3.5047e-03f, 3.2898e-02f, 3.5622e-02f,
222
+ 1.6170e-02f, 4.3721e-02f, 9.7496e-02f,
223
+ 2.3445e-03f, 6.0417e-02f, 1.3482e-01f,
224
+ 6.0570e-02f, -5.7139e-03f, -1.0883e-03f,
225
+ 2.2701e-02f, -2.9113e-02f, 7.9178e-03f,
226
+ 8.1214e-02f, -4.1408e-02f, 1.3616e-02f,
227
+ -4.7985e-02f, 1.0304e-02f, -3.3236e-02f,
228
+ -1.6334e-02f, -8.1538e-02f, 1.8629e-02f,
229
+ -9.3720e-02f, -1.2920e-01f, -4.0836e-02f
230
+ }
231
+ ,
232
+ {
233
+ 1.0443e-01f, 1.5461e-01f, -1.4743e-01f,
234
+ 1.6716e-01f, 1.0532e-01f, -2.3088e-01f,
235
+ 1.0218e-01f, 1.2393e-01f, -9.6646e-02f,
236
+ 1.7659e-01f, -7.3279e-02f, 1.9627e-02f,
237
+ 1.7721e-01f, -1.4329e-01f, -1.2533e-01f,
238
+ 1.6551e-01f, -3.4616e-01f, 9.5618e-02f,
239
+ 4.5827e-09f, 9.3413e-09f, 1.7015e-08f,
240
+ 1.2245e-08f, 9.9727e-09f, 6.7108e-09f,
241
+ 1.9612e-07f, 3.9479e-08f, 1.1537e-09f,
242
+ 2.2127e-02f, 9.2715e-02f, -1.2150e-01f,
243
+ 7.5652e-02f, 1.1548e-01f, -1.2420e-01f,
244
+ -1.0693e-03f, -7.2839e-02f, -1.9664e-01f,
245
+ 1.4466e-01f, -1.8552e-03f, -1.3575e-01f,
246
+ 2.0699e-01f, 8.0396e-02f, -1.9651e-01f,
247
+ -4.7075e-02f, -5.1259e-02f, -8.2593e-02f,
248
+ -2.2385e-01f, 3.0066e-03f, -2.2659e-02f,
249
+ 6.1827e-02f, 2.5331e-02f, -5.3898e-02f,
250
+ 2.7091e-01f, 1.0991e-01f, -3.3600e-01f,
251
+ -8.9499e-02f, -9.3821e-03f, 2.2675e-02f,
252
+ 1.1213e-01f, 1.3276e-01f, 2.0368e-02f,
253
+ 6.5408e-02f, 4.1598e-02f, -4.7917e-02f,
254
+ 6.0740e-03f, 1.2236e-04f, -1.0659e-01f,
255
+ -1.8072e-02f, -9.1082e-02f, -9.0414e-02f,
256
+ 4.9052e-02f, -1.4298e-01f, -3.9721e-02f,
257
+ 1.1840e-01f, 2.2503e-01f, 2.4587e-02f,
258
+ 9.3023e-02f, 6.9650e-02f, 1.6798e-01f,
259
+ -1.5640e-03f, 1.6300e-02f, 6.3585e-02f,
260
+ 1.4431e-01f, 3.7885e-02f, 1.6692e-02f,
261
+ 1.7345e-01f, 7.2315e-02f, 1.8942e-02f,
262
+ 1.1081e-01f, 8.2973e-02f, -9.7717e-02f,
263
+ -5.2264e-03f, -5.2641e-03f, -5.2727e-03f,
264
+ -5.2809e-03f, -5.3125e-03f, -5.3153e-03f,
265
+ -5.2915e-03f, -5.3251e-03f, -5.3231e-03f,
266
+ 6.0008e-02f, 2.0268e-01f, 1.3396e-01f,
267
+ -2.5202e-03f, -1.7750e-02f, -1.2019e-02f,
268
+ 1.1806e-01f, -2.2306e-02f, 3.6464e-02f,
269
+ 7.9324e-02f, 3.1883e-02f, 1.5483e-02f,
270
+ -4.3537e-02f, 1.2204e-02f, 1.8905e-02f,
271
+ -8.1581e-02f, -1.1307e-01f, -6.0718e-02f,
272
+ -2.4865e-01f, -1.0199e-01f, 1.9886e-02f,
273
+ -1.0519e-02f, 6.9972e-02f, 4.8012e-02f,
274
+ -1.5282e-02f, 1.1979e-01f, 8.7968e-02f,
275
+ -3.6752e-02f, 1.9523e-02f, 7.1321e-02f,
276
+ -5.8295e-02f, 5.3242e-02f, 1.2773e-01f,
277
+ -7.9671e-02f, 8.3249e-04f, 7.4904e-02f,
278
+ 1.1792e-01f, 2.2135e-03f, -9.0963e-03f,
279
+ -2.8356e-03f, -4.2661e-02f, 6.9497e-02f,
280
+ 9.3561e-02f, 1.0475e-01f, 5.4745e-02f,
281
+ -8.5901e-02f, -2.1969e-01f, -1.5572e-01f,
282
+ 3.6473e-02f, 1.1097e-01f, -2.6830e-02f,
283
+ 1.2199e-02f, 1.8917e-01f, 1.1906e-01f,
284
+ 1.0664e-01f, -2.7005e-01f, 1.5492e-01f,
285
+ -4.1771e-02f, -1.6580e-01f, 2.9234e-02f,
286
+ -1.9854e-02f, 2.1436e-01f, -1.1100e-01f,
287
+ 4.5382e-04f, 4.2085e-04f, 5.6852e-04f,
288
+ 3.4951e-04f, 3.7354e-04f, 3.2786e-04f,
289
+ 2.0790e-04f, 2.8606e-04f, 3.2415e-04f,
290
+ -1.5500e-02f, 2.2865e-02f, -3.0070e-01f,
291
+ 1.8467e-01f, 2.4899e-01f, 1.4812e-02f,
292
+ -1.2318e-01f, 2.3175e-01f, 7.2244e-02f,
293
+ 1.6713e-01f, 1.9089e-02f, -2.7494e-01f,
294
+ 1.0202e-01f, 2.9200e-01f, -3.6055e-03f,
295
+ 1.3265e-01f, 2.2551e-01f, 1.9897e-01f,
296
+ -3.9474e-02f, 1.6262e-01f, 1.6726e-01f,
297
+ -8.6222e-02f, 2.0573e-01f, -7.3247e-01f,
298
+ -9.5391e-02f, 3.8933e-01f, 1.5861e-01f,
299
+ -1.2202e-01f, -6.4735e-02f, -1.1762e-01f,
300
+ -2.2427e-02f, -1.9171e-01f, -1.6092e-01f,
301
+ 3.2356e-01f, -2.2234e-01f, -1.3743e-01f,
302
+ -1.1493e-01f, -2.4936e-02f, 2.9212e-02f,
303
+ -9.8112e-02f, -1.8021e-02f, -1.0507e-01f,
304
+ -1.0168e-01f, 1.1759e-01f, -9.8203e-02f,
305
+ -2.8871e-02f, 1.3249e-01f, 7.8378e-02f,
306
+ -1.1012e-01f, -4.0596e-02f, 5.4202e-02f,
307
+ 4.9022e-02f, -1.1744e-01f, 9.8888e-02f,
308
+ 1.3343e-02f, 1.4358e-01f, -8.7142e-02f,
309
+ 1.9952e-01f, 3.3708e-02f, 2.0721e-02f,
310
+ 2.6527e-02f, -2.3822e-01f, 2.4706e-01f,
311
+ -3.2750e-04f, -2.8475e-04f, -6.3494e-05f,
312
+ -2.2378e-04f, -1.8046e-04f, -1.9242e-05f,
313
+ -4.2124e-05f, -2.2062e-05f, 4.5500e-07f,
314
+ 1.1692e-01f, 4.0366e-01f, -1.8709e-02f,
315
+ 8.2700e-02f, 1.7884e-01f, -1.3520e-01f,
316
+ 3.7758e-02f, 3.7048e-02f, -2.8109e-01f,
317
+ -2.3438e-01f, 5.9423e-02f, -1.7300e-01f,
318
+ 1.0343e-02f, 7.2307e-02f, -4.3852e-01f,
319
+ -5.7429e-02f, -4.9136e-02f, -8.0327e-02f,
320
+ 8.1094e-02f, 2.9118e-02f, 1.6677e-01f,
321
+ 1.2155e-01f, 6.5358e-01f, 2.4544e-01f,
322
+ 3.1163e-02f, 3.7463e-02f, -2.6613e-01f,
323
+ 1.2723e-01f, 1.2541e-01f, 1.4319e-02f,
324
+ 1.9055e-01f, -5.7441e-02f, 1.1146e-01f,
325
+ -1.0690e-02f, -1.7567e-01f, -1.2238e-01f,
326
+ -2.0879e-01f, -6.5278e-02f, -7.9327e-02f,
327
+ -1.6564e-01f, -1.3659e-01f, -2.6231e-01f,
328
+ -3.1916e-01f, -2.6553e-01f, -9.8647e-02f,
329
+ -1.0617e-01f, 1.2782e-01f, -2.1053e-02f,
330
+ -1.2329e-01f, 1.4952e-01f, -1.7466e-02f,
331
+ -1.6969e-01f, 3.6980e-02f, -6.7732e-02f,
332
+ -3.1220e-02f, 4.0615e-02f, -1.5251e-01f,
333
+ -2.0017e-01f, 2.2421e-01f, -2.5682e-02f,
334
+ -6.5873e-02f, 1.8346e-01f, 1.2982e-02f,
335
+ 1.4021e-06f, -1.6929e-05f, -8.4696e-05f,
336
+ 1.9580e-05f, 2.9943e-06f, 3.0084e-06f,
337
+ 2.0769e-04f, 1.4661e-05f, 2.9503e-06f,
338
+ -1.4485e-01f, 1.8841e-01f, -1.7954e-01f,
339
+ 2.1551e-01f, 2.2601e-01f, -8.6689e-03f,
340
+ 8.6926e-02f, -6.8989e-02f, -1.2683e-01f,
341
+ -8.7712e-02f, 6.3176e-02f, 1.1983e-01f,
342
+ 1.0790e-01f, 6.6418e-02f, 6.5849e-02f,
343
+ 1.2483e-01f, 1.2428e-01f, 4.4994e-02f,
344
+ 1.5139e-01f, -1.2116e-01f, -3.5497e-01f,
345
+ -6.1889e-02f, 3.4088e-01f, 1.3148e-01f,
346
+ -1.6478e-01f, 4.4477e-02f, -1.1979e-01f,
347
+ 3.8343e-02f, 1.7992e-01f, 3.6790e-01f,
348
+ 3.0426e-01f, 1.1235e-01f, 4.9815e-01f,
349
+ 2.6290e-01f, 1.9703e-01f, 1.5881e-01f,
350
+ -6.4678e-03f, 2.4401e-01f, 1.9266e-01f,
351
+ -1.4089e-01f, 1.2323e-01f, 4.4340e-02f,
352
+ -8.8856e-02f, 8.4036e-02f, -9.8488e-02f,
353
+ -1.7377e-03f, -1.7654e-03f, -1.7223e-03f,
354
+ -1.7651e-03f, -1.7919e-03f, -1.7491e-03f,
355
+ -1.7172e-03f, -1.7446e-03f, -1.7041e-03f,
356
+ -3.0384e-04f, -2.9297e-04f, -2.4838e-04f,
357
+ -3.2961e-04f, -3.1678e-04f, -2.7009e-04f,
358
+ -3.1665e-04f, -3.0492e-04f, -2.6122e-04f,
359
+ 3.7109e-40f, -3.7915e-40f, -5.2536e-40f,
360
+ 5.8286e-41f, -5.6108e-40f, 4.3331e-40f,
361
+ -3.0184e-42f, -4.8987e-40f, -5.1788e-40f,
362
+ -4.0457e-04f, -4.3257e-04f, -4.1616e-04f,
363
+ -4.2268e-04f, -4.5118e-04f, -4.3407e-04f,
364
+ -3.9446e-04f, -4.2199e-04f, -4.0650e-04f,
365
+ -1.1253e-16f, -1.1328e-14f, -2.0489e-14f,
366
+ -3.0346e-19f, -1.7189e-16f, -4.5141e-16f,
367
+ -2.4957e-30f, -1.8191e-23f, -3.5882e-22f,
368
+ -3.1610e-36f, -1.7544e-24f, -2.2187e-21f,
369
+ -4.2887e-19f, -1.5526e-15f, -1.5160e-14f,
370
+ -1.7750e-16f, -6.8066e-14f, -3.3764e-13f,
371
+ -6.9570e-24f, -5.1139e-23f, -2.9335e-23f,
372
+ -1.9091e-22f, -1.0323e-21f, -4.5931e-22f,
373
+ -2.0010e-22f, -9.3710e-22f, -3.5622e-22f,
374
+ -2.9470e-04f, -2.9081e-04f, -2.5958e-04f,
375
+ -3.2290e-04f, -3.1810e-04f, -2.8461e-04f,
376
+ -3.1795e-04f, -3.1356e-04f, -2.8121e-04f,
377
+ 6.1623e-02f, 1.7057e-01f, 8.0478e-02f,
378
+ 1.2624e-01f, 1.8468e-01f, 2.1901e-02f,
379
+ 7.6033e-02f, 1.3455e-01f, 8.4037e-02f,
380
+ 8.4434e-02f, -1.7069e-02f, -7.8318e-02f,
381
+ 4.9244e-02f, 4.4782e-02f, -6.9747e-02f,
382
+ 1.2915e-01f, 1.1453e-01f, -6.5243e-02f,
383
+ -5.0985e-03f, -5.1407e-03f, -5.1687e-03f,
384
+ -5.1185e-03f, -5.1511e-03f, -5.1712e-03f,
385
+ -5.0986e-03f, -5.1272e-03f, -5.1409e-03f,
386
+ -1.8186e-02f, 6.2680e-02f, 3.3235e-02f,
387
+ 1.3398e-02f, 1.6497e-01f, 4.3523e-02f,
388
+ -2.4101e-02f, 1.3316e-01f, 1.8373e-02f,
389
+ -6.2677e-04f, 6.5026e-03f, 2.5948e-02f,
390
+ 6.6542e-02f, 1.2352e-01f, 1.5155e-02f,
391
+ -8.6237e-02f, -2.0907e-02f, 1.0237e-02f,
392
+ -1.7807e-01f, -8.6196e-02f, -3.2408e-02f,
393
+ -8.1946e-03f, -1.3957e-02f, -1.6733e-01f,
394
+ 2.6269e-02f, 1.6817e-01f, 9.4029e-02f,
395
+ 3.4005e-02f, -1.2833e-02f, -1.2038e-01f,
396
+ -4.8950e-02f, 3.9857e-02f, 1.4048e-02f,
397
+ -6.4758e-02f, 9.9603e-02f, 1.0748e-01f,
398
+ -1.0850e-02f, 9.8875e-02f, -4.4439e-02f,
399
+ 9.1219e-02f, 6.6400e-02f, -6.7693e-02f,
400
+ 5.3318e-02f, 1.1838e-02f, -1.5164e-01f,
401
+ -5.8568e-02f, 1.1249e-01f, -3.8286e-02f,
402
+ -7.1122e-02f, 9.5799e-02f, 3.8521e-02f,
403
+ -1.3846e-01f, 1.4167e-01f, -3.5500e-03f,
404
+ -1.0343e-01f, -3.3025e-02f, 3.7186e-02f,
405
+ -2.0769e-03f, 1.3558e-01f, -1.3009e-01f,
406
+ 1.0167e-02f, 1.5358e-02f, -9.8009e-02f,
407
+ 2.4123e-05f, -1.1800e-05f, -1.4180e-04f,
408
+ 3.5217e-05f, -6.3838e-06f, -1.2243e-04f,
409
+ 8.5525e-05f, 2.1599e-06f, -5.3290e-05f,
410
+ -1.4471e-01f, 2.0111e-02f, -1.2449e-01f,
411
+ 5.3368e-02f, 3.2918e-01f, 1.4034e-01f,
412
+ -1.1833e-01f, -1.9225e-02f, -1.2658e-01f,
413
+ -2.6966e-01f, 1.1751e-01f, 9.7072e-02f,
414
+ -1.9929e-01f, 9.7986e-02f, -5.1240e-02f,
415
+ -9.5073e-02f, -6.8070e-02f, -2.1318e-01f,
416
+ 9.5305e-02f, -4.0551e-02f, -1.0936e-01f,
417
+ 5.2687e-02f, 4.5340e-01f, 2.3531e-01f,
418
+ -1.3385e-02f, 1.5922e-01f, -1.8371e-01f,
419
+ -1.2203e-01f, -7.2567e-02f, -3.0000e-01f,
420
+ -3.4356e-02f, -1.3471e-01f, -9.0995e-02f,
421
+ -2.5230e-01f, -2.4846e-01f, -1.8529e-01f,
422
+ -1.6962e-01f, 1.0905e-01f, 1.1557e-01f,
423
+ -1.4405e-01f, 8.9191e-02f, 1.1715e-01f,
424
+ -1.3237e-01f, 5.2092e-02f, -1.2227e-01f
425
+ }
426
+ ,
427
+ {
428
+ 2.0013e-01f, 2.2105e-01f, 1.9196e-01f,
429
+ 6.8158e-02f, 1.7154e-01f, -8.6677e-02f,
430
+ 9.2652e-02f, 1.0789e-01f, 1.6745e-01f,
431
+ -2.9254e-01f, -7.6815e-02f, 5.8812e-02f,
432
+ -4.6466e-02f, 1.3941e-02f, 2.3353e-01f,
433
+ -1.5033e-01f, 7.5167e-02f, 1.4433e-01f,
434
+ 2.8008e-02f, 3.1625e-01f, 3.2877e-02f,
435
+ -5.8835e-02f, -1.7305e-01f, -6.1558e-02f,
436
+ -1.2227e-01f, 3.9931e-02f, 3.0300e-02f,
437
+ 2.3004e-01f, 4.1834e-02f, -5.7790e-02f,
438
+ -2.2861e-01f, 2.9314e-01f, 1.6884e-01f,
439
+ -2.8009e-02f, 4.7550e-02f, -4.4542e-02f,
440
+ -2.4674e-01f, -1.5483e-01f, 3.2653e-02f,
441
+ -2.1574e-01f, 3.1083e-01f, -1.4025e-03f,
442
+ 1.7354e-02f, 5.6417e-02f, 1.0844e-01f,
443
+ -4.2681e-40f, 4.5893e-42f, -7.4234e-40f,
444
+ 1.7665e-40f, 4.0151e-40f, 4.6269e-40f,
445
+ 2.5452e-40f, -7.0179e-40f, -1.2338e-40f,
446
+ -1.4957e-01f, -1.9087e-02f, 7.1170e-02f,
447
+ -1.4435e-01f, 8.9560e-02f, 1.3879e-01f,
448
+ -3.6992e-02f, 5.9822e-02f, 1.9241e-02f,
449
+ -2.4402e-03f, 1.5097e-01f, 6.3958e-02f,
450
+ -1.7630e-01f, 3.6009e-01f, -2.0383e-01f,
451
+ -8.5106e-03f, 4.0863e-03f, -2.7575e-02f,
452
+ 7.8942e-02f, -1.8640e-01f, -6.7715e-02f,
453
+ 7.2777e-02f, -1.3804e-01f, -7.0332e-02f,
454
+ 1.5185e-01f, -4.3530e-02f, 1.4502e-01f,
455
+ -3.2928e-02f, -3.0583e-02f, 9.2061e-02f,
456
+ 1.2493e-01f, 1.0400e-01f, 1.3780e-01f,
457
+ 1.4438e-01f, 8.2051e-02f, 1.6159e-02f,
458
+ 2.7478e-02f, 1.7768e-01f, 2.5945e-01f,
459
+ -3.4662e-01f, 2.0330e-03f, 8.8118e-02f,
460
+ -2.9628e-01f, -1.3212e-01f, -1.8145e-02f,
461
+ -1.9330e-01f, 3.9238e-02f, -4.6944e-02f,
462
+ -1.5668e-01f, -5.7104e-02f, 1.9558e-01f,
463
+ 6.5305e-02f, 5.9933e-02f, 7.7337e-02f,
464
+ -2.4906e-02f, -1.1235e-01f, 1.3822e-02f,
465
+ -3.9988e-02f, -9.1882e-03f, 1.9204e-02f,
466
+ 1.0504e-01f, 4.6820e-03f, -2.1836e-02f,
467
+ -2.6953e-40f, 2.5334e-40f, -1.3028e-40f,
468
+ 1.4110e-41f, 5.6841e-40f, 3.6368e-40f,
469
+ -1.1746e-41f, -7.0658e-41f, -3.9413e-40f,
470
+ 1.5025e-02f, 7.4419e-02f, 9.5652e-02f,
471
+ 5.0297e-02f, 6.6704e-02f, 5.7316e-02f,
472
+ 2.5102e-02f, 1.1985e-01f, 2.6043e-02f,
473
+ 3.3297e-02f, -7.7374e-02f, -1.1114e-01f,
474
+ -7.5586e-02f, -1.9338e-02f, -1.3739e-02f,
475
+ 4.5616e-02f, -6.4946e-02f, -6.9372e-02f,
476
+ -7.5874e-03f, -1.1141e-01f, -2.9135e-02f,
477
+ -6.9436e-03f, -1.4418e-02f, 1.6436e-03f,
478
+ -1.3051e-01f, -1.3324e-01f, -9.3934e-02f,
479
+ 1.2184e-01f, 1.9386e-01f, 1.7995e-01f,
480
+ -2.7452e-02f, 9.9736e-02f, 1.0020e-01f,
481
+ -6.3290e-02f, -2.1447e-02f, -1.7005e-01f,
482
+ 1.3857e-01f, 2.3338e-01f, 2.5410e-01f,
483
+ 2.3002e-01f, 1.9551e-01f, 1.4452e-01f,
484
+ 4.7040e-01f, 2.2647e-01f, 1.5215e-01f,
485
+ 2.6927e-02f, -2.1304e-01f, -1.4762e-01f,
486
+ -5.6998e-02f, 2.9064e-01f, 1.8085e-01f,
487
+ 8.9393e-02f, -1.7463e-01f, -2.7095e-01f,
488
+ 3.8434e-02f, 1.7198e-01f, -1.8122e-02f,
489
+ -1.3857e-01f, 1.9418e-01f, 1.5019e-01f,
490
+ -5.6337e-02f, -5.3265e-01f, 3.2122e-01f,
491
+ -2.4484e-40f, -5.3707e-40f, 1.5854e-41f,
492
+ 5.1791e-40f, -4.1875e-41f, 5.6732e-40f,
493
+ 1.3048e-40f, 1.6452e-40f, -4.5028e-40f,
494
+ -3.0692e-02f, 1.8569e-01f, 2.0327e-01f,
495
+ -7.4756e-02f, -5.1765e-02f, 4.2475e-02f,
496
+ -9.0675e-02f, -3.0438e-01f, -3.5088e-01f,
497
+ -1.9129e-02f, -1.5663e-03f, 4.9895e-02f,
498
+ -1.9441e-02f, 9.3237e-02f, 1.2910e-01f,
499
+ -2.3919e-02f, -4.0539e-01f, 2.8167e-02f,
500
+ 2.0203e-01f, 3.3424e-02f, 1.7927e-02f,
501
+ 4.1923e-02f, -1.6967e-01f, 2.5656e-02f,
502
+ -1.5869e-01f, -1.8727e-01f, 2.7860e-03f,
503
+ -4.0276e-02f, -6.7792e-03f, 3.3699e-02f,
504
+ -6.7044e-03f, 1.7686e-02f, 2.9786e-02f,
505
+ -1.5623e-02f, 3.7904e-02f, 2.4737e-02f,
506
+ -1.2282e-01f, -3.6563e-02f, 4.1976e-02f,
507
+ -9.9622e-03f, 8.8981e-02f, 2.1364e-02f,
508
+ -8.5668e-02f, -1.6803e-01f, -4.4974e-02f,
509
+ 1.3164e-01f, 4.1294e-01f, 1.8897e-01f,
510
+ 2.1991e-01f, 1.6247e-02f, 1.1569e-01f,
511
+ -3.0142e-02f, 1.4069e-02f, 3.6646e-02f,
512
+ -2.6816e-02f, -3.9767e-02f, 1.4061e-01f,
513
+ -1.3603e-01f, -2.0649e-01f, 7.5837e-02f,
514
+ -1.6984e-02f, -8.3800e-03f, 2.3652e-04f,
515
+ 1.5049e-40f, 4.6504e-40f, 1.3625e-40f,
516
+ -7.5358e-40f, -3.4257e-40f, 9.9763e-41f,
517
+ 4.7243e-40f, 7.4890e-40f, -7.9440e-42f,
518
+ -5.9692e-02f, -2.8047e-02f, 2.3795e-02f,
519
+ -3.5284e-02f, 1.1448e-02f, 5.0302e-04f,
520
+ -3.5066e-02f, 4.6185e-02f, 1.2167e-02f,
521
+ 3.7583e-02f, -3.6598e-02f, 1.0206e-01f,
522
+ -9.6229e-02f, -1.5977e-01f, 4.9157e-02f,
523
+ 3.7293e-02f, 5.8766e-02f, 1.0448e-02f,
524
+ 1.1490e-01f, 1.4459e-01f, 8.6936e-02f,
525
+ 2.8609e-01f, -4.8108e-02f, 9.0023e-02f,
526
+ 6.7941e-02f, -5.7148e-03f, 1.0021e-01f,
527
+ 7.3816e-02f, 7.3794e-02f, 8.0970e-03f,
528
+ 2.8307e-02f, 3.6635e-03f, -1.1769e-01f,
529
+ 4.1374e-02f, 3.9933e-02f, -4.4292e-02f,
530
+ 5.9423e-02f, 1.9009e-01f, -2.3735e-01f,
531
+ -2.6670e-01f, 5.8789e-01f, -2.0048e-01f,
532
+ -3.7082e-01f, 1.8045e-01f, 5.4820e-02f,
533
+ -6.3567e-01f, 2.0098e-01f, 1.0653e-01f,
534
+ -2.5056e-01f, 6.5065e-01f, -4.0471e-01f,
535
+ 5.4715e-02f, 2.4375e-01f, -2.7402e-01f,
536
+ 1.5982e-01f, 1.0923e-01f, 2.1566e-01f,
537
+ 2.0239e-01f, -9.0221e-02f, -4.4606e-01f,
538
+ 1.0550e-01f, 5.4666e-02f, -2.7134e-01f,
539
+ -4.6424e-40f, 2.9137e-40f, 7.4968e-41f,
540
+ 1.2376e-41f, -5.6213e-40f, -6.3457e-40f,
541
+ 2.5404e-40f, 2.0013e-40f, 3.5611e-40f,
542
+ 5.5423e-02f, 3.9843e-02f, -1.7509e-01f,
543
+ 5.4480e-02f, 5.0331e-02f, -1.6793e-01f,
544
+ 6.6093e-02f, 3.0163e-02f, -8.2023e-02f,
545
+ -1.5490e-01f, 1.7457e-01f, 2.7832e-01f,
546
+ 1.1482e-01f, 2.5759e-01f, -2.4199e-01f,
547
+ -9.3891e-02f, 9.1921e-02f, -6.4480e-03f,
548
+ 1.9266e-01f, 5.2907e-02f, 7.0289e-02f,
549
+ 1.3582e-01f, 6.4246e-02f, 1.4989e-01f,
550
+ 6.2013e-03f, -6.8884e-02f, 6.8734e-02f,
551
+ -1.0483e-01f, -7.7134e-02f, -3.6204e-02f,
552
+ 1.7590e-02f, 5.0844e-02f, 1.4234e-01f,
553
+ 7.2913e-02f, 6.0726e-02f, 6.4414e-02f,
554
+ -8.5021e-02f, -1.0621e-03f, 5.5851e-02f,
555
+ 2.4666e-01f, 6.5652e-02f, -1.8180e-02f,
556
+ 1.5225e-01f, 1.2928e-01f, 3.1578e-03f,
557
+ 1.1468e-01f, 1.9544e-01f, 6.6637e-02f,
558
+ 6.3430e-02f, 2.0542e-01f, 7.0876e-02f,
559
+ 3.4779e-02f, 1.0037e-02f, -2.2134e-02f,
560
+ -6.9304e-02f, 1.1184e-01f, -3.7015e-02f,
561
+ -1.7634e-01f, 1.2475e-01f, 9.1947e-02f,
562
+ -6.0550e-02f, -1.3904e-01f, 7.5192e-02f,
563
+ -2.2871e-40f, 4.7367e-41f, -1.0711e-40f,
564
+ -2.8662e-40f, 4.0542e-41f, 3.3067e-40f,
565
+ -4.4395e-41f, -7.2684e-41f, 1.8695e-40f,
566
+ -1.6702e-01f, -2.6654e-01f, 8.7902e-03f,
567
+ -2.0108e-01f, -3.8093e-01f, -8.3700e-02f,
568
+ -7.5433e-02f, -2.0689e-01f, 2.7951e-02f,
569
+ 2.9938e-03f, 1.1378e-01f, 7.1598e-02f,
570
+ -1.6031e-01f, 1.3475e-01f, 1.5800e-01f,
571
+ -7.2019e-02f, -1.1663e-01f, 8.0692e-02f,
572
+ 1.0610e-01f, 1.1163e-02f, -1.4959e-01f,
573
+ -1.1576e-01f, -8.5645e-02f, 4.0414e-02f,
574
+ 5.6245e-02f, 1.7056e-01f, 2.5734e-01f,
575
+ -6.1086e-02f, -7.0851e-02f, 7.6851e-02f,
576
+ -2.7595e-02f, -6.0890e-02f, 4.7472e-02f,
577
+ 7.1059e-03f, 6.0942e-05f, 7.4915e-02f,
578
+ 1.9350e-01f, -1.8458e-02f, -2.3040e-02f,
579
+ 6.3477e-02f, 1.1923e-01f, 9.9319e-02f,
580
+ 6.4839e-02f, 2.7973e-01f, 1.2902e-01f,
581
+ -1.7829e-01f, 5.7083e-03f, -6.1680e-03f,
582
+ -1.1256e-01f, -2.7951e-02f, -2.1544e-01f,
583
+ -2.1614e-02f, -7.1468e-02f, -2.2054e-02f,
584
+ -8.7543e-02f, -1.2982e-01f, 1.9386e-01f,
585
+ -5.7157e-03f, -1.0108e-01f, 1.4467e-01f,
586
+ -6.5742e-02f, -7.2054e-02f, 1.7924e-01f,
587
+ 7.5418e-40f, 6.3043e-40f, 4.9815e-40f,
588
+ -1.0952e-40f, 3.0327e-40f, -2.3848e-40f,
589
+ 4.1302e-40f, 2.0150e-40f, -1.6509e-40f,
590
+ -1.3985e-02f, -1.0550e-01f, 5.8772e-02f,
591
+ -1.7108e-02f, -7.3644e-02f, 3.3014e-02f,
592
+ -1.8224e-03f, 2.8931e-03f, 9.2762e-02f,
593
+ 4.1531e-02f, -1.5139e-01f, -1.7773e-01f,
594
+ 9.6548e-02f, -1.1914e-01f, -4.6536e-02f,
595
+ 8.6754e-02f, -4.0057e-03f, 1.8983e-01f,
596
+ 1.6545e-01f, -4.7311e-02f, -7.2455e-03f,
597
+ 3.7567e-01f, 1.8883e-01f, -7.4325e-02f,
598
+ -5.8252e-02f, -1.3811e-02f, -7.0470e-02f,
599
+ -3.2943e-02f, -7.0770e-02f, -1.4700e-01f,
600
+ 1.7043e-02f, 9.4331e-02f, 4.2857e-03f,
601
+ 4.1247e-03f, 1.6690e-01f, 4.2146e-02f,
602
+ 1.1420e-01f, -7.4456e-02f, -3.8763e-02f,
603
+ 1.6807e-01f, 9.3636e-03f, -1.1796e-01f,
604
+ 1.7703e-01f, 1.1386e-03f, -6.8707e-02f,
605
+ 1.0259e-01f, -1.8918e-02f, 6.5902e-03f,
606
+ 1.2421e-02f, -7.8960e-02f, 2.1766e-02f,
607
+ 1.3062e-01f, 4.6001e-02f, 2.4199e-01f,
608
+ -1.2955e-02f, -1.9329e-01f, 5.2074e-03f,
609
+ 5.9446e-02f, 1.8832e-01f, 2.2094e-01f,
610
+ -1.0954e-01f, -8.1867e-02f, -4.3324e-02f,
611
+ -3.9596e-41f, 2.8677e-40f, -6.5843e-40f,
612
+ 4.2812e-41f, -3.5323e-40f, 4.8298e-40f,
613
+ 7.6351e-40f, -2.4759e-40f, 7.3030e-40f,
614
+ -1.1284e-01f, -8.4171e-02f, -1.5935e-01f,
615
+ -3.2299e-02f, 1.5427e-01f, 8.9029e-02f,
616
+ -3.8815e-02f, 1.3098e-01f, -4.3065e-02f,
617
+ -2.5276e-01f, -1.7018e-01f, 9.7901e-02f,
618
+ 1.4218e-01f, 3.1236e-01f, 2.9636e-01f,
619
+ -2.3613e-02f, -5.5258e-02f, -2.0550e-01f
620
+ }
621
+ ,
622
+ {
623
+ 0.0333f, 0.1145f, -0.0922f,
624
+ 0.1185f, 0.4533f, -0.2015f,
625
+ -0.0774f, 0.1759f, -0.0496f,
626
+ 0.0954f, -0.0499f, 0.0824f,
627
+ 0.1059f, 0.0173f, -0.0586f,
628
+ -0.0666f, -0.0287f, -0.0652f,
629
+ -0.0558f, -0.1362f, 0.0015f,
630
+ 0.1277f, 0.1020f, -0.1369f,
631
+ 0.0020f, -0.0103f, -0.0804f,
632
+ 0.0507f, 0.1404f, -0.0241f,
633
+ 0.0520f, 0.1239f, 0.0633f,
634
+ -0.0268f, 0.0335f, 0.0883f,
635
+ -0.0549f, -0.1022f, -0.0515f,
636
+ -0.0163f, -0.1167f, -0.0442f,
637
+ 0.0858f, -0.0804f, -0.0014f,
638
+ 0.0354f, -0.0666f, -0.2105f,
639
+ -0.0950f, 0.1578f, -0.0920f,
640
+ -0.1303f, 0.0299f, -0.0195f,
641
+ -0.0281f, -0.1993f, -0.0154f,
642
+ 0.0796f, 0.0503f, 0.0954f,
643
+ 0.0540f, 0.0212f, 0.0389f,
644
+ -0.1387f, 0.1091f, -0.1212f,
645
+ 0.1556f, 0.3573f, 0.0976f,
646
+ -0.0587f, -0.2070f, 0.2067f,
647
+ 0.0138f, 0.0051f, -0.1008f,
648
+ 0.2877f, 0.1079f, -0.0681f,
649
+ 0.0953f, -0.0739f, -0.2349f,
650
+ 0.1482f, 0.0657f, 0.0480f,
651
+ 0.1590f, -0.0009f, 0.1402f,
652
+ 0.0700f, 0.0435f, 0.1190f,
653
+ 0.0957f, 0.0117f, -0.1010f,
654
+ 0.1790f, -0.0200f, -0.0765f,
655
+ 0.0797f, 0.1455f, -0.0340f,
656
+ 0.0008f, -0.0267f, 0.0089f,
657
+ 0.0644f, 0.0647f, 0.0397f,
658
+ 0.0463f, -0.0116f, -0.0771f,
659
+ 0.2237f, 0.0324f, 0.0192f,
660
+ -0.0082f, -0.0345f, 0.0294f,
661
+ 0.0719f, -0.0185f, 0.1008f,
662
+ -0.0307f, 0.0134f, -0.0747f,
663
+ 0.0776f, -0.1485f, 0.0135f,
664
+ 0.0965f, -0.0665f, -0.1263f,
665
+ -0.0101f, -0.0097f, -0.0144f,
666
+ -0.0022f, -0.0083f, 0.0277f,
667
+ 0.0136f, -0.0076f, 0.0314f,
668
+ -0.0008f, 0.0722f, -0.0704f,
669
+ 0.0053f, 0.0767f, 0.0368f,
670
+ -0.0189f, -0.1354f, 0.0231f,
671
+ -0.1416f, 0.1945f, -0.1756f,
672
+ 0.2058f, 0.0401f, -0.1348f,
673
+ -0.0945f, -0.2530f, -0.3082f,
674
+ -0.0096f, 0.0871f, 0.0699f,
675
+ -0.0092f, 0.0423f, 0.0995f,
676
+ -0.0914f, -0.0570f, -0.0718f,
677
+ -0.0739f, -0.2749f, -0.2320f,
678
+ 0.1488f, -0.2698f, -0.1977f,
679
+ 0.1445f, -0.1655f, -0.0758f,
680
+ 0.2035f, -0.0138f, 0.0332f,
681
+ 0.0282f, -0.2247f, -0.0945f,
682
+ -0.0614f, -0.2484f, -0.0595f,
683
+ -0.1174f, -0.1252f, 0.1969f,
684
+ -0.1101f, -0.2950f, -0.2164f,
685
+ -0.0348f, -0.0891f, 0.1250f,
686
+ 0.0195f, 0.0050f, 0.0300f,
687
+ -0.0508f, -0.0316f, -0.0194f,
688
+ 0.0199f, 0.0345f, 0.0444f,
689
+ -0.0022f, -0.0529f, 0.1604f,
690
+ 0.0756f, -0.2015f, -0.2117f,
691
+ -0.0837f, -0.1270f, 0.1330f,
692
+ 0.0286f, 0.0952f, 0.1082f,
693
+ 0.0724f, -0.0446f, -0.1156f,
694
+ 0.0545f, 0.0444f, -0.0291f,
695
+ 0.0759f, 0.1110f, 0.0944f,
696
+ 0.1615f, 0.4302f, -0.1060f,
697
+ 0.0418f, -0.0281f, -0.1378f,
698
+ -0.0757f, -0.0527f, -0.1578f,
699
+ 0.0123f, -0.0427f, 0.1504f,
700
+ 0.0694f, 0.0690f, 0.0203f,
701
+ 0.2132f, -0.3449f, 0.0936f,
702
+ 0.2491f, 0.0279f, -0.0884f,
703
+ -0.0447f, 0.1589f, -0.0054f,
704
+ -0.0246f, 0.1247f, 0.0403f,
705
+ 0.0513f, -0.0541f, -0.1141f,
706
+ 0.0712f, -0.1174f, -0.0051f,
707
+ 0.2304f, 0.2431f, -0.0517f,
708
+ -0.1548f, -0.0401f, 0.2032f,
709
+ -0.0087f, -0.1676f, -0.0600f,
710
+ 0.1094f, -0.0329f, 0.0530f,
711
+ -0.0580f, 0.1499f, -0.0806f,
712
+ -0.0086f, -0.1400f, -0.0636f,
713
+ 0.0708f, -0.1003f, -0.1113f,
714
+ -0.0732f, -0.1199f, 0.0060f,
715
+ -0.0534f, -0.0011f, 0.0965f,
716
+ -0.0268f, 0.0116f, -0.1161f,
717
+ 0.0787f, 0.3925f, -0.0819f,
718
+ -0.0041f, -0.0892f, -0.2063f,
719
+ -0.1296f, 0.0924f, -0.0079f,
720
+ 0.5625f, 0.4013f, 0.1645f,
721
+ -0.0137f, -0.1935f, 0.2714f,
722
+ 0.0980f, 0.0016f, -0.1461f,
723
+ 0.1576f, 0.0305f, -0.1450f,
724
+ 0.1503f, -0.0303f, -0.1403f,
725
+ 0.0262f, -0.0077f, 0.0459f,
726
+ 0.2718f, 0.0754f, 0.2404f,
727
+ 0.1381f, -0.1499f, 0.0016f,
728
+ 0.1454f, -0.1278f, -0.0085f,
729
+ 0.1674f, -0.0834f, 0.1993f,
730
+ 0.0874f, -0.0598f, -0.0188f,
731
+ 0.2003f, 0.3296f, 0.0153f,
732
+ -0.0154f, 0.5550f, -0.0945f,
733
+ 0.0489f, 0.0415f, -0.0940f,
734
+ 0.0164f, 0.0791f, 0.1077f,
735
+ -0.0893f, 0.1231f, 0.0473f,
736
+ -0.0319f, 0.1444f, 0.1690f,
737
+ -0.0518f, -0.1404f, -0.1778f,
738
+ -0.0170f, 0.1395f, -0.0234f,
739
+ 0.0128f, -0.0112f, -0.0472f,
740
+ 0.1039f, 0.1982f, -0.0272f,
741
+ 0.0282f, -0.1199f, -0.2622f,
742
+ -0.0449f, 0.0239f, -0.1030f,
743
+ -0.0840f, -0.1044f, -0.0646f,
744
+ 0.0588f, 0.1937f, -0.2494f,
745
+ 0.0180f, 0.0747f, 0.1530f,
746
+ 0.0500f, 0.1756f, 0.0491f,
747
+ -0.1113f, -0.0079f, 0.0854f,
748
+ -0.1493f, -0.0559f, -0.0373f,
749
+ 0.1972f, -0.3158f, -0.0500f,
750
+ 0.1932f, 0.3177f, -0.0018f,
751
+ -0.0516f, -0.1144f, 0.0686f,
752
+ 0.0175f, 0.0598f, 0.0345f,
753
+ -0.0667f, -0.1078f, 0.0384f,
754
+ 0.0897f, 0.2198f, -0.0531f,
755
+ -0.2596f, -0.1997f, 0.0195f,
756
+ 0.0332f, 0.4098f, 0.1381f,
757
+ 0.1985f, -0.0669f, -0.1275f,
758
+ -0.0751f, -0.2388f, -0.0672f,
759
+ 0.0090f, 0.0891f, -0.0362f,
760
+ 0.1392f, -0.0518f, 0.2039f,
761
+ 0.2079f, -0.1202f, 0.0707f,
762
+ 0.0498f, -0.1237f, -0.0665f,
763
+ -0.0398f, -0.1557f, -0.0928f,
764
+ 0.0505f, 0.1220f, 0.0352f,
765
+ -0.0674f, -0.1159f, 0.0724f,
766
+ -0.0331f, -0.1751f, 0.0766f,
767
+ 0.0992f, -0.0763f, 0.0090f,
768
+ -0.1223f, 0.2621f, -0.2029f,
769
+ 0.0509f, -0.0279f, -0.1061f,
770
+ 0.0598f, 0.0353f, -0.1610f,
771
+ 0.0165f, 0.0835f, 0.0704f,
772
+ -0.0079f, -0.0982f, 0.0187f,
773
+ 0.2331f, -0.1929f, 0.0684f,
774
+ -0.0507f, 0.1476f, -0.0886f,
775
+ -0.0275f, 0.1658f, 0.0697f,
776
+ -0.1123f, -0.0069f, -0.0851f,
777
+ -0.0377f, -0.0917f, -0.0629f,
778
+ -0.0420f, 0.0506f, 0.1111f,
779
+ 0.1086f, 0.1351f, -0.0851f,
780
+ 0.0466f, 0.2750f, 0.0185f,
781
+ -0.0208f, 0.2090f, 0.0271f,
782
+ 0.0217f, -0.0548f, 0.0078f,
783
+ -0.0609f, 0.1029f, -0.1641f,
784
+ 0.1392f, 0.0115f, 0.0317f,
785
+ -0.0570f, 0.1060f, 0.1814f,
786
+ -0.2015f, -0.1301f, 0.1082f,
787
+ 0.2452f, -0.1815f, -0.0046f,
788
+ 0.0103f, -0.0466f, -0.0895f,
789
+ 0.0158f, -0.0594f, -0.1386f,
790
+ -0.0073f, -0.0719f, -0.0716f,
791
+ 0.1308f, -0.0206f, 0.0511f,
792
+ -0.0437f, -0.0763f, 0.0287f,
793
+ 0.0493f, -0.1239f, 0.0219f,
794
+ -0.0041f, 0.0373f, 0.0262f,
795
+ 0.0078f, -0.0249f, -0.0284f,
796
+ 0.0598f, -0.0205f, -0.0276f,
797
+ 0.0115f, -0.1778f, -0.0395f,
798
+ 0.1673f, -0.0036f, 0.2334f,
799
+ 0.0706f, -0.0694f, 0.0177f,
800
+ 0.1123f, -0.0043f, 0.0716f,
801
+ -0.0894f, -0.1609f, 0.0334f,
802
+ -0.0046f, -0.2006f, -0.0977f,
803
+ -0.0127f, 0.1198f, -0.0339f,
804
+ -0.0283f, 0.1354f, 0.1637f,
805
+ -0.1696f, 0.0187f, -0.2621f,
806
+ 0.0496f, 0.2834f, 0.0423f,
807
+ 0.1126f, 0.3962f, 0.1660f,
808
+ -0.0750f, 0.1955f, 0.0590f,
809
+ -0.1088f, -0.1146f, -0.1219f,
810
+ 0.1360f, 0.1524f, 0.0498f,
811
+ -0.1151f, 0.0219f, -0.0063f,
812
+ -0.0821f, 0.0247f, -0.1065f,
813
+ 0.1153f, 0.2085f, 0.0618f,
814
+ -0.0383f, 0.0527f, -0.2067f
815
+ }
816
+ ,
817
+ {
818
+ 1.8014e-01f, 2.1908e-01f, -2.1088e-03f,
819
+ 1.7345e-01f, 2.7654e-01f, 1.3607e-02f,
820
+ 1.1363e-01f, 9.9105e-02f, -6.5730e-02f,
821
+ -3.5679e-02f, 9.6072e-03f, 4.0721e-02f,
822
+ -1.8771e-02f, -2.3484e-04f, -1.0230e-02f,
823
+ 1.6965e-02f, -1.3032e-02f, -6.3906e-02f,
824
+ -4.5686e-02f, -3.6733e-02f, -4.8873e-02f,
825
+ 4.0752e-02f, 2.1615e-02f, -1.4822e-02f,
826
+ 1.1689e-01f, 3.0153e-02f, -5.0163e-04f,
827
+ -7.0394e-03f, -1.2387e-01f, -8.9243e-02f,
828
+ -1.8312e-01f, -1.3868e-01f, -6.2618e-02f,
829
+ -8.1627e-02f, -2.0480e-01f, -3.0740e-01f,
830
+ 4.4296e-02f, 3.8572e-02f, 4.3754e-02f,
831
+ 1.7538e-01f, 5.3284e-02f, -7.5663e-03f,
832
+ 1.9670e-01f, -1.2397e-01f, -1.6266e-01f,
833
+ 1.4575e-01f, -5.7771e-02f, 2.7619e-02f,
834
+ 2.2757e-02f, -4.8910e-01f, -2.6201e-01f,
835
+ 3.6513e-02f, -2.0704e-01f, -1.3225e-01f,
836
+ -6.7533e-02f, 1.1289e-02f, 7.1316e-02f,
837
+ -7.6847e-02f, 6.8128e-02f, 7.4717e-02f,
838
+ 1.1269e-01f, 2.9978e-02f, 3.2132e-02f,
839
+ -5.4557e-02f, -4.4599e-02f, 4.1835e-02f,
840
+ 5.7964e-02f, -2.1246e-03f, 1.5007e-01f,
841
+ 1.8432e-01f, 1.1463e-01f, 2.2691e-01f,
842
+ 9.6166e-02f, 4.7887e-02f, -3.8399e-02f,
843
+ 5.8153e-02f, -2.0255e-02f, -1.1362e-01f,
844
+ 2.6402e-02f, 2.5562e-02f, 1.9096e-02f,
845
+ 1.1588e-01f, 1.4540e-01f, 1.1948e-01f,
846
+ 1.0360e-01f, 5.9083e-02f, 1.9263e-01f,
847
+ 1.6953e-01f, 2.7390e-02f, 9.7883e-02f,
848
+ 1.5059e-01f, 6.7593e-02f, -4.5843e-03f,
849
+ 8.7031e-02f, -2.0926e-03f, -6.3056e-02f,
850
+ -6.6960e-02f, -5.2056e-02f, -7.3570e-02f,
851
+ 1.4361e-02f, 1.1059e-01f, -4.9720e-02f,
852
+ 4.4270e-02f, 3.9995e-02f, 4.3101e-03f,
853
+ -1.1042e-01f, 4.5028e-02f, -8.9124e-02f,
854
+ -1.2906e-01f, -7.6972e-02f, -6.5449e-03f,
855
+ -1.9269e-01f, 2.8349e-01f, 1.1573e-01f,
856
+ -1.7983e-01f, 9.7615e-02f, 9.4003e-03f,
857
+ -4.7802e-02f, -1.5889e-01f, -1.2693e-01f,
858
+ 7.4717e-02f, 2.8655e-01f, -7.2637e-02f,
859
+ 1.5837e-02f, 8.7125e-02f, -1.2198e-01f,
860
+ -1.7754e-02f, -5.6443e-02f, -9.8661e-03f,
861
+ 6.3040e-02f, 2.0249e-02f, -3.5368e-02f,
862
+ 9.7756e-03f, 2.6760e-02f, -5.5172e-02f,
863
+ -1.0406e-02f, 4.8313e-02f, 2.4717e-02f,
864
+ -5.2851e-02f, 6.8496e-02f, -2.5933e-02f,
865
+ 4.5932e-02f, 5.9892e-02f, 1.9200e-02f,
866
+ -5.1316e-40f, -5.1811e-40f, -1.5144e-40f,
867
+ -6.7758e-38f, -5.4608e-40f, -3.9680e-40f,
868
+ -1.9155e-39f, 2.0423e-41f, 1.5256e-41f,
869
+ -2.5559e-08f, -3.2461e-08f, -2.6821e-08f,
870
+ -3.6885e-08f, -4.6896e-08f, -3.9086e-08f,
871
+ -3.4305e-08f, -4.4160e-08f, -3.7187e-08f,
872
+ -3.7416e-40f, 3.6550e-40f, 5.0727e-40f,
873
+ -1.6722e-40f, 3.9228e-40f, 5.4548e-40f,
874
+ -5.7512e-40f, -2.8156e-40f, 9.4571e-41f,
875
+ -4.7040e-40f, -1.6974e-40f, 6.3849e-40f,
876
+ -3.7322e-40f, 2.6014e-40f, 2.3080e-40f,
877
+ -2.8395e-40f, -3.7116e-40f, 4.4393e-40f,
878
+ 1.1597e-40f, 4.3291e-40f, 3.8219e-40f,
879
+ 3.3393e-40f, 3.1747e-40f, -1.8400e-36f,
880
+ -5.5215e-40f, 1.7648e-40f, -1.6540e-35f,
881
+ -3.0953e-40f, 5.3063e-40f, -1.6454e-40f,
882
+ 2.1341e-40f, 2.0790e-40f, -3.0226e-40f,
883
+ -2.6807e-40f, -1.6601e-40f, 5.1829e-40f,
884
+ -1.8897e-40f, -4.5956e-41f, 5.3784e-40f,
885
+ -2.5661e-40f, -2.1726e-40f, 1.2010e-40f,
886
+ 1.8263e-41f, 1.1214e-40f, -3.7693e-40f,
887
+ -4.2596e-40f, 1.8854e-40f, 5.5010e-40f,
888
+ -6.6262e-40f, -4.8808e-40f, 3.3123e-40f,
889
+ 5.9379e-41f, 2.3249e-40f, 4.4504e-40f,
890
+ -8.4836e-04f, -8.4397e-04f, -5.8640e-04f,
891
+ -8.3506e-04f, -8.0192e-04f, -5.3901e-04f,
892
+ -8.3539e-04f, -7.8069e-04f, -4.8720e-04f,
893
+ -3.4706e-04f, -4.4640e-04f, -5.2353e-04f,
894
+ -4.4518e-04f, -5.3374e-04f, -5.2734e-04f,
895
+ -5.8780e-04f, -5.8730e-04f, -5.4362e-04f,
896
+ -5.2452e-04f, -5.4578e-04f, -5.6266e-04f,
897
+ -4.2387e-04f, -4.4643e-04f, -4.8936e-04f,
898
+ -3.5880e-04f, -3.7886e-04f, -4.1998e-04f,
899
+ -2.4479e-04f, -4.0736e-04f, -3.1189e-04f,
900
+ -3.4922e-04f, -4.0173e-04f, -2.5042e-04f,
901
+ -5.7091e-04f, -5.2665e-04f, -2.3293e-04f,
902
+ -2.8505e-04f, 9.7283e-05f, 3.1209e-04f,
903
+ -2.7463e-04f, 1.8704e-04f, 4.4351e-04f,
904
+ -9.1436e-05f, 3.2602e-04f, 5.7573e-04f,
905
+ -4.0112e-04f, -4.2566e-04f, -2.4300e-04f,
906
+ -9.9362e-05f, -6.5499e-05f, 3.2872e-05f,
907
+ 1.1584e-04f, 2.3417e-04f, 3.4427e-04f,
908
+ -7.5767e-05f, 3.9768e-06f, 6.2201e-05f,
909
+ 2.3151e-05f, 2.5595e-04f, 3.4038e-04f,
910
+ -1.3871e-05f, 3.0295e-04f, 4.4170e-04f,
911
+ -1.7802e-04f, -4.5376e-04f, -5.1847e-04f,
912
+ -5.0687e-04f, -5.5837e-04f, -2.5917e-04f,
913
+ -5.3992e-04f, -7.1375e-04f, -4.8728e-04f,
914
+ -1.7543e-01f, -3.4151e-01f, -3.2619e-02f,
915
+ -1.9701e-02f, -1.5494e-01f, -1.6534e-01f,
916
+ 3.5632e-02f, -1.0897e-01f, -3.8379e-02f,
917
+ -6.1420e-02f, -1.0735e-01f, 1.4730e-01f,
918
+ 7.4386e-02f, -1.0487e-01f, 7.9646e-02f,
919
+ 1.7130e-02f, 4.4391e-02f, -5.1959e-03f,
920
+ 4.5682e-02f, -1.1543e-01f, 9.4035e-03f,
921
+ -3.4376e-01f, -1.1961e-01f, 1.0099e-01f,
922
+ 1.1335e-01f, 7.5840e-02f, 1.0675e-01f,
923
+ 4.9539e-02f, 8.7406e-02f, 4.4951e-02f,
924
+ 1.8111e-01f, 2.6406e-01f, -1.5924e-02f,
925
+ -1.1464e-01f, 8.4579e-04f, -6.6811e-02f,
926
+ -8.9635e-03f, 1.8236e-03f, 3.6561e-02f,
927
+ -7.0281e-02f, 2.9717e-01f, 3.1836e-02f,
928
+ -1.3647e-01f, -6.5627e-02f, 9.3063e-02f,
929
+ -2.1851e-01f, -6.0226e-02f, -1.0326e-01f,
930
+ 5.3441e-02f, 1.9103e-01f, -5.7999e-02f,
931
+ -3.3512e-02f, 1.5496e-01f, -1.1111e-01f,
932
+ 2.3256e-03f, -1.5004e-01f, -9.1248e-02f,
933
+ -9.7706e-02f, 1.9549e-01f, -1.5403e-01f,
934
+ -1.5327e-01f, 8.3335e-02f, 5.6111e-03f,
935
+ -1.5707e-01f, 8.0277e-03f, -7.3955e-02f,
936
+ -1.4111e-01f, -1.3548e-01f, -1.0563e-01f,
937
+ 2.3054e-01f, -2.1822e-02f, -6.6938e-03f,
938
+ -1.0259e-01f, 4.3577e-02f, -1.7630e-01f,
939
+ 1.6484e-01f, 4.2413e-01f, 6.9475e-02f,
940
+ -2.4705e-01f, 2.5757e-01f, -9.5611e-02f,
941
+ 1.0236e-01f, -3.4820e-02f, -6.8818e-03f,
942
+ -1.1434e-01f, -3.1800e-01f, 2.1337e-02f,
943
+ -1.9939e-01f, -2.6532e-01f, 7.3361e-02f,
944
+ 6.5939e-02f, 9.5812e-02f, -7.0156e-02f,
945
+ -1.6249e-02f, -1.5927e-02f, -1.1189e-01f,
946
+ -9.3936e-03f, -1.0933e-01f, -2.9399e-02f,
947
+ -2.8752e-02f, -4.5613e-02f, -1.2718e-02f,
948
+ 3.8781e-01f, 2.6776e-01f, -1.0373e-02f,
949
+ -2.3927e-02f, -6.4398e-02f, 9.9117e-02f,
950
+ -6.0732e-02f, -5.5917e-03f, 5.1716e-02f,
951
+ -1.4168e-01f, 1.7661e-01f, -5.5893e-02f,
952
+ -3.0419e-01f, -3.5537e-01f, 2.1978e-01f,
953
+ -1.8610e-01f, -5.7743e-03f, 3.2649e-02f,
954
+ 1.9975e-01f, 1.6508e-01f, 1.3808e-02f,
955
+ 1.0733e-01f, 1.4722e-01f, 5.8671e-02f,
956
+ 6.4940e-02f, 1.6114e-01f, 3.9697e-02f,
957
+ 1.1530e-01f, 2.4021e-01f, -2.1669e-01f,
958
+ 6.0220e-02f, 2.0257e-01f, -1.5227e-01f,
959
+ -6.1096e-02f, 6.6511e-02f, -1.3858e-01f,
960
+ -6.5275e-02f, 1.0891e-01f, 8.2048e-02f,
961
+ -6.7907e-02f, 2.2863e-02f, -1.0322e-01f,
962
+ 1.6542e-01f, -1.4436e-01f, 6.4125e-02f,
963
+ -1.0378e-01f, -3.2346e-01f, -1.5123e-02f,
964
+ 3.8758e-03f, 1.1006e-01f, -4.4325e-02f,
965
+ -1.0102e-01f, -3.7699e-02f, 9.2472e-02f,
966
+ -6.8972e-02f, -1.2308e-02f, 1.6478e-01f,
967
+ 3.4351e-02f, -1.7461e-02f, 1.0301e-01f,
968
+ -2.7125e-01f, -5.6730e-02f, -2.5989e-01f,
969
+ -3.0163e-01f, -1.4826e-01f, -3.4955e-01f,
970
+ -1.6259e-01f, -1.6708e-01f, -2.7964e-01f,
971
+ -6.7134e-02f, -2.2385e-01f, 2.1776e-01f,
972
+ -1.1351e-02f, -3.7861e-01f, 1.8687e-01f,
973
+ 4.0551e-02f, 8.1943e-02f, 1.0866e-01f,
974
+ 1.0273e-01f, 1.1844e-01f, -1.1852e-01f,
975
+ 2.6758e-02f, -8.5806e-02f, 5.9444e-02f,
976
+ -5.1627e-02f, 7.1636e-02f, 2.2841e-01f,
977
+ -3.7242e-03f, 2.9723e-01f, 1.1918e-01f,
978
+ 8.4994e-02f, -3.5747e-01f, 3.6148e-02f,
979
+ 9.9705e-02f, -1.3736e-01f, -6.0080e-02f,
980
+ 1.2370e-01f, 5.0668e-02f, -6.0246e-02f,
981
+ 6.0562e-02f, -3.5068e-01f, -3.2645e-01f,
982
+ 9.1020e-04f, 6.6203e-02f, -1.0770e-01f,
983
+ 1.9434e-02f, 3.0018e-01f, 2.8018e-01f,
984
+ 1.4021e-01f, 2.7481e-01f, 2.2868e-01f,
985
+ 4.8540e-02f, 1.7719e-01f, -4.5834e-02f,
986
+ -9.6349e-02f, -2.3008e-02f, -1.4497e-01f,
987
+ 4.3053e-02f, -1.0161e-01f, 2.8750e-02f,
988
+ -1.2594e-01f, -1.0388e-02f, -4.3966e-02f,
989
+ 7.5993e-02f, -7.1609e-02f, 1.4624e-02f,
990
+ 4.1110e-02f, 7.1258e-02f, -2.9109e-02f,
991
+ -5.8698e-03f, 1.2389e-01f, 4.7648e-02f,
992
+ -6.1585e-04f, -4.4556e-02f, -2.3373e-02f,
993
+ -4.4883e-02f, -7.7722e-02f, -7.3635e-02f,
994
+ -2.7750e-02f, -1.5117e-03f, -8.7368e-02f,
995
+ 2.5113e-02f, 7.7490e-02f, 2.9024e-02f,
996
+ 1.5426e-01f, 2.5472e-01f, 4.8057e-02f,
997
+ -1.1969e-01f, -1.1487e-01f, -1.1802e-01f,
998
+ -4.7392e-02f, -4.2226e-02f, 3.1968e-02f,
999
+ -2.6717e-01f, -5.0206e-02f, 8.1946e-04f,
1000
+ -4.0426e-02f, 1.4373e-01f, -3.3121e-03f,
1001
+ -4.5292e-02f, -2.4538e-02f, 1.0377e-01f,
1002
+ -1.7780e-02f, 2.0058e-01f, -2.4343e-02f,
1003
+ -1.1714e-02f, 1.5984e-01f, -1.2638e-01f,
1004
+ 6.4655e-02f, 3.7703e-02f, 3.7970e-02f,
1005
+ 9.1864e-03f, 1.1468e-01f, -6.2760e-04f,
1006
+ -1.4812e-01f, 6.5670e-03f, 1.0765e-01f,
1007
+ 1.5023e-01f, -7.0594e-02f, -1.3924e-01f,
1008
+ 3.6016e-02f, -3.9078e-02f, -3.8950e-02f,
1009
+ 1.8735e-02f, -1.5573e-01f, -1.2456e-01f
1010
+ }
1011
+ ,
1012
+ {
1013
+ 4.8634e-02f, -1.3617e-01f, 6.1231e-02f,
1014
+ -7.0235e-02f, -6.4110e-01f, 1.5985e-01f,
1015
+ 8.6151e-02f, 1.1847e-01f, 1.3819e-01f,
1016
+ -3.6017e-04f, -3.2273e-02f, -8.5485e-02f,
1017
+ -7.0804e-03f, 2.1751e-01f, 7.2575e-03f,
1018
+ -8.3606e-02f, -1.4885e-01f, -1.2702e-01f,
1019
+ 4.0848e-41f, 8.0934e-40f, -1.8889e-40f,
1020
+ -3.9103e-40f, -7.4709e-40f, 3.8377e-40f,
1021
+ -2.4159e-40f, -4.7610e-40f, 7.7359e-40f,
1022
+ -8.6217e-05f, -5.9763e-05f, -4.0558e-05f,
1023
+ -7.4966e-05f, -4.7074e-05f, -3.1656e-05f,
1024
+ -9.8390e-05f, -6.6833e-05f, -4.7669e-05f,
1025
+ 3.5375e-02f, 2.8660e-02f, 4.1277e-02f,
1026
+ 1.6289e-01f, -3.2199e-01f, -1.7845e-02f,
1027
+ 2.4659e-01f, -3.9618e-02f, 4.1065e-03f,
1028
+ 2.7267e-02f, 8.6819e-02f, 9.5070e-02f,
1029
+ -7.2700e-02f, -2.8826e-01f, 1.1750e-03f,
1030
+ 2.5259e-02f, 2.4681e-03f, 6.4737e-02f,
1031
+ 7.3023e-03f, 2.9631e-02f, 1.0820e-02f,
1032
+ -2.1400e-02f, 5.4244e-01f, 1.5639e-01f,
1033
+ -1.7561e-01f, 4.8947e-01f, -8.8305e-02f,
1034
+ 6.5073e-02f, 3.4922e-01f, 1.3483e-01f,
1035
+ 1.4506e-01f, -2.5472e-01f, -7.2894e-02f,
1036
+ 4.5945e-02f, 1.4040e-01f, 1.2148e-01f,
1037
+ -2.6932e-01f, -1.1518e-01f, -9.3158e-03f,
1038
+ -2.3961e-01f, -1.2479e-01f, -8.9796e-02f,
1039
+ 1.8688e-02f, -4.9267e-02f, 7.7189e-02f,
1040
+ -7.3691e-02f, 7.8186e-03f, 1.3761e-02f,
1041
+ -1.5689e-01f, 3.1138e-02f, 3.9231e-02f,
1042
+ -4.3607e-03f, 2.0813e-01f, 5.5635e-02f,
1043
+ -6.7000e-41f, 9.8995e-41f, 3.0043e-40f,
1044
+ 6.7190e-40f, 4.0827e-40f, 7.6057e-40f,
1045
+ 4.2208e-40f, 8.1141e-40f, -3.3569e-40f,
1046
+ 1.0179e-03f, 5.1543e-04f, 3.8076e-04f,
1047
+ 7.3507e-04f, 4.5432e-04f, 3.7410e-04f,
1048
+ 9.3014e-04f, 6.7365e-04f, 6.0051e-04f,
1049
+ -5.1998e-02f, 6.5768e-02f, 3.1603e-02f,
1050
+ -3.0198e-02f, -3.1692e-02f, -6.9299e-02f,
1051
+ 1.7672e-02f, 2.3766e-01f, 5.7877e-02f,
1052
+ -5.7944e-02f, 1.2624e-01f, -1.4396e-01f,
1053
+ -4.1542e-02f, 6.5110e-01f, 1.0942e-01f,
1054
+ -1.3133e-01f, 5.0538e-02f, -2.7371e-02f,
1055
+ -3.7515e-02f, 2.8703e-02f, 1.2382e-03f,
1056
+ 3.8542e-01f, -2.2754e-02f, 3.4459e-02f,
1057
+ 3.0545e-01f, -5.3817e-01f, -2.1389e-03f,
1058
+ 1.3888e-02f, -2.2775e-01f, -6.3692e-02f,
1059
+ -1.8430e-01f, 5.8452e-02f, 4.5764e-02f,
1060
+ -8.5045e-02f, -1.7060e-01f, -1.8565e-02f,
1061
+ -2.0384e-02f, -3.3018e-02f, -5.1135e-02f,
1062
+ -4.5789e-02f, -1.8105e-01f, 3.5419e-02f,
1063
+ -5.0081e-02f, 8.7719e-02f, 1.0373e-01f,
1064
+ -1.0033e-02f, 7.0530e-02f, -7.8012e-03f,
1065
+ 8.4042e-02f, 1.1982e-01f, -9.6046e-02f,
1066
+ -6.4009e-02f, -1.0711e-01f, -1.3523e-01f,
1067
+ 1.8868e-41f, -7.0039e-40f, -7.2568e-40f,
1068
+ 1.7408e-40f, -7.8143e-40f, -6.8130e-40f,
1069
+ -6.3142e-40f, -6.2560e-40f, -7.4238e-40f,
1070
+ 2.6297e-04f, 7.0014e-05f, -4.0981e-04f,
1071
+ 2.6263e-04f, 4.2811e-05f, -4.9950e-04f,
1072
+ 3.9795e-04f, 1.2615e-04f, -4.7660e-04f,
1073
+ 7.5933e-02f, 2.6295e-02f, 2.7984e-02f,
1074
+ -5.5914e-03f, -8.7981e-02f, -9.2618e-02f,
1075
+ 4.2725e-02f, -3.1210e-01f, 1.3412e-01f,
1076
+ 5.2683e-02f, 3.9891e-01f, 2.9150e-02f,
1077
+ -6.6090e-02f, 2.9455e-01f, -1.9710e-01f,
1078
+ 1.4546e-02f, -2.5572e-02f, 8.1125e-02f,
1079
+ 1.2271e-01f, 1.6097e-01f, 4.5644e-02f,
1080
+ 3.6101e-02f, -1.7174e-02f, 6.6110e-02f,
1081
+ 1.5078e-01f, 4.5180e-01f, 7.7154e-02f,
1082
+ -5.9725e-02f, 1.0185e-01f, 1.1363e-03f,
1083
+ 6.7791e-02f, 1.7696e-02f, 5.2638e-02f,
1084
+ 3.3051e-02f, -8.4049e-02f, 1.4380e-01f,
1085
+ 1.8744e-02f, -2.0940e-01f, -2.1424e-01f,
1086
+ -2.1329e-01f, -1.3154e-01f, -3.2572e-01f,
1087
+ 1.1292e-01f, 1.2361e-02f, -1.5506e-01f,
1088
+ -1.0362e-02f, 1.9955e-02f, 4.2639e-02f,
1089
+ -2.1952e-02f, -2.4682e-02f, -2.4453e-02f,
1090
+ -2.5606e-02f, -3.3580e-02f, -3.6340e-02f,
1091
+ -5.0830e-40f, 6.3797e-40f, -5.2775e-40f,
1092
+ -7.7988e-40f, -7.4579e-40f, -5.1901e-40f,
1093
+ -3.8275e-41f, -5.7607e-40f, -1.3656e-40f,
1094
+ 2.7164e-04f, 5.9977e-04f, 8.6886e-04f,
1095
+ 3.0116e-04f, 7.0106e-04f, 1.0248e-03f,
1096
+ 2.9177e-04f, 6.4748e-04f, 9.4825e-04f,
1097
+ 6.6310e-02f, 1.5240e-02f, -5.3044e-02f,
1098
+ 1.2545e-01f, 5.0582e-02f, 2.7358e-02f,
1099
+ 1.9338e-01f, 1.1377e-01f, 4.6110e-02f,
1100
+ -3.1997e-02f, 1.5171e-02f, -4.9372e-02f,
1101
+ 5.4615e-04f, 1.7262e-01f, -2.2081e-01f,
1102
+ 8.4871e-02f, 1.7824e-02f, -3.6429e-02f,
1103
+ 4.2821e-02f, -1.0055e-01f, 4.8927e-02f,
1104
+ 1.2524e-01f, 5.8859e-02f, -2.0980e-02f,
1105
+ 2.2897e-01f, 1.7594e-01f, 3.4239e-02f,
1106
+ 1.0915e-01f, 1.2088e-01f, 1.0151e-01f,
1107
+ 6.8449e-03f, -1.5546e-01f, 1.2024e-01f,
1108
+ 4.9036e-02f, -1.2245e-01f, 4.6713e-02f,
1109
+ 7.5083e-03f, -4.8084e-02f, 9.7731e-03f,
1110
+ 4.8779e-02f, 3.1848e-02f, -9.3517e-02f,
1111
+ 6.4595e-02f, 3.9337e-02f, -7.2343e-02f,
1112
+ 3.9519e-02f, 4.1867e-02f, -5.0485e-02f,
1113
+ 2.5257e-02f, 1.4071e-01f, 1.3606e-01f,
1114
+ 1.7481e-01f, 2.0210e-01f, 1.7241e-01f,
1115
+ -7.6295e-40f, -7.8460e-40f, -4.1806e-41f,
1116
+ -7.9994e-40f, -7.3271e-40f, -6.2665e-40f,
1117
+ -7.9602e-40f, -7.0226e-40f, -7.4131e-40f,
1118
+ -4.5544e-04f, -5.2379e-04f, -7.0755e-04f,
1119
+ -3.3807e-04f, -3.8123e-04f, -5.3222e-04f,
1120
+ -3.1771e-04f, -3.4586e-04f, -4.8784e-04f,
1121
+ -3.5257e-02f, -1.1866e-02f, 1.9717e-02f,
1122
+ -6.0777e-02f, -7.3127e-03f, -3.2825e-02f,
1123
+ -1.4952e-01f, 3.2117e-01f, -6.3786e-02f,
1124
+ -1.0255e-02f, 1.2961e-01f, -8.6823e-02f,
1125
+ 1.6994e-01f, 4.7491e-01f, 2.7135e-01f,
1126
+ 2.8538e-03f, 1.5572e-01f, -3.3736e-02f,
1127
+ 8.5996e-02f, -1.0176e-02f, 2.6629e-02f,
1128
+ 7.3362e-02f, -7.7525e-03f, 5.6261e-02f,
1129
+ 1.0819e-01f, -2.5863e-01f, -5.7146e-03f,
1130
+ -7.1781e-02f, 2.8376e-03f, 7.8298e-02f,
1131
+ 1.3183e-01f, 2.7149e-02f, -9.9786e-02f,
1132
+ 9.0491e-02f, 8.7938e-02f, -2.1882e-02f,
1133
+ 4.1396e-03f, -4.5816e-02f, -7.8892e-02f,
1134
+ -6.3855e-03f, 1.7502e-01f, 1.2053e-01f,
1135
+ 1.2492e-01f, 6.1258e-02f, -4.0516e-02f,
1136
+ -4.5409e-02f, -4.5877e-02f, -7.6414e-02f,
1137
+ -1.0573e-02f, -1.2517e-01f, -4.3991e-02f,
1138
+ -2.6447e-02f, -9.5478e-02f, -2.4735e-02f,
1139
+ -4.6548e-41f, -1.6443e-40f, -3.1221e-40f,
1140
+ -3.2675e-40f, -2.7265e-40f, -3.1190e-40f,
1141
+ -2.2065e-40f, -2.5407e-40f, -6.9511e-40f,
1142
+ -1.2727e-04f, -2.6585e-04f, -3.5516e-04f,
1143
+ 3.4272e-05f, -1.6810e-04f, -3.1677e-04f,
1144
+ -5.5355e-05f, -2.9924e-04f, -4.3692e-04f,
1145
+ -5.6428e-02f, 1.0771e-01f, 1.0185e-01f,
1146
+ 2.2948e-01f, -7.8744e-02f, 6.0768e-04f,
1147
+ -2.2355e-03f, -2.0128e-03f, -5.7317e-03f,
1148
+ -7.1232e-03f, 1.0297e-01f, 1.6872e-01f,
1149
+ 1.9194e-01f, -1.1578e-01f, 1.0732e-01f,
1150
+ -8.6952e-02f, 3.2901e-02f, -6.6658e-03f,
1151
+ 7.3979e-02f, 8.3875e-02f, -7.6372e-03f,
1152
+ 1.9577e-01f, 2.7391e-01f, 4.5275e-02f,
1153
+ 1.5610e-01f, 2.3802e-01f, 1.6555e-02f,
1154
+ 1.3814e-01f, 1.2870e-01f, 9.1626e-02f,
1155
+ -4.6890e-02f, -8.8734e-02f, 7.8866e-02f,
1156
+ 1.0027e-01f, 2.2139e-01f, 1.0050e-01f,
1157
+ -6.5845e-02f, -1.0990e-01f, -6.9896e-02f,
1158
+ 4.1687e-02f, 3.0631e-02f, -8.8441e-02f,
1159
+ -1.1868e-01f, 1.0836e-02f, 2.5873e-02f,
1160
+ -1.7114e-02f, 7.6295e-02f, 1.5439e-02f,
1161
+ -2.4271e-02f, 5.8538e-02f, 9.8190e-02f,
1162
+ 4.9742e-02f, 8.7807e-02f, 6.5871e-02f,
1163
+ -7.2669e-40f, -7.5936e-41f, -7.4975e-40f,
1164
+ -1.6984e-42f, -1.7334e-40f, -8.4954e-41f,
1165
+ -2.1556e-41f, -1.5374e-40f, -1.5515e-40f,
1166
+ -6.2626e-04f, -7.2727e-04f, -8.1665e-04f,
1167
+ -5.6584e-04f, -6.1190e-04f, -6.9584e-04f,
1168
+ -5.6278e-04f, -5.8554e-04f, -6.3554e-04f,
1169
+ 8.1550e-02f, -4.1817e-03f, 1.2301e-02f,
1170
+ -4.5800e-02f, 4.6708e-02f, -8.7972e-02f,
1171
+ -2.9880e-01f, 2.6456e-01f, 3.9363e-03f,
1172
+ -3.0939e-02f, -1.9921e-01f, -3.8689e-03f,
1173
+ -8.6803e-02f, 3.4857e-01f, -1.0201e-01f,
1174
+ 2.1597e-02f, 1.4380e-02f, 4.3448e-02f,
1175
+ 7.1195e-02f, 1.4980e-01f, 3.8079e-02f,
1176
+ -1.2678e-01f, -8.1274e-02f, -4.3445e-02f,
1177
+ 5.2482e-02f, -1.8763e-01f, 1.1557e-01f,
1178
+ -9.4614e-02f, 5.4415e-02f, -3.1485e-02f,
1179
+ -3.6451e-02f, 1.4379e-01f, 5.2291e-02f,
1180
+ -9.2069e-02f, 9.5675e-02f, -5.8433e-02f,
1181
+ 7.5768e-03f, -7.1280e-02f, -1.4576e-01f,
1182
+ -1.4671e-01f, -1.2446e-01f, -1.5207e-01f,
1183
+ -5.4368e-02f, 3.8303e-02f, -8.1794e-02f,
1184
+ 2.0492e-02f, 4.0910e-02f, 1.1379e-02f,
1185
+ 3.1582e-02f, 3.6039e-02f, -4.4040e-03f,
1186
+ 1.7540e-02f, 1.4097e-04f, -6.4367e-02f,
1187
+ -7.9553e-40f, -5.3941e-40f, -7.1912e-40f,
1188
+ -5.8099e-40f, -6.8315e-40f, -6.6012e-40f,
1189
+ -7.6242e-40f, -5.4784e-40f, -7.0267e-40f,
1190
+ -2.9197e-04f, -2.1994e-04f, -1.9501e-04f,
1191
+ -2.6516e-05f, -1.2642e-05f, -8.4345e-05f,
1192
+ 1.6763e-04f, 1.1268e-04f, -5.4516e-05f,
1193
+ -3.8007e-03f, -6.8765e-02f, -9.5716e-02f,
1194
+ 6.3091e-02f, -8.1971e-02f, -9.2895e-02f,
1195
+ -6.8353e-03f, 7.3639e-02f, 1.3505e-01f,
1196
+ 9.0083e-02f, 2.4352e-01f, 3.9708e-02f,
1197
+ -5.4051e-02f, -6.8748e-02f, -1.8937e-01f,
1198
+ -1.9808e-03f, -7.1337e-02f, -2.8316e-02f,
1199
+ 8.1504e-02f, 8.3226e-03f, 6.9013e-03f,
1200
+ 9.4393e-02f, 5.9322e-02f, 5.5023e-02f,
1201
+ 1.0236e-01f, -4.0205e-02f, 3.5172e-02f,
1202
+ 6.5381e-02f, 4.9075e-02f, -5.3931e-02f,
1203
+ 4.3961e-02f, 9.0223e-03f, -4.1678e-02f,
1204
+ -6.4262e-02f, -5.0304e-02f, -9.3597e-02f
1205
+ }
1206
+ ,
1207
+ {
1208
+ 3.8496e-01f, 1.4287e-01f, 3.4530e-02f,
1209
+ -5.5398e-01f, -6.0381e-02f, 1.2078e-02f,
1210
+ 7.9983e-02f, 2.1478e-01f, -5.7915e-02f,
1211
+ -1.4020e-01f, -2.6914e-02f, 1.5915e-02f,
1212
+ 1.2371e-01f, 2.5496e-01f, -2.9867e-02f,
1213
+ 1.3269e-02f, -9.9596e-02f, -2.3173e-01f,
1214
+ 5.1471e-02f, -4.5507e-01f, -7.7620e-02f,
1215
+ -5.1328e-02f, -1.9808e-02f, -4.7051e-02f,
1216
+ 3.0573e-02f, 7.8762e-02f, -7.2627e-02f,
1217
+ 6.8690e-02f, -4.0125e-02f, 5.6657e-02f,
1218
+ 8.0208e-02f, -2.0075e-02f, 1.4019e-01f,
1219
+ -5.7959e-02f, -7.3152e-02f, 2.0202e-02f,
1220
+ -8.8702e-02f, -1.9911e-01f, -1.5570e-01f,
1221
+ 2.8401e-02f, 5.8802e-02f, 1.3050e-01f,
1222
+ 2.1905e-02f, -3.4298e-02f, 4.0447e-02f,
1223
+ 1.0184e-01f, -9.0101e-02f, -9.2770e-02f,
1224
+ 1.1713e-02f, -3.2514e-01f, 1.9393e-01f,
1225
+ -9.4227e-02f, 2.7053e-01f, -9.7233e-02f,
1226
+ -1.0478e-01f, 6.0652e-02f, 8.3399e-02f,
1227
+ 1.1104e-01f, 2.9008e-01f, 4.9208e-02f,
1228
+ -1.5414e-02f, 3.1718e-02f, -7.9083e-02f,
1229
+ -5.2358e-03f, 9.0101e-02f, 5.2973e-02f,
1230
+ 5.5527e-02f, -1.6599e-02f, -8.5167e-02f,
1231
+ -5.1018e-02f, 7.2243e-03f, -9.5684e-02f,
1232
+ -5.0608e-02f, -6.7864e-02f, -8.9496e-02f,
1233
+ -2.4348e-01f, 2.7477e-01f, -1.7588e-01f,
1234
+ 1.3927e-01f, 5.5502e-02f, -1.3370e-02f,
1235
+ -4.3509e-02f, -2.1511e-01f, -5.9070e-02f,
1236
+ 1.0293e-01f, 4.2678e-01f, -8.7527e-02f,
1237
+ -6.8546e-02f, -5.6296e-02f, -8.7962e-02f,
1238
+ -8.6130e-02f, 9.2069e-02f, 7.2303e-02f,
1239
+ 2.4365e-02f, 2.1988e-01f, -7.9408e-03f,
1240
+ -3.0063e-02f, 1.1554e-01f, -5.0311e-02f,
1241
+ 1.0605e-02f, 5.4598e-02f, 1.3826e-02f,
1242
+ -1.4342e-02f, 1.5353e-01f, -5.3974e-03f,
1243
+ 1.5583e-01f, -6.0889e-02f, -1.5772e-02f,
1244
+ -2.5956e-02f, -3.5285e-01f, -2.0338e-01f,
1245
+ 2.6011e-01f, 2.2737e-01f, -1.4693e-01f,
1246
+ -7.7964e-02f, 1.0053e-01f, -5.4278e-02f,
1247
+ -3.0668e-02f, 3.4556e-02f, -3.4321e-02f,
1248
+ 7.8695e-02f, -2.2357e-01f, 9.5733e-02f,
1249
+ 1.7483e-01f, -1.5153e-01f, -1.8262e-03f,
1250
+ 4.7605e-02f, -2.2834e-01f, 4.6383e-02f,
1251
+ 1.5701e-01f, 3.2264e-01f, 1.0334e-02f,
1252
+ 6.3351e-02f, 1.1340e-01f, 8.3478e-02f,
1253
+ 6.4196e-02f, 3.3460e-02f, 8.8473e-02f,
1254
+ 5.4663e-02f, -1.7665e-03f, -4.1935e-02f,
1255
+ -6.1346e-03f, -5.4463e-02f, -6.2960e-02f,
1256
+ 2.8159e-02f, 2.9903e-02f, 9.2429e-03f,
1257
+ -3.0041e-02f, -9.7783e-02f, -4.9500e-02f,
1258
+ 9.5350e-02f, -7.9143e-02f, -1.3244e-01f,
1259
+ -6.5129e-02f, 1.4568e-01f, 6.6843e-02f,
1260
+ 1.5241e-01f, -7.8736e-02f, 1.0721e-01f,
1261
+ -5.9015e-02f, 1.5320e-01f, 3.0796e-01f,
1262
+ -5.4266e-03f, -6.0804e-02f, 3.7326e-02f,
1263
+ 7.4844e-02f, 4.8340e-02f, 1.5251e-01f,
1264
+ 3.8158e-02f, 1.2087e-01f, -8.9003e-02f,
1265
+ -5.8369e-02f, -7.3813e-02f, 1.2240e-02f,
1266
+ -4.5106e-03f, 7.4580e-02f, 1.2042e-01f,
1267
+ 4.1959e-02f, 1.4529e-01f, 5.3636e-03f,
1268
+ -4.9708e-03f, -1.0775e-02f, -5.9374e-02f,
1269
+ 1.5358e-02f, 1.7277e-02f, -1.5412e-01f,
1270
+ 8.1647e-02f, 3.3503e-02f, -8.1934e-02f,
1271
+ -1.5807e-02f, -1.0001e-02f, -1.0059e-02f,
1272
+ -9.0493e-03f, -7.8954e-02f, 4.3891e-02f,
1273
+ -9.3815e-03f, 3.2241e-02f, 4.7962e-02f,
1274
+ -7.2252e-03f, 7.9324e-02f, 2.0662e-02f,
1275
+ -5.7710e-02f, -5.1142e-02f, -1.4296e-01f,
1276
+ 2.1501e-02f, -1.9518e-02f, -2.7658e-02f,
1277
+ 1.4983e-01f, 8.5447e-02f, 7.2092e-04f,
1278
+ 1.1275e-01f, 6.1131e-02f, 5.7955e-02f,
1279
+ 1.5624e-02f, 2.7225e-01f, 1.1716e-01f,
1280
+ -1.6322e-04f, -1.3368e-04f, -1.5575e-04f,
1281
+ -1.0525e-04f, -1.0765e-04f, -1.5306e-04f,
1282
+ -8.9692e-05f, -1.0857e-04f, -1.7316e-04f,
1283
+ -1.8015e-03f, -1.3733e-03f, -3.9154e-04f,
1284
+ -1.8453e-03f, -1.4238e-03f, -4.4163e-04f,
1285
+ -1.5511e-03f, -1.1131e-03f, -2.0087e-04f,
1286
+ -2.4082e-03f, -2.2576e-03f, -1.9231e-03f,
1287
+ -2.4913e-03f, -2.4136e-03f, -2.1678e-03f,
1288
+ -2.5057e-03f, -2.4650e-03f, -2.2732e-03f,
1289
+ -2.3901e-05f, -1.5870e-05f, -5.8255e-06f,
1290
+ -1.5163e-05f, -1.2370e-05f, -6.0712e-06f,
1291
+ -1.3098e-05f, -1.1132e-05f, -5.7866e-06f,
1292
+ -5.9760e-03f, -5.9998e-03f, -6.0295e-03f,
1293
+ -5.9962e-03f, -6.0100e-03f, -6.0277e-03f,
1294
+ -6.0003e-03f, -6.0059e-03f, -6.0148e-03f,
1295
+ -3.2764e-05f, -2.9574e-05f, -2.8001e-05f,
1296
+ -1.0846e-05f, -1.1569e-05f, -1.4282e-05f,
1297
+ -1.6255e-06f, -2.5666e-06f, -4.7808e-06f,
1298
+ -5.1999e-03f, -5.2334e-03f, -5.2847e-03f,
1299
+ -5.2057e-03f, -5.2283e-03f, -5.2713e-03f,
1300
+ -5.2195e-03f, -5.2321e-03f, -5.2633e-03f,
1301
+ -3.0782e-06f, -9.2118e-06f, -1.6177e-05f,
1302
+ -1.6382e-06f, -6.9559e-06f, -1.4245e-05f,
1303
+ -1.1471e-06f, -6.5984e-06f, -1.4903e-05f,
1304
+ 7.7574e-02f, -1.2866e-02f, 4.1348e-03f,
1305
+ -6.7298e-02f, -1.3691e-01f, 6.4079e-02f,
1306
+ 3.7962e-02f, 8.7737e-02f, -4.1046e-02f,
1307
+ -2.8471e-02f, 1.7647e-01f, 6.4232e-02f,
1308
+ 1.2316e-01f, 3.6800e-01f, -1.5740e-01f,
1309
+ -6.0839e-02f, 1.5449e-02f, -1.0761e-01f,
1310
+ -6.6869e-02f, -1.2867e-01f, -4.0195e-02f,
1311
+ -4.9651e-02f, -5.5500e-02f, -2.5879e-02f,
1312
+ 2.0179e-02f, 6.8467e-02f, 2.6575e-02f,
1313
+ -6.7728e-04f, -7.6269e-02f, 2.3470e-02f,
1314
+ 7.1869e-02f, -1.1855e-01f, -2.1067e-02f,
1315
+ 1.3263e-01f, -3.2957e-02f, -3.4365e-03f,
1316
+ 8.1936e-02f, 1.3073e-01f, 1.1477e-01f,
1317
+ 1.2429e-01f, 1.6129e-01f, 1.6251e-01f,
1318
+ 1.5476e-02f, 3.2862e-02f, 2.1999e-02f,
1319
+ -2.9189e-02f, -3.3615e-02f, 5.5616e-04f,
1320
+ -2.4059e-02f, -9.6181e-03f, -4.1175e-02f,
1321
+ -6.3680e-04f, -9.6559e-02f, -9.1448e-02f,
1322
+ 3.0238e-02f, 1.2534e-01f, 1.5256e-02f,
1323
+ -4.2118e-02f, 1.5723e-01f, 2.6929e-03f,
1324
+ 1.9873e-02f, 5.3050e-02f, -1.0153e-03f,
1325
+ 2.0634e-02f, 9.2825e-03f, -6.8027e-03f,
1326
+ 3.1335e-03f, -7.7443e-03f, -1.8307e-02f,
1327
+ 7.9974e-03f, -1.0283e-03f, -6.2520e-03f,
1328
+ 4.5050e-02f, 9.9504e-02f, -1.3404e-01f,
1329
+ -6.7271e-01f, -5.7290e-02f, 2.6919e-02f,
1330
+ 2.3673e-01f, 2.4688e-02f, -2.0227e-02f,
1331
+ 5.1389e-02f, -3.9810e-02f, -8.9700e-02f,
1332
+ 2.8445e-02f, 3.9136e-01f, -1.1508e-01f,
1333
+ -1.0449e-01f, -6.2005e-02f, 6.5721e-02f,
1334
+ -1.9123e-01f, -4.2613e-02f, 3.5371e-02f,
1335
+ 1.9207e-01f, 8.7916e-02f, 4.8089e-02f,
1336
+ -5.7912e-02f, 1.0014e-01f, -9.4659e-02f,
1337
+ 1.1240e-02f, -6.2254e-03f, 1.3399e-01f,
1338
+ 1.6483e-01f, -3.5079e-01f, 1.1612e-02f,
1339
+ 2.9215e-01f, 5.6875e-02f, 6.9505e-02f,
1340
+ 1.3721e-02f, 1.2607e-01f, 2.6426e-02f,
1341
+ -2.0529e-01f, 2.1768e-01f, 2.1232e-01f,
1342
+ -6.3574e-02f, 2.3504e-02f, -1.0811e-01f,
1343
+ -1.3470e-02f, -3.6446e-02f, -5.4379e-02f,
1344
+ -1.3257e-01f, -8.3412e-02f, 3.7745e-02f,
1345
+ 5.8778e-02f, -2.6060e-01f, 3.8262e-02f,
1346
+ -4.3689e-03f, -6.6703e-02f, -2.2025e-01f,
1347
+ -9.0961e-02f, 1.3855e-01f, 3.4573e-04f,
1348
+ -2.9613e-01f, -3.6138e-02f, -1.3827e-01f,
1349
+ 4.5896e-02f, -5.3871e-02f, -1.0037e-01f,
1350
+ 1.8457e-01f, 1.0338e-01f, -5.7306e-02f,
1351
+ 5.5510e-02f, -9.4938e-02f, -5.6527e-05f,
1352
+ 1.6372e-01f, -3.3854e-02f, 5.6332e-02f,
1353
+ -4.0251e-01f, -5.9428e-02f, -9.1470e-02f,
1354
+ -1.5921e-02f, -5.7948e-02f, 8.1682e-03f,
1355
+ -3.7833e-03f, 1.6293e-01f, 5.3784e-02f,
1356
+ 1.1053e-01f, -1.3867e-01f, 2.6772e-02f,
1357
+ -1.3133e-02f, 3.7614e-01f, 3.6361e-03f,
1358
+ -1.4205e-01f, 3.1312e-02f, -9.9928e-02f,
1359
+ -1.5755e-01f, 4.2016e-01f, 9.4065e-02f,
1360
+ 2.7536e-02f, 1.2620e-01f, -1.4894e-01f,
1361
+ -4.2137e-02f, -9.8700e-02f, -1.7479e-01f,
1362
+ 4.5836e-02f, 5.3893e-02f, -1.0138e-01f,
1363
+ 8.3609e-02f, 2.1849e-02f, -1.0648e-01f,
1364
+ 7.4801e-02f, -1.2671e-01f, -1.5007e-02f,
1365
+ 2.7440e-01f, -3.1351e-01f, 6.5787e-02f,
1366
+ -6.7820e-02f, 1.6312e-01f, -1.3254e-02f,
1367
+ -2.5770e-02f, -2.0041e-02f, 5.8243e-02f,
1368
+ 1.6055e-02f, 1.1971e-02f, -4.6112e-02f,
1369
+ -1.6276e-01f, -1.5313e-02f, -7.9826e-03f,
1370
+ 9.1668e-02f, 9.7722e-02f, 1.3754e-01f,
1371
+ -7.4817e-02f, -4.1923e-01f, -1.2337e-01f,
1372
+ 1.3472e-01f, -4.0745e-02f, -5.4055e-02f,
1373
+ -1.2943e-02f, 4.8796e-02f, 4.2007e-02f,
1374
+ 9.4668e-02f, 8.6149e-02f, 1.2362e-01f,
1375
+ 7.0637e-02f, 2.3565e-01f, 1.4582e-01f,
1376
+ 5.6904e-02f, -8.2166e-02f, 1.0563e-01f,
1377
+ 9.3969e-02f, -2.2909e-01f, 4.6537e-02f,
1378
+ 6.5257e-02f, 1.4804e-01f, -6.2092e-02f,
1379
+ -1.5699e-02f, -1.5303e-02f, 1.6671e-01f,
1380
+ -6.1947e-03f, 2.5749e-01f, 1.5257e-01f,
1381
+ 3.2908e-02f, -5.9907e-02f, 1.1502e-01f,
1382
+ 7.5876e-02f, -2.6699e-01f, -1.5891e-02f,
1383
+ -8.0426e-02f, 1.3406e-01f, -1.9881e-02f,
1384
+ 3.5472e-02f, -8.2140e-02f, 1.6509e-02f,
1385
+ 8.3390e-03f, -7.8291e-02f, -2.0754e-01f,
1386
+ 3.4490e-02f, 2.7913e-01f, 5.9566e-02f,
1387
+ 2.5288e-02f, 1.1725e-01f, -1.0356e-01f,
1388
+ -5.0955e-02f, 9.2093e-02f, -5.8477e-02f,
1389
+ 4.4325e-02f, 3.2973e-02f, -1.9477e-01f,
1390
+ 3.9582e-02f, -8.6877e-02f, -1.1753e-01f,
1391
+ 3.0401e-02f, -2.8757e-02f, -2.5563e-02f,
1392
+ 5.0741e-02f, -3.5056e-01f, -2.5584e-01f,
1393
+ 9.1709e-02f, -4.0932e-02f, 2.3812e-01f,
1394
+ 5.0945e-02f, 4.9246e-02f, 1.2738e-01f,
1395
+ 5.1440e-03f, 1.5703e-01f, 5.5743e-02f,
1396
+ -3.9492e-02f, 1.2114e-01f, 2.0531e-02f,
1397
+ 8.0800e-02f, 2.6680e-03f, -1.6660e-02f,
1398
+ 1.0684e-01f, 1.2308e-01f, 1.7882e-02f,
1399
+ 1.8280e-02f, 1.0972e-01f, -5.2912e-03f
1400
+ }
1401
+ ,
1402
+ {
1403
+ -1.3812e-02f, -4.6271e-02f, 7.3790e-02f,
1404
+ -6.3801e-02f, -3.6817e-01f, -1.7880e-02f,
1405
+ 5.2986e-02f, 1.8626e-01f, 1.5645e-03f,
1406
+ 1.2367e-02f, -6.2923e-02f, 3.0844e-02f,
1407
+ 9.3623e-02f, 1.9527e-01f, -2.6366e-02f,
1408
+ -2.0837e-02f, -3.4424e-02f, 4.0256e-02f,
1409
+ 4.1482e-02f, 6.1795e-02f, -1.1293e-02f,
1410
+ -8.9944e-02f, -1.3608e-01f, 1.8067e-02f,
1411
+ 3.6974e-02f, 5.2530e-03f, -2.7474e-02f,
1412
+ 1.1872e-05f, 1.9000e-05f, 2.0729e-05f,
1413
+ 1.0139e-05f, 1.6832e-05f, 1.9392e-05f,
1414
+ 6.5445e-06f, 1.0973e-05f, 1.3521e-05f,
1415
+ -5.3340e-02f, 1.3108e-03f, 4.0436e-02f,
1416
+ 5.7068e-02f, -2.7923e-02f, -5.4781e-02f,
1417
+ -2.9293e-02f, 2.7145e-02f, 2.7340e-02f,
1418
+ 5.3520e-03f, 1.8766e-02f, 4.0297e-01f,
1419
+ 2.6473e-02f, -3.4675e-02f, -1.1783e-01f,
1420
+ -2.5038e-02f, -1.7702e-02f, -3.4908e-02f,
1421
+ 1.4847e-02f, 2.3237e-01f, -6.3687e-02f,
1422
+ -6.5672e-02f, -2.1888e-01f, -1.7233e-02f,
1423
+ 4.0608e-02f, -6.9580e-02f, -2.2200e-02f,
1424
+ 5.8163e-02f, 1.3695e-01f, -2.6257e-02f,
1425
+ -1.3328e-01f, -3.5730e-01f, 2.4507e-02f,
1426
+ -4.5611e-03f, 2.0424e-01f, -3.9821e-02f,
1427
+ 5.5300e-02f, -1.6006e-01f, 1.1717e-01f,
1428
+ -2.6107e-02f, -8.6995e-02f, 8.3720e-02f,
1429
+ 7.5494e-02f, 3.2189e-01f, 1.5527e-01f,
1430
+ -6.6869e-02f, 1.4469e-01f, 5.1805e-02f,
1431
+ 9.8760e-02f, -1.6759e-01f, -1.2350e-01f,
1432
+ 5.7005e-02f, 8.4904e-02f, 8.9713e-02f,
1433
+ -1.4263e-02f, 2.8914e-02f, 3.2239e-02f,
1434
+ -2.4871e-02f, 5.6014e-02f, -4.4469e-02f,
1435
+ 3.1209e-02f, 1.3677e-02f, -2.1052e-02f,
1436
+ -1.6548e-03f, -1.8796e-03f, -1.9883e-03f,
1437
+ -1.6186e-03f, -1.8494e-03f, -1.9670e-03f,
1438
+ -1.5841e-03f, -1.8173e-03f, -1.9345e-03f,
1439
+ 3.5726e-02f, 1.8013e-01f, 1.6913e-02f,
1440
+ -1.2168e-01f, -6.3848e-02f, 3.0555e-02f,
1441
+ 3.0269e-02f, -1.0260e-01f, -1.5259e-02f,
1442
+ -4.7375e-03f, 5.5115e-02f, 6.2642e-01f,
1443
+ 9.9776e-03f, -2.1988e-01f, -2.0984e-01f,
1444
+ 7.0470e-03f, 6.3178e-02f, -1.3607e-02f,
1445
+ 1.1918e-01f, -2.4081e-01f, 1.7889e-01f,
1446
+ -1.0514e-01f, 2.9220e-01f, -1.3263e-01f,
1447
+ 5.6091e-03f, -4.1623e-02f, 2.5589e-02f,
1448
+ -1.8496e-01f, 2.7698e-02f, -6.5768e-02f,
1449
+ 2.9677e-01f, 4.4163e-02f, 5.8530e-02f,
1450
+ -1.1010e-01f, -7.6787e-02f, 3.9844e-02f,
1451
+ 5.2113e-03f, -1.8202e-02f, 1.4129e-03f,
1452
+ -6.1402e-03f, -2.7222e-01f, 7.4690e-02f,
1453
+ 1.9131e-02f, 2.2753e-01f, 1.9587e-02f,
1454
+ -2.7391e-02f, 6.7917e-03f, 2.0496e-03f,
1455
+ 6.7333e-02f, 7.8262e-02f, 2.1110e-03f,
1456
+ -5.4519e-02f, 3.0763e-02f, 1.5628e-02f,
1457
+ 9.5055e-02f, 3.8855e-02f, 1.2446e-02f,
1458
+ -1.5152e-01f, 7.8124e-02f, -1.2616e-02f,
1459
+ 9.3100e-03f, -1.6528e-02f, -1.2873e-02f,
1460
+ -1.8377e-03f, -1.9231e-03f, -1.8930e-03f,
1461
+ -1.8058e-03f, -1.8841e-03f, -1.8678e-03f,
1462
+ -1.7387e-03f, -1.7966e-03f, -1.7781e-03f,
1463
+ -4.5122e-02f, 1.7027e-03f, -3.5534e-03f,
1464
+ 8.5222e-03f, 1.0130e-01f, 4.7893e-02f,
1465
+ 6.5574e-02f, 7.2150e-03f, -2.1820e-03f,
1466
+ -5.5105e-03f, -1.8990e-01f, 2.6527e-02f,
1467
+ 6.6140e-03f, 2.1537e-01f, -2.2183e-02f,
1468
+ -8.0628e-03f, 6.8398e-03f, 9.4474e-03f,
1469
+ 1.2239e-01f, -1.3337e-01f, 7.3391e-02f,
1470
+ -1.2205e-01f, 1.3145e-01f, -2.0063e-02f,
1471
+ 2.2168e-02f, 3.6097e-03f, 2.7146e-02f,
1472
+ 4.6717e-02f, 2.1122e-02f, 1.5491e-02f,
1473
+ -1.3077e-01f, 1.1635e-01f, 1.0849e-02f,
1474
+ 8.0113e-02f, -8.4028e-02f, 1.2863e-03f,
1475
+ -2.9796e-02f, -8.4537e-02f, -2.6766e-03f,
1476
+ -7.7771e-03f, -2.4274e-03f, 8.6274e-02f,
1477
+ -2.0354e-02f, 4.1245e-02f, 8.4227e-02f,
1478
+ 5.5894e-02f, 1.0706e-01f, 5.2965e-02f,
1479
+ -7.8731e-03f, 5.5825e-01f, 1.0373e-01f,
1480
+ -1.1975e-01f, -2.0071e-02f, -2.5286e-02f,
1481
+ -7.7477e-02f, 5.3589e-02f, -1.5710e-03f,
1482
+ -1.2753e-01f, 2.5166e-01f, 8.2205e-03f,
1483
+ -9.8349e-02f, -4.9539e-02f, -5.4941e-02f,
1484
+ -4.9916e-03f, -4.9986e-03f, -5.0660e-03f,
1485
+ -4.9770e-03f, -4.9840e-03f, -5.0543e-03f,
1486
+ -4.9997e-03f, -5.0114e-03f, -5.0809e-03f,
1487
+ 6.1819e-02f, 1.5061e-01f, 1.1984e-02f,
1488
+ 1.2905e-01f, 2.5921e-01f, 1.4768e-01f,
1489
+ 4.5548e-02f, 1.4902e-01f, -4.8961e-03f,
1490
+ -1.3605e-02f, 8.2896e-02f, -4.1931e-01f,
1491
+ -2.2657e-02f, 2.4768e-01f, 2.6528e-01f,
1492
+ -1.1566e-02f, -8.7819e-03f, 4.3618e-02f,
1493
+ -3.4332e-02f, -1.8392e-01f, 4.4471e-02f,
1494
+ -3.7073e-02f, -5.4620e-02f, 1.0899e-01f,
1495
+ 3.7891e-02f, 9.9487e-02f, 3.2383e-02f,
1496
+ -6.3628e-02f, -5.0303e-03f, 5.4617e-02f,
1497
+ -8.7802e-02f, 2.1977e-01f, -6.0249e-03f,
1498
+ 6.3554e-02f, -5.4291e-02f, -2.6709e-02f,
1499
+ -1.5505e-02f, -6.7104e-02f, 3.8607e-02f,
1500
+ -1.1427e-01f, -3.2524e-01f, 4.0077e-02f,
1501
+ -6.5144e-03f, 1.2313e-01f, -2.7924e-02f,
1502
+ 1.4265e-02f, -3.8338e-02f, 8.6780e-02f,
1503
+ 1.5341e-01f, 1.2174e-01f, -7.3160e-02f,
1504
+ 2.6326e-04f, 7.3690e-02f, 5.2187e-02f,
1505
+ -3.3114e-02f, -3.6588e-02f, 1.1635e-02f,
1506
+ -3.3521e-02f, 1.0767e-01f, -8.9125e-03f,
1507
+ -2.2431e-02f, -4.5655e-03f, 7.5531e-03f,
1508
+ 6.7227e-04f, 7.2856e-04f, 7.3907e-04f,
1509
+ 6.5335e-04f, 7.0702e-04f, 7.1233e-04f,
1510
+ 6.1540e-04f, 6.7286e-04f, 6.7797e-04f,
1511
+ -3.1496e-02f, 6.0514e-02f, 4.2013e-02f,
1512
+ -2.8617e-02f, 1.4846e-02f, 4.0016e-03f,
1513
+ 4.7006e-03f, -4.0017e-02f, -3.0411e-02f,
1514
+ -9.6037e-03f, 8.8522e-02f, 9.8616e-02f,
1515
+ 4.1297e-02f, -3.2645e-01f, -7.6144e-03f,
1516
+ -1.0711e-02f, 3.9324e-02f, 4.0144e-02f,
1517
+ 5.2899e-02f, -7.8668e-02f, -5.4798e-02f,
1518
+ -2.0428e-01f, 5.7238e-02f, -3.6937e-02f,
1519
+ -3.6103e-02f, -8.2683e-02f, -2.8101e-02f,
1520
+ 8.2479e-02f, 5.7766e-02f, -1.2019e-01f,
1521
+ -3.8373e-01f, 6.8272e-02f, -1.1758e-02f,
1522
+ 5.1129e-02f, -2.7931e-01f, 4.5608e-02f,
1523
+ -2.5151e-02f, -5.0816e-02f, 1.7231e-02f,
1524
+ -3.6376e-02f, 1.5916e-01f, 2.9192e-02f,
1525
+ -4.1947e-02f, 5.3183e-02f, -9.7289e-02f,
1526
+ 4.6138e-02f, 7.0842e-02f, 1.6673e-02f,
1527
+ -1.7243e-03f, 2.7203e-01f, 3.8262e-02f,
1528
+ -1.4000e-01f, -7.3793e-02f, -2.0050e-02f,
1529
+ -1.8750e-02f, -8.5319e-02f, -3.0858e-02f,
1530
+ -5.9981e-02f, 1.2729e-01f, 1.4094e-02f,
1531
+ -5.4088e-02f, -2.3694e-02f, -9.7485e-03f,
1532
+ -4.7840e-03f, -4.8359e-03f, -4.8727e-03f,
1533
+ -4.7882e-03f, -4.8380e-03f, -4.8755e-03f,
1534
+ -4.7859e-03f, -4.8321e-03f, -4.8633e-03f,
1535
+ 4.9511e-02f, 1.0935e-01f, -3.7430e-03f,
1536
+ 1.1834e-01f, 7.7243e-02f, 4.3074e-02f,
1537
+ 6.7446e-02f, 2.9734e-02f, -1.1276e-02f,
1538
+ -2.0080e-02f, 1.3561e-01f, -1.3455e-01f,
1539
+ -1.4505e-02f, 2.2100e-01f, 4.9635e-02f,
1540
+ -1.0040e-02f, 3.4560e-02f, -7.4607e-03f,
1541
+ -6.8873e-02f, -5.6221e-02f, 1.2255e-02f,
1542
+ -2.9198e-02f, 7.1612e-02f, 2.9402e-02f,
1543
+ 4.1036e-02f, 4.6417e-02f, 6.0284e-03f,
1544
+ -6.5261e-02f, 2.1426e-03f, 2.4192e-02f,
1545
+ -1.6073e-03f, -6.2222e-03f, -1.8295e-02f,
1546
+ 2.4952e-04f, -2.0623e-02f, -3.3064e-03f,
1547
+ 5.9188e-02f, -4.8839e-02f, 7.9840e-02f,
1548
+ -6.7952e-02f, -4.7191e-01f, 1.5117e-01f,
1549
+ 1.5668e-01f, 2.4733e-01f, 1.1354e-01f,
1550
+ 1.7742e-02f, -4.4059e-02f, 9.5374e-03f,
1551
+ 3.2049e-01f, -1.3779e-01f, 9.6608e-02f,
1552
+ 8.4580e-02f, 1.4293e-01f, 6.1574e-02f,
1553
+ 2.8777e-03f, 7.8795e-02f, -5.1902e-02f,
1554
+ 1.2212e-01f, 1.0321e-01f, 3.2360e-02f,
1555
+ -9.6617e-02f, 7.8941e-03f, -7.0876e-02f,
1556
+ 3.5869e-03f, 3.5891e-03f, 3.5923e-03f,
1557
+ 3.5746e-03f, 3.5840e-03f, 3.5967e-03f,
1558
+ 3.5785e-03f, 3.5932e-03f, 3.6080e-03f,
1559
+ 1.5454e-03f, 3.0582e-03f, 4.3737e-02f,
1560
+ -5.9833e-02f, -1.1247e-01f, 4.4380e-02f,
1561
+ -1.3206e-01f, 8.2778e-03f, 4.7963e-02f,
1562
+ -4.3720e-02f, -7.5722e-03f, 2.0510e-01f,
1563
+ 3.0133e-02f, -4.0506e-01f, 2.7867e-01f,
1564
+ 5.5586e-02f, 2.8926e-02f, 1.3360e-03f,
1565
+ 1.9490e-05f, 3.3326e-01f, -7.7241e-02f,
1566
+ -1.5648e-01f, 1.5195e-01f, -1.3995e-01f,
1567
+ 8.6519e-02f, 1.0447e-01f, -4.1413e-02f,
1568
+ -3.8667e-03f, 1.6159e-01f, 1.1627e-01f,
1569
+ -2.2646e-01f, -3.4758e-02f, -6.7956e-03f,
1570
+ -3.2689e-01f, 1.9606e-01f, -9.1523e-02f,
1571
+ 1.1238e-02f, 1.5084e-03f, 4.2113e-02f,
1572
+ -1.1154e-02f, -3.6596e-01f, -7.2252e-02f,
1573
+ 6.6621e-02f, 1.0188e-01f, 4.1032e-01f,
1574
+ 3.5892e-02f, -4.8304e-02f, 6.6142e-03f,
1575
+ 1.3374e-01f, 2.2720e-01f, -7.1224e-02f,
1576
+ 6.8952e-02f, 2.0467e-01f, 5.0251e-02f,
1577
+ -6.2016e-02f, 2.2175e-01f, -1.7764e-02f,
1578
+ 2.7542e-02f, 1.4905e-01f, 3.6637e-02f,
1579
+ -7.2231e-02f, 5.0271e-03f, -7.1823e-02f,
1580
+ 3.5760e-03f, 3.5540e-03f, 3.5692e-03f,
1581
+ 3.5664e-03f, 3.5490e-03f, 3.5689e-03f,
1582
+ 3.5671e-03f, 3.5619e-03f, 3.5864e-03f,
1583
+ 2.7470e-02f, -3.9752e-02f, 4.1063e-02f,
1584
+ -2.4985e-02f, -1.7969e-01f, 8.2186e-02f,
1585
+ -5.4251e-02f, -5.9651e-03f, 2.5079e-02f,
1586
+ -2.1197e-02f, 2.5426e-02f, 1.3585e-01f,
1587
+ -1.3460e-02f, -1.1377e-01f, 1.2278e-01f,
1588
+ 3.6533e-02f, 1.2843e-02f, 5.6219e-02f,
1589
+ 5.8141e-04f, 2.8354e-01f, -6.2016e-02f,
1590
+ -1.0289e-01f, 1.8724e-01f, -9.9475e-02f,
1591
+ 5.1193e-02f, 7.5986e-02f, -1.2951e-03f,
1592
+ -8.2587e-02f, 1.8498e-01f, 1.0891e-01f,
1593
+ 1.3538e-01f, -4.7728e-01f, 1.0868e-01f,
1594
+ -8.6415e-02f, -1.7061e-01f, 1.0457e-02f
1595
+ }
1596
+ };
1597
+ __device__ __constant__ static const float biasL[8][8] =
1598
+ {
1599
+ {
1600
+ -0.1175f, -0.0258f, -0.0053f, -0.0437f, -0.0563f, -0.1047f, -0.3449f, 0.0568f
1601
+ }
1602
+ ,
1603
+ {
1604
+ 0.0339f, -0.1738f, 0.0061f, 0.1565f, -0.0316f, -0.0016f, -0.0032f, -0.0554f
1605
+ }
1606
+ ,
1607
+ {
1608
+ -0.0508f, -0.0609f, 0.0347f, -0.0802f, -0.0438f, 0.2512f, -0.0491f, -0.0259f
1609
+ }
1610
+ ,
1611
+ {
1612
+ 0.0655f, 0.0255f, 0.0228f, -0.0027f, -0.0155f, -0.0163f, -0.0174f, -0.1095f
1613
+ }
1614
+ ,
1615
+ {
1616
+ 4.9947e-03f, 5.3372e-03f, -4.5286e-09f, -1.3756e-03f, 3.8858e-03f, -4.4197e-02f, 3.3970e-02f, 2.8411e-02f
1617
+ }
1618
+ ,
1619
+ {
1620
+ -0.0396f, 0.0007f, 0.1735f, 0.0109f, 0.1177f, 0.0919f, 0.0567f, -0.0005f
1621
+ }
1622
+ ,
1623
+ {
1624
+ 0.0127f, -0.0688f, 0.1102f, -0.0052f, 0.1602f, -0.0191f, -0.0322f, 0.0311f
1625
+ }
1626
+ ,
1627
+ {
1628
+ 0.0063f, 0.0093f, 0.0729f, 0.3734f, 0.0006f, 0.1915f, 0.3186f, 0.2636f
1629
+ }
1630
+ };
1631
+ __device__ __constant__ static const float kernelsL10[4 * 8] =
1632
+ {
1633
+ -0.0967f, -0.3094f,
1634
+ 0.3537f, 0.5705f,
1635
+ 0.2547f, 0.3360f,
1636
+ -0.0718f, -0.0700f,
1637
+ -0.3013f, -0.1602f,
1638
+ 0.4520f, 0.0495f,
1639
+ 0.1564f, 0.3773f,
1640
+ -0.0216f, 0.4367f,
1641
+ -0.4855f, -0.1972f,
1642
+ -0.2026f, -0.4390f,
1643
+ 0.3743f, -0.1156f,
1644
+ 0.4408f, -0.3123f,
1645
+ -0.3577f, 0.0753f,
1646
+ -0.3396f, 0.0336f,
1647
+ 0.1052f, -0.4180f,
1648
+ 0.0799f, -0.3587f
1649
+ };
1650
+
1651
+ #include "ACNetCommon.cuh"
1652
+
1653
+ DECLARE_ACNET_HDN_INTERFACE_FUNCTION(3)
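The constant tables above (the kernelsL blocks, biasL and kernelsL10) live in __constant__ memory and are consumed by the kernels that ACNetCommon.cuh brings in, which are not part of this diff. Purely as a sketch of the access pattern for such a __constant__ bias table — the kernel name, the 8-channel interleaved layout and the `layer` parameter below are illustrative assumptions, not symbols from ACNetCommon.cuh:

// Sketch only: reads one row of the biasL table defined above and applies it to
// an 8-channel feature map stored channel-interleaved per pixel, followed by ReLU.
// addBiasReLU, the layout and `layer` are assumptions made for this illustration.
__global__ void addBiasReLU(float* feat, int width, int height, int layer)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;

    float* px = feat + 8 * (y * width + x);   // 8 channels for this pixel
    #pragma unroll
    for (int c = 0; c < 8; ++c)
        px[c] = fmaxf(px[c] + biasL[layer][c], 0.0f);   // bias + ReLU per channel
}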
cuda_code/Activation_27.cu ADDED
@@ -0,0 +1,587 @@
1
+ #define _USE_MATH_DEFINES
2
+
3
+ #include <ATen/native/Activation.h>
4
+
5
+ #include <cmath>
6
+
7
+ #include <thrust/tuple.h>
8
+
9
+ #include <ATen/ATen.h>
10
+ #include <ATen/AccumulateType.h>
11
+ #include <ATen/Dispatch.h>
12
+ #include <ATen/NativeFunctions.h>
13
+ #include <ATen/core/Array.h>
14
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
15
+ #include <ATen/cuda/detail/IndexUtils.cuh>
16
+ #include <ATen/cuda/detail/OffsetCalculator.cuh>
17
+ #include <ATen/native/cuda/Loops.cuh>
18
+ #include <c10/cuda/CUDAMathCompat.h>
19
+
20
+ namespace at {
21
+ namespace native {
22
+
23
+ // -----------------------------------
24
+ // prelu forward
25
+ // -----------------------------------
26
+ template <typename scalar_t>
27
+ void prelu_cuda_kernel_share_weights(
28
+ const Tensor& input,
29
+ Tensor& result,
30
+ const scalar_t* weight_data)
31
+ {
32
+ auto iter = TensorIterator::unary_op(result, input);
33
+
34
+ at::native::gpu_kernel(iter,
35
+ [weight_data] GPU_LAMBDA (scalar_t input_val) {
36
+ return (input_val > 0) ? input_val : *weight_data * input_val;
37
+ });
38
+ }
39
+
40
+ template <typename scalar_t>
41
+ __global__ void prelu_cuda_kernel_multi_weights(
42
+ scalar_t* result_data,
43
+ const scalar_t* input_data,
44
+ const scalar_t* weight_data,
45
+ int64_t input_stride0,
46
+ int64_t input_stride1,
47
+ int64_t input_numel) {
48
+
49
+ int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
50
+ if (linearId >= input_numel) return;
51
+
52
+ // multiply values at each channel with weight[channel_index]
53
+ int64_t channel = (linearId % input_stride0) / input_stride1;
54
+ scalar_t input_data_val = input_data[linearId];
55
+ result_data[linearId] = (input_data_val > 0) ? input_data_val : weight_data[channel] * input_data_val;
56
+ }
57
+
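For a contiguous NCHW input, the strides handed to this kernel are input_stride0 = C*H*W and input_stride1 = H*W, so the channel recovered above is (linearId % (C*H*W)) / (H*W). A small host-side sanity check of that formula (illustrative only, not part of the diff):

// Verifies channel = (i % stride0) / stride1 for every element of a 2x3x4x5
// contiguous NCHW tensor laid out as i = n*stride0 + c*stride1 + h*W + w.
#include <cassert>
#include <cstdint>

int main() {
    const int64_t N = 2, C = 3, H = 4, W = 5;
    const int64_t stride0 = C * H * W;   // elements per sample
    const int64_t stride1 = H * W;       // elements per channel
    for (int64_t n = 0; n < N; ++n)
        for (int64_t c = 0; c < C; ++c)
            for (int64_t hw = 0; hw < stride1; ++hw) {
                const int64_t i = n * stride0 + c * stride1 + hw;
                assert((i % stride0) / stride1 == c);
            }
    return 0;
}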
58
+ Tensor prelu_cuda(const Tensor& self, const Tensor& weight_) {
59
+ TORCH_CHECK(self.is_cuda());
60
+ TORCH_CHECK(weight_.is_cuda());
61
+
62
+ auto input = self.contiguous();
63
+ auto weight = weight_.contiguous();
64
+
65
+ TORCH_CHECK(input.is_contiguous());
66
+ TORCH_CHECK(weight.is_contiguous());
67
+
68
+ int64_t weight_num = weight.numel();
69
+ Tensor result = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
70
+ auto strides = input.strides();
71
+
72
+ // case1: shared weight for all channels
73
+ if (weight_num == 1) {
74
+ AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
75
+ prelu_cuda_kernel_share_weights<scalar_t>(
76
+ input,
77
+ result,
78
+ weight.data_ptr<scalar_t>());
79
+ });
80
+ }
81
+ else { // case2: multiple weights, one for each channel
82
+ int64_t input_ndim = input.dim();
83
+ TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
84
+
85
+ int64_t channel_size = 1; // channel_size default to 1
86
+ int64_t input_stride0 = 1, input_stride1 = 1;
87
+
88
+ if (input_ndim > 1) {
89
+ channel_size = input.size(1); // channel is the 2nd dim of input
90
+ input_stride0 = strides[0];
91
+ input_stride1 = strides[1];
92
+ }
93
+ TORCH_CHECK(channel_size == weight_num,
94
+ "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
95
+ " and channel size = ", channel_size, ".");
96
+
97
+ // config to run cuda kernel
98
+ int64_t input_numel = input.numel();
99
+ const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
100
+ dim3 grid;
101
+ int curDevice = -1;
102
+ cudaGetDevice(&curDevice);
103
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
104
+ TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu: input too large or too many dimensions");
105
+
106
+ AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_cuda", [&] {
107
+ prelu_cuda_kernel_multi_weights<scalar_t>
108
+ <<<grid, block, 0, stream>>>(
109
+ result.data_ptr<scalar_t>(),
110
+ input.data_ptr<scalar_t>(),
111
+ weight.data_ptr<scalar_t>(),
112
+ input_stride0,
113
+ input_stride1,
114
+ input_numel);
115
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
116
+ });
117
+ }
118
+ return result;
119
+ }
120
+
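A minimal usage sketch for the per-channel branch above; the shapes are arbitrary examples, and the call goes through the public at::prelu entry point, which dispatches to prelu_cuda for CUDA tensors:

// Sketch only: exercises the multi-weight path (weight.numel() == channel count).
#include <ATen/ATen.h>

void prelu_usage_example() {
    at::Tensor x = at::randn({8, 3, 32, 32}, at::kCUDA);  // NCHW input, 3 channels
    at::Tensor w = at::full({3}, 0.25, at::kCUDA);        // one negative slope per channel
    at::Tensor y = at::prelu(x, w);                       // y = x where x > 0, else w[c] * x
}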
121
+ // -----------------------------------
122
+ // prelu backward
123
+ // -----------------------------------
124
+ template <typename scalar_t>
125
+ void prelu_cuda_backward_kernel_share_weights(
126
+ const Tensor& input,
127
+ const Tensor& grad_out,
128
+ Tensor& input_grad,
129
+ Tensor& weight_grad_collector,
130
+ const scalar_t* weight_data) {
131
+ at::TensorIterator iter = TensorIteratorConfig()
132
+ .add_borrowed_output(input_grad)
133
+ .add_borrowed_output(weight_grad_collector)
134
+ .add_borrowed_input(input)
135
+ .add_borrowed_input(grad_out)
136
+ .build();
137
+
138
+ // N.B. `std::tuple` does not support `::operator=` on device code.
139
+ gpu_kernel_multiple_outputs(iter, [=] GPU_LAMBDA (scalar_t input, scalar_t grad_out) -> thrust::tuple<scalar_t, scalar_t> {
140
+ scalar_t input_grad = input > 0 ? grad_out : (*weight_data) * grad_out;
141
+ scalar_t weight_grad_collector = input > 0 ? scalar_t(0) : input * grad_out;
142
+ return {input_grad, weight_grad_collector};
143
+ });
144
+ }
145
+
146
+ template <typename scalar_t>
147
+ __global__ void prelu_cuda_backward_kernel_multi_weights(
148
+ const scalar_t* input_data,
149
+ const scalar_t* weight_data,
150
+ const scalar_t* grad_out_data,
151
+ scalar_t* input_grad_data,
152
+ scalar_t* weight_grad_collector,
153
+ int64_t input_stride0,
154
+ int64_t input_stride1,
155
+ int64_t input_numel) {
156
+
157
+ int64_t linearId = blockIdx.x * blockDim.x + threadIdx.x;
158
+ if (linearId >= input_numel) return;
159
+ int64_t channel = (linearId % input_stride0) / input_stride1;
160
+ scalar_t input_data_val = input_data[linearId];
161
+ scalar_t grad_out_data_val = grad_out_data[linearId];
162
+ input_grad_data[linearId] = (input_data_val > 0) ? grad_out_data_val : weight_data[channel] * grad_out_data_val;
163
+ weight_grad_collector[linearId] = (input_data_val > 0) ? scalar_t(0) : input_data_val * grad_out_data_val;
164
+ }
165
+
166
+ std::tuple<Tensor, Tensor> prelu_backward_cuda(const Tensor& grad_out_, const Tensor& self, const Tensor& weight_) {
167
+ TORCH_CHECK(grad_out_.is_cuda());
168
+ TORCH_CHECK(self.is_cuda());
169
+ TORCH_CHECK(weight_.is_cuda());
170
+
171
+ auto input = self.contiguous();
172
+ auto grad_out = grad_out_.contiguous();
173
+ auto weight = weight_.contiguous();
174
+
175
+ TORCH_CHECK(input.is_contiguous());
176
+ TORCH_CHECK(weight.is_contiguous());
177
+ TORCH_CHECK(grad_out.is_contiguous());
178
+
179
+ int64_t weight_num = weight.numel();
180
+ auto strides = input.strides();
181
+ auto dims = input.dim();
182
+ Tensor input_grad = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
183
+ Tensor weight_grad = at::empty_like(weight, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
184
+ Tensor weight_grad_collector = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
185
+ // case1: shared parameter for all channels
186
+ if (weight_num == 1) {
187
+ AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
188
+ prelu_cuda_backward_kernel_share_weights<scalar_t>(
189
+ input,
190
+ grad_out,
191
+ input_grad,
192
+ weight_grad_collector,
193
+ weight.data_ptr<scalar_t>());
194
+ });
195
+ weight_grad.fill_(weight_grad_collector.sum());
196
+ }
197
+ else { // case2: multiple parameters, one for each channel
198
+ int64_t input_ndim = input.dim();
199
+ TORCH_CHECK(input_ndim > 0, "Not allow zero-dim input tensor.");
200
+
201
+ int64_t channel_size = 1; // channel_size default to 1
202
+ int64_t input_stride0 = 1, input_stride1 = 1;
203
+
204
+ if (input_ndim > 1) {
205
+ channel_size = input.size(1); // channel is the 2nd dim of input
206
+ input_stride0 = strides[0];
207
+ input_stride1 = strides[1];
208
+ }
209
+ TORCH_CHECK(channel_size == weight_num,
210
+ "Mismatch of parameter numbers and input channel size. Found parameter numbers = ", weight_num,
211
+ " and channel size = ", channel_size, ".");
212
+
213
+ // config to run cuda kernel
214
+ int64_t input_numel = input.numel();
215
+ const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), input_numel));
216
+ dim3 grid;
217
+ int curDevice = -1;
218
+ cudaGetDevice(&curDevice);
219
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(curDevice);
220
+ TORCH_CHECK(cuda::getApplyGrid(input_numel, grid, curDevice), "prelu_backward_cuda: input too large or too many dimensions");
221
+
222
+ AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, input.scalar_type(), "prelu_backward_cuda", [&] {
223
+ prelu_cuda_backward_kernel_multi_weights<scalar_t>
224
+ <<<grid, block, 0, stream>>>(
225
+ input.data_ptr<scalar_t>(),
226
+ weight.data_ptr<scalar_t>(),
227
+ grad_out.data_ptr<scalar_t>(),
228
+ input_grad.data_ptr<scalar_t>(),
229
+ weight_grad_collector.data_ptr<scalar_t>(),
230
+ input_stride0,
231
+ input_stride1,
232
+ input_numel);
233
+ C10_CUDA_KERNEL_LAUNCH_CHECK();
234
+ });
235
+ // update weight_grad
236
+ std::vector<int64_t> reduce_dims;
237
+ reduce_dims.push_back(0);
238
+ if (dims > 2) {
239
+ for(int64_t i = 2; i < dims; i++) reduce_dims.push_back(i);
240
+ }
241
+ weight_grad = weight_grad_collector.sum(reduce_dims);
242
+ }
243
+ return std::tuple<Tensor, Tensor>{input_grad, weight_grad};
244
+ }
245
+
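The reduction at the end of the per-channel branch above sums the per-element contributions collected in weight_grad_collector over every dimension except the channel dimension (dim 0 and dims 2..n-1), leaving one gradient value per channel:

// For an NCHW input:
//   weight_grad[c] = sum over n, h, w of
//       (input[n, c, h, w] <= 0) ? input[n, c, h, w] * grad_out[n, c, h, w] : 0
// which is exactly weight_grad_collector.sum({0, 2, 3}).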
246
+ // -----------------------------------
247
+ // hardshrink
248
+ // -----------------------------------
249
+ void hardshrink_kernel(TensorIterator& iter, const Scalar& value) {
250
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardshrink_cuda", [&]() {
251
+ auto lambd = value.to<scalar_t>();
252
+ gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
253
+ return (a >= -lambd && a <= lambd) ? scalar_t(0) : a;
254
+ });
255
+ });
256
+ }
257
+
258
+ void softshrink_kernel(TensorIteratorBase& iter, const Scalar& value) {
259
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softshrink_cuda", [&]() {
260
+ auto lambd = value.to<scalar_t>();
261
+ gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t a) -> scalar_t {
262
+ return a > lambd ? a - lambd : (a < -lambd ? a + lambd : scalar_t(0));
263
+ });
264
+ });
265
+ }
266
+
267
+ void shrink_backward_kernel(TensorIterator& iter, const Scalar& value) {
268
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "shrink_backward_cuda", [&]() {
269
+ auto lambd = value.to<scalar_t>();
270
+ gpu_kernel(iter, [lambd]GPU_LAMBDA(scalar_t grad_val, scalar_t self_val) -> scalar_t {
271
+ return (self_val >= -lambd && self_val <= lambd) ? scalar_t(0) : grad_val;
272
+ });
273
+ });
274
+ }
275
+
276
+ void hardtanh_backward_kernel(TensorIterator& iter, const Scalar& min, const Scalar& max) {
277
+ AT_DISPATCH_FLOATING_TYPES_AND(at::ScalarType::Half, iter.dtype(), "hardtanh_backward_cuda", [&]() {
278
+ auto min_val = min.to<scalar_t>();
279
+ auto max_val = max.to<scalar_t>();
280
+ gpu_kernel(iter, [min_val, max_val]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
281
+ return (b <= min_val) || (b >= max_val) ? scalar_t(0) : a;
282
+ });
283
+ });
284
+ }
285
+
286
+ void softplus_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
287
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_cuda", [&]() {
288
+ auto beta = beta_.to<scalar_t>();
289
+ auto threshold = threshold_.to<scalar_t>();
290
+ gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a) -> scalar_t {
291
+ return (a * beta) > threshold ? a : static_cast<scalar_t>(::log1p(std::exp(a * beta))) / beta;
292
+ });
293
+ });
294
+ }
295
+
296
+ void softplus_backward_kernel(TensorIteratorBase& iter, const Scalar& beta_, const Scalar& threshold_) {
297
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "softplus_backward_cuda", [&]() {
298
+ auto beta = beta_.to<scalar_t>();
299
+ auto threshold = threshold_.to<scalar_t>();
300
+ gpu_kernel(iter, [beta, threshold]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
301
+ scalar_t z = std::exp(b * beta);
302
+ return (b * beta) > threshold ? a : a * z / (z + scalar_t(1.));
303
+ });
304
+ });
305
+ }
306
+
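A note on the threshold guard used by both softplus kernels above: softplus(x) = log1p(exp(beta*x)) / beta, and once beta*x exceeds the threshold the kernels fall back to the identity, since log1p(exp(t)) approaches t for large t and evaluating exp(t) there would overflow:

// forward : softplus(x) = log1p(exp(beta * x)) / beta  ->  returns x when beta * x > threshold
// backward: d/dx softplus(x) = sigmoid(beta * x) = z / (z + 1) with z = exp(beta * x)
//           ->  passes the gradient through unchanged when beta * x > threshold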
307
+ template <typename scalar_t>
308
+ void threshold_kernel_impl(TensorIteratorBase& iter, scalar_t threshold, scalar_t value) {
309
+ gpu_kernel_with_scalars(iter, [=]GPU_LAMBDA(scalar_t x, scalar_t other) -> scalar_t {
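+ // x <= threshold selects `value`; otherwise the second operand is passed through (for the forward op `other` is the input itself, for the backward it is the incoming gradient)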
310
+ return x <= threshold ? value : other;
311
+ });
312
+ }
313
+
314
+ static void threshold_kernel_cuda(TensorIteratorBase& iter, const Scalar& threshold, const Scalar& value) {
315
+ AT_DISPATCH_ALL_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "threshold_cuda", [&] {
316
+ threshold_kernel_impl<scalar_t>(iter, threshold.to<scalar_t>(), value.to<scalar_t>());
317
+ });
318
+ }
319
+
320
+ void elu_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale) {
321
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_cuda", [&]() {
322
+ auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
323
+ auto poscoef = scale.to<scalar_t>();
324
+ auto negiptcoef = input_scale.to<scalar_t>();
325
+ gpu_kernel(iter, [negcoef, poscoef, negiptcoef]GPU_LAMBDA(scalar_t a) -> scalar_t {
326
+ return a > scalar_t(0) ? a * poscoef : (static_cast<scalar_t>(std::exp(a * negiptcoef)) - scalar_t(1.)) * negcoef;
327
+ });
328
+ });
329
+ }
330
+
331
+ void elu_backward_kernel(TensorIteratorBase& iter, const Scalar& alpha, const Scalar& scale, const Scalar& input_scale, bool is_result) {
332
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "elu_backward_cuda", [&]() {
333
+ auto negcoef = alpha.to<scalar_t>() * scale.to<scalar_t>();
334
+ auto poscoef = scale.to<scalar_t>();
335
+ auto negiptcoef = input_scale.to<scalar_t>();
336
+ gpu_kernel(iter, [negcoef, poscoef, negiptcoef, is_result]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
337
+ if (is_result) {
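+ // here `b` is the saved forward output: out = negcoef * (exp(x * negiptcoef) - 1), so negiptcoef * (b + negcoef) reproduces the derivative negiptcoef * negcoef * exp(x * negiptcoef)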
338
+ return b <= scalar_t(0) ? a * negiptcoef * (b + negcoef) : a * poscoef;
339
+ } else {
340
+ return b <= scalar_t(0) ? a * negiptcoef * negcoef * (static_cast<scalar_t>(std::exp(b * negiptcoef))) : a * poscoef;
341
+ }
342
+ });
343
+ });
344
+ }
345
+
346
+ namespace {
347
+
348
+ void GeluCUDAKernelImpl(TensorIterator& it) {
349
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, it.dtype(), "GeluCUDAKernelImpl", [&]() {
350
+ using T_ACC = acc_type<scalar_t, true>;
351
+ gpu_kernel(it, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
352
+ return static_cast<T_ACC>(x) *
353
+ c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
354
+ });
355
+ });
356
+ }
357
+
358
+ void GeluBackwardCUDAKernelImpl(TensorIterator& it) {
359
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16,
360
+ it.dtype(), "GeluBackwardCUDAKernelImpl", [&]() {
361
+ using T_ACC = acc_type<scalar_t, true>;
362
+ gpu_kernel(it, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
363
+ constexpr T_ACC kBeta = M_2_SQRTPI * M_SQRT1_2 * T_ACC(0.5);
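+ // kBeta = (2/sqrt(pi)) * (1/sqrt(2)) * 0.5 = 1/sqrt(2*pi), the normalization of the standard normal pdf used below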
364
+ const T_ACC cdf = c10::cuda::compat::normcdf(static_cast<T_ACC>(x));
365
+ const T_ACC pdf =
366
+ c10::cuda::compat::exp(
367
+ T_ACC(-0.5) * static_cast<T_ACC>(x) * static_cast<T_ACC>(x)) *
368
+ kBeta;
369
+ return static_cast<T_ACC>(dy) * (cdf + static_cast<T_ACC>(x) * pdf);
370
+ });
371
+ });
372
+ }
373
+
374
+ void leaky_relu_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
375
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_cuda", [&]() {
376
+ auto negval = negval_.to<scalar_t>();
377
+ gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a) -> scalar_t {
378
+ return a > scalar_t(0) ? a : a * negval;
379
+ });
380
+ });
381
+ }
382
+
383
+ void leaky_relu_backward_kernel(TensorIteratorBase& iter, const Scalar& negval_) {
384
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "leaky_relu_backward_cuda", [&]() {
385
+ auto negval = negval_.to<scalar_t>();
386
+ gpu_kernel(iter, [negval]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
387
+ return a > scalar_t(0) ? b : b * negval;
388
+ });
389
+ });
390
+ }
391
+
392
+ void hardswish_kernel(TensorIterator& iter) {
393
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_cuda", [&]() {
394
+ using T_ACC = acc_type<scalar_t, true>;
395
+ const T_ACC zero(0.0f);
396
+ const T_ACC one_sixth(1.0f / 6.0f);
397
+ const T_ACC three(3.0f);
398
+ const T_ACC six(6.0f);
399
+ gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
400
+ T_ACC x = static_cast<T_ACC>(self_val);
401
+ return x * std::min(std::max(x + three, zero), six) * one_sixth;
402
+ });
403
+ });
404
+ }
405
+
406
+ void hardswish_backward_kernel(TensorIterator& iter) {
407
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardswish_backward_cuda", [&]() {
408
+ using T_ACC = acc_type<scalar_t, true>;
409
+ const T_ACC zero(0.0f);
410
+ const T_ACC three(3.0f);
411
+ const T_ACC neg_three(-3.0f);
412
+ const T_ACC one_half(0.5f);
413
+ gpu_kernel(
414
+ iter,
415
+ [zero, three, neg_three, one_half]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
416
+ T_ACC grad_val = static_cast<T_ACC>(grad_val_);
417
+ T_ACC self_val = static_cast<T_ACC>(self_val_);
418
+ if (self_val < neg_three) {
419
+ return zero;
420
+ } else if (self_val <= three) {
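+ // on [-3, 3] the derivative of hardswish(x) = x * (x + 3) / 6 is (2x + 3) / 6 = x / 3 + 1/2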
421
+ return grad_val * ((self_val / three) + one_half);
422
+ } else {
423
+ return grad_val;
424
+ }
425
+ });
426
+ });
427
+ }
428
+
429
+ void hardsigmoid_kernel(TensorIteratorBase& iter) {
430
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_cuda", [&]() {
431
+ using T_ACC = acc_type<scalar_t, true>;
432
+ const T_ACC zero(0.0f);
433
+ const T_ACC one_sixth(1.0f / 6.0f);
434
+ const T_ACC three(3.0f);
435
+ const T_ACC six(6.0f);
436
+ gpu_kernel(iter, [zero, one_sixth, three, six]GPU_LAMBDA(scalar_t self_val) -> scalar_t {
437
+ T_ACC x = static_cast<T_ACC>(self_val);
438
+ return std::min(std::max(x + three, zero), six) * one_sixth;
439
+ });
440
+ });
441
+ }
442
+
443
+ void hardsigmoid_backward_kernel(TensorIteratorBase& iter) {
444
+ AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.dtype(), "hardsigmoid_backward_cuda", [&]() {
445
+ using T_ACC = acc_type<scalar_t, true>;
446
+ const T_ACC zero(0.0f);
447
+ const T_ACC three(3.0f);
448
+ const T_ACC neg_three(-3.0f);
449
+ const T_ACC one_sixth(1.0f / 6.0f);
450
+ gpu_kernel(
451
+ iter,
452
+ [zero, three, neg_three, one_sixth]GPU_LAMBDA(scalar_t grad_val_, scalar_t self_val_) -> scalar_t {
453
+ T_ACC grad_val = static_cast<T_ACC>(grad_val_);
454
+ T_ACC self_val = static_cast<T_ACC>(self_val_);
455
+ return (self_val > neg_three && self_val < three)
456
+ ? grad_val * one_sixth
457
+ : zero;
458
+ });
459
+ });
460
+ }
461
+
462
+ void silu_kernel(TensorIteratorBase& iter) {
463
+ AT_DISPATCH_FLOATING_TYPES_AND2(
464
+ at::ScalarType::Half,
465
+ at::ScalarType::BFloat16,
466
+ iter.dtype(),
467
+ "silu_cuda",
468
+ [&]() {
469
+ gpu_kernel(
470
+ iter,
471
+ [] GPU_LAMBDA(scalar_t x) -> scalar_t {
472
+ using T_ACC = acc_type<scalar_t, true>;
473
+ const T_ACC x_acc = static_cast<T_ACC>(x);
474
+ return x_acc / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
475
+ });
476
+ });
477
+ }
478
+
479
+ void silu_backward_kernel(TensorIterator& iter) {
480
+ AT_DISPATCH_FLOATING_TYPES_AND2(
481
+ at::ScalarType::Half,
482
+ at::ScalarType::BFloat16,
483
+ iter.dtype(),
484
+ "silu_backward_cuda",
485
+ [&]() {
486
+ gpu_kernel(
487
+ iter,
488
+ [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
489
+ using T_ACC = acc_type<scalar_t, true>;
490
+ const T_ACC dy_acc = static_cast<T_ACC>(dy);
491
+ const T_ACC x_acc = static_cast<T_ACC>(x);
492
+ const T_ACC s_acc =
493
+ T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
494
+ return dy_acc * s_acc * (T_ACC(1) + x_acc * (T_ACC(1) - s_acc));
495
+ });
496
+ });
497
+ }
498
+
499
+ void mish_kernel(TensorIteratorBase& iter) {
500
+ AT_DISPATCH_FLOATING_TYPES_AND2(
501
+ at::ScalarType::Half,
502
+ at::ScalarType::BFloat16,
503
+ iter.dtype(),
504
+ "mish_cuda",
505
+ [&]() {
506
+ gpu_kernel(
507
+ iter,
508
+ [] GPU_LAMBDA(scalar_t x) -> scalar_t {
509
+ using T_ACC = acc_type<scalar_t, true>;
510
+ const T_ACC x_acc = static_cast<T_ACC>(x);
511
+ return x_acc * c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
512
+ });
513
+ });
514
+ }
515
+
516
+ void mish_backward_kernel(TensorIterator& iter) {
517
+ AT_DISPATCH_FLOATING_TYPES_AND2(
518
+ at::ScalarType::Half,
519
+ at::ScalarType::BFloat16,
520
+ iter.dtype(),
521
+ "mish_backward_cuda",
522
+ [&]() {
523
+ gpu_kernel(
524
+ iter,
525
+ [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
526
+ using T_ACC = acc_type<scalar_t, true>;
527
+ const T_ACC dy_acc = static_cast<T_ACC>(dy);
528
+ const T_ACC x_acc = static_cast<T_ACC>(x);
529
+ const T_ACC s_acc =
530
+ T_ACC(1) / (T_ACC(1) + c10::cuda::compat::exp(-x_acc));
531
+ const T_ACC t_acc =
532
+ c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
533
+ return dy_acc * (t_acc + x_acc * s_acc * (T_ACC(1) - t_acc * t_acc));
534
+ });
535
+ });
536
+ }
537
+
538
+ } // namespace
539
+
540
+ Tensor gelu_cuda(const Tensor& self) {
541
+ Tensor Y = at::native::empty_like(
542
+ self,
543
+ c10::nullopt /* dtype */,
544
+ c10::nullopt /* layout */,
545
+ c10::nullopt /* device */,
546
+ c10::nullopt /* pin_memory */,
547
+ LEGACY_CONTIGUOUS_MEMORY_FORMAT);
548
+ auto it = TensorIterator::unary_op(Y, self);
549
+ GeluCUDAKernelImpl(it);
550
+ return Y;
551
+ }
552
+
553
+ Tensor gelu_backward_cuda(const Tensor& grad, const Tensor& self) {
554
+ Tensor dX = at::native::empty_like(
555
+ self,
556
+ c10::nullopt /* dtype */,
557
+ c10::nullopt /* layout */,
558
+ c10::nullopt /* device */,
559
+ c10::nullopt /* pin_memory */,
560
+ LEGACY_CONTIGUOUS_MEMORY_FORMAT);
561
+ auto it = TensorIterator::borrowing_binary_op(dX, grad, self);
562
+ GeluBackwardCUDAKernelImpl(it);
563
+ return dX;
564
+ }
565
+
566
+ REGISTER_DISPATCH(hardtanh_backward_stub, &hardtanh_backward_kernel);
567
+ REGISTER_DISPATCH(hardshrink_stub, &hardshrink_kernel);
568
+ REGISTER_DISPATCH(softshrink_stub, &softshrink_kernel);
569
+ REGISTER_DISPATCH(shrink_backward_stub, &shrink_backward_kernel);
570
+ REGISTER_DISPATCH(elu_stub, &elu_kernel);
571
+ REGISTER_DISPATCH(elu_backward_stub, &elu_backward_kernel);
572
+ REGISTER_DISPATCH(leaky_relu_stub, &leaky_relu_kernel);
573
+ REGISTER_DISPATCH(leaky_relu_backward_stub, &leaky_relu_backward_kernel);
574
+ REGISTER_DISPATCH(hardswish_stub, &hardswish_kernel);
575
+ REGISTER_DISPATCH(hardswish_backward_stub, &hardswish_backward_kernel);
576
+ REGISTER_DISPATCH(hardsigmoid_stub, &hardsigmoid_kernel);
577
+ REGISTER_DISPATCH(hardsigmoid_backward_stub, &hardsigmoid_backward_kernel);
578
+ REGISTER_DISPATCH(softplus_stub, &softplus_kernel);
579
+ REGISTER_DISPATCH(softplus_backward_stub, &softplus_backward_kernel);
580
+ REGISTER_DISPATCH(silu_stub, &silu_kernel);
581
+ REGISTER_DISPATCH(silu_backward_stub, &silu_backward_kernel);
582
+ REGISTER_DISPATCH(mish_stub, &mish_kernel);
583
+ REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);
584
+ REGISTER_DISPATCH(threshold_stub, &threshold_kernel_cuda);
585
+
586
+ } // namespace native
587
+ } // namespace at
cuda_code/AimetOpUtilsGpu_4.cu ADDED
@@ -0,0 +1,76 @@
1
+ //==============================================================================
2
+ //
3
+ // @@-COPYRIGHT-START-@@
4
+ //
5
+ // Copyright (c) 2020, Qualcomm Innovation Center, Inc. All rights reserved.
6
+ //
7
+ // Redistribution and use in source and binary forms, with or without
8
+ // modification, are permitted provided that the following conditions are met:
9
+ //
10
+ // 1. Redistributions of source code must retain the above copyright notice,
11
+ // this list of conditions and the following disclaimer.
12
+ //
13
+ // 2. Redistributions in binary form must reproduce the above copyright notice,
14
+ // this list of conditions and the following disclaimer in the documentation
15
+ // and/or other materials provided with the distribution.
16
+ //
17
+ // 3. Neither the name of the copyright holder nor the names of its contributors
18
+ // may be used to endorse or promote products derived from this software
19
+ // without specific prior written permission.
20
+ //
21
+ // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22
+ // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23
+ // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24
+ // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
25
+ // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26
+ // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27
+ // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28
+ // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29
+ // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30
+ // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31
+ // POSSIBILITY OF SUCH DAMAGE.
32
+ //
33
+ // SPDX-License-Identifier: BSD-3-Clause
34
+ //
35
+ // @@-COPYRIGHT-END-@@
36
+ //
37
+ //==============================================================================
38
+
39
+ #ifdef GOOGLE_CUDA
40
+
41
+ #define EIGEN_USE_GPU
42
+ #define EIGEN_USE_THREADS
43
+
44
+ #include "AimetOpUtils.h"
45
+
46
+ using namespace tensorflow;
47
+
48
+ #define EIGEN_USE_GPU
49
+ typedef Eigen::GpuDevice GPUDevice;
50
+
51
+
52
+ // GPU specialization of actual computations.
53
+ template <typename T>
54
+ void copyInputTensorsToOutputTensors(const GPUDevice& d, const T* inTensor, size_t count, T* outTensor)
55
+ {
56
+ // copy input_tensor to output_tensor
57
+ cudaMemcpy(outTensor, inTensor, count * sizeof(T), cudaMemcpyDeviceToDevice); // sizeof(T), not sizeof(float), so non-float instantiations copy the right number of bytes
58
+ }
59
+
60
+ template <typename T>
61
+ T copyLiteralToHost(const GPUDevice& d, const T* deviceValue)
62
+ {
63
+ T hostValue;
64
+ cudaMemcpy(&hostValue, deviceValue, sizeof(T), cudaMemcpyDeviceToHost);
65
+
66
+ return hostValue;
67
+ }
68
+
69
+ template void copyInputTensorsToOutputTensors(const GPUDevice& d, const float* inTensor, size_t count, float* outTensor);
70
+ template int8 copyLiteralToHost<int8>(const GPUDevice&, const int8* deviceValue);
71
+ template int32 copyLiteralToHost<int32>(const GPUDevice&, const int32* deviceValue);
72
+ template uint64 copyLiteralToHost<uint64>(const GPUDevice&, const uint64* deviceValue);
73
+ template double copyLiteralToHost<double>(const GPUDevice&, const double* deviceValue);
74
+ template bool copyLiteralToHost<bool>(const GPUDevice&, const bool* deviceValue);
75
+
76
+ #endif // GOOGLE_CUDA
cuda_code/ArrayManip_1.cu ADDED
@@ -0,0 +1,70 @@
1
+ #include "AbstractAPI.h"
2
+ #include "interfaces/cuda/Internals.h"
3
+ #include <cassert>
4
+ #include <device.h>
5
+
6
+ namespace device {
7
+ template <typename T> __global__ void kernel_scaleArray(T *array, const T scalar, const size_t numElements) {
8
+ size_t index = threadIdx.x + blockIdx.x * blockDim.x;
9
+ if (index < numElements) {
10
+ array[index] *= scalar;
11
+ }
12
+ }
13
+
14
+ template <typename T> void Algorithms::scaleArray(T *devArray,
15
+ T scalar,
16
+ const size_t numElements,
17
+ void* streamPtr) {
18
+ dim3 block(64, 1, 1);
19
+ dim3 grid = internals::computeGrid1D(block, numElements);
20
+ auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
21
+ kernel_scaleArray<<<grid, block, 0, stream>>>(devArray, scalar, numElements);
22
+ CHECK_ERR;
23
+ }
24
+ template void Algorithms::scaleArray(real *devArray, real scalar, const size_t numElements, void* streamPtr);
25
+ template void Algorithms::scaleArray(int *devArray, int scalar, const size_t numElements, void* streamPtr);
26
+ template void Algorithms::scaleArray(char *devArray, char scalar, const size_t numElements, void* streamPtr);
27
+
28
+ //--------------------------------------------------------------------------------------------------
29
+ template <typename T> __global__ void kernel_fillArray(T *array, T scalar, const size_t numElements) {
30
+ size_t index = threadIdx.x + blockIdx.x * blockDim.x;
31
+ if (index < numElements) {
32
+ array[index] = scalar;
33
+ }
34
+ }
35
+
36
+ template <typename T> void Algorithms::fillArray(T *devArray, const T scalar, const size_t numElements, void* streamPtr) {
37
+ dim3 block(64, 1, 1);
38
+ dim3 grid = internals::computeGrid1D(block, numElements);
39
+ auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
40
+ kernel_fillArray<<<grid, block, 0, stream>>>(devArray, scalar, numElements);
41
+ CHECK_ERR;
42
+ }
43
+ template void Algorithms::fillArray(real *devArray, real scalar, const size_t numElements, void* streamPtr);
44
+ template void Algorithms::fillArray(int *devArray, int scalar, const size_t numElements, void* streamPtr);
45
+ template void Algorithms::fillArray(char *devArray, char scalar, const size_t numElements, void* streamPtr);
46
+
47
+ //--------------------------------------------------------------------------------------------------
48
+ __global__ void kernel_touchMemory(real *ptr, size_t size, bool clean) {
49
+ int id = threadIdx.x + blockIdx.x * blockDim.x;
50
+ if (id < size) {
51
+ if (clean) {
52
+ ptr[id] = 0;
53
+ } else {
54
+ real value = ptr[id];
55
+ // Do something dummy here. We just need to check the pointers point to valid memory locations.
56
+ // Avoid compiler optimization. Possibly, implement a dummy code with asm.
57
+ value += 1;
58
+ value -= 1;
59
+ }
60
+ }
61
+ }
62
+
63
+ void Algorithms::touchMemory(real *ptr, size_t size, bool clean, void* streamPtr) {
64
+ dim3 block(256, 1, 1);
65
+ dim3 grid = internals::computeGrid1D(block, size);
66
+ auto stream = reinterpret_cast<internals::deviceStreamT>(streamPtr);
67
+ kernel_touchMemory<<<grid, block, 0, stream>>>(ptr, size, clean);
68
+ CHECK_ERR;
69
+ }
70
+ } // namespace device
cuda_code/BE_L1D_HIT.cu ADDED
@@ -0,0 +1,179 @@
1
+ #include <stdio.h>
2
+ #include <stdlib.h>
3
+ #include <cutil.h>
4
+ #include <math.h>
5
+ // Includes
6
+ #include <stdio.h>
7
+ #include "../include/ContAcq-IntClk.h"
8
+
9
+ // includes, project
10
+ #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
11
+ //#include <shrQATest.h>
12
+ //#include <shrUtils.h>
13
+
14
+ // includes CUDA
15
+ #include <cuda_runtime.h>
16
+
17
+ #define THREADS_PER_BLOCK 256
18
+ #define NUM_OF_BLOCKS 60
19
+ #define ITERATIONS REPLACE_ITERATIONS
20
+
21
+ #define LINE_SIZE 128
22
+ #define SETS 64
23
+ #define ASSOC 6
24
+ #define SIMD_WIDTH 32
25
+
26
+ // Variables
27
+ int* h_A;
28
+ int* h_B;
29
+ int* h_C;
30
+ int* d_A;
31
+ int* d_B;
32
+ int* d_C;
33
+ bool noprompt = false;
34
+ unsigned int my_timer;
35
+
36
+ // Functions
37
+ void CleanupResources(void);
38
+ void RandomInit(int*, int);
39
+ void ParseArguments(int, char**);
40
+
41
+ ////////////////////////////////////////////////////////////////////////////////
42
+ // These are CUDA Helper functions
43
+
44
+ // This will output the proper CUDA error strings in the event that a CUDA host call returns an error
45
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
46
+
47
+ inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
48
+ if(cudaSuccess != err){
49
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
50
+ exit(-1);
51
+ }
52
+ }
53
+
54
+ // This will output the proper error string when calling cudaGetLastError
55
+ #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
56
+
57
+ inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
58
+ cudaError_t err = cudaGetLastError();
59
+ if (cudaSuccess != err){
60
+ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
61
+ exit(-1);
62
+ }
63
+ }
64
+
65
+ // end of CUDA Helper Functions
66
+
67
+
68
+
69
+
70
+ // Device code
71
+ __global__ void PowerKernal(int* A, int* C, int N){
72
+ int tid = blockDim.x * blockIdx.x + threadIdx.x;
73
+ //Do Some Computation
74
+
75
+ int size = (LINE_SIZE*ASSOC*SETS)/sizeof(int);
76
+ unsigned j=0, k=0;
77
+ int m_sum=0;
78
+ // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
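+ // every thread re-reads the same element A[tid], so only the first load misses and the remaining ITERATIONS-1 loads should hit in the L1 data cache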
79
+ for(k=0; k<ITERATIONS; ++k){
80
+ //for(j=0; j<(size); j+=THREADS_PER_BLOCK){
81
+ m_sum += A[tid];
82
+ //}
83
+ }
84
+ C[tid]=m_sum;
85
+ __syncthreads();
86
+ }
87
+
88
+
89
+ // Host code
90
+
91
+ int main(){
92
+
93
+ printf("Power Microbenchmarks\n");
94
+ //int N = LINE_SIZE*SETS*ASSOC;
95
+ int N = NUM_OF_BLOCKS*THREADS_PER_BLOCK;
96
+ size_t size = N * sizeof(int) * NUM_OF_BLOCKS;
97
+
98
+ // Allocate input vectors h_A and h_B in host memory
99
+ h_A = (int*)malloc(size);
100
+ if (h_A == 0) CleanupResources();
101
+ //h_B = (float*)malloc(size);
102
+ //if (h_B == 0) CleanupResources();
103
+ h_C = (int*)malloc(size);
104
+ if (h_C == 0) CleanupResources();
105
+
106
+ // Initialize input vectors
107
+ RandomInit(h_A, N);
108
+ //RandomInit(h_B, N);
109
+
110
+ // Allocate vectors in device memory
111
+ checkCudaErrors( cudaMalloc((void**)&d_A, size) );
112
+ //checkCudaErrors( cudaMalloc((void**)&d_B, size) );
113
+ checkCudaErrors( cudaMalloc((void**)&d_C, size) );
114
+
115
+ // Copy vectors from host memory to device memory
116
+ checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
117
+ //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
118
+
119
+ //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
120
+ dim3 dimGrid(NUM_OF_BLOCKS,1);
121
+ dim3 dimBlock(THREADS_PER_BLOCK,1);
122
+
123
+ CUT_SAFE_CALL(cutCreateTimer(&my_timer));
124
+ TaskHandle taskhandle = LaunchDAQ();
125
+ CUT_SAFE_CALL(cutStartTimer(my_timer));
126
+
127
+ PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
128
+
129
+ CUDA_SAFE_CALL( cudaThreadSynchronize() );
130
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
131
+ TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
132
+ CUT_SAFE_CALL(cutStopTimer(my_timer));
133
+ CUT_SAFE_CALL(cutDeleteTimer(my_timer));
134
+
135
+ getLastCudaError("kernel launch failure");
136
+
137
+ #ifdef _DEBUG
138
+ checkCudaErrors( cudaDeviceSynchronize() );
139
+ #endif
140
+
141
+ // Copy result from device memory to host memory
142
+ // h_C contains the result in host memory
143
+ checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
144
+
145
+ CleanupResources();
146
+
147
+ return 0;
148
+ }
149
+
150
+ void CleanupResources(void){
151
+ // Free device memory
152
+ if (d_A)
153
+ cudaFree(d_A);
154
+ //if (d_B)
155
+ // cudaFree(d_B);
156
+ if (d_C)
157
+ cudaFree(d_C);
158
+
159
+ // Free host memory
160
+ if (h_A)
161
+ free(h_A);
162
+ // if (h_B)
163
+ // free(h_B);
164
+ if (h_C)
165
+ free(h_C);
166
+
167
+ }
168
+
169
+ // Fills an array with random integer entries.
170
+ void RandomInit(int* data, int n){
171
+ for (int i = 0; i < n; ++i)
172
+ data[i] = rand(); // rand() / RAND_MAX is integer division and almost always yields 0
173
+ }
174
+
175
+
176
+
177
+
178
+
179
+
cuda_code/BE_L1D_MISS_L2D_HIT_13.cu ADDED
@@ -0,0 +1,189 @@
1
+ #include <stdio.h>
2
+ #include <stdlib.h>
3
+ #include <cutil.h>
4
+ #include <math.h>
5
+ // Includes
6
+ #include <stdio.h>
7
+ #include "../include/ContAcq-IntClk.h"
8
+ //#include "REPEATL.h"
9
+ #include "../include/REPEATW.h"
10
+ // includes, project
11
+ #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
12
+ //#include <shrQATest.h>
13
+ //#include <shrUtils.h>
14
+
15
+ // includes CUDA
16
+ #include <cuda_runtime.h>
17
+
18
+ #define THREADS_PER_BLOCK 256
19
+ #define NUM_OF_BLOCKS 60
20
+ #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
21
+ #define LINE_SIZE 32
22
+ #define SETS 64
23
+ #define ASSOC 6
24
+ #define SIMD_WIDTH 32
25
+ #define ITERATIONS REPLACE_ITERATIONS
26
+ // Variables
27
+ int* h_A;
28
+ int* h_B;
29
+ int* h_C;
30
+ int* d_A;
31
+ int* d_B;
32
+ int* d_C;
33
+ bool noprompt = false;
34
+ unsigned int my_timer;
35
+
36
+ // Functions
37
+ void CleanupResources(void);
38
+ void RandomInit(int*, int);
39
+ void ParseArguments(int, char**);
40
+
41
+ ////////////////////////////////////////////////////////////////////////////////
42
+ // These are CUDA Helper functions
43
+
44
+ // This will output the proper CUDA error strings in the event that a CUDA host call returns an error
45
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
46
+
47
+ inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
48
+ if(cudaSuccess != err){
49
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
50
+ exit(-1);
51
+ }
52
+ }
53
+
54
+ // This will output the proper error string when calling cudaGetLastError
55
+ #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
56
+
57
+ inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
58
+ cudaError_t err = cudaGetLastError();
59
+ if (cudaSuccess != err){
60
+ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
61
+ exit(-1);
62
+ }
63
+ }
64
+
65
+ // end of CUDA Helper Functions
66
+
67
+
68
+
69
+
70
+ // Device code
71
+ __global__ void PowerKernal(int* A, int* C, int N){
72
+ int tid = blockDim.x * blockIdx.x + threadIdx.x;
73
+ //Do Some Computation
74
+
75
+ int size = (400*max_tid*LINE_SIZE)/sizeof(int);
76
+ unsigned j=0, k=0;
77
+
78
+ int sum=0;
79
+
80
+ // Fill the L1 cache, Miss on every iteration
81
+ for (int i=0; i<ITERATIONS ; i++){
82
+ //REPEAT_L3(tid);
83
+ REPEAT_L6(0);
84
+ }
85
+
86
+
87
+ /*
88
+ // Fill the L1 cache, Miss on first LD, Hit on subsequent LDs
89
+ for(k=0; k<ITERATIONS; ++k){
90
+ for(j=0; j<(size/2); j+=THREADS_PER_BLOCK){
91
+ C[tid+j] = A[tid+j];
92
+ }
93
+ }
94
+ */
95
+ C[0]=sum;
96
+ __syncthreads();
97
+
98
+
99
+ }
100
+
101
+
102
+ // Host code
103
+
104
+ int main(){
105
+
106
+ printf("Power Microbenchmarks\n");
107
+ int N = (400*max_tid*LINE_SIZE);
108
+ size_t size = N * sizeof(int) ;
109
+
110
+
111
+ // Allocate input vectors h_A and h_B in host memory
112
+ h_A = (int*)malloc(size);
113
+ if (h_A == 0) CleanupResources();
114
+ //h_B = (float*)malloc(size);
115
+ //if (h_B == 0) CleanupResources();
116
+ h_C = (int*)malloc(size);
117
+ if (h_C == 0) CleanupResources();
118
+
119
+ // Initialize input vectors
120
+ RandomInit(h_A, N);
121
+ //RandomInit(h_B, N);
122
+
123
+ // Allocate vectors in device memory
124
+ checkCudaErrors( cudaMalloc((void**)&d_A, size) );
125
+ //checkCudaErrors( cudaMalloc((void**)&d_B, size) );
126
+ checkCudaErrors( cudaMalloc((void**)&d_C, size) );
127
+
128
+ // Copy vectors from host memory to device memory
129
+ checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
130
+ //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
131
+
132
+ //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
133
+ dim3 dimGrid(NUM_OF_BLOCKS,1);
134
+ dim3 dimBlock(THREADS_PER_BLOCK,1);
135
+ CUT_SAFE_CALL(cutCreateTimer(&my_timer));
136
+ TaskHandle taskhandle = LaunchDAQ();
137
+ CUT_SAFE_CALL(cutStartTimer(my_timer));
138
+ PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, N);
139
+ CUDA_SAFE_CALL( cudaThreadSynchronize() );
140
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
141
+ TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
142
+ CUT_SAFE_CALL(cutStopTimer(my_timer));
143
+ CUT_SAFE_CALL(cutDeleteTimer(my_timer));
144
+
145
+ getLastCudaError("kernel launch failure");
146
+
147
+ #ifdef _DEBUG
148
+ checkCudaErrors( cudaDeviceSynchronize() );
149
+ #endif
150
+
151
+ // Copy result from device memory to host memory
152
+ // h_C contains the result in host memory
153
+ checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
154
+
155
+ CleanupResources();
156
+
157
+ return 0;
158
+ }
159
+
160
+ void CleanupResources(void){
161
+ // Free device memory
162
+ if (d_A)
163
+ cudaFree(d_A);
164
+ //if (d_B)
165
+ // cudaFree(d_B);
166
+ if (d_C)
167
+ cudaFree(d_C);
168
+
169
+ // Free host memory
170
+ if (h_A)
171
+ free(h_A);
172
+ // if (h_B)
173
+ // free(h_B);
174
+ if (h_C)
175
+ free(h_C);
176
+
177
+ }
178
+
179
+ // Allocates an array with random float entries.
180
+ void RandomInit(int* data, int n){
181
+ for (int i = 0; i < n; ++i)
182
+ data[i] = (int)(rand() / RAND_MAX);
183
+ }
184
+
185
+
186
+
187
+
188
+
189
+
cuda_code/BE_L1D_MISS_L2D_HIT_19.cu ADDED
@@ -0,0 +1,171 @@
1
+ // Includes
2
+ #include <stdio.h>
3
+ #include <stdlib.h>
4
+
5
+
6
+ // includes CUDA
7
+ #include <cuda_runtime.h>
8
+
9
+ // includes, project
10
+ #include "../include/REPEATW.h"
11
+
12
+
13
+ #define THREADS_PER_BLOCK 256
14
+ #define NUM_OF_BLOCKS 640
15
+ #define max_tid THREADS_PER_BLOCK*NUM_OF_BLOCKS
16
+ #define LINE_SIZE 12
17
+ // Variables
18
+ int* h_A;
19
+ int* h_B;
20
+ int* h_C;
21
+ int* d_A;
22
+ int* d_B;
23
+ int* d_C;
24
+ bool noprompt = false;
25
+ unsigned int my_timer;
26
+
27
+ // Functions
28
+ void CleanupResources(void);
29
+ void RandomInit(int*, int);
30
+
31
+ ////////////////////////////////////////////////////////////////////////////////
32
+ // These are CUDA Helper functions
33
+
34
+ // This will output the proper CUDA error strings in the event that a CUDA host call returns an error
35
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
36
+
37
+ inline void __checkCudaErrors(cudaError err, const char *file, const int line ){
38
+ if(cudaSuccess != err){
39
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
40
+ exit(-1);
41
+ }
42
+ }
43
+
44
+ // This will output the proper error string when calling cudaGetLastError
45
+ #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
46
+
47
+ inline void __getLastCudaError(const char *errorMessage, const char *file, const int line ){
48
+ cudaError_t err = cudaGetLastError();
49
+ if (cudaSuccess != err){
50
+ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
51
+ exit(-1);
52
+ }
53
+ }
54
+
55
+ // end of CUDA Helper Functions
56
+
57
+
58
+
59
+
60
+ // Device code
61
+ __global__ void PowerKernal(int* A, int* C, int iterations){
62
+ int tid = blockDim.x * blockIdx.x + threadIdx.x;
63
+ //Do Some Computation
64
+
65
+ int sum=0;
66
+
67
+ // Fill the L1 cache, Miss on every iteration
68
+ for (int i=0; i<iterations ; i++){
69
+ //REPLACE_ITERATIONS
70
+ REPEAT_L6(0);
71
+ }
72
+
73
+ C[0]=sum;
74
+ __syncthreads();
75
+
76
+
77
+ }
78
+
79
+
80
+ // Host code
81
+ int main(int argc, char** argv)
82
+ {
83
+ int iterations;
84
+ if (argc != 2){
85
+ fprintf(stderr,"usage: %s #iterations\n",argv[0]);
86
+ exit(1);
87
+ }
88
+ else{
89
+ iterations = atoi(argv[1]);
90
+ }
91
+
92
+ printf("Power Microbenchmark with %d iterations\n",iterations);
93
+ int N = (400*max_tid*LINE_SIZE);
94
+ size_t size = N * sizeof(int) ;
95
+ // Allocate input vectors h_A and h_B in host memory
96
+ h_A = (int*)malloc(size);
97
+ if (h_A == 0) CleanupResources();
98
+ // h_B = (int*)malloc(size);
99
+ // if (h_B == 0) CleanupResources();
100
+ h_C = (int*)malloc(size);
101
+ if (h_C == 0) CleanupResources();
102
+
103
+
104
+
105
+ // Initialize input vectors
106
+ RandomInit(h_A, N);
107
+ // RandomInit(h_B, N);
108
+
109
+ // Allocate vectors in device memory
110
+ checkCudaErrors( cudaMalloc((void**)&d_A, size) );
111
+ // checkCudaErrors( cudaMalloc((void**)&d_B, size) );
112
+ checkCudaErrors( cudaMalloc((void**)&d_C, size) );
113
+
114
+ cudaEvent_t start, stop;
115
+ float elapsedTime = 0;
116
+ checkCudaErrors(cudaEventCreate(&start));
117
+ checkCudaErrors(cudaEventCreate(&stop));
118
+
119
+ // Copy vectors from host memory to device memory
120
+ checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
121
+ // checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
122
+
123
+
124
+ dim3 dimGrid(NUM_OF_BLOCKS,1);
125
+ dim3 dimBlock(THREADS_PER_BLOCK,1);
126
+
127
+ checkCudaErrors(cudaEventRecord(start));
128
+ PowerKernal<<<dimGrid,dimBlock>>>(d_A, d_C, iterations);
129
+ checkCudaErrors(cudaEventRecord(stop));
130
+
131
+ checkCudaErrors(cudaEventSynchronize(stop));
132
+ checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
133
+ printf("gpu execution time = %.2f s\n", elapsedTime/1000);
134
+
135
+ getLastCudaError("kernel launch failure");
136
+ cudaThreadSynchronize();
137
+
138
+ // Copy result from device memory to host memory
139
+ // h_C contains the result in host memory
140
+ checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
141
+
142
+ checkCudaErrors(cudaEventDestroy(start));
143
+ checkCudaErrors(cudaEventDestroy(stop));
144
+ CleanupResources();
145
+
146
+ return 0;
147
+ }
148
+
149
+ void CleanupResources(void){
150
+ // Free device memory
151
+ if (d_A)
152
+ cudaFree(d_A);
153
+ //if (d_B)
154
+ // cudaFree(d_B);
155
+ if (d_C)
156
+ cudaFree(d_C);
157
+
158
+ // Free host memory
159
+ if (h_A)
160
+ free(h_A);
161
+ // if (h_B)
162
+ // free(h_B);
163
+ if (h_C)
164
+ free(h_C);
165
+ }
166
+
167
+ // Fills an array with random integer entries.
168
+ void RandomInit(int* data, int n){
169
+ for (int i = 0; i < n; ++i)
170
+ data[i] = rand(); // rand() / RAND_MAX is integer division and almost always yields 0
171
+ }
cuda_code/BE_MEM_SHRD_Acss.cu ADDED
@@ -0,0 +1,174 @@
1
+ #include <stdio.h>
2
+ #include <stdlib.h>
3
+ #include <cutil.h>
4
+ // Includes
5
+ #include <stdio.h>
6
+ #include "../include/ContAcq-IntClk.h"
7
+
8
+ // includes, project
9
+ #include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
10
+ //#include <shrQATest.h>
11
+ //#include <shrUtils.h>
12
+
13
+ // includes CUDA
14
+ #include <cuda_runtime.h>
15
+
16
+ #define THREADS_PER_BLOCK 256
17
+ #define NUM_OF_BLOCKS 60
18
+ #define ITERATIONS REPLACE_ITERATIONS
19
+
20
+ // Variables
21
+ unsigned* h_C1;
22
+ float* h_C2;
23
+ unsigned* d_C1;
24
+ float* d_C2;
25
+ bool noprompt = false;
26
+ unsigned int my_timer;
27
+
28
+ // Functions
29
+ void CleanupResources(void);
30
+ void RandomInit(unsigned*, int);
31
+ void ParseArguments(int, char**);
32
+
33
+ ////////////////////////////////////////////////////////////////////////////////
34
+ // These are CUDA Helper functions
35
+
36
+ // This will output the proper CUDA error strings in the event that a CUDA host call returns an error
37
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
38
+
39
+ inline void __checkCudaErrors(cudaError err, const char *file, const int line )
40
+ {
41
+ if(cudaSuccess != err){
42
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
43
+ exit(-1);
44
+ }
45
+ }
46
+
47
+ // This will output the proper error string when calling cudaGetLastError
48
+ #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
49
+
50
+ inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
51
+ {
52
+ cudaError_t err = cudaGetLastError();
53
+ if (cudaSuccess != err){
54
+ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
55
+ exit(-1);
56
+ }
57
+ }
58
+
59
+ // end of CUDA Helper Functions
60
+
61
+
62
+
63
+
64
+ // Device code
65
+ __global__ void PowerKernal(unsigned* C1, float* C2, int N)
66
+ {
67
+ int i = threadIdx.x;
68
+ //Do Some Computation
69
+ __device__ __shared__ unsigned I1[THREADS_PER_BLOCK];
70
+ __device__ __shared__ unsigned I2[THREADS_PER_BLOCK];
71
+ __device__ __shared__ float I3[THREADS_PER_BLOCK];
72
+ __device__ __shared__ float I4[THREADS_PER_BLOCK];
73
+
74
+ I1[i]=i*2;
75
+ I2[i]=i;
76
+ I3[i]=i/2;
77
+ I4[i]=i;
78
+
79
+ __syncthreads();
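+ // the two loops below bounce values back and forth between the shared-memory array pairs (I1/I2 and I3/I4) to generate sustained shared-memory read/write traffic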
80
+
81
+ for(unsigned k=0; k<ITERATIONS ;k++) {
82
+ I1[i]=I2[(i+k)%THREADS_PER_BLOCK];
83
+ I2[i]=I1[(i+k+1)%THREADS_PER_BLOCK];
84
+ }
85
+
86
+ for(unsigned k=0; k<ITERATIONS ;k++) {
87
+ I3[i]=I4[(i+k)%THREADS_PER_BLOCK];
88
+ I4[i]=I3[(i+k+1)%THREADS_PER_BLOCK];
89
+ }
90
+ //C1[i]=I2[i];
91
+ //C2[i]=I4[i];
92
+ __syncthreads();
93
+
94
+ }
95
+
96
+
97
+ // Host code
98
+
99
+ int main()
100
+ {
101
+ printf("Power Microbenchmarks\n");
102
+ int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
103
+ size_t size1 = N * sizeof(unsigned);
104
+ size_t size2 = N * sizeof(float);
105
+
106
+ // Allocate vectors in device memory
107
+ h_C1 = (unsigned *) malloc(size1);
108
+ h_C2 = (float *) malloc(size2);
109
+ checkCudaErrors( cudaMalloc((void**)&d_C1, size1) );
110
+ checkCudaErrors( cudaMalloc((void**)&d_C2, size2) );
111
+
112
+ //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
113
+ dim3 dimGrid(NUM_OF_BLOCKS,1);
114
+ dim3 dimBlock(THREADS_PER_BLOCK,1);
115
+
116
+ CUT_SAFE_CALL(cutCreateTimer(&my_timer));
117
+ TaskHandle taskhandle = LaunchDAQ();
118
+ CUT_SAFE_CALL(cutStartTimer(my_timer));
119
+
120
+ PowerKernal<<<dimGrid,dimBlock>>>(d_C1, d_C2, N);
121
+
122
+ CUDA_SAFE_CALL( cudaThreadSynchronize() );
123
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
124
+ TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
125
+ CUT_SAFE_CALL(cutStopTimer(my_timer));
126
+ CUT_SAFE_CALL(cutDeleteTimer(my_timer));
127
+
128
+ getLastCudaError("kernel launch failure");
129
+
130
+ #ifdef _DEBUG
131
+ checkCudaErrors( cudaDeviceSynchronize() );
132
+ #endif
133
+
134
+ // Copy result from device memory to host memory
135
+ // h_C contains the result in host memory
136
+ checkCudaErrors( cudaMemcpy(h_C1, d_C1, size1, cudaMemcpyDeviceToHost) );
137
+ checkCudaErrors( cudaMemcpy(h_C2, d_C2, size2, cudaMemcpyDeviceToHost) );
138
+
139
+ CleanupResources();
140
+
141
+ return 0;
142
+ }
143
+
144
+ void CleanupResources(void)
145
+ {
146
+ // Free device memory
147
+ if (d_C1)
148
+ cudaFree(d_C1);
149
+ if (d_C2)
150
+ cudaFree(d_C2);
151
+
152
+
153
+ // Free host memory
154
+ if (h_C1)
155
+ free(h_C1);
156
+ if (h_C2)
157
+ free(h_C2); // was cudaFree(d_C2), which double-frees device memory and leaks h_C2
158
+
159
+ }
160
+
161
+ // Fills an array with random unsigned entries.
162
+ void RandomInit(unsigned* data, int n)
163
+ {
164
+ srand((unsigned)time(0)); // seed once; reseeding on every iteration produces identical values
165
+ for (int i = 0; i < n; ++i){
166
+ data[i] = rand(); // rand() / RAND_MAX is integer division and almost always yields 0
167
+ }
168
+ }
169
+
170
+
171
+
172
+
173
+
174
+
cuda_code/BE_SP_FP_DIV_1.cu ADDED
@@ -0,0 +1,220 @@
1
+ #include <stdio.h>
2
+ #include <stdlib.h>
3
+ //#include <cutil.h>
4
+ // Includes
5
+ //#include <stdio.h>
6
+
7
+ // includes, project
8
+ //#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
9
+ //#include <shrQATest.h>
10
+ //#include <shrUtils.h>
11
+
12
+ // includes CUDA
13
+ #include <cuda_runtime.h>
14
+
15
+ #define THREADS_PER_BLOCK 256
16
+ #define NUM_OF_BLOCKS 640
17
+ //#define ITERATIONS 40
18
+ //#include "../include/ContAcq-IntClk.h"
19
+
20
+ // Variables
21
+ float* h_A;
22
+ float* h_B;
23
+ float* h_C;
24
+ float* d_A;
25
+ float* d_B;
26
+ float* d_C;
27
+ //bool noprompt = false;
28
+ //unsigned int my_timer;
29
+
30
+ // Functions
31
+ void CleanupResources(void);
32
+ void RandomInit(float*, int);
33
+ //void ParseArguments(int, char**);
34
+
35
+ ////////////////////////////////////////////////////////////////////////////////
36
+ // These are CUDA Helper functions
37
+
38
+ // This will output the proper CUDA error strings in the event that a CUDA host call returns an error
39
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
40
+
41
+ inline void __checkCudaErrors(cudaError err, const char *file, const int line )
42
+ {
43
+ if(cudaSuccess != err){
44
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
45
+ exit(-1);
46
+ }
47
+ }
48
+
49
+ // This will output the proper error string when calling cudaGetLastError
50
+ #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
51
+
52
+ inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
53
+ {
54
+ cudaError_t err = cudaGetLastError();
55
+ if (cudaSuccess != err){
56
+ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
57
+ exit(-1);
58
+ }
59
+ }
60
+
61
+ // end of CUDA Helper Functions
62
+
63
+
64
+
65
+
66
+ __global__ void PowerKernal3(const float* A, const float* B, float* C, int N)
67
+ {
68
+ int i = blockDim.x * blockIdx.x + threadIdx.x;
69
+ //Do Some Computation
70
+ float Value1;
71
+ float Value2 = 999999;
72
+ float Value3;
73
+ float Value;
74
+ float I1=A[i];
75
+ float I2=B[i];
76
+
77
+
78
+ __syncthreads();
79
+ #pragma unroll 100
80
+ // Excessive Division Operations
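+ // the divisions below form a mostly serial dependence chain, keeping the floating-point divider busy with little independent work to overlap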
81
+ for(unsigned k=0; k<N;k++) {
82
+ Value1=I1/I2;
83
+ Value3=I1/I2;
84
+ Value1/=Value2;
85
+ Value1/=Value2;
86
+ Value2=Value3/Value1;
87
+ Value1=Value2/Value3;
88
+ }
89
+
90
+ __syncthreads();
91
+ Value=Value1;
92
+ C[i]=Value/Value2;
93
+ }
94
+
95
+
96
+
97
+ int main(int argc, char** argv)
98
+ {
99
+ int iterations;
100
+ if(argc!=2) {
101
+ fprintf(stderr,"usage: %s #iterations\n",argv[0]);
102
+ exit(1);
103
+ }
104
+ else {
105
+ iterations = atoi(argv[1]);
106
+ }
107
+
108
+ printf("Power Microbenchmarks with iterations %d\n",iterations);
109
+ int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
110
+ size_t size = N * sizeof(float);
111
+ // Allocate input vectors h_A and h_B in host memory
112
+ h_A = (float*)malloc(size);
113
+ if (h_A == 0) CleanupResources();
114
+ h_B = (float*)malloc(size);
115
+ if (h_B == 0) CleanupResources();
116
+ h_C = (float*)malloc(size);
117
+ if (h_C == 0) CleanupResources();
118
+
119
+ // Initialize input vectors
120
+ RandomInit(h_A, N);
121
+ RandomInit(h_B, N);
122
+
123
+ // Allocate vectors in device memory
124
+ printf("before\n");
125
+ checkCudaErrors( cudaMalloc((void**)&d_A, size) );
126
+ checkCudaErrors( cudaMalloc((void**)&d_B, size) );
127
+ checkCudaErrors( cudaMalloc((void**)&d_C, size) );
128
+ printf("after\n");
129
+
130
+ cudaEvent_t start, stop;
131
+ float elapsedTime = 0;
132
+ checkCudaErrors(cudaEventCreate(&start));
133
+ checkCudaErrors(cudaEventCreate(&stop));
134
+
135
+ // Copy vectors from host memory to device memory
136
+ checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
137
+ checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
138
+
139
+ //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
140
+ dim3 dimGrid(NUM_OF_BLOCKS,1);
141
+ dim3 dimBlock(THREADS_PER_BLOCK,1);
142
+ dim3 dimGrid2(1,1);
143
+ dim3 dimBlock2(1,1);
144
+
145
+ checkCudaErrors(cudaEventRecord(start));
146
+ PowerKernal3<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
147
+ checkCudaErrors(cudaEventRecord(stop));
148
+
149
+ checkCudaErrors(cudaEventSynchronize(stop));
150
+ checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
151
+ printf("execution time = %.2f s\n", elapsedTime/1000);
152
+ getLastCudaError("kernel launch failure");
153
+ cudaThreadSynchronize();
154
+
155
+ /*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
156
+ TaskHandle taskhandle = LaunchDAQ();
157
+ CUT_SAFE_CALL(cutStartTimer(my_timer));
158
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
159
+
160
+
161
+
162
+ PowerKernal3<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
163
+ CUDA_SAFE_CALL( cudaThreadSynchronize() );
164
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
165
+
166
+
167
+ getLastCudaError("kernel launch failure");
168
+ CUDA_SAFE_CALL( cudaThreadSynchronize() );
169
+ CUT_SAFE_CALL(cutStopTimer(my_timer));
170
+ TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
171
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
172
+ CUT_SAFE_CALL(cutDeleteTimer(my_timer));
173
+
174
+ #ifdef _DEBUG
175
+ checkCudaErrors( cudaDeviceSynchronize() );
176
+ #endif*/
177
+
178
+ // Copy result from device memory to host memory
179
+ // h_C contains the result in host memory
180
+ checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
181
+ checkCudaErrors(cudaEventDestroy(start));
182
+ checkCudaErrors(cudaEventDestroy(stop));
183
+ CleanupResources();
184
+
185
+ return 0;
186
+ }
187
+
188
+ void CleanupResources(void)
189
+ {
190
+ // Free device memory
191
+ if (d_A)
192
+ cudaFree(d_A);
193
+ if (d_B)
194
+ cudaFree(d_B);
195
+ if (d_C)
196
+ cudaFree(d_C);
197
+
198
+ // Free host memory
199
+ if (h_A)
200
+ free(h_A);
201
+ if (h_B)
202
+ free(h_B);
203
+ if (h_C)
204
+ free(h_C);
205
+
206
+ }
207
+
208
+ // Allocates an array with random float entries.
209
+ void RandomInit(float* data, int n)
210
+ {
211
+ for (int i = 0; i < n; ++i){
212
+ data[i] = rand() / (float)RAND_MAX; // cast avoids integer division, which would always yield 0
213
+ }
214
+ }
215
+
216
+
217
+
218
+
219
+
220
+
cuda_code/BE_SP_INT_ADD_l32_1.cu ADDED
@@ -0,0 +1,184 @@
1
+ // Includes
2
+ #include <stdio.h>
3
+ #include <stdlib.h>
4
+ // includes from project
5
+
6
+
7
+ // includes from CUDA
8
+ #include <cuda_runtime.h>
9
+ //#include <helper_math.h>
10
+
11
+ #define THREADS_PER_BLOCK 256
12
+ #define NUM_OF_BLOCKS 640
13
+
14
+
15
+ // Variables
16
+ unsigned* h_A;
17
+ unsigned* h_B;
18
+ unsigned* h_C;
19
+ unsigned* d_A;
20
+ unsigned* d_B;
21
+ unsigned* d_C;
22
+
23
+ // Functions
24
+ void CleanupResources(void);
25
+ void RandomInit(unsigned*, int);
26
+
27
+ ////////////////////////////////////////////////////////////////////////////////
28
+ // These are CUDA Helper functions
29
+
30
+ // This will output the proper CUDA error strings in the event that a CUDA host call returns an error
31
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
32
+
33
+ inline void __checkCudaErrors(cudaError err, const char *file, const int line )
34
+ {
35
+ if(cudaSuccess != err){
36
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
37
+ exit(-1);
38
+ }
39
+ }
40
+
41
+ // This will output the proper error string when calling cudaGetLastError
42
+ #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
43
+
44
+ inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
45
+ {
46
+ cudaError_t err = cudaGetLastError();
47
+ if (cudaSuccess != err){
48
+ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
49
+ exit(-1);
50
+ }
51
+ }
52
+ // end of CUDA Helper Functions
53
+
54
+ __global__ void PowerKernal2(const unsigned* A, const unsigned* B, unsigned* C, int iterations)
55
+ {
56
+ int i = blockDim.x * blockIdx.x + threadIdx.x;
57
+ //Do Some Computation
58
+ unsigned Value1=0;
59
+ unsigned Value2=0;
60
+ unsigned Value3=0;
61
+ unsigned Value=0;
62
+ unsigned I1=A[i];
63
+ unsigned I2=B[i];
64
+
65
+ // Excessive INT addition access
66
+ if((i%32)<=31){
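+ // note: i % 32 is always <= 31, so this condition is true for every thread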
67
+ #pragma unroll 100
68
+ for(unsigned k=0; k<iterations;k++) {
69
+ Value2= I1+I2;
70
+ Value3=I1-I2;
71
+ Value1-=Value2;
72
+ Value3+=Value1;
73
+ Value2-=Value3;
74
+ Value1+=Value3;
75
+ }
76
+ }
77
+ __syncthreads();
78
+
79
+ Value=Value1;
80
+ C[i]=Value;
81
+ __syncthreads();
82
+ }
83
+
84
+ int main(int argc, char** argv)
85
+ {
86
+
87
+ int iterations;
88
+ if (argc != 2){
89
+ fprintf(stderr,"usage: %s #iterations\n",argv[0]);
90
+ exit(1);
91
+ }
92
+ else{
93
+ iterations = atoi(argv[1]);
94
+ }
95
+
96
+ printf("Power Microbenchmark with %d iterations\n",iterations);
97
+ int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
98
+ size_t size = N * sizeof(unsigned);
99
+ // Allocate input vectors h_A and h_B in host memory
100
+ h_A = (unsigned*)malloc(size);
101
+ if (h_A == 0) CleanupResources();
102
+ h_B = (unsigned*)malloc(size);
103
+ if (h_B == 0) CleanupResources();
104
+ h_C = (unsigned*)malloc(size);
105
+ if (h_C == 0) CleanupResources();
106
+
107
+
108
+
109
+ // Initialize input vectors
110
+ RandomInit(h_A, N);
111
+ RandomInit(h_B, N);
112
+
113
+ // Allocate vectors in device memory
114
+ printf("before\n");
115
+ checkCudaErrors( cudaMalloc((void**)&d_A, size) );
116
+ checkCudaErrors( cudaMalloc((void**)&d_B, size) );
117
+ checkCudaErrors( cudaMalloc((void**)&d_C, size) );
118
+ printf("after\n");
119
+
120
+ cudaEvent_t start, stop;
121
+ float elapsedTime = 0;
122
+ checkCudaErrors(cudaEventCreate(&start));
123
+ checkCudaErrors(cudaEventCreate(&stop));
124
+
125
+ // Copy vectors from host memory to device memory
126
+ checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
127
+ checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
128
+
129
+
130
+ dim3 dimGrid(NUM_OF_BLOCKS,1);
131
+ dim3 dimBlock(THREADS_PER_BLOCK,1);
132
+ dim3 dimGrid2(1,1);
133
+ dim3 dimBlock2(1,1);
134
+
135
+ checkCudaErrors(cudaEventRecord(start));
136
+ PowerKernal2<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
137
+ checkCudaErrors(cudaEventRecord(stop));
138
+
139
+ checkCudaErrors(cudaEventSynchronize(stop));
140
+ checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
141
+ printf("gpu execution time = %.2f s\n", elapsedTime/1000);
142
+
143
+ getLastCudaError("kernel launch failure");
144
+ cudaThreadSynchronize();
145
+
146
+ // Copy result from device memory to host memory
147
+ // h_C contains the result in host memory
148
+ checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
149
+
150
+ checkCudaErrors(cudaEventDestroy(start));
151
+ checkCudaErrors(cudaEventDestroy(stop));
152
+ CleanupResources();
153
+
154
+ return 0;
155
+ }
156
+
157
+ void CleanupResources(void)
158
+ {
159
+ // Free device memory
160
+ if (d_A)
161
+ cudaFree(d_A);
162
+ if (d_B)
163
+ cudaFree(d_B);
164
+ if (d_C)
165
+ cudaFree(d_C);
166
+
167
+ // Free host memory
168
+ if (h_A)
169
+ free(h_A);
170
+ if (h_B)
171
+ free(h_B);
172
+ if (h_C)
173
+ free(h_C);
174
+
175
+ }
176
+
177
+ // Allocates an array with random unsigned entries.
178
+ void RandomInit(unsigned* data, int n)
179
+ {
180
+ srand((unsigned)time(0)); // seed once; reseeding on every iteration produces identical values
181
+ for (int i = 0; i < n; ++i){
182
+ data[i] = rand(); // rand() / RAND_MAX is integer division and almost always yields 0
183
+ }
184
+ }
cuda_code/BE_SP_INT_MUL_3.cu ADDED
@@ -0,0 +1,225 @@
1
+ #include <stdio.h>
2
+ #include <stdlib.h>
3
+ //#include <cutil.h>
4
+ // Includes
5
+ //#include <stdio.h>
6
+ //#include "../include/ContAcq-IntClk.h"
7
+
8
+ // includes, project
9
+ //#include "../include/sdkHelper.h" // helper for shared functions common to CUDA SDK samples
10
+ //#include <shrQATest.h>
11
+ //#include <shrUtils.h>
12
+
13
+ // includes CUDA
14
+ #include <cuda_runtime.h>
15
+
16
+ #define THREADS_PER_BLOCK 256
17
+ #define NUM_OF_BLOCKS 640
18
+ //#define ITERATIONS 40
19
+
20
+ // Variables
21
+ unsigned* h_A;
22
+ unsigned* h_B;
23
+ unsigned* h_C;
24
+ unsigned* d_A;
25
+ unsigned* d_B;
26
+ unsigned* d_C;
27
+ //bool noprompt = false;
28
+ //unsigned int my_timer;
29
+
30
+ // Functions
31
+ void CleanupResources(void);
32
+ void RandomInit(unsigned*, int);
33
+ //void ParseArguments(int, char**);
34
+
35
+ ////////////////////////////////////////////////////////////////////////////////
36
+ // These are CUDA Helper functions
37
+
38
+ // This will output the proper CUDA error strings in the event that a CUDA host call returns an error
39
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
40
+
41
+ inline void __checkCudaErrors(cudaError err, const char *file, const int line )
42
+ {
43
+ if(cudaSuccess != err){
44
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
45
+ exit(-1);
46
+ }
47
+ }
48
+
49
+ // This will output the proper error string when calling cudaGetLastError
50
+ #define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
51
+
52
+ inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
53
+ {
54
+ cudaError_t err = cudaGetLastError();
55
+ if (cudaSuccess != err){
56
+ fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
57
+ exit(-1);
58
+ }
59
+ }
60
+
61
+ // end of CUDA Helper Functions
62
+
63
+
64
+
65
+
66
+ // Device code
67
+
68
+
69
+ __global__ void PowerKernal3(const unsigned* A, const unsigned* B, unsigned* C, int N)
70
+ {
71
+ int i = blockDim.x * blockIdx.x + threadIdx.x;
72
+ //Do Some Computation
73
+ unsigned Value1;
74
+ unsigned Value2 = 999999;
75
+ unsigned Value3;
76
+ unsigned Value;
77
+ unsigned I1=A[i];
78
+ unsigned I2=B[i];
79
+
80
+
81
+ #pragma unroll 100
82
+ // Excessive Multiplication
83
+ for(unsigned k=0; k<N;k++) {
84
+ Value1=I1*I2;
85
+ Value1*=Value2;
86
+ Value3=Value1*I2;
87
+ Value2*=I1*Value3;
88
+ Value1*=Value2;
89
+ Value3*=Value1;
90
+ }
91
+
92
+ __syncthreads();
93
+ Value=Value3;
94
+
95
+ C[i]=Value;
96
+ __syncthreads();
97
+
98
+ }
99
+
100
+
101
+
102
+
103
+ int main(int argc, char** argv)
104
+ {
105
+ int iterations;
106
+ if(argc!=2) {
107
+ fprintf(stderr,"usage: %s #iterations\n",argv[0]);
108
+ exit(1);
109
+ }
110
+ else {
111
+ iterations = atoi(argv[1]);
112
+ }
113
+
114
+ printf("Power Microbenchmarks with iterations %d\n",iterations);
115
+ int N = THREADS_PER_BLOCK*NUM_OF_BLOCKS;
116
+ size_t size = N * sizeof(unsigned);
117
+ // Allocate input vectors h_A and h_B in host memory
118
+ h_A = (unsigned*)malloc(size);
119
+ if (h_A == 0) { CleanupResources(); exit(-1); }
120
+ h_B = (unsigned*)malloc(size);
121
+ if (h_B == 0) { CleanupResources(); exit(-1); }
122
+ h_C = (unsigned*)malloc(size);
123
+ if (h_C == 0) { CleanupResources(); exit(-1); }
124
+
125
+ // Initialize input vectors
126
+ RandomInit(h_A, N);
127
+ RandomInit(h_B, N);
128
+
129
+ // Allocate vectors in device memory
130
+ checkCudaErrors( cudaMalloc((void**)&d_A, size) );
131
+ checkCudaErrors( cudaMalloc((void**)&d_B, size) );
132
+ checkCudaErrors( cudaMalloc((void**)&d_C, size) );
133
+
134
+ // Copy vectors from host memory to device memory
135
+ checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
136
+ checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
137
+
138
+ cudaEvent_t start, stop;
139
+ float elapsedTime = 0;
140
+ checkCudaErrors(cudaEventCreate(&start));
141
+ checkCudaErrors(cudaEventCreate(&stop));
142
+
143
+ //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
144
+ dim3 dimGrid(NUM_OF_BLOCKS,1);
145
+ dim3 dimBlock(THREADS_PER_BLOCK,1);
146
+ dim3 dimGrid2(1,1);
147
+ dim3 dimBlock2(1,1);
148
+
149
+ checkCudaErrors(cudaEventRecord(start));
150
+ PowerKernal3<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, iterations);
151
+ checkCudaErrors(cudaEventRecord(stop));
152
+
153
+ checkCudaErrors(cudaEventSynchronize(stop));
154
+ checkCudaErrors(cudaEventElapsedTime(&elapsedTime, start, stop));
155
+ printf("execution time = %.2f s\n", elapsedTime/1000);
156
+ getLastCudaError("kernel launch failure");
157
+ cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
158
+
159
+ /*CUT_SAFE_CALL(cutCreateTimer(&my_timer));
160
+ TaskHandle taskhandle = LaunchDAQ();
161
+ CUT_SAFE_CALL(cutStartTimer(my_timer));
162
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
163
+
164
+
165
+
166
+ PowerKernal3<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, N);
167
+ CUDA_SAFE_CALL( cudaThreadSynchronize() );
168
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
169
+
170
+
171
+ getLastCudaError("kernel launch failure");
172
+ CUDA_SAFE_CALL( cudaThreadSynchronize() );
173
+ CUT_SAFE_CALL(cutStopTimer(my_timer));
174
+ TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
175
+ printf("execution time = %f\n", cutGetTimerValue(my_timer));
176
+ CUT_SAFE_CALL(cutDeleteTimer(my_timer));
177
+
178
+ #ifdef _DEBUG
179
+ checkCudaErrors( cudaDeviceSynchronize() );
180
+ #endif*/
181
+
182
+ // Copy result from device memory to host memory
183
+ // h_C contains the result in host memory
184
+ checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
185
+ checkCudaErrors(cudaEventDestroy(start));
186
+ checkCudaErrors(cudaEventDestroy(stop));
187
+ CleanupResources();
188
+
189
+ return 0;
190
+ }
191
+
192
+ void CleanupResources(void)
193
+ {
194
+ // Free device memory
195
+ if (d_A)
196
+ cudaFree(d_A);
197
+ if (d_B)
198
+ cudaFree(d_B);
199
+ if (d_C)
200
+ cudaFree(d_C);
201
+
202
+ // Free host memory
203
+ if (h_A)
204
+ free(h_A);
205
+ if (h_B)
206
+ free(h_B);
207
+ if (h_C)
208
+ free(h_C);
209
+
210
+ }
211
+
212
+ // Allocates an array with random unsigned entries.
213
+ void RandomInit(unsigned* data, int n)
214
+ {
215
+ srand((unsigned)time(0)); // seed once; reseeding inside the loop would repeat the same value
216
+ for (int i = 0; i < n; ++i){
217
+ data[i] = rand(); // rand() / RAND_MAX is integer division and would almost always yield 0
218
+ }
219
+ }
220
+
221
+
222
+
223
+
224
+
225
+
cuda_code/BK5_1.cu ADDED
@@ -0,0 +1,812 @@
1
+ /*
2
+
3
+ See LICENSE file.
4
+
5
+ */
6
+
7
+ #include <stdio.h>
8
+ #include <stdlib.h>
9
+ #include <cuda.h>
10
+ #include <cuda_runtime.h>
11
+ #include "meshBasis.hpp"
12
+
13
+ void matrixPrint(int Nrows, int Ncols, dfloat *A, const char *mess){
14
+ #if 0
15
+ printf("%s = [\n", mess);
16
+ for(int i=0;i<Nrows;++i){
17
+ for(int a=0;a<Ncols;++a){
18
+ printf(" % e", A[i*Ncols+a]);
19
+ }
20
+ printf("\n");
21
+ }
22
+ printf("]\n");
23
+ #endif
24
+ }
25
+
26
+
27
+
28
+ __forceinline__ __device__ __host__ int ijN(const int i, const int j, const int N){
29
+
30
+ return i + j*N;
31
+
32
+ }
33
+
34
+ __forceinline__ __device__ __host__ int ijkN(const int i, const int j, const int k, const int N){
35
+
36
+ return i + j*N + k*N*N;
37
+
38
+ }
39
+
40
+ __forceinline__ __device__ __host__ int ijklN(const int i, const int j, const int k, const int l, const int N){
41
+
42
+ return i + j*N + k*N*N + l*N*N*N;
43
+
44
+ }
45
+
46
+ // switch:
47
+ // 1 to use CUDA 10.0 stream recording
48
+ // 0 to use traditional enqueing of kernels
49
+ #define USE_GRAPH 0
50
+
51
+ #define MAX_DOFS_1D 14
52
+ #define MAX_HALF_DOFS_1D 7
53
+
54
+
55
+ #define HALF_DOFS_1D ((NUM_DOFS_1D+1)/2)
56
+
57
+ #define NUM_DOFS_2D (NUM_DOFS_1D*NUM_DOFS_1D)
58
+ #define NUM_DOFS_3D (NUM_DOFS_1D*NUM_DOFS_1D*NUM_DOFS_1D)
59
+
60
+ __constant__ dfloat const_DofToDofD[MAX_DOFS_1D*MAX_DOFS_1D];
61
+ __constant__ dfloat const_oddDofToDofD[MAX_HALF_DOFS_1D*MAX_HALF_DOFS_1D];
62
+ __constant__ dfloat const_evenDofToDofD[MAX_HALF_DOFS_1D*MAX_HALF_DOFS_1D];
63
+
64
+ void randAlloc(int N, dfloat **h_a, dfloat **c_a){
65
+
66
+ *h_a = (dfloat*) calloc(N, sizeof(dfloat));
67
+
68
+ for(int n=0;n<N;++n)
69
+ h_a[0][n] = drand48();
70
+
71
+ cudaMalloc(c_a, N*sizeof(dfloat));
72
+
73
+ cudaMemcpy(c_a[0], h_a[0], N*sizeof(dfloat), cudaMemcpyHostToDevice);
74
+
75
+ }
76
+
77
+ __global__ void nothingKernel(){ }
78
+
79
+
80
+ template <int NUM_DOFS_1D, int p_Nblock >
81
+ __forceinline__ __device__
82
+ void BK5Device(const int numElements,
83
+ const int element,
84
+ const dfloat lambda,
85
+ const dfloat * __restrict__ op,
86
+ const dfloat * __restrict__ DofToDofD,
87
+ const dfloat * __restrict__ oddDofToDofD,
88
+ const dfloat * __restrict__ evenDofToDofD,
89
+ dfloat * __restrict__ r_p,
90
+ dfloat * __restrict__ r_Ap){
91
+
92
+ __shared__ dfloat s_p[p_Nblock][NUM_DOFS_1D][NUM_DOFS_1D];
93
+ __shared__ dfloat s_Gpr[p_Nblock][NUM_DOFS_1D][NUM_DOFS_1D];
94
+ __shared__ dfloat s_Gps[p_Nblock][NUM_DOFS_1D][NUM_DOFS_1D];
95
+
96
+ // assumes NUM_DOFS_2D threads
97
+ int t = threadIdx.x;
98
+ int blk = threadIdx.y;
99
+
100
+ int i = t%NUM_DOFS_1D;
101
+ int j = t/NUM_DOFS_1D;
102
+
103
+ for(int k = 0; k < NUM_DOFS_1D; k++) {
104
+ r_Ap[k] = 0.f; // zero the accumulator
105
+ }
106
+
107
+ // Layer by layer
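+ // each thread keeps a column of NUM_DOFS_1D values in registers (r_p) and sweeps the k-slices, staging one slice at a time in shared memory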
108
+ #pragma unroll
109
+ for(int k = 0; k < NUM_DOFS_1D; k++) {
110
+
111
+ // share r_p[k]
112
+ __syncthreads();
113
+
114
+ s_p[blk][j][i] = r_p[k];
115
+
116
+ __syncthreads();
117
+
118
+ dfloat G00 = 0, G01 =0, G02 =0, G11 =0, G12 =0, G22 =0, GWJ =0;
119
+
120
+ // prefetch geometric factors
121
+ const int gbase = element*p_Nggeo*NUM_DOFS_3D + ijkN(i,j,k,NUM_DOFS_1D);
122
+
123
+ if(element<numElements){
124
+ G00 = op[gbase+p_G00ID*NUM_DOFS_3D];
125
+ G01 = op[gbase+p_G01ID*NUM_DOFS_3D];
126
+ G02 = op[gbase+p_G02ID*NUM_DOFS_3D];
127
+ G11 = op[gbase+p_G11ID*NUM_DOFS_3D];
128
+ G12 = op[gbase+p_G12ID*NUM_DOFS_3D];
129
+ G22 = op[gbase+p_G22ID*NUM_DOFS_3D];
130
+ GWJ = op[gbase+p_GWJID*NUM_DOFS_3D];
131
+ }
132
+
133
+ dfloat pr = 0.f;
134
+ dfloat ps = 0.f;
135
+ dfloat pt = 0.f;
136
+
137
+ #pragma unroll
138
+ for(int m = 0; m < NUM_DOFS_1D; m++) {
139
+ int im = ijN(m,i,NUM_DOFS_1D);
140
+ int jm = ijN(m,j,NUM_DOFS_1D);
141
+ int km = ijN(m,k,NUM_DOFS_1D);
142
+ pr += DofToDofD[im]*s_p[blk][j][m];
143
+ ps += DofToDofD[jm]*s_p[blk][m][i];
144
+ pt += DofToDofD[km]*r_p[m];
145
+ }
146
+
147
+ s_Gpr[blk][j][i] = (G00*pr + G01*ps + G02*pt);
148
+ s_Gps[blk][j][i] = (G01*pr + G11*ps + G12*pt);
149
+
150
+ dfloat Gpt = (G02*pr + G12*ps + G22*pt);
151
+
152
+ dfloat Apk = GWJ*lambda*r_p[k];
153
+
154
+ __syncthreads();
155
+
156
+ #pragma unroll
157
+ for(int m = 0; m < NUM_DOFS_1D; m++){
158
+ int mi = ijN(i,m,NUM_DOFS_1D);
159
+ int mj = ijN(j,m,NUM_DOFS_1D);
160
+ int km = ijN(m,k,NUM_DOFS_1D);
161
+ Apk += DofToDofD[mi]*s_Gpr[blk][j][m];
162
+ Apk += DofToDofD[mj]*s_Gps[blk][m][i];
163
+ r_Ap[m] += DofToDofD[km]*Gpt; // DT(m,k)*ut(i,j,k,e)
164
+ }
165
+
166
+ r_Ap[k] += Apk;
167
+ }
168
+
169
+ }
170
+
171
+ template <int NUM_DOFS_1D, int p_Nblock >
172
+ __global__ void BK5ConstantKernel(const int numElements,
173
+ const dfloat lambda,
174
+ const dfloat * __restrict__ op,
175
+ const dfloat * __restrict__ DofToDofD,
176
+ const dfloat * __restrict__ oddDofToDofD,
177
+ const dfloat * __restrict__ evenDofToDofD,
178
+ const dfloat * __restrict__ solIn,
179
+ dfloat * __restrict__ solOut){
180
+
181
+ __shared__ dfloat s_DofToDofD[NUM_DOFS_2D];
182
+
183
+ dfloat r_q[NUM_DOFS_1D];
184
+ dfloat r_Aq[NUM_DOFS_1D];
185
+
186
+ const unsigned int t = threadIdx.x;
187
+ const int blk = threadIdx.y;
188
+
189
+ const int element = blockIdx.x*p_Nblock + blk;
190
+
191
+ const unsigned int a = t%NUM_DOFS_1D;
192
+ const unsigned int b = t/NUM_DOFS_1D;
193
+
194
+ s_DofToDofD[t] = DofToDofD[t];
195
+
196
+ if(element < numElements){
197
+ for(int c=0;c<NUM_DOFS_1D;++c){
198
+
199
+ int id = ijklN(a,b,c,element,NUM_DOFS_1D);
200
+
201
+ r_q[c] = solIn[id];
202
+ }
203
+ }
204
+
205
+ __syncthreads();
206
+
207
+ BK5Device <NUM_DOFS_1D, p_Nblock>
208
+ (numElements, element, lambda, op, s_DofToDofD, const_oddDofToDofD, const_evenDofToDofD, r_q, r_Aq);
209
+
210
+ if(element<numElements){
211
+ #pragma unroll
212
+ for(int c=0;c<NUM_DOFS_1D;++c){
213
+ int id = ijklN(a,b,c,element,NUM_DOFS_1D);
214
+ solOut[id] = r_Aq[c];
215
+ }
216
+ }
217
+ }
218
+
219
+ template <int NUM_DOFS_1D>
220
+ __forceinline__ __device__
221
+ dfloat BK5CubeDevice(const int numElements,
222
+ const int element,
223
+ const dfloat lambda,
224
+ const dfloat * __restrict__ op,
225
+ const dfloat * __restrict__ DofToDofD,
226
+ dfloat r_p){
227
+
228
+ __shared__ dfloat s_p[NUM_DOFS_1D][NUM_DOFS_1D][NUM_DOFS_1D];
229
+
230
+ // assumes NUM_DOFS_2D threads
231
+ int i = threadIdx.x;
232
+ int j = threadIdx.y;
233
+ int k = threadIdx.z;
234
+
235
+ dfloat r_Ap = 0; // zero the accumulator
236
+
237
+ s_p[k][j][i] = r_p;
238
+
239
+ __syncthreads();
240
+
241
+ dfloat G00 = 0, G01 =0, G02 =0, G11 =0, G12 =0, G22 =0, GWJ =0;
242
+
243
+ // prefetch geometric factors
244
+ const int gbase = element*p_Nggeo*NUM_DOFS_3D + ijkN(i,j,k,NUM_DOFS_1D);
245
+
246
+ if(element<numElements){
247
+ G00 = op[gbase+p_G00ID*NUM_DOFS_3D];
248
+ G01 = op[gbase+p_G01ID*NUM_DOFS_3D];
249
+ G02 = op[gbase+p_G02ID*NUM_DOFS_3D];
250
+ G11 = op[gbase+p_G11ID*NUM_DOFS_3D];
251
+ G12 = op[gbase+p_G12ID*NUM_DOFS_3D];
252
+ G22 = op[gbase+p_G22ID*NUM_DOFS_3D];
253
+ GWJ = op[gbase+p_GWJID*NUM_DOFS_3D];
254
+ }
255
+
256
+ r_Ap = GWJ*lambda*r_p;
257
+
258
+ dfloat pr = 0.f;
259
+ dfloat ps = 0.f;
260
+ dfloat pt = 0.f;
261
+
262
+ #pragma unroll
263
+ for(int m = 0; m < NUM_DOFS_1D; m++) {
264
+ int im = ijN(m,i,NUM_DOFS_1D);
265
+ int jm = ijN(m,j,NUM_DOFS_1D);
266
+ int km = ijN(m,k,NUM_DOFS_1D);
267
+ pr += DofToDofD[im]*s_p[k][j][m];
268
+ ps += DofToDofD[jm]*s_p[k][m][i];
269
+ pt += DofToDofD[km]*s_p[m][j][i];
270
+ }
271
+
272
+ dfloat Gpr = (G00*pr + G01*ps + G02*pt);
273
+ dfloat Gps = (G01*pr + G11*ps + G12*pt);
274
+ dfloat Gpt = (G02*pr + G12*ps + G22*pt);
275
+
276
+
277
+ __syncthreads();
278
+
279
+ s_p[k][j][i] = Gpr;
280
+
281
+ __syncthreads();
282
+
283
+ #pragma unroll
284
+ for(int m = 0; m < NUM_DOFS_1D; m++){
285
+ int mi = ijN(i,m,NUM_DOFS_1D);
286
+ r_Ap += DofToDofD[mi]*s_p[k][j][m];
287
+ }
288
+
289
+
290
+ __syncthreads();
291
+
292
+ s_p[k][j][i] = Gps;
293
+
294
+ __syncthreads();
295
+
296
+ #pragma unroll
297
+ for(int m = 0; m < NUM_DOFS_1D; m++){
298
+ int mj = ijN(j,m,NUM_DOFS_1D);
299
+ r_Ap += DofToDofD[mj]*s_p[k][m][i];
300
+ }
301
+
302
+ __syncthreads();
303
+
304
+ s_p[k][j][i] = Gpt;
305
+
306
+ __syncthreads();
307
+
308
+ #pragma unroll
309
+ for(int m = 0; m < NUM_DOFS_1D; m++){
310
+ int mk= ijN(k,m,NUM_DOFS_1D);
311
+ r_Ap += DofToDofD[mk]*s_p[m][j][i];
312
+ }
313
+
314
+ return r_Ap;
315
+ }
316
+
317
+ template <int NUM_DOFS_1D>
318
+ __global__ void BK5CubeKernel(const int numElements,
319
+ const dfloat lambda,
320
+ const dfloat * __restrict__ op,
321
+ const dfloat * __restrict__ DofToDofD,
322
+ const dfloat * __restrict__ solIn,
323
+ dfloat * __restrict__ solOut){
324
+
325
+ __shared__ dfloat s_DofToDofD[NUM_DOFS_2D];
326
+
327
+ const int element = blockIdx.x;
328
+
329
+ int a = threadIdx.x;
330
+ int b = threadIdx.y;
331
+ int c = threadIdx.z;
332
+
333
+ if(c==0)
334
+ s_DofToDofD[b*NUM_DOFS_1D+a] = DofToDofD[b*NUM_DOFS_1D+a];
335
+
336
+ int id = ijklN(a,b,c,element,NUM_DOFS_1D);
337
+
338
+ dfloat r_p = solIn[id];
339
+
340
+ __syncthreads();
341
+
342
+ dfloat r_Ap = BK5CubeDevice <NUM_DOFS_1D>
343
+ (numElements, element, lambda, op, s_DofToDofD, r_p);
344
+
345
+ solOut[id] = r_Ap;
346
+
347
+ }
348
+
349
+
350
+
351
+ double bandwidthTest(cudaStream_t stream, int Ntests, size_t bwNtotal){
352
+
353
+ cudaEvent_t start, end;
354
+ cudaEventCreate(&start);
355
+ cudaEventCreate(&end);
356
+
357
+ dfloat *h_bwTest1, *c_bwTest1;
358
+ dfloat *h_bwTest2, *c_bwTest2;
359
+
360
+ randAlloc(bwNtotal/2, &h_bwTest1, &c_bwTest1);
361
+ randAlloc(bwNtotal/2, &h_bwTest2, &c_bwTest2);
362
+
363
+ cudaDeviceSynchronize();
364
+ cudaEventRecord(start, stream);
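+ // each device-to-device copy reads and writes bwNtotal/2 dfloats, so one timed test moves bwNtotal dfloats of DRAM traffic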
365
+
366
+ for(int test=0;test<Ntests/2;++test){
367
+ cudaMemcpy(c_bwTest2, c_bwTest1, (bwNtotal/2)*sizeof(dfloat), cudaMemcpyDeviceToDevice);
368
+ cudaMemcpy(c_bwTest1, c_bwTest2, (bwNtotal/2)*sizeof(dfloat), cudaMemcpyDeviceToDevice);
369
+ }
370
+
371
+ cudaEventRecord(end, stream);
372
+ cudaEventSynchronize(end);
373
+ cudaDeviceSynchronize();
374
+
375
+ float elapsed;
376
+ cudaEventElapsedTime(&elapsed, start, end);
377
+ elapsed /= 1000.; // convert to s
378
+ elapsed /= (double) Ntests;
379
+
380
+ double estimatedActualDeviceBandwidth = (bwNtotal*sizeof(dfloat)/elapsed)/1.e9;
381
+
382
+ cudaFree(c_bwTest1);
383
+ cudaFree(c_bwTest2);
384
+
385
+ free(h_bwTest1);
386
+ free(h_bwTest2);
387
+
388
+ cudaEventDestroy(start);
389
+ cudaEventDestroy(end);
390
+
391
+ return estimatedActualDeviceBandwidth;
392
+ }
393
+
394
+ // leave this here in case we add odd-even versions
395
+ void buildOddEvenMatrices(int NUM_COLS_OP, int NUM_ROWS_OP,
396
+ dfloat *h_OP, dfloat **c_OP, dfloat **c_oddOP, dfloat **c_evenOP){
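+ // X/invX and cubX/cubInvX encode the even-odd (symmetric/antisymmetric) change of basis; h_OP is conjugated with them and the two half-size diagonal blocks are packed into oddOP/evenOP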
397
+
398
+ int HALF_COLS_OP = ((NUM_COLS_OP+1)/2);
399
+ int HALF_ROWS_OP = ((NUM_ROWS_OP+1)/2);
400
+
401
+ dfloat *X = (dfloat*) calloc(NUM_COLS_OP*NUM_COLS_OP, sizeof(dfloat));
402
+ dfloat *invX = (dfloat*) calloc(NUM_COLS_OP*NUM_COLS_OP, sizeof(dfloat));
403
+
404
+ dfloat *cubX = (dfloat*) calloc(NUM_ROWS_OP*NUM_ROWS_OP, sizeof(dfloat));
405
+ dfloat *cubInvX = (dfloat*) calloc(NUM_ROWS_OP*NUM_ROWS_OP, sizeof(dfloat));
406
+
407
+ for(int n=0;n<NUM_ROWS_OP;++n){
408
+ cubX[n*NUM_ROWS_OP + n] = 1;
409
+ cubInvX[n*NUM_ROWS_OP + n] = 0.5;
410
+
411
+ if(n<NUM_ROWS_OP/2){
412
+ cubX[n*NUM_ROWS_OP + NUM_ROWS_OP-1-n] = -1;
413
+ cubInvX[n*NUM_ROWS_OP + NUM_ROWS_OP-1-n] = +0.5;
414
+ }
415
+
416
+ if(n>=(NUM_ROWS_OP/2)){
417
+ cubX[n*NUM_ROWS_OP + NUM_ROWS_OP-1-n] = +1;
418
+ cubInvX[n*NUM_ROWS_OP + NUM_ROWS_OP-1-n] = -0.5;
419
+ }
420
+ }
421
+
422
+ for(int n=0;n<NUM_COLS_OP;++n){
423
+ X[n*NUM_COLS_OP + n] = 1;
424
+ invX[n*NUM_COLS_OP + n] = 0.5;
425
+
426
+ if(n<NUM_COLS_OP/2){
427
+ X[n*NUM_COLS_OP + NUM_COLS_OP-1-n] = 1;
428
+ invX[n*NUM_COLS_OP + NUM_COLS_OP-1-n] = -0.5;
429
+ }
430
+
431
+ if(n>=NUM_COLS_OP/2){
432
+ X[n*NUM_COLS_OP + NUM_COLS_OP-1-n] = -1;
433
+ invX[n*NUM_COLS_OP + NUM_COLS_OP-1-n] = 0.5;
434
+ }
435
+ }
436
+
437
+ if(NUM_COLS_OP%2) X[(NUM_COLS_OP)*(NUM_COLS_OP)/2] = 1;
438
+ if(NUM_COLS_OP%2) invX[(NUM_COLS_OP)*(NUM_COLS_OP)/2] = 1;
439
+
440
+ if(NUM_ROWS_OP%2) cubX[(NUM_ROWS_OP)*(NUM_ROWS_OP)/2] = 1;
441
+ if(NUM_ROWS_OP%2) cubInvX[(NUM_ROWS_OP)*(NUM_ROWS_OP)/2] = 1;
442
+
443
+ // if(NUM_COLS_OP%2) invX[(NUM_COLS_OP)*(NUM_COLS_OP)/2] = 1;
444
+ // if(NUM_ROWS_OP%2) cubInvX[(NUM_ROWS_OP+1)*(NUM_ROWS_OP+1)/2] = 1;
445
+
446
+ dfloat *IinvX = (dfloat*) calloc(NUM_COLS_OP*NUM_ROWS_OP, sizeof(dfloat));
447
+ dfloat *cubInvXIinvX = (dfloat*) calloc(NUM_COLS_OP*NUM_ROWS_OP, sizeof(dfloat));
448
+
449
+ // post multiply by invX
450
+ for(int i=0;i<NUM_ROWS_OP;++i){
451
+ for(int a=0;a<NUM_COLS_OP;++a){
452
+ dfloat resI = 0;
453
+ for(int n=0;n<NUM_COLS_OP;++n){
454
+ resI += h_OP [i*NUM_COLS_OP+n]*invX[n*NUM_COLS_OP+a];
455
+ }
456
+ IinvX[i*NUM_COLS_OP+a] = resI;
457
+ }
458
+ }
459
+
460
+ // pre multiply by invX
461
+ for(int i=0;i<NUM_ROWS_OP;++i){
462
+ for(int a=0;a<NUM_COLS_OP;++a){
463
+ dfloat resI = 0;
464
+ for(int n=0;n<NUM_ROWS_OP;++n){
465
+ resI += cubInvX[i*NUM_ROWS_OP+n]*IinvX[n*NUM_COLS_OP + a];
466
+ }
467
+ cubInvXIinvX[i*NUM_COLS_OP+a] = resI;
468
+ }
469
+ }
470
+
471
+ // now interleave the two non-zero blocks
472
+ // [ A 0 ] => [ A[0][0] B[0][0] A[0][1] B[0][1] .. A[0][HALF_DOFS_1D-1] B[0][HALF_DOFS_1D-1] ..
473
+ // [ 0 B ]
474
+
475
+ dfloat *oddOP = (dfloat*) calloc(NUM_ROWS_OP*HALF_ROWS_OP, sizeof(dfloat));
476
+ dfloat *evenOP = (dfloat*) calloc(NUM_ROWS_OP*HALF_ROWS_OP, sizeof(dfloat));
477
+
478
+ for(int i=0;i<HALF_ROWS_OP;++i){
479
+ for(int a=0;a<HALF_COLS_OP;++a){
480
+
481
+ oddOP[i*HALF_COLS_OP+a] = cubInvXIinvX[i*NUM_COLS_OP+a];
482
+ evenOP[i*HALF_COLS_OP+a] = cubInvXIinvX[(NUM_ROWS_OP-1-i)*NUM_COLS_OP + NUM_COLS_OP-1-a];
483
+ }
484
+ }
485
+
486
+ if((NUM_ROWS_OP%2)) // zero duplicate
487
+ evenOP[HALF_ROWS_OP*HALF_COLS_OP-1] = 0;
488
+
489
+ int NoddOP = HALF_ROWS_OP*HALF_COLS_OP;
490
+ int NevenOP = HALF_ROWS_OP*HALF_COLS_OP;
491
+
492
+ cudaMalloc(c_oddOP, NoddOP*sizeof(dfloat));
493
+ cudaMalloc(c_evenOP, NevenOP*sizeof(dfloat));
494
+
495
+ cudaMemcpy(*c_oddOP, oddOP, NoddOP*sizeof(dfloat), cudaMemcpyHostToDevice);
496
+ cudaMemcpy(*c_evenOP, evenOP, NevenOP*sizeof(dfloat), cudaMemcpyHostToDevice);
497
+
498
+ cudaMemcpy(*c_OP, h_OP, NUM_COLS_OP*NUM_ROWS_OP*sizeof(dfloat), cudaMemcpyHostToDevice);
499
+
500
+ matrixPrint(NUM_COLS_OP, NUM_COLS_OP, X, "X");
501
+ matrixPrint(NUM_ROWS_OP, NUM_ROWS_OP, cubX, "cubX");
502
+
503
+
504
+ matrixPrint(NUM_COLS_OP, NUM_COLS_OP, invX, "invX");
505
+ matrixPrint(NUM_ROWS_OP, NUM_ROWS_OP, cubInvX, "cubInvX");
506
+
507
+
508
+
509
+ }
510
+
511
+
512
+ void runBK5Kernel(cudaStream_t stream, int Nq, int numElements, dfloat lambda,
513
+ dfloat *c_op,
514
+ dfloat *c_DofToDofD, dfloat *c_oddDofToDofD, dfloat *c_evenDofToDofD,
515
+ dfloat *c_solIn, dfloat *c_solOut, int mode){
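+ // mode==0: BK5ConstantKernel with an Nq*Nq-thread slab and Nblock elements per block; any other mode: BK5CubeKernel with one element per block and a full Nq^3 thread cube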
516
+
517
+ #define BK5Kernel(Nq,Nblock) \
518
+ { \
519
+ if(mode==0){ \
520
+ dim3 G((numElements+Nblock-1)/Nblock, 1, 1); \
521
+ dim3 B(Nq*Nq, Nblock, 1); \
522
+ BK5ConstantKernel<Nq,Nblock> <<< G, B, 0, stream >>> \
523
+ (numElements, lambda, c_op, c_DofToDofD, c_oddDofToDofD,c_evenDofToDofD, c_solIn, c_solOut); \
524
+ } \
525
+ else{ \
526
+ dim3 G(numElements,1,1); \
527
+ dim3 B(Nq, Nq, Nq); \
528
+ BK5CubeKernel<Nq> <<< G, B, 0, stream >>> \
529
+ (numElements, lambda, c_op, c_DofToDofD, c_solIn, c_solOut); \
530
+ } \
531
+ }
532
+
533
+
534
+ #define ERR printf("BK5 kernel with Nq=%d not available\n", Nq); exit(-1)
535
+
536
+ if(Nq==2){
537
+ BK5Kernel(2,16);
538
+ return;
539
+ }
540
+
541
+ if(Nq==3){
542
+ BK5Kernel(3,7);
543
+ return;
544
+ }
545
+
546
+ if(Nq==4){
547
+ BK5Kernel(4,4);
548
+ return;
549
+ }
550
+
551
+ if(Nq==5){
552
+ BK5Kernel(5,5);
553
+ return;
554
+ }
555
+
556
+ if(Nq==6){
557
+ BK5Kernel(6,3);
558
+ return;
559
+ }
560
+
561
+ if(Nq==7){
562
+ BK5Kernel(7,2);
563
+ return;
564
+ }
565
+
566
+ if(Nq==8){
567
+ BK5Kernel(8,1);
568
+ return;
569
+ }
570
+
571
+ if(Nq==9){
572
+ BK5Kernel(9,1);
573
+ return;
574
+ }
575
+
576
+ if(Nq==10){
577
+ BK5Kernel(10,1);
578
+ return;
579
+ }
580
+
581
+ if(Nq==11){
582
+ BK5Kernel(11,1);
583
+ return;
584
+ }
585
+
586
+ if(Nq==12){
587
+ BK5Kernel(12,1);
588
+ return;
589
+ }
590
+
591
+ if(Nq==13){
592
+ BK5Kernel(13,1);
593
+ return;
594
+ }
595
+
596
+ ERR;
597
+ }
598
+
599
+
600
+ dfloat nothingTest(cudaStream_t stream, int Ntests){
601
+
602
+ cudaEvent_t start, end;
603
+ cudaEventCreate(&start);
604
+ cudaEventCreate(&end);
605
+
606
+ cudaDeviceSynchronize();
607
+
608
+ float nothingElapsed = 0;
609
+ {
610
+
611
+ // time kernel that does nothing
612
+
613
+ #if USE_GRAPH==1
614
+ // cuda stream capture sequence for nothingKernel
615
+ cudaGraph_t nothingGraph;
616
+
617
+ cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
618
+
619
+ for(int test=0;test<Ntests;++test){
620
+ nothingKernel <<< 1, 1, 0, stream >>> ();
621
+ }
622
+
623
+ cudaStreamEndCapture(stream, &nothingGraph);
624
+
625
+ // time graph sequence for nothing
626
+ cudaGraphExec_t nothingInstance;
627
+ cudaGraphInstantiate(&nothingInstance, nothingGraph, NULL, NULL, 0);
628
+
629
+ cudaEventRecord(start, stream);
630
+
631
+ cudaGraphLaunch(nothingInstance, stream);
632
+
633
+ cudaEventRecord(end, stream);
634
+ #else
635
+
636
+ cudaEventRecord(start, stream);
637
+
638
+ for(int test=0;test<Ntests;++test)
639
+ nothingKernel <<< 1, 1, 0, stream >>> ();
640
+
641
+ cudaEventRecord(end, stream);
642
+
643
+ #endif
644
+
645
+ cudaDeviceSynchronize();
646
+
647
+ cudaEventElapsedTime(&nothingElapsed, start, end);
648
+ nothingElapsed /= 1000.;
649
+ nothingElapsed /= (double) Ntests;
650
+
651
+ }
652
+
653
+ return nothingElapsed;
654
+ }
655
+
656
+
657
+ int main(int argc, char **argv){
658
+
659
+ cudaStream_t stream;
660
+ cudaStreamCreate(&stream);
661
+
662
+ if(argc!=4){
663
+ printf("Usage: ./BK5 Nq numElements mode\n");
664
+ exit(-1);
665
+ }
666
+
667
+ // read number of elements
668
+ int Nq = atoi(argv[1]);
669
+ int numElements = atoi(argv[2]);
670
+ int mode = atoi(argv[3]);
671
+
672
+ dfloat lambda = 0;
673
+
674
+ printf("Running: NUM_DOFS_1D=%d, numElements=%d\n", Nq, numElements);
675
+
676
+ int Np = Nq*Nq*Nq;
677
+ int halfNq = ((Nq+1)/2);
678
+
679
+ int Ntotal = numElements*Np;
680
+
681
+ int Ntests = 10;
682
+
683
+ double estimatedActualDeviceBandwidth = bandwidthTest(stream, Ntests, (Ntotal*2+7*Ntotal)*sizeof(dfloat));
684
+
685
+ dfloat *h_op, *c_op;
686
+ dfloat *h_solOut, *c_solOut;
687
+ dfloat *h_solIn, *c_solIn;
688
+
689
+ dfloat *h_DofToDofD, *c_DofToDofD;
690
+ dfloat *c_oddDofToDofD, *c_evenDofToDofD;
691
+
692
+ // float fields
693
+ randAlloc(Ntotal*p_Nggeo, &h_op, &c_op);
694
+
695
+ randAlloc(Ntotal, &h_solIn, &c_solIn);
696
+ randAlloc(Ntotal, &h_solOut, &c_solOut);
697
+
698
+ randAlloc(Nq*Nq, &h_DofToDofD, &c_DofToDofD);
699
+
700
+ // give D the correct symmetry
701
+ for(int i=0;i<halfNq;++i){
702
+ for(int a=0;a<Nq;++a){
703
+ h_DofToDofD[(Nq-1-i)*Nq + Nq-1-a] = -h_DofToDofD[i*Nq+a];
704
+ }
705
+ }
706
+
707
+ // create Odd-even packed storage for I and transpose(I) and push to constant memory
708
+ buildOddEvenMatrices (Nq,Nq, h_DofToDofD, &c_DofToDofD, &c_oddDofToDofD, &c_evenDofToDofD);
709
+
710
+ cudaMemcpyToSymbol(const_DofToDofD, c_DofToDofD, Nq*Nq*sizeof(dfloat), 0, cudaMemcpyDeviceToDevice);
711
+ cudaMemcpyToSymbol(const_oddDofToDofD, c_oddDofToDofD, halfNq*halfNq*sizeof(dfloat), 0, cudaMemcpyDeviceToDevice);
712
+ cudaMemcpyToSymbol(const_evenDofToDofD, c_evenDofToDofD, halfNq*halfNq*sizeof(dfloat), 0, cudaMemcpyDeviceToDevice);
713
+
714
+ cudaEvent_t start, end;
715
+ cudaEventCreate(&start);
716
+ cudaEventCreate(&end);
717
+
718
+ // KERNEL GRID
719
+ // do nothing kernel test
720
+ dfloat nothingElapsed = nothingTest(stream, Ntests);
721
+ nothingElapsed = nothingTest(stream, Ntests);
722
+
723
+ // warm up call
724
+ runBK5Kernel (stream, Nq, numElements, lambda,
725
+ c_op,
726
+ c_DofToDofD, c_oddDofToDofD, c_evenDofToDofD,
727
+ c_solIn, c_solOut, mode);
728
+
729
+ #if USE_GRAPH==1
730
+ // cuda stream capture
731
+ cudaGraph_t graph;
732
+
733
+ cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
734
+
735
+ for(int test=0;test<Ntests;++test){
736
+
737
+ runBK5Kernel (stream, Nq, numElements, lambda,
738
+ c_op,
739
+ c_DofToDofD, c_oddDofToDofD, c_evenDofToDofD,
740
+ c_solIn, c_solOut, mode);
741
+ }
742
+
743
+ cudaStreamEndCapture(stream, &graph);
744
+
745
+ cudaGraphExec_t instance;
746
+ cudaGraphInstantiate(&instance, graph, NULL, NULL, 0);
747
+ #endif
748
+
749
+ cudaDeviceSynchronize();
750
+
751
+ {
752
+ cudaEventRecord(start, stream);
753
+
754
+ #if USE_GRAPH==0
755
+ for(int test=0;test<Ntests;++test){
756
+
757
+ runBK5Kernel (stream, Nq, numElements, lambda,
758
+ c_op,
759
+ c_DofToDofD, c_oddDofToDofD, c_evenDofToDofD,
760
+ c_solIn, c_solOut, mode);
761
+
762
+ }
763
+ #else
764
+ cudaGraphLaunch(instance, stream);
765
+ #endif
766
+
767
+ cudaEventRecord(end, stream);
768
+
769
+ cudaEventSynchronize(end);
770
+
771
+ float elapsed;
772
+ cudaEventElapsedTime(&elapsed, start, end);
773
+ elapsed /= 1000.;
774
+ elapsed /= (double) Ntests;
775
+
776
+ int bytesMoved = (2*Np+7*Np)*sizeof(dfloat); // x, Mx, opa
777
+ double bw = (bytesMoved*numElements/elapsed)/1.e9;
778
+
779
+ double flopCount = Np*(6*2*Nq + 17);
780
+ double gflops = (flopCount*numElements/elapsed)/1.e9;
781
+
782
+ printf("%2d %8d %8d %e %e %e %e %e %e %%%% [BK5: N, numElements, Ndofs,"
783
+ " elapsed, dofsPerSecond, nothingElapsed, BW in GB/s, estimatedActualDeviceBandwidth, GFLOPS/s]\n",
784
+ Nq-1, numElements, Np*numElements, elapsed, numElements*(Np/elapsed),
785
+ nothingElapsed, bw, estimatedActualDeviceBandwidth, gflops);
786
+ }
787
+
788
+ // check output is correct
789
+ // BK5Host (Nq, numElements, lambda, h_op, h_DofToDofD, h_solIn, h_solOut);
790
+ meshReferenceBK5(Nq, numElements, lambda, h_op, h_DofToDofD, h_solIn, h_solOut);
791
+
792
+ // copy device version to host old qh
793
+ dfloat *fromDevice = (dfloat*) calloc(numElements*Np, sizeof(dfloat));
794
+ cudaMemcpy(fromDevice, c_solOut, numElements*Np*sizeof(dfloat), cudaMemcpyDeviceToHost);
795
+
796
+ dfloat maxDiff = 0;
797
+
798
+ for(int e=0;e<numElements;++e){
799
+ for(int n=0;n<Np;++n){
800
+ int id = e*Np + n;
801
+ dfloat diff = fabs(h_solOut[id]-fromDevice[id]);
802
+ maxDiff = (diff>maxDiff) ? diff:maxDiff;
803
+ }
804
+ }
805
+ printf("|| Mq_{host} - Mq_{device} ||_linf = %lg\n", maxDiff);
806
+
807
+ cudaEventDestroy(start);
808
+ cudaEventDestroy(end);
809
+
810
+ return 0;
811
+
812
+ }
cuda_code/BatchNormalization_8.cu ADDED
@@ -0,0 +1,99 @@
1
+ #ifndef THC_GENERIC_FILE
2
+ #define THC_GENERIC_FILE "generic/BatchNormalization.cu"
3
+ #else
4
+
5
+ #define DeviceTensor3 THCDeviceTensor<real, 3>
6
+ #define DeviceTensor1 THCDeviceTensor<real, 1>
7
+
8
+ template <int Dim>
9
+ static THCDeviceTensor<real, Dim> devicetensor(THCState *state, THCTensor *t) {
10
+ if (!t) {
11
+ return THCDeviceTensor<real, Dim>();
12
+ }
13
+
14
+ int inDim = THCTensor_(nDimension)(state, t);
15
+ if (inDim == Dim) {
16
+ return toDeviceTensor<real, Dim>(state, t);
17
+ }
18
+
19
+ // View in which the last dimensions are collapsed or expanded as needed
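+ // e.g. a 4-D NCHW tensor viewed with Dim == 3 becomes (N, C, H*W); a 2-D tensor gains a trailing singleton dimension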
20
+ THAssert(THCTensor_(isContiguous)(state, t));
21
+ int size[Dim];
22
+ for (int i = 0; i < Dim || i < inDim; ++i) {
23
+ if (i < Dim && i < inDim) {
24
+ size[i] = t->size[i];
25
+ } else if (i < Dim) {
26
+ size[i] = 1;
27
+ } else {
28
+ size[Dim - 1] *= t->size[i];
29
+ }
30
+ }
31
+ return THCDeviceTensor<real, Dim>(THCTensor_(data)(state, t), size);
32
+ }
33
+
34
+ void THNN_(BatchNormalization_updateOutput)(
35
+ THCState *state, THCTensor *input_, THCTensor *output_,
36
+ THCTensor *weight_, THCTensor *bias_, THCTensor *runningMean_,
37
+ THCTensor *runningVar_, THCTensor *saveMean_, THCTensor *saveStd_,
38
+ bool train, double momentum, double eps) {
39
+
40
+ THCTensor_(resizeAs)(state, output_, input_);
41
+ DeviceTensor3 input = devicetensor<3>(state, input_);
42
+ DeviceTensor3 output = devicetensor<3>(state, output_);
43
+ DeviceTensor1 weight = devicetensor<1>(state, weight_);
44
+ DeviceTensor1 bias = devicetensor<1>(state, bias_);
45
+ DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_);
46
+ DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_);
47
+ DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_);
48
+ DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_);
49
+
50
+ cudaStream_t s = THCState_getCurrentStream(state);
51
+ cudaDeviceProp *prop = THCState_getCurrentDeviceProperties(state);
52
+
53
+ if (!train) {
54
+ dim3 blocks(input.getSize(1));
55
+ dim3 threads(getNumThreads(input.getSize(2)));
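+ // one block per feature plane (size(1)); threads cover the flattened spatial extent (size(2))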
56
+ BatchNormalizationUpdateOutputInference_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
57
+ input, output, runningMean, runningVar, weight, bias, eps);
58
+ } else {
59
+ dim3 blocks(input.getSize(1));
60
+ dim3 threads(getNumThreads(input.getSize(2)));
61
+ BatchNormalizationUpdateOutput_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
62
+ input, output, weight, bias, eps, momentum, runningMean, runningVar,
63
+ saveMean, saveStd);
64
+ }
65
+ THCudaCheck(cudaGetLastError());
66
+ }
67
+
68
+ void THNN_(BatchNormalization_backward)(
69
+ THCState *state, THCTensor *input_, THCTensor *gradOutput_,
70
+ THCTensor *gradInput_, THCTensor *gradWeight_, THCTensor *gradBias_,
71
+ THCTensor *weight_, THCTensor *runningMean_, THCTensor *runningVar_,
72
+ THCTensor *saveMean_, THCTensor *saveStd_, bool train, float scale, double eps) {
73
+
74
+ THCUNN_check_shape(state, input_, gradOutput_);
75
+ DeviceTensor3 input = devicetensor<3>(state, input_);
76
+ DeviceTensor3 gradOutput = devicetensor<3>(state, gradOutput_);
77
+ DeviceTensor3 gradInput = devicetensor<3>(state, gradInput_);
78
+ DeviceTensor1 gradWeight = devicetensor<1>(state, gradWeight_);
79
+ DeviceTensor1 gradBias = devicetensor<1>(state, gradBias_);
80
+ DeviceTensor1 weight = devicetensor<1>(state, weight_);
81
+ DeviceTensor1 runningMean = devicetensor<1>(state, runningMean_);
82
+ DeviceTensor1 runningVar = devicetensor<1>(state, runningVar_);
83
+ DeviceTensor1 saveMean = devicetensor<1>(state, saveMean_);
84
+ DeviceTensor1 saveStd = devicetensor<1>(state, saveStd_);
85
+
86
+ cudaStream_t s = THCState_getCurrentStream(state);
87
+
88
+ dim3 blocks(gradOutput.getSize(1));
89
+ dim3 threads(getNumThreads(gradOutput.getSize(2)));
90
+ BatchNormalizationBackward_kernel<real, accreal, DeviceTensor1, DeviceTensor3> <<<blocks, threads, 0, s>>>(
91
+ input, gradOutput, gradInput, gradWeight, gradBias, weight, runningMean, runningVar,
92
+ saveMean, saveStd, train, scale, eps);
93
+ THCudaCheck(cudaGetLastError());
94
+ }
95
+
96
+ #undef DeviceTensor3
97
+ #undef DeviceTensor1
98
+
99
+ #endif
cuda_code/BlockSelectFloat_1.cu ADDED
@@ -0,0 +1,122 @@
1
+ /**
2
+ * Copyright (c) 2015-present, Facebook, Inc.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD+Patents license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ // Copyright 2004-present Facebook. All Rights Reserved.
10
+ #include "blockselect/BlockSelectImpl.cuh"
11
+
12
+ namespace faiss { namespace gpu {
13
+
14
+ // warp Q to thread Q:
15
+ // 1, 1
16
+ // 32, 2
17
+ // 64, 3
18
+ // 128, 3
19
+ // 256, 4
20
+ // 512, 8
21
+ // 1024, 8
22
+
23
+ BLOCK_SELECT_DECL(float, true, 1);
24
+ BLOCK_SELECT_DECL(float, true, 32);
25
+ BLOCK_SELECT_DECL(float, true, 64);
26
+ BLOCK_SELECT_DECL(float, true, 128);
27
+ BLOCK_SELECT_DECL(float, true, 256);
28
+ BLOCK_SELECT_DECL(float, true, 512);
29
+ BLOCK_SELECT_DECL(float, true, 1024);
30
+
31
+ BLOCK_SELECT_DECL(float, false, 1);
32
+ BLOCK_SELECT_DECL(float, false, 32);
33
+ BLOCK_SELECT_DECL(float, false, 64);
34
+ BLOCK_SELECT_DECL(float, false, 128);
35
+ BLOCK_SELECT_DECL(float, false, 256);
36
+ BLOCK_SELECT_DECL(float, false, 512);
37
+ BLOCK_SELECT_DECL(float, false, 1024);
38
+
39
+ void runBlockSelect(Tensor<float, 2, true>& in,
40
+ Tensor<float, 2, true>& outK,
41
+ Tensor<int, 2, true>& outV,
42
+ bool dir, int k, cudaStream_t stream) {
43
+ FAISS_ASSERT(k <= 1024);
44
+
45
+ if (dir) {
46
+ if (k == 1) {
47
+ BLOCK_SELECT_CALL(float, true, 1);
48
+ } else if (k <= 32) {
49
+ BLOCK_SELECT_CALL(float, true, 32);
50
+ } else if (k <= 64) {
51
+ BLOCK_SELECT_CALL(float, true, 64);
52
+ } else if (k <= 128) {
53
+ BLOCK_SELECT_CALL(float, true, 128);
54
+ } else if (k <= 256) {
55
+ BLOCK_SELECT_CALL(float, true, 256);
56
+ } else if (k <= 512) {
57
+ BLOCK_SELECT_CALL(float, true, 512);
58
+ } else if (k <= 1024) {
59
+ BLOCK_SELECT_CALL(float, true, 1024);
60
+ }
61
+ } else {
62
+ if (k == 1) {
63
+ BLOCK_SELECT_CALL(float, false, 1);
64
+ } else if (k <= 32) {
65
+ BLOCK_SELECT_CALL(float, false, 32);
66
+ } else if (k <= 64) {
67
+ BLOCK_SELECT_CALL(float, false, 64);
68
+ } else if (k <= 128) {
69
+ BLOCK_SELECT_CALL(float, false, 128);
70
+ } else if (k <= 256) {
71
+ BLOCK_SELECT_CALL(float, false, 256);
72
+ } else if (k <= 512) {
73
+ BLOCK_SELECT_CALL(float, false, 512);
74
+ } else if (k <= 1024) {
75
+ BLOCK_SELECT_CALL(float, false, 1024);
76
+ }
77
+ }
78
+ }
79
+
80
+ void runBlockSelectPair(Tensor<float, 2, true>& inK,
81
+ Tensor<int, 2, true>& inV,
82
+ Tensor<float, 2, true>& outK,
83
+ Tensor<int, 2, true>& outV,
84
+ bool dir, int k, cudaStream_t stream) {
85
+ FAISS_ASSERT(k <= 1024);
86
+
87
+ if (dir) {
88
+ if (k == 1) {
89
+ BLOCK_SELECT_PAIR_CALL(float, true, 1);
90
+ } else if (k <= 32) {
91
+ BLOCK_SELECT_PAIR_CALL(float, true, 32);
92
+ } else if (k <= 64) {
93
+ BLOCK_SELECT_PAIR_CALL(float, true, 64);
94
+ } else if (k <= 128) {
95
+ BLOCK_SELECT_PAIR_CALL(float, true, 128);
96
+ } else if (k <= 256) {
97
+ BLOCK_SELECT_PAIR_CALL(float, true, 256);
98
+ } else if (k <= 512) {
99
+ BLOCK_SELECT_PAIR_CALL(float, true, 512);
100
+ } else if (k <= 1024) {
101
+ BLOCK_SELECT_PAIR_CALL(float, true, 1024);
102
+ }
103
+ } else {
104
+ if (k == 1) {
105
+ BLOCK_SELECT_PAIR_CALL(float, false, 1);
106
+ } else if (k <= 32) {
107
+ BLOCK_SELECT_PAIR_CALL(float, false, 32);
108
+ } else if (k <= 64) {
109
+ BLOCK_SELECT_PAIR_CALL(float, false, 64);
110
+ } else if (k <= 128) {
111
+ BLOCK_SELECT_PAIR_CALL(float, false, 128);
112
+ } else if (k <= 256) {
113
+ BLOCK_SELECT_PAIR_CALL(float, false, 256);
114
+ } else if (k <= 512) {
115
+ BLOCK_SELECT_PAIR_CALL(float, false, 512);
116
+ } else if (k <= 1024) {
117
+ BLOCK_SELECT_PAIR_CALL(float, false, 1024);
118
+ }
119
+ }
120
+ }
121
+
122
+ } } // namespace
cuda_code/BounceBackNVEGPU_6.cu ADDED
@@ -0,0 +1,103 @@
1
+ // Copyright (c) 2018-2020, Michael P. Howard
2
+ // Copyright (c) 2021, Auburn University
3
+ // This file is part of the azplugins project, released under the Modified BSD License.
4
+
5
+ /*!
6
+ * \file BounceBackNVEGPU.cu
7
+ * \brief Template specialization of CUDA kernels for BounceBackNVEGPU geometries. Each instance of the
8
+ * nve_bounce_step_one must be templated explicitly for each geometry.
9
+ */
10
+
11
+ #include "BounceBackNVEGPU.cuh"
12
+ #include "BounceBackGeometry.h"
13
+
14
+ namespace azplugins
15
+ {
16
+ namespace gpu
17
+ {
18
+
19
+ //! Template instantiation of slit geometry streaming
20
+ template cudaError_t nve_bounce_step_one<mpcd::detail::SlitGeometry>
21
+ (const bounce_args_t& args, const mpcd::detail::SlitGeometry& geom);
22
+
23
+ namespace kernel
24
+ {
25
+ //! Kernel for applying second step of velocity Verlet algorithm with bounce back
26
+ /*!
27
+ * \param d_vel Particle velocities
28
+ * \param d_accel Particle accelerations
29
+ * \param d_net_force Net force on each particle
30
+ * \param d_group Indexes in particle group
31
+ * \param dt Timestep
32
+ * \param N Number of particles in group
33
+ *
34
+ * \b Implementation:
35
+ * Using one thread per particle, the particle velocities are updated according to the second step of the velocity Verlet
36
+ * algorithm. This is the standard update as in MD, and is only reimplemented here in case future modifications are necessary.
37
+ */
38
+ __global__ void nve_bounce_step_two(Scalar4 *d_vel,
39
+ Scalar3 *d_accel,
40
+ const Scalar4 *d_net_force,
41
+ const unsigned int *d_group,
42
+ const Scalar dt,
43
+ const unsigned int N)
44
+ {
45
+ // one thread per particle
46
+ unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
47
+ if (idx >= N)
48
+ return;
49
+ const unsigned int pid = d_group[idx];
50
+
51
+ const Scalar4 net_force = d_net_force[pid];
52
+ Scalar3 accel = make_scalar3(net_force.x, net_force.y, net_force.z);
53
+ Scalar4 vel = d_vel[pid];
54
+ accel.x /= vel.w;
55
+ accel.y /= vel.w;
56
+ accel.z /= vel.w;
57
+
58
+ // then, update the velocity
59
+ vel.x += Scalar(0.5) * accel.x * dt;
60
+ vel.y += Scalar(0.5) * accel.y * dt;
61
+ vel.z += Scalar(0.5) * accel.z * dt;
62
+
63
+ d_vel[pid] = vel;
64
+ d_accel[pid] = accel;
65
+ }
66
+ } // end namespace kernel
67
+
68
+ /*!
69
+ * \param d_vel Particle velocities
70
+ * \param d_accel Particle accelerations
71
+ * \param d_net_force Net force on each particle
72
+ * \param d_group Indexes in particle group
73
+ * \param dt Timestep
74
+ * \param N Number of particles in group
75
+ * \param block_size Number of threads per block
76
+ *
77
+ * \sa kernel::nve_bounce_step_two
78
+ */
79
+ cudaError_t nve_bounce_step_two(Scalar4 *d_vel,
80
+ Scalar3 *d_accel,
81
+ const Scalar4 *d_net_force,
82
+ const unsigned int *d_group,
83
+ const Scalar dt,
84
+ const unsigned int N,
85
+ const unsigned int block_size)
86
+ {
87
+ static unsigned int max_block_size = UINT_MAX;
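+ // query the kernel's maximum threads per block once and cache it across calls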
88
+ if (max_block_size == UINT_MAX)
89
+ {
90
+ cudaFuncAttributes attr;
91
+ cudaFuncGetAttributes(&attr, (const void*)kernel::nve_bounce_step_two);
92
+ max_block_size = attr.maxThreadsPerBlock;
93
+ }
94
+
95
+ unsigned int run_block_size = min(block_size, max_block_size);
96
+ dim3 grid(N / run_block_size + 1);
97
+ kernel::nve_bounce_step_two<<<grid, run_block_size>>>(d_vel, d_accel, d_net_force, d_group, dt, N);
98
+
99
+ return cudaSuccess;
100
+ }
101
+
102
+ } // end namespace gpu
103
+ } // end namespace azplugins
cuda_code/COOtoCSR_2.cu ADDED
@@ -0,0 +1,74 @@
1
+ /*
2
+ * Copyright (c) 2020, NVIDIA CORPORATION.
3
+ *
4
+ * Licensed under the Apache License, Version 2.0 (the "License");
5
+ * you may not use this file except in compliance with the License.
6
+ * You may obtain a copy of the License at
7
+ *
8
+ * http://www.apache.org/licenses/LICENSE-2.0
9
+ *
10
+ * Unless required by applicable law or agreed to in writing, software
11
+ * distributed under the License is distributed on an "AS IS" BASIS,
12
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ * See the License for the specific language governing permissions and
14
+ * limitations under the License.
15
+ */
16
+
17
+ #include <functions.hpp>
18
+ #include "COOtoCSR.cuh"
19
+
20
+ namespace cugraph {
21
+
22
+ // Explicit instantiation for uint32_t + float
23
+ template std::unique_ptr<GraphCSR<uint32_t, uint32_t, float>> coo_to_csr<uint32_t, uint32_t, float>(
24
+ GraphCOOView<uint32_t, uint32_t, float> const &graph, rmm::mr::device_memory_resource *);
25
+
26
+ // Explicit instantiation for uint32_t + double
27
+ template std::unique_ptr<GraphCSR<uint32_t, uint32_t, double>>
28
+ coo_to_csr<uint32_t, uint32_t, double>(GraphCOOView<uint32_t, uint32_t, double> const &graph,
29
+ rmm::mr::device_memory_resource *);
30
+
31
+ // Explicit instantiation for int + float
32
+ template std::unique_ptr<GraphCSR<int32_t, int32_t, float>> coo_to_csr<int32_t, int32_t, float>(
33
+ GraphCOOView<int32_t, int32_t, float> const &graph, rmm::mr::device_memory_resource *);
34
+
35
+ // Explicit instantiation for int + double
36
+ template std::unique_ptr<GraphCSR<int32_t, int32_t, double>> coo_to_csr<int32_t, int32_t, double>(
37
+ GraphCOOView<int32_t, int32_t, double> const &graph, rmm::mr::device_memory_resource *);
38
+
39
+ // Explicit instantiation for int64_t + float
40
+ template std::unique_ptr<GraphCSR<int64_t, int64_t, float>> coo_to_csr<int64_t, int64_t, float>(
41
+ GraphCOOView<int64_t, int64_t, float> const &graph, rmm::mr::device_memory_resource *);
42
+
43
+ // Explicit instantiation for int64_t + double
44
+ template std::unique_ptr<GraphCSR<int64_t, int64_t, double>> coo_to_csr<int64_t, int64_t, double>(
45
+ GraphCOOView<int64_t, int64_t, double> const &graph, rmm::mr::device_memory_resource *);
46
+
47
+ // in-place versions:
48
+ //
49
+ // Explicit instantiation for uint32_t + float
50
+ template void coo_to_csr_inplace<uint32_t, uint32_t, float>(
51
+ GraphCOOView<uint32_t, uint32_t, float> &graph, GraphCSRView<uint32_t, uint32_t, float> &result);
52
+
53
+ // Explicit instantiation for uint32_t + double
54
+ template void coo_to_csr_inplace<uint32_t, uint32_t, double>(
55
+ GraphCOOView<uint32_t, uint32_t, double> &graph,
56
+ GraphCSRView<uint32_t, uint32_t, double> &result);
57
+
58
+ // Explicit instantiation for int + float
59
+ template void coo_to_csr_inplace<int32_t, int32_t, float>(
60
+ GraphCOOView<int32_t, int32_t, float> &graph, GraphCSRView<int32_t, int32_t, float> &result);
61
+
62
+ // Explicit instantiation for int + double
63
+ template void coo_to_csr_inplace<int32_t, int32_t, double>(
64
+ GraphCOOView<int32_t, int32_t, double> &graph, GraphCSRView<int32_t, int32_t, double> &result);
65
+
66
+ // Explicit instantiation for int64_t + float
67
+ template void coo_to_csr_inplace<int64_t, int64_t, float>(
68
+ GraphCOOView<int64_t, int64_t, float> &graph, GraphCSRView<int64_t, int64_t, float> &result);
69
+
70
+ // Explicit instantiation for int64_t + double
71
+ template void coo_to_csr_inplace<int64_t, int64_t, double>(
72
+ GraphCOOView<int64_t, int64_t, double> &graph, GraphCSRView<int64_t, int64_t, double> &result);
73
+
74
+ } // namespace cugraph
cuda_code/CPU_GPU_time.cu ADDED
@@ -0,0 +1,110 @@
1
+ /*
2
+ * =====================================================================================
3
+ *
4
+ * Filename: testLironPaper.cu
5
+ *
6
+ * Description: This tries to match computation results with those in
7
+ * Liron's paper.
8
+ *
9
+ * Version: 1.0
10
+ * Created: 04/04/2014 07:58:39 AM
11
+ * Revision: none
12
+ * Compiler: gcc
13
+ *
14
+ * Author: Hoang-Ngan Nguyen (), zhoangngan-gmail
15
+ * Organization:
16
+ *
17
+ * =====================================================================================
18
+ */
19
+
20
+ #include "periodicStokes.h"
21
+ #include <cstdlib> /* srand, rand */
22
+ #include <ctime> /* time */
23
+ #include <string>
24
+ #include <fstream>
25
+ #include <iostream>
26
+ #include <eigen3/Eigen/Dense>
27
+ using namespace std;
28
+ using namespace Eigen;
29
+
30
+ int main( int argc, char *argv[] ) {
31
+ MatrixOnHost L(3, 1, 1);
32
+
33
+ //open binary file to save GPU and CPU times
34
+ //char timeName[80];
35
+ //sprintf(timeName, "CPU_GPU_time", filename);
36
+ //ofstream outTime(timeName, ios::binary);
37
+
38
+ //if (!outTime) {
39
+ //std::cout << "Error: Could not open file \"" << timeName << "\""
40
+ //<< ". Error occurs on line " << __LINE__
41
+ //<< " in source file \"" << __FILE__ << "\"" << std::endl;;
42
+ //exit(1);
43
+ //}
44
+ MatrixOnHost timeM(2, 4);
45
+
46
+ int maxShell = 4;
47
+ double d = 1.0/sqrt(atan(1)*4);
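+ // atan(1)*4 == pi, so d = 1/sqrt(pi)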
48
+ double e = 0;
49
+
50
+ clock_t start, end;
51
+ cudaEvent_t eStart, eStop;
52
+ HANDLE_ERROR( cudaEventCreate( &eStart ) );
53
+ HANDLE_ERROR( cudaEventCreate( &eStop ) );
54
+
55
+ MatrixOnHost newM(2, 1), oldM(2, 1);
56
+ float elapsedTime;
57
+ size_t size = 10;
58
+ size_t maxCol = 10;
59
+ for (int i = 0; i < 4; i++) {
60
+ cout << "Round " << i << endl;
61
+ MatrixOnHost x(3, size), x0(3, maxCol);
62
+ x.setRandom();
63
+ x0 = x;
64
+ //MatrixOnDevice dx = x, dx0 = x0;
65
+ //MatrixOnDevice dA(3*dx.columns(), 3*dx0.columns());
66
+
67
+ // record GPU time
68
+ //for (int j = 0; j * maxCol < size; j++) {
69
+ //for (int l = 0; l < 3; l++) {
70
+ //for (int k = 0; k < maxCol; k++) {
71
+ //x0(l, k) = x(l, j * maxCol + k);
72
+ //}
73
+ //}
74
+ //dx0 = x0;
75
+ //HANDLE_ERROR( cudaEventRecord( eStart, 0) );
76
+ //imageStokeslet( dA, dx, dx0, d, e, maxShell, maxShell-1, L(0), L(1) );
77
+ //HANDLE_ERROR( cudaEventRecord( eStop , 0) );
78
+ //HANDLE_ERROR( cudaEventSynchronize( eStop ) );
79
+ //HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime, eStart, eStop ) );
80
+ //timeM(0, i) = elapsedTime;
81
+ //}
82
+
83
+ //record CPU time
84
+ MatrixOnHost A(3*x.columns(), 3*x.columns());
85
+ MatrixOnHost absA = A, refSol = A;
86
+ start = clock();
87
+ newM(0) = newM(1) = oldM(0) = oldM(1) = 0;
88
+ realShells ( refSol, absA, x, x, newM, oldM, L, d, e );
89
+ fourierShells(A, absA, x, x, newM, oldM, L, d, e);
90
+ refSol = refSol + A;
91
+ newM(0) = newM(1) = maxShell;
92
+ realShells ( refSol, absA, x, x, newM, oldM, L, d, e );
93
+ newM(0) = newM(1) = maxShell-1;
94
+ fourierShells(refSol, absA, x, x, newM, oldM, L, d, e);
95
+ end = clock();
96
+ timeM(1, i) = 1000.0 * ((double) (end - start)) / CLOCKS_PER_SEC ;
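+ // clock() returns CPU ticks; convert the elapsed tick count to milliseconds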
97
+ size *= 8;
98
+ cout << "maxShell = " << maxShell << endl;
99
+ cout << "time is " << timeM(1, i) << "ms" << endl;
100
+ }
101
+ timeM.write("CPU_GPU_time");
102
+ HANDLE_ERROR( cudaEventDestroy( eStart ) );
103
+ HANDLE_ERROR( cudaEventDestroy( eStop ) );
104
+ //outTime.close();
105
+
106
+ cout << "Done!!!!!!!!!!!!!!" << endl;
107
+
108
+ return EXIT_SUCCESS;
109
+ } // ---------- end of function main ----------
110
+
cuda_code/CUAPI_Asyn_PoissonGravitySolver_8.cu ADDED
@@ -0,0 +1,473 @@
1
+ #include "CUAPI.h"
2
+ #include "CUPOT.h"
3
+
4
+ #if ( defined GPU && defined GRAVITY )
5
+
6
+
7
+
8
+ // Poisson solver prototypes
9
+ #if ( POT_SCHEME == SOR )
10
+ #ifdef USE_PSOLVER_10TO14
11
+ __global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
12
+ const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
13
+ real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
14
+ const int Min_Iter, const int Max_Iter, const real Omega_6,
15
+ const real Const, const IntScheme_t IntScheme );
16
+ #else
17
+ __global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
18
+ const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
19
+ real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
20
+ const int Min_Iter, const int Max_Iter, const real Omega_6,
21
+ const real Const, const IntScheme_t IntScheme );
22
+ #endif // #ifdef USE_PSOLVER_10TO14 ... else ...
23
+
24
+ #elif ( POT_SCHEME == MG )
25
+ __global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
26
+ const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
27
+ real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
28
+ const real dh_Min, const int Max_Iter, const int NPre_Smooth,
29
+ const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff,
30
+ const IntScheme_t IntScheme );
31
+ #endif // POT_SCHEME
32
+
33
+
34
+ // Gravity solver prototypes
35
+ #if ( MODEL == HYDRO )
36
+ __global__
37
+ void CUPOT_HydroGravitySolver(
38
+ real g_Flu_Array_New[][GRA_NIN][ CUBE(PS1) ],
39
+ const real g_Pot_Array_New[][ CUBE(GRA_NXT) ],
40
+ const double g_Corner_Array [][3],
41
+ const real g_Pot_Array_USG[][ CUBE(USG_NXT_G) ],
42
+ const real g_Flu_Array_USG[][GRA_NIN-1][ CUBE(PS1) ],
43
+ char g_DE_Array [][ CUBE(PS1) ],
44
+ const real g_EngyB_Array [][ CUBE(PS1) ],
45
+ const real dt, const real dh, const bool P5_Gradient,
46
+ const OptGravityType_t GravityType,
47
+ const double TimeNew, const double TimeOld, const real MinEint );
48
+
49
+ #elif ( MODEL == ELBDM )
50
+ __global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ],
51
+ const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ],
52
+ const double g_Corner_Array[][3],
53
+ const real EtaDt, const real dh, const real Lambda, const bool ExtPot,
54
+ const double TimeNew );
55
+
56
+ #else
57
+ #error : ERROR : unsupported MODEL !!
58
+ #endif // MODEL
59
+
60
+
61
+ // declare all device pointers
62
+ extern real (*d_Rho_Array_P )[ CUBE(RHO_NXT) ];
63
+ extern real (*d_Pot_Array_P_In )[ CUBE(POT_NXT) ];
64
+ extern real (*d_Pot_Array_P_Out)[ CUBE(GRA_NXT) ];
65
+ extern real (*d_Flu_Array_G )[GRA_NIN][ CUBE(PS1)];
66
+ extern double (*d_Corner_Array_G)[3];
67
+ #if ( MODEL == HYDRO )
68
+ #ifdef UNSPLIT_GRAVITY
69
+ extern real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ];
70
+ extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ];
71
+ #else
72
+ static real (*d_Pot_Array_USG_G)[ CUBE(USG_NXT_G) ] = NULL;
73
+ static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ CUBE(PS1) ] = NULL;
74
+ #endif
75
+ #ifdef DUAL_ENERGY
76
+ extern char (*d_DE_Array_G)[ CUBE(PS1) ];
77
+ #else
78
+ static char (*d_DE_Array_G)[ CUBE(PS1) ] = NULL;
79
+ #endif
80
+ #ifdef MHD
81
+ extern real (*d_EngyB_Array_G)[ CUBE(PS1) ];
82
+ #else
83
+ static real (*d_EngyB_Array_G)[ CUBE(PS1) ] = NULL;
84
+ #endif
85
+ #endif // #if ( MODEL == HYDRO )
86
+
87
+ extern cudaStream_t *Stream;
88
+
89
+
90
+
91
+
92
+ //-------------------------------------------------------------------------------------------------------
93
+ // Function : CUAPI_Asyn_PoissonGravitySolver
94
+ // Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate
95
+ // the gravitational potential and/or advance the fluid variables by the gravitational
96
+ // acceleration for a group of patches
97
+ //
98
+ // ***********************************************************
99
+ // ** Asynchronous Function **
100
+ // ** **
101
+ // ** will return before the execution in GPU is complete **
102
+ // ***********************************************************
103
+ //
104
+ // Note : a. Use streams for the asychronous memory copy between device and host
105
+ // b. Prefix "d" : for pointers pointing to the "Device" memory space
106
+ // Prefix "h" : for pointers pointing to the "Host" memory space
107
+ //
108
+ // Parameter : h_Rho_Array : Host array storing the input density
109
+ // h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation
110
+ // h_Pot_Array_Out : Host array to store the output potential
111
+ // h_Flu_Array : Host array to store the fluid variables for the Gravity solver
112
+ // h_Corner_Array : Host array storing the physical corner coordinates of each patch
113
+ // h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY
114
+ // h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY
115
+ // h_DE_Array : Host array storing the dual-energy status (for both input and output)
116
+ // h_EngyB_Array : Host array storing the cell-centered magnetic energy (MHD only)
117
+ // NPatchGroup : Number of patch groups evaluated simultaneously by GPU
118
+ // dt : Time interval to advance solution
119
+ // dh : Grid size
120
+ // SOR_Min_Iter : Minimum # of iterations for SOR
121
+ // SOR_Max_Iter : Maximum # of iterations for SOR
122
+ // SOR_Omega : Over-relaxation parameter
123
+ // MG_Max_Iter : Maximum number of iterations for multigrid
124
+ // MG_NPre_Smooth : Number of pre-smoothing steps for multigrid
125
+ // MG_NPos_tSmooth : Number of post-smoothing steps for multigrid
126
+ // MG_Tolerated_Error : Maximum tolerated error for multigrid
127
+ // Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a)
128
+ // IntScheme : Interpolation scheme for potential
129
+ // --> currently supported schemes include
130
+ // INT_CQUAD : conservative quadratic interpolation
131
+ // INT_QUAD : quadratic interpolation
132
+ // P5_Gradient : Use 5-points stencil to evaluate the potential gradient
133
+ // ELBDM_Eta : Particle mass / Planck constant in ELBDM
134
+ // ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM
135
+ // Poisson : true --> invoke the Poisson solver
136
+ // GraAcc : true --> invoke the Gravity solver
137
+ // GPU_NStream : Number of CUDA streams for the asynchronous memory copy
138
+ // GravityType : Types of gravity --> self-gravity, external gravity, both
139
+ // TimeNew : Physical time at the current step (for the external gravity solver)
140
+ // TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY)
141
+ // ExtPot : Add the external potential
142
+ // MinEint : Minimum allowed internal energy (== MIN_PRES / (GAMMA-1))
143
+ //
144
+ // Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda
145
+ // Useless parameters in ELBDM : P5_Gradient
146
+ //-------------------------------------------------------------------------------------------------------
147
+ void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT],
148
+ const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT],
149
+ real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT],
150
+ real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1],
151
+ const double h_Corner_Array[][3],
152
+ const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G],
153
+ const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1],
154
+ char h_DE_Array [][PS1][PS1][PS1],
155
+ const real h_EngyB_Array [][PS1][PS1][PS1],
156
+ const int NPatchGroup, const real dt, const real dh, const int SOR_Min_Iter,
157
+ const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter,
158
+ const int MG_NPre_Smooth, const int MG_NPost_Smooth,
159
+ const real MG_Tolerated_Error, const real Poi_Coeff,
160
+ const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta,
161
+ const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int GPU_NStream,
162
+ const OptGravityType_t GravityType, const double TimeNew, const double TimeOld,
163
+ const bool ExtPot, const real MinEint )
164
+ {
165
+
166
+ // model-independent constants
167
+ # if ( POT_SCHEME == SOR )
168
+ const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z );
169
+ # elif ( POT_SCHEME == MG )
170
+ const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 );
171
+ # endif
172
+ const dim3 Gra_Block_Dim( GRA_BLOCK_SIZE );
173
+ const int NPatch = NPatchGroup*8;
174
+ # if ( POT_SCHEME == SOR )
175
+ const real Poi_Const = Poi_Coeff*dh*dh;
176
+ const real SOR_Omega_6 = SOR_Omega/6.0;
177
+ # endif
178
+
179
+ // model-dependent constants
180
+ # if ( MODEL == HYDRO )
181
+
182
+ # elif ( MODEL == ELBDM )
183
+ const real ELBDM_EtaDt = ELBDM_Eta*dt;
184
+
185
+ # else
186
+ # error : ERROR : unsupported MODEL !!
187
+ # endif
188
+
189
+
190
+ // check
191
+ # if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 )
192
+ # warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !!
193
+ # endif
194
+
195
+ # ifdef GAMER_DEBUG
196
+ const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z;
197
+
198
+ // minimum number of threads for spatial interpolation
199
+ if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) )
200
+ Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n",
201
+ Poi_NThread, (POT_NXT-2)*(POT_NXT-2) );
202
+
203
+ // constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG"
204
+ # if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG )
205
+ if ( Poisson && Poi_NThread < 64 )
206
+ Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread );
207
+ # endif
208
+
209
+ // constraint in "CUPOT_PoissonSolver_SOR_16to18cube"
210
+ # if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 )
211
+ if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 )
212
+ Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread,
213
+ RHO_NXT*RHO_NXT/2 );
214
+ # endif
215
+
216
+ if ( GraAcc )
217
+ {
218
+ if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot )
219
+ {
220
+ if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" );
221
+ if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" );
222
+ }
223
+
224
+ # ifdef UNSPLIT_GRAVITY
225
+ if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH )
226
+ {
227
+ if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" );
228
+ if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" );
229
+ }
230
+
231
+ if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" );
232
+ if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" );
233
+ # endif
234
+
235
+ # ifdef DUAL_ENERGY
236
+ if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" );
237
+ if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" );
238
+ # endif
239
+
240
+ # ifdef MHD
241
+ if ( h_EngyB_Array == NULL ) Aux_Error( ERROR_INFO, "h_EngyB_Array == NULL !!\n" );
242
+ if ( d_EngyB_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_EngyB_Array_G == NULL !!\n" );
243
+ # endif
244
+ }
245
+ # endif // #ifdef GAMER_DEBUG
246
+
247
+ if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) )
248
+ Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme );
249
+
250
+
251
+ int *NPatch_per_Stream = new int [GPU_NStream];
252
+ int *Rho_MemSize = new int [GPU_NStream];
253
+ int *Pot_MemSize_In = new int [GPU_NStream];
254
+ int *Pot_MemSize_Out = new int [GPU_NStream];
255
+ int *Flu_MemSize = new int [GPU_NStream];
256
+ int *Corner_MemSize = new int [GPU_NStream];
257
+ int *UsedPatch = new int [GPU_NStream];
258
+ # ifdef UNSPLIT_GRAVITY
259
+ int *Pot_USG_MemSize = new int [GPU_NStream];
260
+ int *Flu_USG_MemSize = new int [GPU_NStream];
261
+ # endif
262
+ # ifdef DUAL_ENERGY
263
+ int *DE_MemSize = new int [GPU_NStream];
264
+ # endif
265
+ # ifdef MHD
266
+ int *EngyB_MemSize = new int [GPU_NStream];
267
+ # endif
268
+
269
+
270
+ // set the number of patches in each stream
271
+ UsedPatch[0] = 0;
272
+
273
+ if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch;
274
+ else
275
+ {
276
+ for (int s=0; s<GPU_NStream-1; s++)
277
+ {
278
+ NPatch_per_Stream[s] = NPatch/GPU_NStream;
279
+ UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s];
280
+ }
281
+
282
+ NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1];
283
+ }
284
+
285
+
286
+ // set the size of data to be transferred into GPU in each stream
287
+ for (int s=0; s<GPU_NStream; s++)
288
+ {
289
+ Rho_MemSize [s] = NPatch_per_Stream[s]*CUBE(RHO_NXT )*sizeof(real);
290
+ Pot_MemSize_In [s] = NPatch_per_Stream[s]*CUBE(POT_NXT )*sizeof(real);
291
+ Pot_MemSize_Out[s] = NPatch_per_Stream[s]*CUBE(GRA_NXT )*sizeof(real);
292
+ Flu_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*GRA_NIN;
293
+ Corner_MemSize [s] = NPatch_per_Stream[s]*3 *sizeof(double);
294
+ # ifdef UNSPLIT_GRAVITY
295
+ Pot_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(USG_NXT_G)*sizeof(real);
296
+ Flu_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*(GRA_NIN-1);
297
+ # endif
298
+ # ifdef DUAL_ENERGY
299
+ DE_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(char);
300
+ # endif
301
+ # ifdef MHD
302
+ EngyB_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real);
303
+ # endif
304
+ }
305
+
306
+
307
+ // a. copy data from host to device
308
+ //=========================================================================================
309
+ for (int s=0; s<GPU_NStream; s++)
310
+ {
311
+ if ( NPatch_per_Stream[s] == 0 ) continue;
312
+
313
+ if ( Poisson )
314
+ {
315
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Rho_Array_P + UsedPatch[s], h_Rho_Array + UsedPatch[s],
316
+ Rho_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
317
+
318
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_In + UsedPatch[s], h_Pot_Array_In + UsedPatch[s],
319
+ Pot_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) );
320
+ }
321
+
322
+ if ( GraAcc )
323
+ {
324
+ if ( ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) && !Poisson )
325
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_Out + UsedPatch[s], h_Pot_Array_Out + UsedPatch[s],
326
+ Pot_MemSize_Out[s], cudaMemcpyHostToDevice, Stream[s] ) );
327
+
328
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_G + UsedPatch[s], h_Flu_Array + UsedPatch[s],
329
+ Flu_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
330
+
331
+ if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot )
332
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Corner_Array_G + UsedPatch[s], h_Corner_Array + UsedPatch[s],
333
+ Corner_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
334
+ # ifdef UNSPLIT_GRAVITY
335
+ if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH )
336
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_USG_G + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s],
337
+ Pot_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
338
+
339
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_USG_G + UsedPatch[s], h_Flu_Array_USG + UsedPatch[s],
340
+ Flu_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
341
+ # endif
342
+
343
+ # ifdef DUAL_ENERGY
344
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_DE_Array_G + UsedPatch[s], h_DE_Array + UsedPatch[s],
345
+ DE_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
346
+ # endif
347
+
348
+ # ifdef MHD
349
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( d_EngyB_Array_G + UsedPatch[s], h_EngyB_Array + UsedPatch[s],
350
+ EngyB_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
351
+ # endif
352
+ } // if ( GraAcc )
353
+ } // for (int s=0; s<GPU_NStream; s++)
354
+
355
+
356
+ // b. execute the kernel
357
+ //=========================================================================================
358
+ for (int s=0; s<GPU_NStream; s++)
359
+ {
360
+ if ( NPatch_per_Stream[s] == 0 ) continue;
361
+
362
+ // b1. Poisson solver
363
+ if ( Poisson )
364
+ {
365
+ # if ( POT_SCHEME == SOR )
366
+
367
+ # ifdef USE_PSOLVER_10TO14
368
+ CUPOT_PoissonSolver_SOR_10to14cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>>
369
+ ( d_Rho_Array_P + UsedPatch[s],
370
+ d_Pot_Array_P_In + UsedPatch[s],
371
+ d_Pot_Array_P_Out + UsedPatch[s],
372
+ SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme );
373
+ # else
374
+ CUPOT_PoissonSolver_SOR_16to18cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>>
375
+ ( d_Rho_Array_P + UsedPatch[s],
376
+ d_Pot_Array_P_In + UsedPatch[s],
377
+ d_Pot_Array_P_Out + UsedPatch[s],
378
+ SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme );
379
+ # endif // #ifdef USE_PSOLVER_10TO14 ... else ...
380
+
381
+ # elif ( POT_SCHEME == MG )
382
+
383
+ CUPOT_PoissonSolver_MG <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>>
384
+ ( d_Rho_Array_P + UsedPatch[s],
385
+ d_Pot_Array_P_In + UsedPatch[s],
386
+ d_Pot_Array_P_Out + UsedPatch[s],
387
+ dh, MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error,
388
+ Poi_Coeff, IntScheme );
389
+
390
+ # else
391
+
392
+ # error : unsupported GPU Poisson solver
393
+
394
+ # endif // POT_SCHEME
395
+ } // if ( Poisson )
396
+
397
+
398
+ // b2. Gravity solver
399
+ if ( GraAcc )
400
+ {
401
+ # if ( MODEL == HYDRO )
402
+ CUPOT_HydroGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>>
403
+ ( d_Flu_Array_G + UsedPatch[s],
404
+ d_Pot_Array_P_Out + UsedPatch[s],
405
+ d_Corner_Array_G + UsedPatch[s],
406
+ d_Pot_Array_USG_G + UsedPatch[s],
407
+ d_Flu_Array_USG_G + UsedPatch[s],
408
+ d_DE_Array_G + UsedPatch[s],
409
+ d_EngyB_Array_G + UsedPatch[s],
410
+ dt, dh, P5_Gradient, GravityType, TimeNew, TimeOld, MinEint );
411
+
412
+ # elif ( MODEL == ELBDM )
413
+ CUPOT_ELBDMGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>>
414
+ ( d_Flu_Array_G + UsedPatch[s],
415
+ d_Pot_Array_P_Out + UsedPatch[s],
416
+ d_Corner_Array_G + UsedPatch[s],
417
+ ELBDM_EtaDt, dh, ELBDM_Lambda, ExtPot, TimeNew );
418
+
419
+ # else
420
+ # error : ERROR : unsupported MODEL !!
421
+ # endif // MODEL
422
+ } // if ( GraAcc )
423
+
424
+ CUDA_CHECK_ERROR( cudaGetLastError() );
425
+ } // for (int s=0; s<GPU_NStream; s++)
426
+
427
+
428
+ // c. copy data from device to host
429
+ //=========================================================================================
430
+ for (int s=0; s<GPU_NStream; s++)
431
+ {
432
+ if ( NPatch_per_Stream[s] == 0 ) continue;
433
+
434
+ if ( Poisson )
435
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Pot_Array_Out + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s],
436
+ Pot_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
437
+
438
+ if ( GraAcc )
439
+ {
440
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flu_Array + UsedPatch[s], d_Flu_Array_G + UsedPatch[s],
441
+ Flu_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
442
+
443
+ # ifdef DUAL_ENERGY
444
+ CUDA_CHECK_ERROR( cudaMemcpyAsync( h_DE_Array + UsedPatch[s], d_DE_Array_G + UsedPatch[s],
445
+ DE_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
446
+ # endif
447
+ }
448
+ } // for (int s=0; s<GPU_NStream; s++)
449
+
450
+
451
+ delete [] NPatch_per_Stream;
452
+ delete [] Rho_MemSize;
453
+ delete [] Pot_MemSize_In;
454
+ delete [] Pot_MemSize_Out;
455
+ delete [] Flu_MemSize;
456
+ delete [] Corner_MemSize;
457
+ delete [] UsedPatch;
458
+ # ifdef UNSPLIT_GRAVITY
459
+ delete [] Pot_USG_MemSize;
460
+ delete [] Flu_USG_MemSize;
461
+ # endif
462
+ # ifdef DUAL_ENERGY
463
+ delete [] DE_MemSize;
464
+ # endif
465
+ # ifdef MHD
466
+ delete [] EngyB_MemSize;
467
+ # endif
468
+
469
+ } // FUNCTION : CUAPI_Asyn_PoissonGravitySolver
470
+
471
+
472
+
473
+ #endif // #if ( defined GPU && defined GRAVITY )
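
CUAPI_Asyn_PoissonGravitySolver above splits the patches into GPU_NStream chunks (via the UsedPatch[] offsets) so that the host-to-device copies, the Poisson/Gravity kernels, and the device-to-host copies of different chunks can overlap on separate CUDA streams. Below is a minimal standalone sketch of that copy/compute/copy-back overlap pattern; it is not GAMER's API, and the array names and the dummy kernel are hypothetical. Note that cudaMemcpyAsync only overlaps with computation when the host buffers are page-locked, hence cudaMallocHost.

#include <cuda_runtime.h>
#include <cstdio>

// dummy kernel standing in for the Poisson / Gravity solvers
__global__ void DummySolver( float *d_data, const int n )
{
   const int i = blockIdx.x*blockDim.x + threadIdx.x;
   if ( i < n )  d_data[i] *= 2.0f;
}

int main()
{
   const int NStream = 4;
   const int NTotal  = 1 << 20;
   const int NPerStm = NTotal / NStream;   // assume divisible for simplicity

   float *h_data, *d_data;
   cudaMallocHost( (void**)&h_data, NTotal*sizeof(float) );   // page-locked memory is required for truly asynchronous copies
   cudaMalloc    ( (void**)&d_data, NTotal*sizeof(float) );
   for (int i=0; i<NTotal; i++)  h_data[i] = 1.0f;

   cudaStream_t Stream[NStream];
   for (int s=0; s<NStream; s++)  cudaStreamCreate( &Stream[s] );

   for (int s=0; s<NStream; s++)
   {
      const int offset = s*NPerStm;   // plays the role of UsedPatch[s]

      // copy in, compute, and copy out on the same stream --> chunks on different streams overlap
      cudaMemcpyAsync( d_data+offset, h_data+offset, NPerStm*sizeof(float),
                       cudaMemcpyHostToDevice, Stream[s] );

      DummySolver <<< (NPerStm+255)/256, 256, 0, Stream[s] >>> ( d_data+offset, NPerStm );

      cudaMemcpyAsync( h_data+offset, d_data+offset, NPerStm*sizeof(float),
                       cudaMemcpyDeviceToHost, Stream[s] );
   }

   cudaDeviceSynchronize();
   printf( "h_data[0] = %f\n", h_data[0] );   // 2.000000

   for (int s=0; s<NStream; s++)  cudaStreamDestroy( Stream[s] );
   cudaFreeHost( h_data );
   cudaFree( d_data );
   return 0;
}

With one stream the loop degenerates to the usual synchronous copy-compute-copy sequence, which is exactly the GPU_NStream == 1 special case handled above.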
cuda_code/CUFLU_Shared_FullStepUpdate_1.cu ADDED
@@ -0,0 +1,188 @@
1
+ #ifndef __CUFLU_FULLSTEPUPDATE__
2
+ #define __CUFLU_FULLSTEPUPDATE__
3
+
4
+
5
+
6
+ #include "CUFLU.h"
7
+
8
+ #if ( MODEL == HYDRO && ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU ) )
9
+
10
+
11
+
12
+ // external functions
13
+ #ifdef __CUDACC__
14
+
15
+ #if ( NCOMP_PASSIVE > 0 )
16
+ # include "CUFLU_Shared_FluUtility.cu"
17
+ #endif
18
+
19
+ #ifdef DUAL_ENERGY
20
+ # include "CUFLU_Shared_DualEnergy.cu"
21
+ #endif
22
+
23
+ #endif // #ifdef __CUDACC__
24
+
25
+
26
+
27
+
28
+ //-------------------------------------------------------------------------------------------------------
29
+ // Function : Hydro_FullStepUpdate
30
+ // Description : Evaluate the full-step solution
31
+ //
32
+ // Note : 1. This function is shared by MHM, MHM_RP, and CTU schemes
33
+ // 2. Invoke dual-energy check if DualEnergySwitch is on
34
+ //
35
+ // Parameter : g_Input : Array storing the input fluid data
36
+ // g_Output : Array to store the updated fluid data
37
+ // g_DE_Status : Array to store the dual-energy status
38
+ // g_FC_B : Array storing the updated face-centered B field
39
+ // --> For the dual-energy formalism only
40
+ // g_Flux : Array storing the input face-centered fluxes
41
+ // --> Accessed with the array stride N_FL_FLUX even though its actually
42
+ // allocated size is N_FC_FLUX^3
43
+ // dt : Time interval to advance solution
44
+ // dh : Cell size
45
+ // MinDens/Eint : Density and internal energy floors
46
+ // DualEnergySwitch : Use the dual-energy formalism if E_int/E_kin < DualEnergySwitch
47
+ // NormPassive : true --> normalize passive scalars so that the sum of their mass density
48
+ // is equal to the gas mass density
49
+ // NNorm : Number of passive scalars to be normalized
50
+ // --> Should be set to the global variable "PassiveNorm_NVar"
51
+ // NormIdx : Target variable indices to be normalized
52
+ // --> Should be set to the global variable "PassiveNorm_VarIdx"
53
+ // EoS : EoS object
54
+ // --> Only for obtaining Gamma used by the dual-energy formalism
55
+ //-------------------------------------------------------------------------------------------------------
56
+ GPU_DEVICE
57
+ void Hydro_FullStepUpdate( const real g_Input[][ CUBE(FLU_NXT) ], real g_Output[][ CUBE(PS2) ], char g_DE_Status[],
58
+ const real g_FC_B[][ PS2P1*SQR(PS2) ], const real g_Flux[][NCOMP_TOTAL_PLUS_MAG][ CUBE(N_FC_FLUX) ],
59
+ const real dt, const real dh, const real MinDens, const real MinEint,
60
+ const real DualEnergySwitch, const bool NormPassive, const int NNorm, const int NormIdx[],
61
+ const EoS_t *EoS )
62
+ {
63
+
64
+ const int didx_flux[3] = { 1, N_FL_FLUX, SQR(N_FL_FLUX) };
65
+ const real dt_dh = dt/dh;
66
+
67
+ real dFlux[3][NCOMP_TOTAL], Output_1Cell[NCOMP_TOTAL], Emag;
68
+
69
+
70
+ const int size_ij = SQR(PS2);
71
+ CGPU_LOOP( idx_out, CUBE(PS2) )
72
+ {
73
+ const int i_out = idx_out % PS2;
74
+ const int j_out = idx_out % size_ij / PS2;
75
+ const int k_out = idx_out / size_ij;
76
+
77
+ // for MHD, one additional flux is evaluated along each transverse direction for computing the CT electric field
78
+ # ifdef MHD
79
+ const int i_flux = i_out + 1;
80
+ const int j_flux = j_out + 1;
81
+ const int k_flux = k_out + 1;
82
+ # else
83
+ const int i_flux = i_out;
84
+ const int j_flux = j_out;
85
+ const int k_flux = k_out;
86
+ # endif
87
+ const int idx_flux = IDX321( i_flux, j_flux, k_flux, N_FL_FLUX, N_FL_FLUX );
88
+
89
+ const int i_in = i_out + FLU_GHOST_SIZE;
90
+ const int j_in = j_out + FLU_GHOST_SIZE;
91
+ const int k_in = k_out + FLU_GHOST_SIZE;
92
+ const int idx_in = IDX321( i_in, j_in, k_in, FLU_NXT, FLU_NXT );
93
+
94
+
95
+ // 1. calculate flux difference to update the fluid data
96
+ for (int d=0; d<3; d++)
97
+ for (int v=0; v<NCOMP_TOTAL; v++)
98
+ {
99
+ # ifdef MHD
100
+ dFlux[d][v] = g_Flux[d][v][idx_flux] - g_Flux[d][v][ idx_flux - didx_flux[d] ];
101
+ # else
102
+ dFlux[d][v] = g_Flux[d][v][ idx_flux + didx_flux[d] ] - g_Flux[d][v][idx_flux];
103
+ # endif
104
+ }
105
+
106
+ for (int v=0; v<NCOMP_TOTAL; v++)
107
+ Output_1Cell[v] = g_Input[v][idx_in] - dt_dh*( dFlux[0][v] + dFlux[1][v] + dFlux[2][v] );
108
+
109
+
110
+ // we no longer ensure positive density and pressure here
111
+ // --> these checks have been moved to Flu_Close()->CorrectUnphysical()
112
+ // because we want to apply 1st-order-flux correction BEFORE setting a minimum density and pressure
113
+ // --> this consideration holds even when DUAL_ENERGY is adopted (e.g., when density is negative,
114
+ // even when DUAL_ENERGY is on, we still want to try the 1st-order-flux correction before setting a floor value)
115
+ // --> but for barotropic EoS, we apply Eint floor here to avoid any false alarm caused by Eint<0
116
+ # ifdef BAROTROPIC_EOS
117
+ # ifdef MHD
118
+ Emag = MHD_GetCellCenteredBEnergy( g_FC_B[MAGX], g_FC_B[MAGY], g_FC_B[MAGZ],
119
+ PS2, PS2, PS2, i_out, j_out, k_out );
120
+ # else
121
+ Emag = NULL_REAL;
122
+ # endif
123
+ // Output_1Cell[DENS] = FMAX( Output_1Cell[DENS], MinDens );
124
+ Output_1Cell[ENGY] = Hydro_CheckMinEintInEngy( Output_1Cell[DENS], Output_1Cell[MOMX],
125
+ Output_1Cell[MOMY], Output_1Cell[MOMZ],
126
+ Output_1Cell[ENGY], MinEint, Emag );
127
+ # endif // #ifdef BAROTROPIC_EOS
128
+
129
+
130
+ // 2. floor and normalize passive scalars
131
+ # if ( NCOMP_PASSIVE > 0 )
132
+ for (int v=NCOMP_FLUID; v<NCOMP_TOTAL; v++) Output_1Cell[v] = FMAX( Output_1Cell[v], TINY_NUMBER );
133
+
134
+ if ( NormPassive )
135
+ Hydro_NormalizePassive( Output_1Cell[DENS], Output_1Cell+NCOMP_FLUID, NNorm, NormIdx );
136
+ # endif
137
+
138
+
139
+ // 3. apply the dual-energy formalism to correct the internal energy
140
+ // --> currently, even when UNSPLIT_GRAVITY is on (which would update the internal energy), we still invoke
141
+ // Hydro_DualEnergyFix() here and will fix the internal energy in the gravity solver for cells updated
142
+ // by the dual-energy formalism (i.e., for cells with their dual-energy status marked as DE_UPDATED_BY_DUAL)
143
+ // --> this feature might be modified in the future
144
+ # ifdef DUAL_ENERGY
145
+ // B field must be updated in advance
146
+ # ifdef MHD
147
+ Emag = MHD_GetCellCenteredBEnergy( g_FC_B[MAGX], g_FC_B[MAGY], g_FC_B[MAGZ],
148
+ PS2, PS2, PS2, i_out, j_out, k_out );
149
+ # else
150
+ Emag = NULL_REAL;
151
+ # endif
152
+ // we no longer apply density and pressure floors here since we want to enable 1st-order-flux correction for that
153
+ const bool CheckMinPres_No = false;
154
+ // Output_1Cell[DENS] = FMAX( Output_1Cell[DENS], MinDens );
155
+
156
+ Hydro_DualEnergyFix( Output_1Cell[DENS], Output_1Cell[MOMX], Output_1Cell[MOMY], Output_1Cell[MOMZ],
157
+ Output_1Cell[ENGY], Output_1Cell[ENPY], g_DE_Status[idx_out],
158
+ EoS->AuxArrayDevPtr_Flt[1], EoS->AuxArrayDevPtr_Flt[2], CheckMinPres_No, NULL_REAL,
159
+ DualEnergySwitch, Emag );
160
+ # endif // #ifdef DUAL_ENERGY
161
+
162
+
163
+ // 4. store results to the output array
164
+ for (int v=0; v<NCOMP_TOTAL; v++) g_Output[v][idx_out] = Output_1Cell[v];
165
+
166
+
167
+ // 5. check the negative density and energy
168
+ # ifdef CHECK_NEGATIVE_IN_FLUID
169
+ if ( Hydro_CheckNegative(Output_1Cell[DENS]) )
170
+ printf( "WARNING : invalid density (%14.7e) at file <%s>, line <%d>, function <%s>\n",
171
+ Output_1Cell[DENS], __FILE__, __LINE__, __FUNCTION__ );
172
+
173
+ if ( Hydro_CheckNegative(Output_1Cell[ENGY]) )
174
+ printf( "WARNING : invalid energy (%14.7e) at file <%s>, line <%d>, function <%s>\n",
175
+ Output_1Cell[ENGY], __FILE__, __LINE__, __FUNCTION__ );
176
+ # endif
177
+
178
+ } // CGPU_LOOP( idx_out, CUBE(PS2) )
179
+
180
+ } // FUNCTION : Hydro_FullStepUpdate
181
+
182
+
183
+
184
+ #endif // #if ( FLU_SCHEME == MHM || FLU_SCHEME == MHM_RP || FLU_SCHEME == CTU )
185
+
186
+
187
+
188
+ #endif // #ifndef __CUFLU_FULLSTEPUPDATE__
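
The core of Hydro_FullStepUpdate above (step 1) is the standard finite-volume update: each conserved variable is advanced by the flux difference across the two faces of the cell in every direction, scaled by dt/dh. A minimal 1D, single-variable sketch of that update (illustrative only; the names are not taken from the file above):

// 1D finite-volume full-step update:  U_new[i] = U_old[i] - dt/dh * ( F[i+1] - F[i] )
// F[i] is the flux through the left face of cell i, so F holds N+1 entries for N cells
void FullStepUpdate1D( const double *U_old, double *U_new, const double *F,
                       const int N, const double dt, const double dh )
{
   const double dt_dh = dt/dh;

   for (int i=0; i<N; i++)
      U_new[i] = U_old[i] - dt_dh*( F[i+1] - F[i] );
}

In the 3D kernel the same dt_dh*(...) term is summed over the x, y, and z face pairs, and the MHD branch merely shifts the flux indices by one because an extra transverse flux is stored for the constrained-transport electric field.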
cuda_code/Col2Im_9.cu ADDED
@@ -0,0 +1,207 @@
1
+ #include <ATen/ATen.h>
2
+ #include <ATen/AccumulateType.h>
3
+ #include <ATen/NativeFunctions.h>
4
+ #include <ATen/TensorUtils.h>
5
+ #include <ATen/Utils.h>
6
+ #include <ATen/div_rtn.h>
7
+
8
+ #include <ATen/cuda/CUDAContext.h>
9
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
10
+
11
+ #include <ATen/native/cuda/im2col.cuh>
12
+ #include <ATen/native/im2col_shape_check.h>
13
+
14
+ namespace at {
15
+ namespace native {
16
+ namespace {
17
+
18
+ void col2im_out_cuda_template(
19
+ Tensor& output,
20
+ const Tensor& input_,
21
+ IntArrayRef output_size,
22
+ IntArrayRef kernel_size,
23
+ IntArrayRef dilation,
24
+ IntArrayRef padding,
25
+ IntArrayRef stride) {
26
+ TensorArg input_arg{input_, "input", 1};
27
+ TensorArg output_arg{output, "output", 2};
28
+ checkAllSameGPU("col2im_out_cuda", {input_arg, output_arg});
29
+
30
+ TORCH_CHECK(
31
+ output_size.size() == 2,
32
+ "It is expected output_size equals to 2, but got size ",
33
+ output_size.size());
34
+
35
+ TORCH_CHECK(
36
+ kernel_size.size() == 2,
37
+ "It is expected kernel_size equals to 2, but got size ",
38
+ kernel_size.size());
39
+
40
+ TORCH_CHECK(
41
+ dilation.size() == 2,
42
+ "It is expected dilation equals to 2, but got size ",
43
+ dilation.size());
44
+
45
+ TORCH_CHECK(
46
+ padding.size() == 2,
47
+ "It is expected padding equals to 2, but got size ",
48
+ padding.size());
49
+
50
+ TORCH_CHECK(
51
+ stride.size() == 2,
52
+ "It is expected stride equals to 2, but got size ",
53
+ stride.size());
54
+
55
+ int64_t output_height = output_size[0];
56
+ int64_t output_width = output_size[1];
57
+ int64_t kernel_height = kernel_size[0];
58
+ int64_t kernel_width = kernel_size[1];
59
+ int64_t dilation_height = dilation[0];
60
+ int64_t dilation_width = dilation[1];
61
+ int64_t pad_height = padding[0];
62
+ int64_t pad_width = padding[1];
63
+ int64_t stride_height = stride[0];
64
+ int64_t stride_width = stride[1];
65
+
66
+ col2im_shape_check(
67
+ input_,
68
+ Tensor(),
69
+ output_height,
70
+ output_width,
71
+ kernel_height,
72
+ kernel_width,
73
+ dilation_height,
74
+ dilation_width,
75
+ pad_height,
76
+ pad_width,
77
+ stride_height,
78
+ stride_width);
79
+
80
+ Tensor input = input_.contiguous();
81
+
82
+ bool batched_input = true;
83
+ if (input.dim() == 2) {
84
+ // Force batch
85
+ batched_input = false;
86
+ input.resize_({1, input.size(0), input.size(1)});
87
+ }
88
+
89
+ int64_t batch_size = input.size(0);
90
+ int64_t n_input_plane = input.size(1);
91
+ int64_t n_output_plane = n_input_plane / (kernel_width * kernel_height);
92
+
93
+ output.resize_({batch_size, n_output_plane, output_height, output_width});
94
+ output.zero_();
95
+
96
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "col2im_out_cuda", [&] {
97
+ using accscalar_t = at::acc_type<scalar_t, true>;
98
+
99
+ Tensor input_n;
100
+ Tensor output_n;
101
+
102
+ int64_t height_col = (output_height + 2 * pad_height -
103
+ (dilation_height * (kernel_height - 1) + 1)) /
104
+ stride_height +
105
+ 1;
106
+ int64_t width_col = (output_width + 2 * pad_width -
107
+ (dilation_width * (kernel_width - 1) + 1)) /
108
+ stride_width +
109
+ 1;
110
+
111
+ for (int64_t elt = 0; elt < batch_size; elt++) {
112
+ input_n = input.select(0, elt);
113
+ output_n = output.select(0, elt);
114
+
115
+ col2im<scalar_t, accscalar_t>(
116
+ at::cuda::getCurrentCUDAStream(),
117
+ input_n.data_ptr<scalar_t>(),
118
+ n_output_plane,
119
+ output_height,
120
+ output_width,
121
+ height_col,
122
+ width_col,
123
+ kernel_height,
124
+ kernel_width,
125
+ pad_height,
126
+ pad_width,
127
+ stride_height,
128
+ stride_width,
129
+ dilation_height,
130
+ dilation_width,
131
+ output_n.data_ptr<scalar_t>());
132
+ }
133
+
134
+ if (!batched_input) {
135
+ output.resize_({n_output_plane, output_height, output_width});
136
+ }
137
+ });
138
+ }
139
+
140
+ void col2im_backward_out_cuda_template(
141
+ Tensor& grad_input,
142
+ const Tensor& grad_output,
143
+ IntArrayRef kernel_size,
144
+ IntArrayRef dilation,
145
+ IntArrayRef padding,
146
+ IntArrayRef stride) {
147
+ // im2col_out_cuda checks size of kernel_size, dilation, padding and stride
148
+ im2col_out_cuda(
149
+ grad_input, grad_output, kernel_size, dilation, padding, stride);
150
+ }
151
+
152
+ } // namespace
153
+
154
+ Tensor& col2im_out_cuda(
155
+ Tensor& output,
156
+ const Tensor& input,
157
+ IntArrayRef output_size,
158
+ IntArrayRef kernel_size,
159
+ IntArrayRef dilation,
160
+ IntArrayRef padding,
161
+ IntArrayRef stride) {
162
+ col2im_out_cuda_template(
163
+ output, input, output_size, kernel_size, dilation, padding, stride);
164
+ return output;
165
+ }
166
+
167
+ Tensor col2im_cuda(
168
+ const Tensor& input,
169
+ IntArrayRef output_size,
170
+ IntArrayRef kernel_size,
171
+ IntArrayRef dilation,
172
+ IntArrayRef padding,
173
+ IntArrayRef stride) {
174
+ Tensor output = at::empty_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
175
+
176
+ col2im_out_cuda_template(
177
+ output, input, output_size, kernel_size, dilation, padding, stride);
178
+ return output;
179
+ }
180
+
181
+ Tensor& col2im_backward_out_cuda(
182
+ Tensor& grad_input,
183
+ const Tensor& grad_output,
184
+ IntArrayRef kernel_size,
185
+ IntArrayRef dilation,
186
+ IntArrayRef padding,
187
+ IntArrayRef stride) {
188
+ col2im_backward_out_cuda_template(
189
+ grad_input, grad_output, kernel_size, dilation, padding, stride);
190
+ return grad_input;
191
+ }
192
+
193
+ Tensor col2im_backward_cuda(
194
+ const Tensor& grad_output,
195
+ IntArrayRef kernel_size,
196
+ IntArrayRef dilation,
197
+ IntArrayRef padding,
198
+ IntArrayRef stride) {
199
+ Tensor grad_input = at::empty_like(grad_output, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
200
+
201
+ col2im_backward_out_cuda_template(
202
+ grad_input, grad_output, kernel_size, dilation, padding, stride);
203
+ return grad_input;
204
+ }
205
+
206
+ } // namespace native
207
+ } // namespace at
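
col2im is the inverse of im2col: it accumulates the unfolded column blocks back into an image, which is why col2im_backward_out_cuda_template above simply dispatches to im2col_out_cuda. A hedged usage sketch with libtorch, assuming a CUDA-enabled build and the standard at::col2im signature (shapes chosen only for illustration):

#include <torch/torch.h>
#include <iostream>

int main()
{
   // columns of shape [N, C*kh*kw, L] as produced by im2col / unfold
   // here: C=3, kernel 2x2, output 4x4, stride 1, no padding/dilation --> L = 3*3 = 9
   auto cols = torch::randn({1, 3*2*2, 9}, torch::kCUDA);

   // fold the columns back into an image of spatial size 4x4
   auto img = at::col2im(cols,
                         /*output_size=*/{4, 4},
                         /*kernel_size=*/{2, 2},
                         /*dilation=*/   {1, 1},
                         /*padding=*/    {0, 0},
                         /*stride=*/     {1, 1});

   std::cout << img.sizes() << std::endl;   // [1, 3, 4, 4]
   return 0;
}

With kernel 2x2, stride 1, and no padding or dilation on a 4x4 output, the column length is L = 3*3 = 9 and the folded result has shape [1, 3, 4, 4], matching the height_col/width_col formula used in the batch loop above.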
cuda_code/CommunicatorGridGPU_1.cu ADDED
@@ -0,0 +1,191 @@
1
+ // Copyright (c) 2009-2022 The Regents of the University of Michigan.
2
+ // Part of HOOMD-blue, released under the BSD 3-Clause License.
3
+
4
+ #include "hip/hip_runtime.h"
5
+
6
+ #ifdef __HIP_PLATFORM_HCC__
7
+ #include <hipfft.h>
8
+ #else
9
+ #include <cufft.h>
10
+ typedef cufftComplex hipfftComplex;
11
+ #endif
12
+
13
+ #include "CommunicatorGridGPU.cuh"
14
+ //! Define plus operator for complex data type (needed by CommunicatorMesh)
15
+ __device__ inline hipfftComplex operator+(hipfftComplex& lhs, const hipfftComplex& rhs)
16
+ {
17
+ hipfftComplex res;
18
+ res.x = lhs.x + rhs.x;
19
+ res.y = lhs.y + rhs.y;
20
+ return res;
21
+ }
22
+
23
+ namespace hoomd
24
+ {
25
+ namespace md
26
+ {
27
+ namespace kernel
28
+ {
29
+ template<typename T>
30
+ __global__ void gpu_gridcomm_scatter_send_cells_kernel(unsigned int n_send_cells,
31
+ unsigned int* d_send_idx,
32
+ const T* d_grid,
33
+ T* d_send_buf)
34
+ {
35
+ unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
36
+
37
+ if (idx >= n_send_cells)
38
+ return;
39
+ d_send_buf[idx] = d_grid[d_send_idx[idx]];
40
+ }
41
+
42
+ template<typename T, bool add_outer>
43
+ __global__ void gpu_gridcomm_scatter_add_recv_cells_kernel(unsigned int n_unique_recv_cells,
44
+ const T* d_recv_buf,
45
+ T* d_grid,
46
+ const unsigned int* d_cell_recv,
47
+ const unsigned int* d_cell_recv_begin,
48
+ const unsigned int* d_cell_recv_end,
49
+ const unsigned int* d_recv_idx)
50
+ {
51
+ unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
52
+ if (idx >= n_unique_recv_cells)
53
+ return;
54
+
55
+ unsigned int begin = d_cell_recv_begin[idx];
56
+ unsigned int end = d_cell_recv_end[idx];
57
+
58
+ T val = d_recv_buf[d_cell_recv[begin]];
59
+
60
+ // add together multiple received cells
61
+ for (unsigned int i = begin + 1; i < end; i++)
62
+ val = val + d_recv_buf[d_cell_recv[i]];
63
+
64
+ unsigned int recv_cell = d_recv_idx[d_cell_recv[begin]];
65
+ if (add_outer)
66
+ {
67
+ // add to grid
68
+ d_grid[recv_cell] = d_grid[recv_cell] + val;
69
+ }
70
+ else
71
+ {
72
+ // write out to grid
73
+ d_grid[recv_cell] = val;
74
+ }
75
+ }
76
+
77
+ template<typename T>
78
+ void gpu_gridcomm_scatter_send_cells(unsigned int n_send_cells,
79
+ unsigned int* d_send_idx,
80
+ const T* d_grid,
81
+ T* d_send_buf)
82
+ {
83
+ unsigned int block_size = 256;
84
+ unsigned int n_blocks = n_send_cells / block_size + 1;
85
+
86
+ hipLaunchKernelGGL((gpu_gridcomm_scatter_send_cells_kernel<T>),
87
+ dim3(n_blocks),
88
+ dim3(block_size),
89
+ 0,
90
+ 0,
91
+ n_send_cells,
92
+ d_send_idx,
93
+ d_grid,
94
+ d_send_buf);
95
+ }
96
+
97
+ template<typename T>
98
+ void gpu_gridcomm_scatter_add_recv_cells(unsigned int n_unique_recv_cells,
99
+ const T* d_recv_buf,
100
+ T* d_grid,
101
+ const unsigned int* d_cell_recv,
102
+ const unsigned int* d_cell_recv_begin,
103
+ const unsigned int* d_cell_recv_end,
104
+ const unsigned int* d_recv_idx,
105
+ bool add_outer)
106
+ {
107
+ unsigned int block_size = 256;
108
+ unsigned int n_blocks = n_unique_recv_cells / block_size + 1;
109
+
110
+ if (add_outer)
111
+ {
112
+ hipLaunchKernelGGL((gpu_gridcomm_scatter_add_recv_cells_kernel<T, true>),
113
+ dim3(n_blocks),
114
+ dim3(block_size),
115
+ 0,
116
+ 0,
117
+ n_unique_recv_cells,
118
+ d_recv_buf,
119
+ d_grid,
120
+ d_cell_recv,
121
+ d_cell_recv_begin,
122
+ d_cell_recv_end,
123
+ d_recv_idx);
124
+ }
125
+ else
126
+ {
127
+ hipLaunchKernelGGL((gpu_gridcomm_scatter_add_recv_cells_kernel<T, false>),
128
+ dim3(n_blocks),
129
+ dim3(block_size),
130
+ 0,
131
+ 0,
132
+ n_unique_recv_cells,
133
+ d_recv_buf,
134
+ d_grid,
135
+ d_cell_recv,
136
+ d_cell_recv_begin,
137
+ d_cell_recv_end,
138
+ d_recv_idx);
139
+ }
140
+ }
141
+
142
+ //! Template instantiation for hipfftComplex
143
+ template void gpu_gridcomm_scatter_send_cells<hipfftComplex>(unsigned int n_send_cells,
144
+ unsigned int* d_send_idx,
145
+ const hipfftComplex* d_grid,
146
+ hipfftComplex* d_send_buf);
147
+
148
+ template void
149
+ gpu_gridcomm_scatter_add_recv_cells<hipfftComplex>(unsigned int n_unique_recv_cells,
150
+ const hipfftComplex* d_recv_buf,
151
+ hipfftComplex* d_grid,
152
+ const unsigned int* d_cell_recv,
153
+ const unsigned int* d_cell_recv_begin,
154
+ const unsigned int* d_cell_recv_end,
155
+ const unsigned int* d_recv_idx,
156
+ bool add_outer);
157
+
158
+ //! Template instantiation for Scalar
159
+ template void gpu_gridcomm_scatter_send_cells<Scalar>(unsigned int n_send_cells,
160
+ unsigned int* d_send_idx,
161
+ const Scalar* d_grid,
162
+ Scalar* d_send_buf);
163
+
164
+ template void gpu_gridcomm_scatter_add_recv_cells<Scalar>(unsigned int n_unique_recv_cells,
165
+ const Scalar* d_recv_buf,
166
+ Scalar* d_grid,
167
+ const unsigned int* d_cell_recv,
168
+ const unsigned int* d_cell_recv_begin,
169
+ const unsigned int* d_cell_recv_end,
170
+ const unsigned int* d_recv_idx,
171
+ bool add_outer);
172
+
173
+ //! Template instantiation for unsigned int
174
+ template void gpu_gridcomm_scatter_send_cells<unsigned int>(unsigned int n_send_cells,
175
+ unsigned int* d_send_idx,
176
+ const unsigned int* d_grid,
177
+ unsigned int* d_send_buf);
178
+
179
+ template void
180
+ gpu_gridcomm_scatter_add_recv_cells<unsigned int>(unsigned int n_unique_recv_cells,
181
+ const unsigned int* d_recv_buf,
182
+ unsigned int* d_grid,
183
+ const unsigned int* d_cell_recv,
184
+ const unsigned int* d_cell_recv_begin,
185
+ const unsigned int* d_cell_recv_end,
186
+ const unsigned int* d_recv_idx,
187
+ bool add_outer);
188
+
189
+ } // end namespace kernel
190
+ } // end namespace md
191
+ } // end namespace hoomd
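
The send-side kernel above is a plain index gather: each thread copies one grid cell, addressed through d_send_idx, into a contiguous send buffer that can then be handed off for communication. A standalone sketch of the same gather pattern (plain CUDA, not the HOOMD-blue API; all names are illustrative):

#include <cuda_runtime.h>
#include <cstdio>

// gather grid[idx[i]] into buf[i], one thread per send cell
__global__ void gather_cells( const unsigned int n_send, const unsigned int *d_idx,
                              const float *d_grid, float *d_buf )
{
   const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
   if ( i >= n_send ) return;
   d_buf[i] = d_grid[ d_idx[i] ];
}

int main()
{
   const unsigned int n_grid = 1024, n_send = 16;

   float *d_grid, *d_buf;
   unsigned int *d_idx;
   cudaMalloc( (void**)&d_grid, n_grid*sizeof(float) );
   cudaMalloc( (void**)&d_buf,  n_send*sizeof(float) );
   cudaMalloc( (void**)&d_idx,  n_send*sizeof(unsigned int) );

   unsigned int h_idx[n_send];
   for (unsigned int i=0; i<n_send; i++)  h_idx[i] = i*64;   // e.g. boundary cells
   cudaMemcpy( d_idx, h_idx, sizeof(h_idx), cudaMemcpyHostToDevice );
   cudaMemset( d_grid, 0, n_grid*sizeof(float) );

   const unsigned int block = 256;
   gather_cells <<< (n_send + block - 1)/block, block >>> ( n_send, d_idx, d_grid, d_buf );
   cudaDeviceSynchronize();
   printf( "gather done: %s\n", cudaGetErrorString( cudaGetLastError() ) );

   cudaFree( d_grid );  cudaFree( d_buf );  cudaFree( d_idx );
   return 0;
}

The file above sizes its grid as n_send_cells / block_size + 1, which the early-return bounds check makes safe; the sketch uses the equivalent ceil-divide form (n + block - 1) / block.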
cuda_code/CompareEQKernel.cu ADDED
@@ -0,0 +1,28 @@
1
+ #include <ATen/Dispatch.h>
2
+ #include <ATen/native/BinaryOps.h>
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <ATen/native/TensorIterator.h>
5
+ #include <ATen/native/cuda/Loops.cuh>
6
+
7
+
8
+ // NOTE: CUDA on Windows requires that the enclosing function
9
+ // of a __device__ lambda not have internal linkage.
10
+
11
+ namespace at { namespace native {
12
+
13
+ template<typename scalar_t>
14
+ struct CompareEqFunctor {
15
+ __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
16
+ return a == b;
17
+ }
18
+ };
19
+
20
+ void eq_kernel_cuda(TensorIteratorBase& iter) {
21
+ AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "eq_cuda", [&]() {
22
+ gpu_kernel_with_scalars(iter, CompareEqFunctor<scalar_t>());
23
+ });
24
+ }
25
+
26
+ REGISTER_DISPATCH(eq_stub, &eq_kernel_cuda);
27
+
28
+ }} // namespace at::native
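
This file (and the nearly identical GE/GT kernels that follow) only defines an elementwise comparison functor and hands it to ATen's gpu_kernel_with_scalars, which generates the actual load/compare/store loop for every dtype selected by the dispatch macro. A reduced, self-contained sketch of the same functor-driven elementwise pattern without the TensorIterator machinery (the kernel and buffer names are hypothetical, not part of ATen):

#include <cuda_runtime.h>
#include <cstdio>

template<typename scalar_t>
struct CompareEqFunctor {
  __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
    return a == b;
  }
};

// apply an arbitrary binary functor elementwise, writing bool results
template<typename scalar_t, typename func_t>
__global__ void elementwise_compare( const scalar_t *a, const scalar_t *b, bool *out,
                                     const int n, func_t f )
{
  const int i = blockIdx.x*blockDim.x + threadIdx.x;
  if ( i < n )  out[i] = f( a[i], b[i] );
}

int main()
{
  const int n = 8;
  float h_a[n] = {0,1,2,3,4,5,6,7}, h_b[n] = {0,9,2,9,4,9,6,9};

  float *d_a, *d_b;  bool *d_out;
  cudaMalloc( (void**)&d_a,   n*sizeof(float) );
  cudaMalloc( (void**)&d_b,   n*sizeof(float) );
  cudaMalloc( (void**)&d_out, n*sizeof(bool) );
  cudaMemcpy( d_a, h_a, sizeof(h_a), cudaMemcpyHostToDevice );
  cudaMemcpy( d_b, h_b, sizeof(h_b), cudaMemcpyHostToDevice );

  elementwise_compare<<<1, 32>>>( d_a, d_b, d_out, n, CompareEqFunctor<float>() );

  bool h_out[n];
  cudaMemcpy( h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost );
  for (int i=0; i<n; i++)  printf( "%d", h_out[i] );   // 10101010
  printf( "\n" );

  cudaFree( d_a );  cudaFree( d_b );  cudaFree( d_out );
  return 0;
}

Passing the functor by value works because it is an empty, trivially copyable type, which is also why ATen can reuse one launcher for eq/ge/gt by just swapping the functor.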
cuda_code/CompareGEKernel_3.cu ADDED
@@ -0,0 +1,29 @@
1
+ #define TORCH_ASSERT_NO_OPERATORS
2
+ #include <ATen/Dispatch.h>
3
+ #include <ATen/native/BinaryOps.h>
4
+ #include <ATen/native/DispatchStub.h>
5
+ #include <ATen/native/TensorIterator.h>
6
+ #include <ATen/native/cuda/Loops.cuh>
7
+
8
+
9
+ // NOTE: CUDA on Windows requires that the enclosing function
10
+ // of a __device__ lambda not have internal linkage.
11
+
12
+ namespace at { namespace native {
13
+
14
+ template<typename scalar_t>
15
+ struct CompareGEFunctor {
16
+ __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
17
+ return a >= b;
18
+ }
19
+ };
20
+
21
+ void ge_kernel_cuda(TensorIteratorBase& iter) {
22
+ AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "ge_cuda", [&]() {
23
+ gpu_kernel_with_scalars(iter, CompareGEFunctor<scalar_t>());
24
+ });
25
+ }
26
+
27
+ REGISTER_DISPATCH(ge_stub, &ge_kernel_cuda);
28
+
29
+ }} // namespace at::native
cuda_code/CompareGTKernel.cu ADDED
@@ -0,0 +1,28 @@
 
1
+ #include <ATen/Dispatch.h>
2
+ #include <ATen/native/BinaryOps.h>
3
+ #include <ATen/native/DispatchStub.h>
4
+ #include <ATen/native/TensorIterator.h>
5
+ #include <ATen/native/cuda/Loops.cuh>
6
+
7
+
8
+ // NOTE: CUDA on Windows requires that the enclosing function
9
+ // of a __device__ lambda not have internal linkage.
10
+
11
+ namespace at { namespace native {
12
+
13
+ template<typename scalar_t>
14
+ struct CompareGTFunctor {
15
+ __device__ __forceinline__ bool operator() (scalar_t a, scalar_t b) const {
16
+ return a > b;
17
+ }
18
+ };
19
+
20
+ void gt_kernel_cuda(TensorIterator& iter) {
21
+ AT_DISPATCH_ALL_TYPES_AND3(kHalf, kBFloat16, kBool, iter.common_dtype(), "gt_cuda", [&]() {
22
+ gpu_kernel_with_scalars(iter, CompareGTFunctor<scalar_t>());
23
+ });
24
+ }
25
+
26
+ REGISTER_DISPATCH(gt_stub, &gt_kernel_cuda);
27
+
28
+ }} // namespace at::native
cuda_code/CompressKernel_8.cu ADDED
@@ -0,0 +1,2022 @@
1
+ // Copyright (c) 2009-2011 Ignacio Castano <[email protected]>
2
+ // Copyright (c) 2007-2009 NVIDIA Corporation -- Ignacio Castano <[email protected]>
3
+ //
4
+ // Permission is hereby granted, free of charge, to any person
5
+ // obtaining a copy of this software and associated documentation
6
+ // files (the "Software"), to deal in the Software without
7
+ // restriction, including without limitation the rights to use,
8
+ // copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ // copies of the Software, and to permit persons to whom the
10
+ // Software is furnished to do so, subject to the following
11
+ // conditions:
12
+ //
13
+ // The above copyright notice and this permission notice shall be
14
+ // included in all copies or substantial portions of the Software.
15
+ //
16
+ // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
18
+ // OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
+ // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
20
+ // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
21
+ // WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
22
+ // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
23
+ // OTHER DEALINGS IN THE SOFTWARE.
24
+
25
+ #include <math.h>
26
+ #include <float.h> // FLT_MAX
27
+
28
+ #include "CudaMath.h"
29
+
30
+
31
+ #define NUM_THREADS 64 // Number of threads per block.
32
+
33
+ typedef unsigned char uchar;
34
+ typedef unsigned short ushort;
35
+ typedef unsigned int uint;
36
+
37
+ template <class T>
38
+ __device__ inline void swap(T & a, T & b)
39
+ {
40
+ T tmp = a;
41
+ a = b;
42
+ b = tmp;
43
+ }
44
+
45
+ __constant__ uchar OMatch5[256][2];
46
+ __constant__ uchar OMatch6[256][2];
47
+
48
+ __constant__ float3 kColorMetric = { 1.0f, 1.0f, 1.0f };
49
+ __constant__ float3 kColorMetricSqr = { 1.0f, 1.0f, 1.0f };
50
+
51
+ // Some kernels read the input through texture.
52
+ texture<uchar4, 2, cudaReadModeNormalizedFloat> tex;
53
+
54
+
55
+ ////////////////////////////////////////////////////////////////////////////////
56
+ // Color helpers
57
+ ////////////////////////////////////////////////////////////////////////////////
58
+
59
+ __device__ inline uint float_to_u8(float value)
60
+ {
61
+ return min(max(__float2int_rn((255 * value + 0.5f) / (1.0f + 1.0f/255.0f)), 0), 255);
62
+ }
63
+
64
+ __device__ inline uint float_to_u6(float value)
65
+ {
66
+ return min(max(__float2int_rn((63 * value + 0.5f) / (1.0f + 1.0f/63.0f)), 0), 63);
67
+ }
68
+
69
+ __device__ inline uint float_to_u5(float value)
70
+ {
71
+ return min(max(__float2int_rn((31 * value + 0.5f) / (1.0f + 1.0f/31.0f)), 0), 31);
72
+ }
73
+
74
+ __device__ inline float u8_to_float(uint value)
75
+ {
76
+ return __saturatef(__uint2float_rn(value) / 255.0f);
77
+ //return (value) / 255.0f;
78
+ }
79
+
80
+ __device__ float3 color32ToFloat3(uint c)
81
+ {
82
+ float3 color;
83
+ color.z = u8_to_float((c >> 0) & 0xFF);
84
+ color.y = u8_to_float((c >> 8) & 0xFF);
85
+ color.x = u8_to_float((c >> 16) & 0xFF);
86
+ return color;
87
+ }
88
+
89
+ __device__ int3 color16ToInt3(ushort c)
90
+ {
91
+ int3 color;
92
+
93
+ color.z = ((c >> 0) & 0x1F);
94
+ color.z = (color.z << 3) | (color.z >> 2);
95
+
96
+ color.y = ((c >> 5) & 0x3F);
97
+ color.y = (color.y << 2) | (color.y >> 4);
98
+
99
+ color.x = ((c >> 11) & 0x1F);
100
+ color.x = (color.x << 3) | (color.x >> 2);
101
+
102
+ return color;
103
+ }
104
+
105
+ __device__ float3 color16ToFloat3(ushort c)
106
+ {
107
+ int3 color = color16ToInt3(c);
108
+ return make_float3(color.x, color.y, color.z) * (1.0f / 255.0f);
109
+ }
110
+
111
+ __device__ int3 float3ToInt3(float3 c)
112
+ {
113
+ return make_int3(c.x * 255, c.y * 255, c.z * 255);
114
+ }
115
+
116
+ __device__ float3 int3ToFloat3(int3 c)
117
+ {
118
+ return make_float3(float_to_u8(c.x), float_to_u8(c.y), float_to_u8(c.z));
119
+ }
120
+
121
+
122
+ __device__ int colorDistance(int3 c0, int3 c1)
123
+ {
124
+ int dx = c0.x-c1.x;
125
+ int dy = c0.y-c1.y;
126
+ int dz = c0.z-c1.z;
127
+ return __mul24(dx, dx) + __mul24(dy, dy) + __mul24(dz, dz);
128
+ }
129
+
130
+
131
+ ////////////////////////////////////////////////////////////////////////////////
132
+ // Round color to RGB565 and expand
133
+ ////////////////////////////////////////////////////////////////////////////////
134
+
135
+
136
+ #if 0
137
+ __device__ inline uint float_to_u8(float value)
138
+ {
139
+ //uint result;
140
+ //asm("cvt.sat.rni.u8.f32 %0, %1;" : "=r" (result) : "f" (value));
141
+ //return result;
142
+ //return __float2uint_rn(__saturatef(value) * 255.0f);
143
+
144
+ int result = __float2int_rn((255 * value + 0.5f) / (1.0f + 1.0f/255.0f));
145
+ result = max(result, 0);
146
+ result = min(result, 255);
147
+ return result;
148
+ }
149
+
150
+ __device__ inline float u8_to_float(uint value)
151
+ {
152
+ //float result;
153
+ //asm("cvt.sat.rn.f32.u8 %0, %1;" : "=f" (result) : "r" (value)); // this is wrong!
154
+ //return result;
155
+ return __saturatef(__uint2float_rn(value) / 255.0f);
156
+ }
157
+
158
+ inline __device__ float3 roundAndExpand565(float3 v, ushort * w)
159
+ {
160
+ uint x = float_to_u8(v.x) >> 3;
161
+ uint y = float_to_u8(v.y) >> 2;
162
+ uint z = float_to_u8(v.z) >> 3;
163
+ *w = (x << 11) | (y << 5) | z;
164
+ v.x = u8_to_float((x << 3) | (x >> 2));
165
+ v.y = u8_to_float((y << 2) | (y >> 4));
166
+ v.z = u8_to_float((z << 3) | (z >> 2));
167
+ // v.x = u8_to_float(x) * 255.0f / 31.0f;
168
+ // v.y = u8_to_float(y) * 255.0f / 63.0f;
169
+ // v.z = u8_to_float(z) * 255.0f / 31.0f;
170
+ return v;
171
+ }
172
+ #else
173
+
174
+ inline __device__ float3 roundAndExpand565(float3 v, ushort * w)
175
+ {
176
+ uint x = __float2uint_rn(__saturatef(v.x) * 31.0f);
177
+ uint y = __float2uint_rn(__saturatef(v.y) * 63.0f);
178
+ uint z = __float2uint_rn(__saturatef(v.z) * 31.0f);
179
+
180
+ //uint x = float_to_u5(v.x);
181
+ //uint y = float_to_u6(v.y);
182
+ //uint z = float_to_u5(v.z);
183
+
184
+ *w = (x << 11) | (y << 5) | z;
185
+
186
+ v.x = __uint2float_rn(x) * 1.0f / 31.0f;
187
+ v.y = __uint2float_rn(y) * 1.0f / 63.0f;
188
+ v.z = __uint2float_rn(z) * 1.0f / 31.0f;
189
+
190
+ //v.x = u8_to_float((x << 3) | (x >> 2));
191
+ //v.y = u8_to_float((y << 2) | (y >> 4));
192
+ //v.z = u8_to_float((z << 3) | (z >> 2));
193
+
194
+ return v;
195
+ }
196
+ #endif
197
+ inline __device__ float2 roundAndExpand56(float2 v, ushort * w)
198
+ {
199
+ uint x = __float2uint_rn(__saturatef(v.x) * 31.0f);
200
+ uint y = __float2uint_rn(__saturatef(v.y) * 63.0f);
201
+ *w = (x << 11) | (y << 5);
202
+ v.x = __uint2float_rn(x) * 1.0f / 31.0f;
203
+ v.y = __uint2float_rn(y) * 1.0f / 63.0f;
204
+ return v;
205
+ }
206
+
207
+ inline __device__ float2 roundAndExpand88(float2 v, ushort * w)
208
+ {
209
+ uint x = __float2uint_rn(__saturatef(v.x) * 255.0f);
210
+ uint y = __float2uint_rn(__saturatef(v.y) * 255.0f);
211
+ *w = (x << 8) | y;
212
+ v.x = __uint2float_rn(x) * 1.0f / 255.0f;
213
+ v.y = __uint2float_rn(y) * 1.0f / 255.0f;
214
+ return v;
215
+ }
216
+
217
+
218
+ ////////////////////////////////////////////////////////////////////////////////
219
+ // Block errors
220
+ ////////////////////////////////////////////////////////////////////////////////
221
+
222
+ __device__ float3 blockError4(const float3 * colors, uint permutation, float3 a, float3 b)
223
+ {
224
+ float3 error = make_float3(0.0f, 0.0f, 0.0f);
225
+
226
+ for (int i = 0; i < 16; i++)
227
+ {
228
+ const uint bits = permutation >> (2*i);
229
+
230
+ float beta = (bits & 1);
231
+ if (bits & 2) beta = (1 + beta) / 3.0f;
232
+ float alpha = 1.0f - beta;
233
+
234
+ float3 diff = colors[i] - (a*alpha + b*beta);
235
+
236
+ error += diff*diff;
237
+ }
238
+
239
+ return error;
240
+ }
241
+
242
+ __device__ float3 blockError4(const float3 * colors, uint permutation, ushort c0, ushort c1)
243
+ {
244
+ float3 error = make_float3(0.0f, 0.0f, 0.0f);
245
+
246
+ int3 color0 = color16ToInt3(c0);
247
+ int3 color1 = color16ToInt3(c1);
248
+
249
+ for (int i = 0; i < 16; i++)
250
+ {
251
+ const uint bits = permutation >> (2*i);
252
+
253
+ int beta = (bits & 1);
254
+ if (bits & 2) beta = (1 + beta);
255
+ float alpha = 3 - beta;
256
+
257
+ int3 color;
258
+ color.x = (color0.x * alpha + color1.x * beta) / 3;
259
+ color.y = (color0.y * alpha + color1.y * beta) / 3;
260
+ color.z = (color0.z * alpha + color1.z * beta) / 3;
261
+
262
+ float3 diff = colors[i] - int3ToFloat3(color);
263
+
264
+ error += diff*diff;
265
+ }
266
+
267
+ return error;
268
+ }
269
+
270
+
271
+ __device__ float3 blockError3(const float3 * colors, uint permutation, float3 a, float3 b)
272
+ {
273
+ float3 error = make_float3(0.0f, 0.0f, 0.0f);
274
+
275
+ for (int i = 0; i < 16; i++)
276
+ {
277
+ const uint bits = permutation >> (2*i);
278
+
279
+ float beta = (bits & 1);
280
+ if (bits & 2) beta = 0.5f;
281
+ float alpha = 1.0f - beta;
282
+
283
+ float3 diff = colors[i] - (a*alpha + b*beta);
284
+
285
+ error += diff*diff;
286
+ }
287
+
288
+ return error;
289
+ }
290
+
291
+
292
+ ////////////////////////////////////////////////////////////////////////////////
293
+ // Sort colors
294
+ ////////////////////////////////////////////////////////////////////////////////
295
+
296
+ // @@ Experimental code to avoid duplicate colors for faster compression.
297
+ // We could first sort along the best fit line and only compare colors that have the same projection.
298
+ // The hardest part is to maintain the indices to map packed/sorted colors to the input colors.
299
+ // We also need to update several functions that assume the number of colors is fixed to 16.
300
+ // And compute different bit maps for the different color counts.
301
+ // This is a fairly high amount of work.
302
+ __device__ int packColors(float3 * values, float * weights, int * ranks)
303
+ {
304
+ const int tid = threadIdx.x;
305
+
306
+ __shared__ int count;
307
+ count = 0;
308
+
309
+ bool alive = true;
310
+
311
+ // Append this
312
+ for (int i = 0; i < 16; i++)
313
+ {
314
+ // One thread leads on each iteration.
315
+ if (tid == i) {
316
+
317
+ // If thread alive, then append element.
318
+ if (alive) {
319
+ values[count] = values[i];
320
+ weights[count] = weights[i];
321
+ count++;
322
+ }
323
+
324
+ // Otherwise update weight.
325
+ else {
326
+ weights[ranks[i]] += weights[i];
327
+ }
328
+ }
329
+
330
+ // Kill all threads that have the same element and record rank.
331
+ if (values[i] == values[tid]) {
332
+ alive = false;
333
+ ranks[tid] = count - 1;
334
+ }
335
+ }
336
+
337
+ return count;
338
+ }
339
+
340
+
341
+ __device__ void sortColors(const float * values, int * ranks)
342
+ {
343
+ const int tid = threadIdx.x;
344
+
345
+ int rank = 0;
346
+
347
+ #pragma unroll
348
+ for (int i = 0; i < 16; i++)
349
+ {
350
+ rank += (values[i] < values[tid]);
351
+ }
352
+
353
+ ranks[tid] = rank;
354
+
355
+ // Resolve elements with the same index.
356
+ #pragma unroll
357
+ for (int i = 0; i < 15; i++)
358
+ {
359
+ if ((tid > i) & (ranks[tid] == ranks[i])) ++ranks[tid];
360
+ }
361
+ }
362
+
363
+ __device__ void sortColors(const float * values, int * ranks, int count)
364
+ {
365
+ const int tid = threadIdx.x;
366
+
367
+ int rank = 0;
368
+
369
+ #pragma unroll
370
+ for (int i = 0; i < count; i++)
371
+ {
372
+ rank += (values[i] < values[tid]);
373
+ }
374
+
375
+ ranks[tid] = rank;
376
+
377
+ // Resolve elements with the same index.
378
+ #pragma unroll
379
+ for (int i = 0; i < count-1; i++)
380
+ {
381
+ if ((tid > i) & (ranks[tid] == ranks[i])) ++ranks[tid];
382
+ }
383
+ }
384
+
385
+
386
+
387
+ ////////////////////////////////////////////////////////////////////////////////
388
+ // Load color block to shared mem
389
+ ////////////////////////////////////////////////////////////////////////////////
390
+
391
+ __device__ void loadColorBlockTex(uint firstBlock, uint blockWidth, float3 colors[16], float3 sums[16], int xrefs[16], int * sameColor)
392
+ {
393
+ const int bid = blockIdx.x;
394
+ const int idx = threadIdx.x;
395
+
396
+ __shared__ float dps[16];
397
+
398
+ if (idx < 16)
399
+ {
400
+ float x = 4 * ((firstBlock + bid) % blockWidth) + idx % 4; // @@ Avoid mod and div by using 2D grid?
401
+ float y = 4 * ((firstBlock + bid) / blockWidth) + idx / 4;
402
+
403
+ // Read color and copy to shared mem.
404
+ float4 c = tex2D(tex, x, y);
405
+
406
+ colors[idx].x = c.z;
407
+ colors[idx].y = c.y;
408
+ colors[idx].z = c.x;
409
+
410
+ // Sort colors along the best fit line.
411
+ colorSums(colors, sums);
412
+ float3 axis = bestFitLine(colors, sums[0], kColorMetric);
413
+
414
+ *sameColor = (axis == make_float3(0, 0, 0));
415
+
416
+ dps[idx] = dot(colors[idx], axis);
417
+
418
+ sortColors(dps, xrefs);
419
+
420
+ float3 tmp = colors[idx];
421
+ colors[xrefs[idx]] = tmp;
422
+ }
423
+ }
424
+
425
+ /*
426
+ __device__ void loadColorBlockTex(uint firstBlock, uint w, float3 colors[16], float3 sums[16], float weights[16], int xrefs[16], int * sameColor)
427
+ {
428
+ const int bid = blockIdx.x;
429
+ const int idx = threadIdx.x;
430
+
431
+ __shared__ float dps[16];
432
+
433
+ if (idx < 16)
434
+ {
435
+ float x = 4 * ((firstBlock + bid) % w) + idx % 4; // @@ Avoid mod and div by using 2D grid?
436
+ float y = 4 * ((firstBlock + bid) / w) + idx / 4;
437
+
438
+ // Read color and copy to shared mem.
439
+ float4 c = tex2D(tex, x, y);
440
+
441
+ colors[idx].x = c.z;
442
+ colors[idx].y = c.y;
443
+ colors[idx].z = c.x;
444
+ weights[idx] = 1;
445
+
446
+ int count = packColors(colors, weights);
447
+ if (idx < count)
448
+ {
449
+ // Sort colors along the best fit line.
450
+ colorSums(colors, sums);
451
+ float3 axis = bestFitLine(colors, sums[0], kColorMetric);
452
+
453
+ *sameColor = (axis == make_float3(0, 0, 0));
454
+
455
+ dps[idx] = dot(colors[idx], axis);
456
+
457
+ sortColors(dps, xrefs);
458
+
459
+ float3 tmp = colors[idx];
460
+ colors[xrefs[idx]] = tmp;
461
+ }
462
+ }
463
+ }
464
+ */
465
+
466
+ __device__ void loadColorBlockTex(uint firstBlock, uint width, float3 colors[16], float3 sums[16], float weights[16], int xrefs[16], int * sameColor)
467
+ {
468
+ const int bid = blockIdx.x;
469
+ const int idx = threadIdx.x;
470
+
471
+ __shared__ float3 rawColors[16];
472
+ __shared__ float dps[16];
473
+
474
+ if (idx < 16)
475
+ {
476
+ float x = 4 * ((firstBlock + bid) % width) + idx % 4; // @@ Avoid mod and div by using 2D grid?
477
+ float y = 4 * ((firstBlock + bid) / width) + idx / 4;
478
+
479
+ // Read color and copy to shared mem.
480
+ float4 c = tex2D(tex, x, y);
481
+
482
+ rawColors[idx].x = c.z;
483
+ rawColors[idx].y = c.y;
484
+ rawColors[idx].z = c.x;
485
+ weights[idx] = c.w;
486
+
487
+ colors[idx] = rawColors[idx] * weights[idx];
488
+
489
+ // Sort colors along the best fit line.
490
+ colorSums(colors, sums);
491
+ float3 axis = bestFitLine(colors, sums[0], kColorMetric);
492
+
493
+ *sameColor = (axis == make_float3(0, 0, 0));
494
+
495
+ // Single color compressor needs unweighted colors.
496
+ if (*sameColor) colors[idx] = rawColors[idx];
497
+
498
+ dps[idx] = dot(colors[idx], axis);
499
+
500
+ sortColors(dps, xrefs);
501
+
502
+ float3 tmp = colors[idx];
503
+ float w = weights[idx];
504
+ colors[xrefs[idx]] = tmp;
505
+ weights[xrefs[idx]] = w;
506
+ }
507
+ }
508
+
509
+ __device__ void loadColorBlock(const uint * image, float2 colors[16], float2 sums[16], int xrefs[16], int * sameColor)
510
+ {
511
+ const int bid = blockIdx.x;
512
+ const int idx = threadIdx.x;
513
+
514
+ __shared__ float dps[16];
515
+
516
+ if (idx < 16)
517
+ {
518
+ // Read color and copy to shared mem.
519
+ uint c = image[(bid) * 16 + idx];
520
+
521
+ colors[idx].y = ((c >> 8) & 0xFF) * (1.0f / 255.0f);
522
+ colors[idx].x = ((c >> 16) & 0xFF) * (1.0f / 255.0f);
523
+
524
+ // Sort colors along the best fit line.
525
+ colorSums(colors, sums);
526
+ float2 axis = bestFitLine(colors, sums[0]);
527
+
528
+ *sameColor = (axis == make_float2(0, 0));
529
+
530
+ dps[idx] = dot(colors[idx], axis);
531
+
532
+ sortColors(dps, xrefs);
533
+
534
+ float2 tmp = colors[idx];
535
+ colors[xrefs[idx]] = tmp;
536
+ }
537
+ }
538
+
539
+
540
+ ////////////////////////////////////////////////////////////////////////////////
541
+ // Evaluate permutations
542
+ ////////////////////////////////////////////////////////////////////////////////
543
+ __device__ float evalPermutation4(const float3 * colors, uint permutation, ushort * start, ushort * end)
544
+ {
545
+ // Compute endpoints using least squares.
546
+ float alpha2_sum = 0.0f;
547
+ float beta2_sum = 0.0f;
548
+ float alphabeta_sum = 0.0f;
549
+ float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
550
+ float3 betax_sum = make_float3(0.0f, 0.0f, 0.0f);
551
+
552
+ // Compute alpha & beta for this permutation.
553
+ for (int i = 0; i < 16; i++)
554
+ {
555
+ const uint bits = permutation >> (2*i);
556
+
557
+ float beta = (bits & 1);
558
+ if (bits & 2) beta = (1 + beta) / 3.0f;
559
+ float alpha = 1.0f - beta;
560
+
561
+ alpha2_sum += alpha * alpha;
562
+ beta2_sum += beta * beta;
563
+ alphabeta_sum += alpha * beta;
564
+ alphax_sum += alpha * colors[i];
565
+ betax_sum += beta * colors[i];
566
+ }
567
+
568
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
569
+
570
+ float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
571
+ float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
572
+
573
+ // Round a, b to the closest 5-6-5 color and expand...
574
+ a = roundAndExpand565(a, start);
575
+ b = roundAndExpand565(b, end);
576
+
577
+ // compute the error
578
+ float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
579
+
580
+ return dot(e, kColorMetricSqr);
581
+ }
582
+
583
+ __device__ float evalPermutation3(const float3 * colors, uint permutation, ushort * start, ushort * end)
584
+ {
585
+ // Compute endpoints using least squares.
586
+ float alpha2_sum = 0.0f;
587
+ float beta2_sum = 0.0f;
588
+ float alphabeta_sum = 0.0f;
589
+ float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
590
+ float3 betax_sum = make_float3(0.0f, 0.0f, 0.0f);
591
+
592
+ // Compute alpha & beta for this permutation.
593
+ for (int i = 0; i < 16; i++)
594
+ {
595
+ const uint bits = permutation >> (2*i);
596
+
597
+ float beta = (bits & 1);
598
+ if (bits & 2) beta = 0.5f;
599
+ float alpha = 1.0f - beta;
600
+
601
+ alpha2_sum += alpha * alpha;
602
+ beta2_sum += beta * beta;
603
+ alphabeta_sum += alpha * beta;
604
+ alphax_sum += alpha * colors[i];
605
+ betax_sum += beta * colors[i];
606
+ }
607
+
608
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
609
+
610
+ float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
611
+ float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
612
+
613
+ // Round a, b to the closest 5-6-5 color and expand...
614
+ a = roundAndExpand565(a, start);
615
+ b = roundAndExpand565(b, end);
616
+
617
+ // compute the error
618
+ float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
619
+
620
+ return dot(e, kColorMetricSqr);
621
+ }
622
+
623
+ __constant__ const float alphaTable4[4] = { 9.0f, 0.0f, 6.0f, 3.0f };
624
+ __constant__ const float alphaTable3[4] = { 4.0f, 0.0f, 2.0f, 2.0f };
625
+ __constant__ const uint prods4[4] = { 0x090000,0x000900,0x040102,0x010402 };
626
+ __constant__ const uint prods3[4] = { 0x040000,0x000400,0x040101,0x010401 };
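+
+ // How the tables above encode the per-index weights (worked example derived
+ // from the values themselves): alphaTable4 holds alpha scaled by 9 for the
+ // four 2-bit index values, i.e. alpha in {1, 0, 2/3, 1/3}. prods4 packs the
+ // matching (alpha^2, beta^2, alpha*beta), also scaled by 9, into one byte
+ // each: e.g. prods4[2] = 0x040102 stands for alpha^2 = 4/9, beta^2 = 1/9 and
+ // alpha*beta = 2/9. Accumulating the packed bytes in `akku` lets the kernels
+ // below recover alpha2_sum, beta2_sum and alphabeta_sum with a few shifts
+ // and masks instead of three floating-point accumulators; the common scale
+ // factor is cancelled by the final 1/9 (or 1/4 for the 3-colour tables)
+ // applied to the error.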
627
+
628
+ __device__ float evalPermutation4(const float3 * colors, float3 color_sum, uint permutation, ushort * start, ushort * end)
629
+ {
630
+ // Compute endpoints using least squares.
631
+ float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
632
+ uint akku = 0;
633
+
634
+ // Compute alpha & beta for this permutation.
635
+ #pragma unroll
636
+ for (int i = 0; i < 16; i++)
637
+ {
638
+ const uint bits = permutation >> (2*i);
639
+
640
+ alphax_sum += alphaTable4[bits & 3] * colors[i];
641
+ akku += prods4[bits & 3];
642
+ }
643
+
644
+ float alpha2_sum = float(akku >> 16);
645
+ float beta2_sum = float((akku >> 8) & 0xff);
646
+ float alphabeta_sum = float(akku & 0xff);
647
+ float3 betax_sum = 9.0f * color_sum - alphax_sum;
648
+
649
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
650
+
651
+ float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
652
+ float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
653
+
654
+ // Round a, b to the closest 5-6-5 color and expand...
655
+ a = roundAndExpand565(a, start);
656
+ b = roundAndExpand565(b, end);
657
+
658
+ // compute the error
659
+ float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
660
+
661
+ //float3 e = blockError4(colors, permutation, *start, *end);
662
+
663
+ return (1.0f / 9.0f) * dot(e, kColorMetricSqr);
664
+ }
665
+
666
+ __device__ float evalPermutation3(const float3 * colors, float3 color_sum, uint permutation, ushort * start, ushort * end)
667
+ {
668
+ // Compute endpoints using least squares.
669
+ float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
670
+ uint akku = 0;
671
+
672
+ // Compute alpha & beta for this permutation.
673
+ #pragma unroll
674
+ for (int i = 0; i < 16; i++)
675
+ {
676
+ const uint bits = permutation >> (2*i);
677
+
678
+ alphax_sum += alphaTable3[bits & 3] * colors[i];
679
+ akku += prods3[bits & 3];
680
+ }
681
+
682
+ float alpha2_sum = float(akku >> 16);
683
+ float beta2_sum = float((akku >> 8) & 0xff);
684
+ float alphabeta_sum = float(akku & 0xff);
685
+ float3 betax_sum = 4.0f * color_sum - alphax_sum;
686
+
687
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
688
+
689
+ float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
690
+ float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
691
+
692
+ // Round a, b to the closest 5-6-5 color and expand...
693
+ a = roundAndExpand565(a, start);
694
+ b = roundAndExpand565(b, end);
695
+
696
+ // compute the error
697
+ float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
698
+
699
+ //float3 e = blockError3(colors, permutation, a, b);
700
+
701
+ return (1.0f / 4.0f) * dot(e, kColorMetricSqr);
702
+ }
703
+
704
+ __device__ float evalPermutation4(const float3 * colors, const float * weights, float3 color_sum, uint permutation, ushort * start, ushort * end)
705
+ {
706
+ // Compute endpoints using least squares.
707
+ float alpha2_sum = 0.0f;
708
+ float beta2_sum = 0.0f;
709
+ float alphabeta_sum = 0.0f;
710
+ float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
711
+
712
+ // Compute alpha & beta for this permutation.
713
+ for (int i = 0; i < 16; i++)
714
+ {
715
+ const uint bits = permutation >> (2*i);
716
+
717
+ float beta = (bits & 1);
718
+ if (bits & 2) beta = (1 + beta) / 3.0f;
719
+ float alpha = 1.0f - beta;
720
+
721
+ alpha2_sum += alpha * alpha * weights[i];
722
+ beta2_sum += beta * beta * weights[i];
723
+ alphabeta_sum += alpha * beta * weights[i];
724
+ alphax_sum += alpha * colors[i];
725
+ }
726
+
727
+ float3 betax_sum = color_sum - alphax_sum;
728
+
729
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
730
+
731
+ float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
732
+ float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
733
+
734
+ // Round a, b to the closest 5-6-5 color and expand...
735
+ a = roundAndExpand565(a, start);
736
+ b = roundAndExpand565(b, end);
737
+
738
+ // compute the error
739
+ float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
740
+
741
+ return dot(e, kColorMetricSqr);
742
+ }
743
+
744
+ /*
745
+ __device__ float evalPermutation3(const float3 * colors, const float * weights, uint permutation, ushort * start, ushort * end)
746
+ {
747
+ // Compute endpoints using least squares.
748
+ float alpha2_sum = 0.0f;
749
+ float beta2_sum = 0.0f;
750
+ float alphabeta_sum = 0.0f;
751
+ float3 alphax_sum = make_float3(0.0f, 0.0f, 0.0f);
752
+
753
+ // Compute alpha & beta for this permutation.
754
+ for (int i = 0; i < 16; i++)
755
+ {
756
+ const uint bits = permutation >> (2*i);
757
+
758
+ float beta = (bits & 1);
759
+ if (bits & 2) beta = 0.5f;
760
+ float alpha = 1.0f - beta;
761
+
762
+ alpha2_sum += alpha * alpha * weights[i];
763
+ beta2_sum += beta * beta * weights[i];
764
+ alphabeta_sum += alpha * beta * weights[i];
765
+ alphax_sum += alpha * colors[i];
766
+ }
767
+
768
+ float3 betax_sum = color_sum - alphax_sum;
769
+
770
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
771
+
772
+ float3 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
773
+ float3 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
774
+
775
+ // Round a, b to the closest 5-6-5 color and expand...
776
+ a = roundAndExpand565(a, start);
777
+ b = roundAndExpand565(b, end);
778
+
779
+ // compute the error
780
+ float3 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
781
+
782
+ return dot(e, kColorMetricSqr);
783
+ }
784
+ */
785
+
786
+ __device__ float evalPermutation4(const float2 * colors, float2 color_sum, uint permutation, ushort * start, ushort * end)
787
+ {
788
+ // Compute endpoints using least squares.
789
+ float2 alphax_sum = make_float2(0.0f, 0.0f);
790
+ uint akku = 0;
791
+
792
+ // Compute alpha & beta for this permutation.
793
+ #pragma unroll
794
+ for (int i = 0; i < 16; i++)
795
+ {
796
+ const uint bits = permutation >> (2*i);
797
+
798
+ alphax_sum += alphaTable4[bits & 3] * colors[i];
799
+ akku += prods4[bits & 3];
800
+ }
801
+
802
+ float alpha2_sum = float(akku >> 16);
803
+ float beta2_sum = float((akku >> 8) & 0xff);
804
+ float alphabeta_sum = float(akku & 0xff);
805
+ float2 betax_sum = 9.0f * color_sum - alphax_sum;
806
+
807
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
808
+
809
+ float2 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
810
+ float2 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
811
+
812
+ // Round a, b to the closest 5-6 color and expand...
813
+ a = roundAndExpand56(a, start);
814
+ b = roundAndExpand56(b, end);
815
+
816
+ // compute the error
817
+ float2 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
818
+
819
+ return (1.0f / 9.0f) * (e.x + e.y);
820
+ }
821
+
822
+ __device__ float evalPermutation3(const float2 * colors, float2 color_sum, uint permutation, ushort * start, ushort * end)
823
+ {
824
+ // Compute endpoints using least squares.
825
+ float2 alphax_sum = make_float2(0.0f, 0.0f);
826
+ uint akku = 0;
827
+
828
+ // Compute alpha & beta for this permutation.
829
+ #pragma unroll
830
+ for (int i = 0; i < 16; i++)
831
+ {
832
+ const uint bits = permutation >> (2*i);
833
+
834
+ alphax_sum += alphaTable3[bits & 3] * colors[i];
835
+ akku += prods3[bits & 3];
836
+ }
837
+
838
+ float alpha2_sum = float(akku >> 16);
839
+ float beta2_sum = float((akku >> 8) & 0xff);
840
+ float alphabeta_sum = float(akku & 0xff);
841
+ float2 betax_sum = 4.0f * color_sum - alphax_sum;
842
+
843
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
844
+
845
+ float2 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
846
+ float2 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
847
+
848
+ // Round a, b to the closest 5-6 color and expand...
849
+ a = roundAndExpand56(a, start);
850
+ b = roundAndExpand56(b, end);
851
+
852
+ // compute the error
853
+ float2 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
854
+
855
+ return (1.0f / 4.0f) * (e.x + e.y);
856
+ }
857
+
858
+ __device__ float evalPermutationCTX(const float2 * colors, float2 color_sum, uint permutation, ushort * start, ushort * end)
859
+ {
860
+ // Compute endpoints using least squares.
861
+ float2 alphax_sum = make_float2(0.0f, 0.0f);
862
+ uint akku = 0;
863
+
864
+ // Compute alpha & beta for this permutation.
865
+ #pragma unroll
866
+ for (int i = 0; i < 16; i++)
867
+ {
868
+ const uint bits = permutation >> (2*i);
869
+
870
+ alphax_sum += alphaTable4[bits & 3] * colors[i];
871
+ akku += prods4[bits & 3];
872
+ }
873
+
874
+ float alpha2_sum = float(akku >> 16);
875
+ float beta2_sum = float((akku >> 8) & 0xff);
876
+ float alphabeta_sum = float(akku & 0xff);
877
+ float2 betax_sum = 9.0f * color_sum - alphax_sum;
878
+
879
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
880
+
881
+ float2 a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
882
+ float2 b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
883
+
884
+ // Round a, b to the closest 8-8 color and expand...
885
+ a = roundAndExpand88(a, start);
886
+ b = roundAndExpand88(b, end);
887
+
888
+ // compute the error
889
+ float2 e = a * a * alpha2_sum + b * b * beta2_sum + 2.0f * (a * b * alphabeta_sum - a * alphax_sum - b * betax_sum);
890
+
891
+ return (1.0f / 9.0f) * (e.x + e.y);
892
+ }
893
+
894
+
895
+ ////////////////////////////////////////////////////////////////////////////////
896
+ // Evaluate all permutations
897
+ ////////////////////////////////////////////////////////////////////////////////
898
+ __device__ void evalAllPermutations(const float3 * colors, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
899
+ {
900
+ const int idx = threadIdx.x;
901
+
902
+ float bestError = FLT_MAX;
903
+
904
+ __shared__ uint s_permutations[160];
905
+
906
+ for(int i = 0; i < 16; i++)
907
+ {
908
+ int pidx = idx + NUM_THREADS * i;
909
+ if (pidx >= 992) break;
910
+
911
+ ushort start, end;
912
+ uint permutation = permutations[pidx];
913
+ if (pidx < 160) s_permutations[pidx] = permutation;
914
+
915
+ float error = evalPermutation4(colors, colorSum, permutation, &start, &end);
916
+
917
+ if (error < bestError)
918
+ {
919
+ bestError = error;
920
+ bestPermutation = permutation;
921
+ bestStart = start;
922
+ bestEnd = end;
923
+ }
924
+ }
925
+
926
+ if (bestStart < bestEnd)
927
+ {
928
+ swap(bestEnd, bestStart);
929
+ bestPermutation ^= 0x55555555; // Flip indices.
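+ // (XORing every 2-bit field with 01 swaps index 0<->1 and 2<->3, which is
+ // the correct remapping of a 4-colour block when its two endpoints are
+ // exchanged; the 3-colour variant further down masks the XOR so that the
+ // midpoint index 2 is left untouched.)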
930
+ }
931
+
932
+ for(int i = 0; i < 3; i++)
933
+ {
934
+ int pidx = idx + NUM_THREADS * i;
935
+ if (pidx >= 160) break;
936
+
937
+ ushort start, end;
938
+ uint permutation = s_permutations[pidx];
939
+ float error = evalPermutation3(colors, colorSum, permutation, &start, &end);
940
+
941
+ if (error < bestError)
942
+ {
943
+ bestError = error;
944
+ bestPermutation = permutation;
945
+ bestStart = start;
946
+ bestEnd = end;
947
+
948
+ if (bestStart > bestEnd)
949
+ {
950
+ swap(bestEnd, bestStart);
951
+ bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices.
952
+ }
953
+ }
954
+ }
955
+
956
+ errors[idx] = bestError;
957
+ }
958
+
959
+ /*
960
+ __device__ void evalAllPermutations(const float3 * colors, const float * weights, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
961
+ {
962
+ const int idx = threadIdx.x;
963
+
964
+ float bestError = FLT_MAX;
965
+
966
+ __shared__ uint s_permutations[160];
967
+
968
+ for(int i = 0; i < 16; i++)
969
+ {
970
+ int pidx = idx + NUM_THREADS * i;
971
+ if (pidx >= 992) break;
972
+
973
+ ushort start, end;
974
+ uint permutation = permutations[pidx];
975
+ if (pidx < 160) s_permutations[pidx] = permutation;
976
+
977
+ float error = evalPermutation4(colors, weights, permutation, &start, &end);
978
+
979
+ if (error < bestError)
980
+ {
981
+ bestError = error;
982
+ bestPermutation = permutation;
983
+ bestStart = start;
984
+ bestEnd = end;
985
+ }
986
+ }
987
+
988
+ if (bestStart < bestEnd)
989
+ {
990
+ swap(bestEnd, bestStart);
991
+ bestPermutation ^= 0x55555555; // Flip indices.
992
+ }
993
+
994
+ for(int i = 0; i < 3; i++)
995
+ {
996
+ int pidx = idx + NUM_THREADS * i;
997
+ if (pidx >= 160) break;
998
+
999
+ ushort start, end;
1000
+ uint permutation = s_permutations[pidx];
1001
+ float error = evalPermutation3(colors, weights, permutation, &start, &end);
1002
+
1003
+ if (error < bestError)
1004
+ {
1005
+ bestError = error;
1006
+ bestPermutation = permutation;
1007
+ bestStart = start;
1008
+ bestEnd = end;
1009
+
1010
+ if (bestStart > bestEnd)
1011
+ {
1012
+ swap(bestEnd, bestStart);
1013
+ bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices.
1014
+ }
1015
+ }
1016
+ }
1017
+
1018
+ errors[idx] = bestError;
1019
+ }
1020
+ */
1021
+
1022
+ __device__ void evalAllPermutations(const float2 * colors, float2 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
1023
+ {
1024
+ const int idx = threadIdx.x;
1025
+
1026
+ float bestError = FLT_MAX;
1027
+
1028
+ __shared__ uint s_permutations[160];
1029
+
1030
+ for(int i = 0; i < 16; i++)
1031
+ {
1032
+ int pidx = idx + NUM_THREADS * i;
1033
+ if (pidx >= 992) break;
1034
+
1035
+ ushort start, end;
1036
+ uint permutation = permutations[pidx];
1037
+ if (pidx < 160) s_permutations[pidx] = permutation;
1038
+
1039
+ float error = evalPermutation4(colors, colorSum, permutation, &start, &end);
1040
+
1041
+ if (error < bestError)
1042
+ {
1043
+ bestError = error;
1044
+ bestPermutation = permutation;
1045
+ bestStart = start;
1046
+ bestEnd = end;
1047
+ }
1048
+ }
1049
+
1050
+ if (bestStart < bestEnd)
1051
+ {
1052
+ swap(bestEnd, bestStart);
1053
+ bestPermutation ^= 0x55555555; // Flip indices.
1054
+ }
1055
+
1056
+ for(int i = 0; i < 3; i++)
1057
+ {
1058
+ int pidx = idx + NUM_THREADS * i;
1059
+ if (pidx >= 160) break;
1060
+
1061
+ ushort start, end;
1062
+ uint permutation = s_permutations[pidx];
1063
+ float error = evalPermutation3(colors, colorSum, permutation, &start, &end);
1064
+
1065
+ if (error < bestError)
1066
+ {
1067
+ bestError = error;
1068
+ bestPermutation = permutation;
1069
+ bestStart = start;
1070
+ bestEnd = end;
1071
+
1072
+ if (bestStart > bestEnd)
1073
+ {
1074
+ swap(bestEnd, bestStart);
1075
+ bestPermutation ^= (~bestPermutation >> 1) & 0x55555555; // Flip indices.
1076
+ }
1077
+ }
1078
+ }
1079
+
1080
+ errors[idx] = bestError;
1081
+ }
1082
+
1083
+ __device__ void evalLevel4Permutations(const float3 * colors, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
1084
+ {
1085
+ const int idx = threadIdx.x;
1086
+
1087
+ float bestError = FLT_MAX;
1088
+
1089
+ for(int i = 0; i < 16; i++)
1090
+ {
1091
+ int pidx = idx + NUM_THREADS * i;
1092
+ if (pidx >= 992) break;
1093
+
1094
+ ushort start, end;
1095
+ uint permutation = permutations[pidx];
1096
+
1097
+ float error = evalPermutation4(colors, colorSum, permutation, &start, &end);
1098
+
1099
+ if (error < bestError)
1100
+ {
1101
+ bestError = error;
1102
+ bestPermutation = permutation;
1103
+ bestStart = start;
1104
+ bestEnd = end;
1105
+ }
1106
+ }
1107
+
1108
+ if (bestStart < bestEnd)
1109
+ {
1110
+ swap(bestEnd, bestStart);
1111
+ bestPermutation ^= 0x55555555; // Flip indices.
1112
+ }
1113
+
1114
+ errors[idx] = bestError;
1115
+ }
1116
+
1117
+ __device__ void evalLevel4Permutations(const float3 * colors, const float * weights, float3 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
1118
+ {
1119
+ const int idx = threadIdx.x;
1120
+
1121
+ float bestError = FLT_MAX;
1122
+
1123
+ for(int i = 0; i < 16; i++)
1124
+ {
1125
+ int pidx = idx + NUM_THREADS * i;
1126
+ if (pidx >= 992) break;
1127
+
1128
+ ushort start, end;
1129
+ uint permutation = permutations[pidx];
1130
+
1131
+ float error = evalPermutation4(colors, weights, colorSum, permutation, &start, &end);
1132
+
1133
+ if (error < bestError)
1134
+ {
1135
+ bestError = error;
1136
+ bestPermutation = permutation;
1137
+ bestStart = start;
1138
+ bestEnd = end;
1139
+ }
1140
+ }
1141
+
1142
+ if (bestStart < bestEnd)
1143
+ {
1144
+ swap(bestEnd, bestStart);
1145
+ bestPermutation ^= 0x55555555; // Flip indices.
1146
+ }
1147
+
1148
+ errors[idx] = bestError;
1149
+ }
1150
+
1151
+ __device__ void evalAllPermutationsCTX(const float2 * colors, float2 colorSum, const uint * permutations, ushort & bestStart, ushort & bestEnd, uint & bestPermutation, float * errors)
1152
+ {
1153
+ const int idx = threadIdx.x;
1154
+
1155
+ float bestError = FLT_MAX;
1156
+
1157
+ for(int i = 0; i < 16; i++)
1158
+ {
1159
+ int pidx = idx + NUM_THREADS * i;
1160
+ if (pidx >= 704) break;
1161
+
1162
+ ushort start, end;
1163
+ uint permutation = permutations[pidx];
1164
+
1165
+ float error = evalPermutationCTX(colors, colorSum, permutation, &start, &end);
1166
+
1167
+ if (error < bestError)
1168
+ {
1169
+ bestError = error;
1170
+ bestPermutation = permutation;
1171
+ bestStart = start;
1172
+ bestEnd = end;
1173
+ }
1174
+ }
1175
+
1176
+ if (bestStart < bestEnd)
1177
+ {
1178
+ swap(bestEnd, bestStart);
1179
+ bestPermutation ^= 0x55555555; // Flip indices.
1180
+ }
1181
+
1182
+ errors[idx] = bestError;
1183
+ }
1184
+
1185
+
1186
+ ////////////////////////////////////////////////////////////////////////////////
1187
+ // Find index with minimum error
1188
+ ////////////////////////////////////////////////////////////////////////////////
1189
+ __device__ int findMinError(float * errors)
1190
+ {
1191
+ const int idx = threadIdx.x;
1192
+
1193
+ __shared__ int indices[NUM_THREADS];
1194
+ indices[idx] = idx;
1195
+
1196
+ for(int d = NUM_THREADS/2; d > 32; d >>= 1)
1197
+ {
1198
+ __syncthreads();
1199
+
1200
+ if (idx < d)
1201
+ {
1202
+ float err0 = errors[idx];
1203
+ float err1 = errors[idx + d];
1204
+
1205
+ if (err1 < err0) {
1206
+ errors[idx] = err1;
1207
+ indices[idx] = indices[idx + d];
1208
+ }
1209
+ }
1210
+ }
1211
+
1212
+ __syncthreads();
1213
+
1214
+ // unroll last 6 iterations
1215
+ if (idx < 32)
1216
+ {
1217
+ if (errors[idx + 32] < errors[idx]) {
1218
+ errors[idx] = errors[idx + 32];
1219
+ indices[idx] = indices[idx + 32];
1220
+ }
1221
+ if (errors[idx + 16] < errors[idx]) {
1222
+ errors[idx] = errors[idx + 16];
1223
+ indices[idx] = indices[idx + 16];
1224
+ }
1225
+ if (errors[idx + 8] < errors[idx]) {
1226
+ errors[idx] = errors[idx + 8];
1227
+ indices[idx] = indices[idx + 8];
1228
+ }
1229
+ if (errors[idx + 4] < errors[idx]) {
1230
+ errors[idx] = errors[idx + 4];
1231
+ indices[idx] = indices[idx + 4];
1232
+ }
1233
+ if (errors[idx + 2] < errors[idx]) {
1234
+ errors[idx] = errors[idx + 2];
1235
+ indices[idx] = indices[idx + 2];
1236
+ }
1237
+ if (errors[idx + 1] < errors[idx]) {
1238
+ errors[idx] = errors[idx + 1];
1239
+ indices[idx] = indices[idx + 1];
1240
+ }
1241
+ }
1242
+
1243
+ __syncthreads();
1244
+
1245
+ return indices[0];
1246
+ }
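+
+ // Portability note: the unrolled tail above relies on the legacy implicit
+ // warp-synchronous behaviour of older GPUs; the last six comparisons run
+ // without a barrier and `errors`/`indices` are not declared volatile. On
+ // architectures with independent thread scheduling, a sketch of an
+ // equivalent tail with explicit warp syncs would be:
+ //
+ //   for (int d = 32; d >= 1; d >>= 1) {
+ //       if (idx < d && errors[idx + d] < errors[idx]) {
+ //           errors[idx] = errors[idx + d];
+ //           indices[idx] = indices[idx + d];
+ //       }
+ //       __syncwarp();
+ //   }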
1247
+
1248
+
1249
+ ////////////////////////////////////////////////////////////////////////////////
1250
+ // Save DXT block
1251
+ ////////////////////////////////////////////////////////////////////////////////
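+
+ // Layout reminder for the 64-bit DXT1/BC1 block written here: result[bid].x
+ // packs the two 16-bit 5:6:5 endpoints (`start` in the low half = color0,
+ // `end` in the high half = color1) and result[bid].y holds sixteen 2-bit
+ // palette indices in texel order. The hardware decodes a block in the
+ // 3-colour + transparent mode whenever color0 <= color1, so when the two
+ // endpoints are equal the indices are forced to zero below, presumably to
+ // keep every texel away from the transparent index.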
1252
+ __device__ void saveBlockDXT1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 * result)
1253
+ {
1254
+ const int bid = blockIdx.x;
1255
+
1256
+ if (start == end)
1257
+ {
1258
+ permutation = 0;
1259
+ }
1260
+
1261
+ // Reorder permutation.
1262
+ uint indices = 0;
1263
+ for(int i = 0; i < 16; i++)
1264
+ {
1265
+ int ref = xrefs[i];
1266
+ indices |= ((permutation >> (2 * ref)) & 3) << (2 * i);
1267
+ }
1268
+
1269
+ // Write endpoints.
1270
+ result[bid].x = (end << 16) | start;
1271
+
1272
+ // Write palette indices.
1273
+ result[bid].y = indices;
1274
+ }
1275
+
1276
+ __device__ void saveBlockDXT1_Parallel(uint endpoints, float3 colors[16], int xrefs[16], uint * result)
1277
+ {
1278
+ const int tid = threadIdx.x;
1279
+ const int bid = blockIdx.x;
1280
+
1281
+ if (tid < 16)
1282
+ {
1283
+ int3 color = float3ToInt3(colors[xrefs[tid]]);
1284
+
1285
+ ushort endpoint0 = endpoints & 0xFFFF;
1286
+ ushort endpoint1 = endpoints >> 16;
1287
+
1288
+ int3 palette[4];
1289
+ palette[0] = color16ToInt3(endpoint0);
1290
+ palette[1] = color16ToInt3(endpoint1);
1291
+
1292
+ int d0 = colorDistance(palette[0], color);
1293
+ int d1 = colorDistance(palette[1], color);
1294
+
1295
+ uint index;
1296
+ if (endpoint0 > endpoint1)
1297
+ {
1298
+ palette[2].x = (2 * palette[0].x + palette[1].x) / 3;
1299
+ palette[2].y = (2 * palette[0].y + palette[1].y) / 3;
1300
+ palette[2].z = (2 * palette[0].z + palette[1].z) / 3;
1301
+
1302
+ palette[3].x = (2 * palette[1].x + palette[0].x) / 3;
1303
+ palette[3].y = (2 * palette[1].y + palette[0].y) / 3;
1304
+ palette[3].z = (2 * palette[1].z + palette[0].z) / 3;
1305
+
1306
+ int d2 = colorDistance(palette[2], color);
1307
+ int d3 = colorDistance(palette[3], color);
1308
+
1309
+ // Compute the index that best fits the color.
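+ // (The comparison bits below implement a branchless arg-min over the four
+ // palette distances d0..d3, with ties broken consistently; the result
+ // follows the DXT1 index convention 0 = end0, 1 = end1, 2/3 = interpolants.)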
1310
+ uint b0 = d0 > d3;
1311
+ uint b1 = d1 > d2;
1312
+ uint b2 = d0 > d2;
1313
+ uint b3 = d1 > d3;
1314
+ uint b4 = d2 > d3;
1315
+
1316
+ uint x0 = b1 & b2;
1317
+ uint x1 = b0 & b3;
1318
+ uint x2 = b0 & b4;
1319
+
1320
+ index = (x2 | ((x0 | x1) << 1));
1321
+ }
1322
+ else {
1323
+ palette[2].x = (palette[0].x + palette[1].x) / 2;
1324
+ palette[2].y = (palette[0].y + palette[1].y) / 2;
1325
+ palette[2].z = (palette[0].z + palette[1].z) / 2;
1326
+
1327
+ int d2 = colorDistance(palette[2], color);
1328
+
1329
+ index = 0;
1330
+ if (d1 < d0 && d1 < d2) index = 1;
1331
+ else if (d2 < d0) index = 2;
1332
+ }
1333
+
1334
+ __shared__ uint indices[16];
1335
+
1336
+ indices[tid] = index << (2 * tid);
1337
+ if (tid < 8) indices[tid] |= indices[tid+8];
1338
+ if (tid < 4) indices[tid] |= indices[tid+4];
1339
+ if (tid < 2) indices[tid] |= indices[tid+2];
1340
+ if (tid < 1) indices[tid] |= indices[tid+1];
1341
+
1342
+ if (tid < 2) {
1343
+ result[2 * bid + tid] = tid == 0 ? endpoints : indices[0];
1344
+ }
1345
+ }
1346
+ }
1347
+
1348
+ __device__ void saveBlockDXT1_Parallel(uint endpoints, uint permutation, int xrefs[16], uint * result)
1349
+ {
1350
+ const int tid = threadIdx.x;
1351
+ const int bid = blockIdx.x;
1352
+
1353
+ if (tid < 16)
1354
+ {
1355
+ // Reorder permutation.
1356
+ uint index = ((permutation >> (2 * xrefs[tid])) & 3) << (2 * tid);
1357
+ __shared__ uint indices[16];
1358
+
1359
+ indices[tid] = index;
1360
+ if (tid < 8) indices[tid] |= indices[tid+8];
1361
+ if (tid < 4) indices[tid] |= indices[tid+4];
1362
+ if (tid < 2) indices[tid] |= indices[tid+2];
1363
+ if (tid < 1) indices[tid] |= indices[tid+1];
1364
+
1365
+ if (tid < 2) {
1366
+ result[2 * bid + tid] = tid == 0 ? endpoints : indices[0];
1367
+ }
1368
+ }
1369
+ }
1370
+
1371
+
1372
+ __device__ void saveBlockCTX1(ushort start, ushort end, uint permutation, int xrefs[16], uint2 * result)
1373
+ {
1374
+ saveBlockDXT1(start, end, permutation, xrefs, result);
1375
+ }
1376
+
1377
+ __device__ void saveSingleColorBlockDXT1(float3 color, uint2 * result)
1378
+ {
1379
+ const int bid = blockIdx.x;
1380
+
1381
+ int r = color.x * 255;
1382
+ int g = color.y * 255;
1383
+ int b = color.z * 255;
1384
+
1385
+ ushort color0 = (OMatch5[r][0] << 11) | (OMatch6[g][0] << 5) | OMatch5[b][0];
1386
+ ushort color1 = (OMatch5[r][1] << 11) | (OMatch6[g][1] << 5) | OMatch5[b][1];
1387
+
1388
+ if (color0 < color1)
1389
+ {
1390
+ result[bid].x = (color0 << 16) | color1;
1391
+ result[bid].y = 0xffffffff;
1392
+ }
1393
+ else
1394
+ {
1395
+ result[bid].x = (color1 << 16) | color0;
1396
+ result[bid].y = 0xaaaaaaaa;
1397
+ }
1398
+ }
1399
+
1400
+ __device__ void saveSingleColorBlockDXT1(float2 color, uint2 * result)
1401
+ {
1402
+ const int bid = blockIdx.x;
1403
+
1404
+ int r = color.x * 255;
1405
+ int g = color.y * 255;
1406
+
1407
+ ushort color0 = (OMatch5[r][0] << 11) | (OMatch6[g][0] << 5);
1408
+ ushort color1 = (OMatch5[r][1] << 11) | (OMatch6[g][1] << 5);
1409
+
1410
+ if (color0 < color1)
1411
+ {
1412
+ result[bid].x = (color0 << 16) | color1;
1413
+ result[bid].y = 0xffffffff;
1414
+ }
1415
+ else
1416
+ {
1417
+ result[bid].x = (color1 << 16) | color0;
1418
+ result[bid].y = 0xaaaaaaaa;
1419
+ }
1420
+ }
1421
+
1422
+ __device__ void saveSingleColorBlockCTX1(float2 color, uint2 * result)
1423
+ {
1424
+ const int bid = blockIdx.x;
1425
+
1426
+ int r = color.x * 255;
1427
+ int g = color.y * 255;
1428
+
1429
+ ushort color0 = (r << 8) | (g);
1430
+
1431
+ result[bid].x = (color0 << 16) | color0;
1432
+ result[bid].y = 0x00000000;
1433
+ }
1434
+
1435
+
1436
+ ////////////////////////////////////////////////////////////////////////////////
1437
+ // Compress color block
1438
+ ////////////////////////////////////////////////////////////////////////////////
1439
+
1440
+ __global__ void compressDXT1(uint firstBlock, uint blockWidth, const uint * permutations, uint2 * result)
1441
+ {
1442
+ __shared__ float3 colors[16];
1443
+ __shared__ float3 sums[16];
1444
+ __shared__ int xrefs[16];
1445
+ __shared__ int sameColor;
1446
+
1447
+ loadColorBlockTex(firstBlock, blockWidth, colors, sums, xrefs, &sameColor);
1448
+
1449
+ __syncthreads();
1450
+
1451
+ if (sameColor)
1452
+ {
1453
+ if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result);
1454
+ return;
1455
+ }
1456
+
1457
+ ushort bestStart, bestEnd;
1458
+ uint bestPermutation;
1459
+
1460
+ __shared__ float errors[NUM_THREADS];
1461
+ evalAllPermutations(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors);
1462
+
1463
+ // Use a parallel reduction to find minimum error.
1464
+ const int minIdx = findMinError(errors);
1465
+
1466
+ __shared__ uint s_bestEndPoints;
1467
+ __shared__ uint s_bestPermutation;
1468
+
1469
+ // Only write the result of the winner thread.
1470
+ if (threadIdx.x == minIdx)
1471
+ {
1472
+ s_bestEndPoints = (bestEnd << 16) | bestStart;
1473
+ s_bestPermutation = (bestStart != bestEnd) ? bestPermutation : 0;
1474
+ }
1475
+
1476
+ __syncthreads();
1477
+
1478
+ saveBlockDXT1_Parallel(s_bestEndPoints, colors, xrefs, (uint *)result);
1479
+ //saveBlockDXT1_Parallel(s_bestEndPoints, s_bestPermutation, xrefs, (uint *)result);
1480
+ }
1481
+
1482
+
1483
+ __global__ void compressLevel4DXT1(uint firstBlock, uint blockWidth, const uint * permutations, uint2 * result)
1484
+ {
1485
+ __shared__ float3 colors[16];
1486
+ __shared__ float3 sums[16];
1487
+ __shared__ int xrefs[16];
1488
+ __shared__ int sameColor;
1489
+
1490
+ loadColorBlockTex(firstBlock, blockWidth, colors, sums, xrefs, &sameColor);
1491
+
1492
+ __syncthreads();
1493
+
1494
+ if (sameColor)
1495
+ {
1496
+ if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result);
1497
+ return;
1498
+ }
1499
+
1500
+ ushort bestStart, bestEnd;
1501
+ uint bestPermutation;
1502
+
1503
+ __shared__ float errors[NUM_THREADS];
1504
+
1505
+ evalLevel4Permutations(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors);
1506
+
1507
+ // Use a parallel reduction to find minimum error.
1508
+ const int minIdx = findMinError(errors);
1509
+
1510
+ // Only write the result of the winner thread.
1511
+ if (threadIdx.x == minIdx)
1512
+ {
1513
+ saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result);
1514
+ }
1515
+ }
1516
+
1517
+ __global__ void compressWeightedDXT1(uint firstBlock, uint blockWidth, const uint * permutations, uint2 * result)
1518
+ {
1519
+ __shared__ float3 colors[16];
1520
+ __shared__ float3 sums[16];
1521
+ __shared__ float weights[16];
1522
+ __shared__ int xrefs[16];
1523
+ __shared__ int sameColor;
1524
+
1525
+ loadColorBlockTex(firstBlock, blockWidth, colors, sums, weights, xrefs, &sameColor);
1526
+
1527
+ __syncthreads();
1528
+
1529
+ if (sameColor)
1530
+ {
1531
+ if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result);
1532
+ return;
1533
+ }
1534
+
1535
+ ushort bestStart, bestEnd;
1536
+ uint bestPermutation;
1537
+
1538
+ __shared__ float errors[NUM_THREADS];
1539
+
1540
+ evalLevel4Permutations(colors, weights, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors);
1541
+
1542
+ // Use a parallel reduction to find minimum error.
1543
+ int minIdx = findMinError(errors);
1544
+
1545
+ // Only write the result of the winner thread.
1546
+ if (threadIdx.x == minIdx)
1547
+ {
1548
+ saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result);
1549
+ }
1550
+ }
1551
+
1552
+
1553
+ __global__ void compressNormalDXT1(const uint * permutations, const uint * image, uint2 * result)
1554
+ {
1555
+ __shared__ float2 colors[16];
1556
+ __shared__ float2 sums[16];
1557
+ __shared__ int xrefs[16];
1558
+ __shared__ int sameColor;
1559
+
1560
+ loadColorBlock(image, colors, sums, xrefs, &sameColor);
1561
+
1562
+ __syncthreads();
1563
+
1564
+ if (sameColor)
1565
+ {
1566
+ if (threadIdx.x == 0) saveSingleColorBlockDXT1(colors[0], result);
1567
+ return;
1568
+ }
1569
+
1570
+ ushort bestStart, bestEnd;
1571
+ uint bestPermutation;
1572
+
1573
+ __shared__ float errors[NUM_THREADS];
1574
+
1575
+ evalAllPermutations(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors);
1576
+
1577
+ // Use a parallel reduction to find minimum error.
1578
+ const int minIdx = findMinError(errors);
1579
+
1580
+ // Only write the result of the winner thread.
1581
+ if (threadIdx.x == minIdx)
1582
+ {
1583
+ saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, result);
1584
+ }
1585
+ }
1586
+
1587
+ __global__ void compressCTX1(const uint * permutations, const uint * image, uint2 * result)
1588
+ {
1589
+ __shared__ float2 colors[16];
1590
+ __shared__ float2 sums[16];
1591
+ __shared__ int xrefs[16];
1592
+ __shared__ int sameColor;
1593
+
1594
+ loadColorBlock(image, colors, sums, xrefs, &sameColor);
1595
+
1596
+ __syncthreads();
1597
+
1598
+ if (sameColor)
1599
+ {
1600
+ if (threadIdx.x == 0) saveSingleColorBlockCTX1(colors[0], result);
1601
+ return;
1602
+ }
1603
+
1604
+ ushort bestStart, bestEnd;
1605
+ uint bestPermutation;
1606
+
1607
+ __shared__ float errors[NUM_THREADS];
1608
+
1609
+ evalAllPermutationsCTX(colors, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors);
1610
+
1611
+ // Use a parallel reduction to find minimum error.
1612
+ const int minIdx = findMinError(errors);
1613
+
1614
+ // Only write the result of the winner thread.
1615
+ if (threadIdx.x == minIdx)
1616
+ {
1617
+ saveBlockCTX1(bestStart, bestEnd, bestPermutation, xrefs, result);
1618
+ }
1619
+ }
1620
+
1621
+
1622
+ /*
1623
+ __device__ float computeError(const float weights[16], uchar a0, uchar a1)
1624
+ {
1625
+ float palette[6];
1626
+ palette[0] = (6.0f/7.0f * a0 + 1.0f/7.0f * a1);
1627
+ palette[1] = (5.0f/7.0f * a0 + 2.0f/7.0f * a1);
1628
+ palette[2] = (4.0f/7.0f * a0 + 3.0f/7.0f * a1);
1629
+ palette[3] = (3.0f/7.0f * a0 + 4.0f/7.0f * a1);
1630
+ palette[4] = (2.0f/7.0f * a0 + 5.0f/7.0f * a1);
1631
+ palette[5] = (1.0f/7.0f * a0 + 6.0f/7.0f * a1);
1632
+
1633
+ float total = 0.0f;
1634
+
1635
+ for (uint i = 0; i < 16; i++)
1636
+ {
1637
+ float alpha = weights[i];
1638
+
1639
+ float error = a0 - alpha;
1640
+ error = min(error, palette[0] - alpha);
1641
+ error = min(error, palette[1] - alpha);
1642
+ error = min(error, palette[2] - alpha);
1643
+ error = min(error, palette[3] - alpha);
1644
+ error = min(error, palette[4] - alpha);
1645
+ error = min(error, palette[5] - alpha);
1646
+ error = min(error, a1 - alpha);
1647
+
1648
+ total += error;
1649
+ }
1650
+
1651
+ return total;
1652
+ }
1653
+
1654
+ inline __device__ uchar roundAndExpand(float a)
1655
+ {
1656
+ return rintf(__saturatef(a) * 255.0f);
1657
+ }
1658
+ */
1659
+ /*
1660
+ __device__ void optimizeAlpha8(const float alphas[16], uchar & a0, uchar & a1)
1661
+ {
1662
+ float alpha2_sum = 0;
1663
+ float beta2_sum = 0;
1664
+ float alphabeta_sum = 0;
1665
+ float alphax_sum = 0;
1666
+ float betax_sum = 0;
1667
+
1668
+ for (int i = 0; i < 16; i++)
1669
+ {
1670
+ uint idx = index[i];
1671
+ float alpha;
1672
+ if (idx < 2) alpha = 1.0f - idx;
1673
+ else alpha = (8.0f - idx) / 7.0f;
1674
+
1675
+ float beta = 1 - alpha;
1676
+
1677
+ alpha2_sum += alpha * alpha;
1678
+ beta2_sum += beta * beta;
1679
+ alphabeta_sum += alpha * beta;
1680
+ alphax_sum += alpha * alphas[i];
1681
+ betax_sum += beta * alphas[i];
1682
+ }
1683
+
1684
+ const float factor = 1.0f / (alpha2_sum * beta2_sum - alphabeta_sum * alphabeta_sum);
1685
+
1686
+ float a = (alphax_sum * beta2_sum - betax_sum * alphabeta_sum) * factor;
1687
+ float b = (betax_sum * alpha2_sum - alphax_sum * alphabeta_sum) * factor;
1688
+
1689
+ a0 = roundAndExpand8(a);
1690
+ a1 = roundAndExpand8(b);
1691
+ }
1692
+ */
1693
+ /*
1694
+ __device__ void compressAlpha(const float alphas[16], uint4 * result)
1695
+ {
1696
+ const int tid = threadIdx.x;
1697
+
1698
+ // Compress alpha block!
1699
+ // Brute force approach:
1700
+ // Try all color pairs: 256*256/2 = 32768, 32768/64 = 512 iterations?
1701
+
1702
+ // Determine min & max alphas
1703
+
1704
+ float A0, A1;
1705
+
1706
+ if (tid < 16)
1707
+ {
1708
+ __shared__ uint s_alphas[16];
1709
+
1710
+ s_alphas[tid] = alphas[tid];
1711
+ s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^8]);
1712
+ s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^4]);
1713
+ s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^2]);
1714
+ s_alphas[tid] = min(s_alphas[tid], s_alphas[tid^1]);
1715
+ A0 = s_alphas[tid];
1716
+
1717
+ s_alphas[tid] = alphas[tid];
1718
+ s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^8]);
1719
+ s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^4]);
1720
+ s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^2]);
1721
+ s_alphas[tid] = max(s_alphas[tid], s_alphas[tid^1]);
1722
+ A1 = s_alphas[tid];
1723
+ }
1724
+
1725
+ __syncthreads();
1726
+
1727
+ int minIdx = 0;
1728
+
1729
+ if (A1 - A0 > 8)
1730
+ {
1731
+ float bestError = FLT_MAX;
1732
+
1733
+ // 64 threads -> 8x8
1734
+ // divide [A1-A0] in partitions.
1735
+ // test endpoints
1736
+
1737
+ for (int i = 0; i < 128; i++)
1738
+ {
1739
+ uint idx = (i * NUM_THREADS + tid) * 4;
1740
+ uchar a0 = idx & 255;
1741
+ uchar a1 = idx >> 8;
1742
+
1743
+ float error = computeError(alphas, a0, a1);
1744
+
1745
+ if (error < bestError)
1746
+ {
1747
+ bestError = error;
1748
+ A0 = a0;
1749
+ A1 = a1;
1750
+ }
1751
+ }
1752
+
1753
+ __shared__ float errors[NUM_THREADS];
1754
+ errors[tid] = bestError;
1755
+
1756
+ // Minimize error.
1757
+ minIdx = findMinError(errors);
1758
+
1759
+ }
1760
+
1761
+ if (minIdx == tid)
1762
+ {
1763
+ // @@ Compute indices.
1764
+
1765
+ // @@ Write alpha block.
1766
+ }
1767
+ }
1768
+
1769
+ __global__ void compressDXT5(const uint * permutations, const uint * image, uint4 * result)
1770
+ {
1771
+ __shared__ float3 colors[16];
1772
+ __shared__ float3 sums[16];
1773
+ __shared__ float weights[16];
1774
+ __shared__ int xrefs[16];
1775
+
1776
+ loadColorBlock(image, colors, sums, weights, xrefs);
1777
+
1778
+ __syncthreads();
1779
+
1780
+ compressAlpha(weights, result);
1781
+
1782
+ ushort bestStart, bestEnd;
1783
+ uint bestPermutation;
1784
+
1785
+ __shared__ float errors[NUM_THREADS];
1786
+
1787
+ evalLevel4Permutations(colors, weights, sums[0], permutations, bestStart, bestEnd, bestPermutation, errors);
1788
+
1789
+ // Use a parallel reduction to find minimum error.
1790
+ int minIdx = findMinError(errors);
1791
+
1792
+ // Only write the result of the winner thread.
1793
+ if (threadIdx.x == minIdx)
1794
+ {
1795
+ saveBlockDXT1(bestStart, bestEnd, bestPermutation, xrefs, (uint2 *)result);
1796
+ }
1797
+ }
1798
+ */
1799
+
1800
+ /*__device__ void evaluatePalette(uint alpha0, uint alpha1, uint alphas[8])
1801
+ {
1802
+ alpha[0] = alpha0;
1803
+ alpha[1] = alpha1;
1804
+ alpha[2] = (6 * alpha[0] + 1 * alpha[1]) / 7; // bit code 010
1805
+ alpha[3] = (5 * alpha[0] + 2 * alpha[1]) / 7; // bit code 011
1806
+ alpha[4] = (4 * alpha[0] + 3 * alpha[1]) / 7; // bit code 100
1807
+ alpha[5] = (3 * alpha[0] + 4 * alpha[1]) / 7; // bit code 101
1808
+ alpha[6] = (2 * alpha[0] + 5 * alpha[1]) / 7; // bit code 110
1809
+ alpha[7] = (1 * alpha[0] + 6 * alpha[1]) / 7; // bit code 111
1810
+ }
1811
+
1812
+ __device__ uint computeAlphaError(const uint block[16], uint alpha0, uint alpha1, int bestError = INT_MAX)
1813
+ {
1814
+ uint8 alphas[8];
1815
+ evaluatePalette(alpha0, alpha1, alphas);
1816
+
1817
+ int totalError = 0;
1818
+
1819
+ for (uint i = 0; i < 16; i++)
1820
+ {
1821
+ uint8 alpha = block[i];
1822
+
1823
+ // @@ It should be possible to do this much faster.
1824
+
1825
+ int minDist = INT_MAX;
1826
+ for (uint p = 0; p < 8; p++)
1827
+ {
1828
+ int dist = alphaDistance(alpha, alphas[p]);
1829
+ minDist = min(dist, minDist);
1830
+ }
1831
+
1832
+
1833
+
1834
+ totalError += minDist;
1835
+
1836
+ if (totalError > bestError)
1837
+ {
1838
+ // early out
1839
+ return totalError;
1840
+ }
1841
+ }
1842
+
1843
+ return totalError;
1844
+ }
1845
+
1846
+
1847
+ void compressDXT5A(uint alpha[16])
1848
+ {
1849
+ // Get min/max alpha.
1850
+ for (uint i = 0; i < 16; i++)
1851
+ {
1852
+ mina = min(mina, alpha[i]);
1853
+ maxa = max(maxa, alpha[i]);
1854
+ }
1855
+
1856
+ dxtBlock->alpha0 = maxa;
1857
+ dxtBlock->alpha1 = mina;
1858
+
1859
+ if (maxa - mina > 8)
1860
+ {
1861
+ int besterror = computeAlphaError(rgba, dxtBlock);
1862
+ int besta0 = maxa;
1863
+ int besta1 = mina;
1864
+
1865
+ // Expand search space a bit.
1866
+ const int alphaExpand = 8;
1867
+ mina = (mina <= alphaExpand) ? 0 : mina - alphaExpand;
1868
+ maxa = (maxa <= 255-alphaExpand) ? 255 : maxa + alphaExpand;
1869
+
1870
+ for (int a0 = mina+9; a0 < maxa; a0++)
1871
+ {
1872
+ for (int a1 = mina; a1 < a0-8; a1++)
1873
+ {
1874
+ nvDebugCheck(a0 - a1 > 8);
1875
+
1876
+ dxtBlock->alpha0 = a0;
1877
+ dxtBlock->alpha1 = a1;
1878
+ int error = computeAlphaError(rgba, dxtBlock, besterror);
1879
+
1880
+ if (error < besterror)
1881
+ {
1882
+ besterror = error;
1883
+ besta0 = a0;
1884
+ besta1 = a1;
1885
+ }
1886
+ }
1887
+ }
1888
+
1889
+ dxtBlock->alpha0 = besta0;
1890
+ dxtBlock->alpha1 = besta1;
1891
+ }
1892
+ }
1893
+
1894
+ __global__ void compressDXT5n(uint blockNum, uint2 * d_result)
1895
+ {
1896
+ uint idx = blockIdx.x * 128 + threadIdx.x;
1897
+
1898
+ if (idx >= blockNum)
1899
+ {
1900
+ return;
1901
+ }
1902
+
1903
+ // @@ Ideally we would load the data to shared mem to achieve coalesced global mem access.
1904
+ // @@ Blocks would require too much shared memory (8k) and limit occupancy.
1905
+
1906
+ // @@ Ideally we should use SIMD processing, multiple threads (4-8) processing the same block.
1907
+ // That simplifies coalescing, and reduces divergence.
1908
+
1909
+ // @@ Experiment with texture. That's probably the most simple approach.
1910
+
1911
+ uint x[16];
1912
+ uint y[16];
1913
+
1914
+
1915
+ }
1916
+ */
1917
+
1918
+
1919
+ ////////////////////////////////////////////////////////////////////////////////
1920
+ // Setup kernel
1921
+ ////////////////////////////////////////////////////////////////////////////////
1922
+
1923
+ extern "C" void setupOMatchTables(const void * OMatch5Src, size_t OMatch5Size, const void * OMatch6Src, size_t OMatch6Size)
1924
+ {
1925
+ // Init single color lookup constant tables.
1926
+ cudaMemcpyToSymbol(OMatch5, OMatch5Src, OMatch5Size, 0, cudaMemcpyHostToDevice);
1927
+ cudaMemcpyToSymbol(OMatch6, OMatch6Src, OMatch6Size, 0, cudaMemcpyHostToDevice);
1928
+ }
1929
+
1930
+ extern "C" void setupCompressKernel(const float weights[3])
1931
+ {
1932
+ // Set constants.
1933
+ cudaMemcpyToSymbol(kColorMetric, weights, sizeof(float) * 3, 0);
1934
+
1935
+ float weightsSqr[3];
1936
+ weightsSqr[0] = weights[0] * weights[0];
1937
+ weightsSqr[1] = weights[1] * weights[1];
1938
+ weightsSqr[2] = weights[2] * weights[2];
1939
+
1940
+ cudaMemcpyToSymbol(kColorMetricSqr, weightsSqr, sizeof(float) * 3, 0);
1941
+ }
1942
+
1943
+ extern "C" void bindTextureToArray(cudaArray * d_data)
1944
+ {
1945
+ // Setup texture
1946
+ tex.normalized = false;
1947
+ tex.filterMode = cudaFilterModePoint;
1948
+ tex.addressMode[0] = cudaAddressModeClamp;
1949
+ tex.addressMode[1] = cudaAddressModeClamp;
1950
+
1951
+ cudaBindTextureToArray(tex, d_data);
1952
+ }
1953
+
1954
+
1955
+
1956
+ ////////////////////////////////////////////////////////////////////////////////
1957
+ // Launch kernel
1958
+ ////////////////////////////////////////////////////////////////////////////////
1959
+
1960
+ // DXT1 compressors:
1961
+ extern "C" void compressKernelDXT1(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps)
1962
+ {
1963
+ compressDXT1<<<blockNum, NUM_THREADS>>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result);
1964
+ }
1965
+
1966
+ extern "C" void compressKernelDXT1_Level4(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps)
1967
+ {
1968
+ compressLevel4DXT1<<<blockNum, NUM_THREADS>>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result);
1969
+ }
1970
+
1971
+ extern "C" void compressWeightedKernelDXT1(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps)
1972
+ {
1973
+ compressWeightedDXT1<<<blockNum, NUM_THREADS>>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result);
1974
+ }
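+
+ // Illustrative call sequence from the host (a sketch only; the real driver
+ // code lives outside this file and owns the permutation table, the OMatch
+ // tables and the input cudaArray, so all variable names here are
+ // hypothetical):
+ //
+ //   setupCompressKernel(colorWeights); // e.g. {1.0f, 1.0f, 1.0f}
+ //   setupOMatchTables(omatch5, sizeof(omatch5), omatch6, sizeof(omatch6));
+ //   bindTextureToArray(d_image); // source image uploaded to a cudaArray
+ //   compressKernelDXT1(firstBlock, blockNum, blockWidth, d_result, d_bitmaps);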
1975
+
1976
+ // @@ DXT1a compressors.
1977
+
1978
+
1979
+ // @@ DXT3 compressors:
1980
+ extern "C" void compressKernelDXT3(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps)
1981
+ {
1982
+ //compressDXT3<<<blockNum, NUM_THREADS>>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result);
1983
+ }
1984
+
1985
+ extern "C" void compressWeightedKernelDXT3(uint firstBlock, uint blockNum, uint blockWidth, uint * d_result, uint * d_bitmaps)
1986
+ {
1987
+ //compressWeightedDXT3<<<blockNum, NUM_THREADS>>>(firstBlock, blockWidth, d_bitmaps, (uint2 *)d_result);
1988
+ }
1989
+
1990
+
1991
+ // @@ DXT5 compressors.
1992
+ extern "C" void compressKernelDXT5(uint firstBlock, uint blockNum, uint w, uint * d_result, uint * d_bitmaps)
1993
+ {
1994
+ //compressDXT5<<<blockNum, NUM_THREADS>>>(firstBlock, w, d_bitmaps, (uint2 *)d_result);
1995
+ }
1996
+
1997
+ extern "C" void compressWeightedKernelDXT5(uint firstBlock, uint blockNum, uint w, uint * d_result, uint * d_bitmaps)
1998
+ {
1999
+ //compressWeightedDXT5<<<blockNum, NUM_THREADS>>>(firstBlock, w, d_bitmaps, (uint2 *)d_result);
2000
+ }
2001
+
2002
+
2003
+
2004
+
2005
+
2006
+ /*
2007
+ extern "C" void compressNormalKernelDXT1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps)
2008
+ {
2009
+ compressNormalDXT1<<<blockNum, NUM_THREADS>>>(d_bitmaps, d_data, (uint2 *)d_result);
2010
+ }
2011
+
2012
+ extern "C" void compressKernelCTX1(uint blockNum, uint * d_data, uint * d_result, uint * d_bitmaps)
2013
+ {
2014
+ compressCTX1<<<blockNum, NUM_THREADS>>>(d_bitmaps, d_data, (uint2 *)d_result);
2015
+ }
2016
+ */
2017
+ /*
2018
+ extern "C" void compressKernelDXT5n(uint blockNum, cudaArray * d_data, uint * d_result)
2019
+ {
2020
+ // compressDXT5n<<<blockNum/128, 128>>>(blockNum, (uint2 *)d_result);
2021
+ }
2022
+ */
cuda_code/ComputeProductKernel.cu ADDED
@@ -0,0 +1,90 @@
1
+ typedef unsigned char uint8_t;
2
+
3
+ struct __device_builtin__ __align__(_NCS_) uint8n
4
+ {
5
+ uint8_t _VARNAMES_;
6
+ };
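+
+ // The _UPPERCASE_ tokens in this file (_NCS_, _VARNAMES_, _M_, _K_, _TPB_,
+ // _CODEBLOCK_) are placeholders that are expected to be filled in by the
+ // host (e.g. via string substitution or -D defines) before the kernel is
+ // compiled at run time. Purely as an illustration (the real field names are
+ // generated, so these are hypothetical), with _NCS_ = 4 and
+ // _VARNAMES_ = "d0, d1, d2, d3" the substituted _CODEBLOCK_ would accumulate
+ // one shared-memory lookup per sub-quantizer:
+ //
+ //   sum += Bsh[(i * 4 + 0) * _K_ + Avals.d0];
+ //   sum += Bsh[(i * 4 + 1) * _K_ + Avals.d1];
+ //   sum += Bsh[(i * 4 + 2) * _K_ + Avals.d2];
+ //   sum += Bsh[(i * 4 + 3) * _K_ + Avals.d3];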
7
+
8
+ extern "C"
9
+ __global__ void compute_product(
10
+ const uint8_t* __restrict__ A,
11
+ const float* __restrict__ B,
12
+ const char* __restrict__ isEmpty,
13
+ const int* __restrict__ divStart,
14
+ const int* __restrict__ divSize,
15
+ float* __restrict__ V,
16
+ int* __restrict__ I,
17
+ int N, int L, int O, int nProbe
18
+ ) {
19
+ const int tid = threadIdx.x; // thread ID
20
+ const int qid = blockIdx.x; // query ID
21
+ // const uint8n* A2 = reinterpret_cast<const uint8n*>( const_cast<uint8_t*>(A) )
22
+ const uint8n* A2 = reinterpret_cast<const uint8n*>(A); // ?
23
+
24
+ // Load precomputed distances
25
+ extern __shared__ volatile float Bsh[];
26
+ #pragma unroll
27
+ if (tid < 256){
28
+ for (int i = 0; i < _M_; i++){
29
+ int bz = i;
30
+ int by = qid;
31
+ int bx = tid;
32
+ Bsh[i * _K_ + tid] = B[(bz * L * _K_) + (by * _K_) + (bx)];
33
+ }
34
+ }
35
+ __syncthreads();
36
+ // Load A and compute distance
37
+ int iN = tid;
38
+ int counter = tid;
39
+ int start = 0;
40
+ int size = 0;
41
+ int cDiv = -1;
42
+ bool break_loop = false;
43
+ while (iN < N){
44
+ while ( (iN - start) >= size){
45
+ cDiv ++;
46
+ if (cDiv >= nProbe){
47
+ break_loop = true;
48
+ break;
49
+ }
50
+ int residual = iN - start - size;
51
+ start = divStart[(qid) * nProbe + (cDiv)];
52
+ iN = start + residual;
53
+ size = divSize[(qid) * nProbe + (cDiv)];
54
+ if (iN >= N){
55
+ break_loop = true;
56
+ break;
57
+ }
58
+ }
59
+ if (break_loop)
60
+ break;
61
+
62
+ float sum = 0.f;
63
+ #pragma unroll
64
+ for (int i = 0; i < _M_ / _NCS_; i++){
65
+ uint8n Avals = A2[(i * N) + (iN)];
66
+ _CODEBLOCK_
67
+ }
68
+ // write to V and I
69
+ int isCurrentEmpty;
70
+ isCurrentEmpty = isEmpty[iN];
71
+
72
+ /*
73
+ if (isCurrentEmpty == 0){
74
+ V[(qid) * O + counter] = sum;
75
+ I[(qid) * O + counter] = iN;
76
+ } else {
77
+ V[(qid) * O + counter] = -999999.f;
78
+ I[(qid) * O + counter] = -1;
79
+ }
80
+ */
81
+
82
+ if (counter < O){
83
+ V[(qid) * O + counter] = isCurrentEmpty == 0 ? sum : -999999.f;
84
+ I[(qid) * O + counter] = isCurrentEmpty == 0 ? iN : -1;
85
+ // atomicAdd(V + (qid) * O + counter, isCurrentEmpty == 0 ? sum : -99999.f);
86
+ }
87
+ iN += _TPB_;
88
+ counter += _TPB_;
89
+ }
90
+ }
cuda_code/ConstraintEllipsoidGPU_1.cu ADDED
@@ -0,0 +1,103 @@
1
+ // Copyright (c) 2009-2016 The Regents of the University of Michigan
2
+ // This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
3
+
4
+
5
+ // Maintainer: joaander
6
+
7
+ #include "ConstraintEllipsoidGPU.cuh"
8
+ #include "EvaluatorConstraint.h"
9
+ #include "EvaluatorConstraintEllipsoid.h"
10
+
11
+ #include <assert.h>
12
+
13
+ /*! \file ConstraintEllipsoidGPU.cu
14
+ \brief Defines GPU kernel code for calculating ellipsoid constraint forces. Used by ConstraintEllipsoidGPU.
15
+ */
16
+
17
+ //! Kernel for calculating ellipsoid constraint forces on the GPU
18
+ /*! \param d_group_members List of members in the group
19
+ \param group_size number of members in the group
20
+ \param N number of particles in system
21
+ \param d_pos particle positions on device
22
+ \param P Position of the ellipsoid
23
+ \param rx radius of the ellipsoid in x direction
24
+ \param ry radius of the ellipsoid in y direction
25
+ \param rz radius of the ellipsoid in z direction
26
+ \param deltaT step size from the Integrator
27
+ */
28
+ extern "C" __global__
29
+ void gpu_compute_constraint_ellipsoid_constraint_kernel(const unsigned int *d_group_members,
30
+ unsigned int group_size,
31
+ const unsigned int N,
32
+ Scalar4 *d_pos,
33
+ Scalar3 P,
34
+ Scalar rx,
35
+ Scalar ry,
36
+ Scalar rz)
37
+ {
38
+ // start by identifying which particle we are to handle
39
+ // determine which particle this thread works on
40
+ int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
41
+
42
+ if (group_idx >= group_size)
43
+ return;
44
+
45
+ unsigned int idx = d_group_members[group_idx];
46
+
47
+ // read in position, velocity, net force, and mass
48
+ Scalar4 pos = d_pos[idx];
49
+
50
+ // convert to Scalar3's for passing to the evaluators
51
+ Scalar3 X = make_scalar3(pos.x, pos.y, pos.z);
52
+
53
+ // evaluate the constraint position
54
+ EvaluatorConstraintEllipsoid Ellipsoid(P, rx, ry, rz);
55
+ Scalar3 C = Ellipsoid.evalClosest(X);
56
+
57
+ // apply the constraint
58
+ d_pos[idx] = make_scalar4(C.x, C.y, C.z, Scalar(0.0));
59
+ }
60
+
61
+
62
+ /*! \param d_group_members List of members in the group
63
+ \param group_size number of members in the group
64
+ \param N number of particles
65
+ \param d_pos particle positions on the device
66
+ \param P Position of the ellipsoid
67
+ \param rx radius of the ellipsoid in x direction
68
+ \param ry radius of the ellipsoid in y direction
69
+ \param rz radius of the ellipsoid in z direction
70
+ \param deltaT step size from the Integrator
71
+ \param block_size Block size to execute on the GPU
72
+
73
+ \returns Any error code resulting from the kernel launch
74
+ \note Always returns cudaSuccess in release builds to avoid the cudaThreadSynchronize()
75
+ */
76
+ cudaError_t gpu_compute_constraint_ellipsoid_constraint(const unsigned int *d_group_members,
77
+ unsigned int group_size,
78
+ const unsigned int N,
79
+ Scalar4 *d_pos,
80
+ const Scalar3 P,
81
+ Scalar rx,
82
+ Scalar ry,
83
+ Scalar rz,
84
+ unsigned int block_size)
85
+ {
86
+ assert(d_group_members);
87
+
88
+ // setup the grid to run the kernel
89
+ dim3 grid( group_size / block_size + 1, 1, 1);
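+ // (integer division rounds down, so the +1 guarantees enough blocks even
+ // when group_size is not a multiple of block_size; surplus threads exit
+ // early via the group_idx >= group_size check in the kernel)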
90
+ dim3 threads(block_size, 1, 1);
91
+
92
+ // run the kernel
93
+ gpu_compute_constraint_ellipsoid_constraint_kernel<<< grid, threads>>>(d_group_members,
94
+ group_size,
95
+ N,
96
+ d_pos,
97
+ P,
98
+ rx,
99
+ ry,
100
+ rz);
101
+
102
+ return cudaSuccess;
103
+ }
cuda_code/CopySurface.cu ADDED
@@ -0,0 +1,52 @@
1
+ /***************************************************************************
2
+ # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
3
+ #
4
+ # Redistribution and use in source and binary forms, with or without
5
+ # modification, are permitted provided that the following conditions
6
+ # are met:
7
+ # * Redistributions of source code must retain the above copyright
8
+ # notice, this list of conditions and the following disclaimer.
9
+ # * Redistributions in binary form must reproduce the above copyright
10
+ # notice, this list of conditions and the following disclaimer in the
11
+ # documentation and/or other materials provided with the distribution.
12
+ # * Neither the name of NVIDIA CORPORATION nor the names of its
13
+ # contributors may be used to endorse or promote products derived
14
+ # from this software without specific prior written permission.
15
+ #
16
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
17
+ # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19
+ # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
20
+ # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
21
+ # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
22
+ # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
23
+ # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
24
+ # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
+ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
+ ***************************************************************************/
28
+ #include "CopySurface.h"
29
+ #include <device_launch_parameters.h>
30
+
31
+ // The CUDA kernel. This sample simply copies the input surface.
32
+ template<class T>
33
+ __global__ void copySurface(cudaSurfaceObject_t input, cudaSurfaceObject_t output, unsigned int width, unsigned int height)
34
+ {
35
+ unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
36
+ unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
37
+ if (x < width && y < height)
38
+ {
39
+ T data;
40
+ surf2Dread(&data, input, sizeof(T) * x, y);
41
+ surf2Dwrite(data, output, sizeof(T) * x, y);
42
+ }
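+
+ // Note: for surface reads and writes the x coordinate is specified in bytes,
+ // which is why it is scaled by sizeof(T) above.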
43
+ }
44
+
45
+ // A wrapper function that launches the kernel.
46
+ void launchCopySurface(cudaSurfaceObject_t input, cudaSurfaceObject_t output, unsigned int width, unsigned int height, unsigned int format)
47
+ {
48
+ dim3 dimBlock(16, 16);
49
+ dim3 dimGrid((width + dimBlock.x - 1) / dimBlock.x, (height + dimBlock.y - 1) / dimBlock.y);
50
+ if (format == cudaChannelFormatKindFloat) copySurface<float><<<dimGrid, dimBlock>>>(input, output, width, height);
51
+ else copySurface<int><<<dimGrid, dimBlock>>>(input, output, width, height);
52
+ }
cuda_code/Copy_15.cu ADDED
@@ -0,0 +1,225 @@
1
+ #include <ATen/ATen.h>
2
+ #include <ATen/Context.h>
3
+ #include <ATen/Dispatch.h>
4
+ #include <ATen/cuda/CUDAContext.h>
5
+ #include <ATen/cuda/CUDAEvent.h>
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include <ATen/native/Copy.h>
8
+ #include <ATen/native/TensorIterator.h>
9
+ #include <ATen/native/cuda/Loops.cuh>
10
+ #include <THC/THC.h>
11
+
12
+ #ifdef __HIP_PLATFORM_HCC__
13
+ #include <hip/hip_version.h>
14
+ #endif
15
+
16
+ namespace at {
17
+ namespace native {
18
+
19
+ using namespace at::cuda;
20
+
21
+ // device-to-device copy, does type conversion
22
+ void copy_device_to_device(TensorIterator& iter, bool non_blocking) {
23
+ int64_t numel = iter.numel();
24
+
25
+ // We can memcpy the memory if both tensors have the same type AND both
26
+ // tensors are contiguous after dimension coalescing and reordering.
27
+ bool same_type = iter.dtype(0) == iter.dtype(1);
28
+ bool same_conj = iter.tensor(0).is_conj() == iter.tensor(1).is_conj();
29
+ bool memcpy_eligible = same_type && same_conj && iter.is_contiguous();
30
+
31
+ Device dst_device = iter.device(0);
32
+ Device src_device = iter.device(1);
33
+
34
+ CUDAGuard device_guard(src_device);
35
+
36
+ // We always perform the copy on the source device, using the current stream
37
+ // on the source device, and we fully synchronize on both src and dst's
38
+ // current streams for completion of the copy. We have to explicitly do this
39
+ // for non-contig copies. This mimics the behavior of cross-device
40
+ // cudaMemcpyAsync on the default stream.
41
+ CUDAStream copy_stream = getCurrentCUDAStream(src_device.index());
42
+ if (src_device != dst_device) {
43
+ // This is a cross-device copy on the src current stream and dst current
44
+ // stream. We perform a two-way barrier between both devices' streams
45
+ // before the copy. This ensures that any write-after-write and
46
+ // write-after-read dependencies on the destination side are handled, so
47
+ // that no one is operating on the dst memory when we perform the copy.
48
+ // src waits on dst barrier (src already waits on src)
49
+ CUDAEvent dst_ready;
50
+ device_guard.set_device(dst_device);
51
+ dst_ready.record(getCurrentCUDAStream(dst_device.index()));
52
+
53
+ device_guard.set_device(src_device);
54
+ dst_ready.block(copy_stream);
55
+ }
56
+
57
+ if (memcpy_eligible) {
58
+ void *dst = iter.data_ptr(0);
59
+ void *src = iter.data_ptr(1);
60
+ size_t size = numel * iter.element_size(0);
61
+ if (src != dst || src_device != dst_device) {
62
+ // Perform the copy
63
+ AT_CUDA_CHECK(cudaMemcpyAsync(
64
+ dst, src, size,
65
+ cudaMemcpyDeviceToDevice,
66
+ copy_stream));
67
+ }
68
+ } else {
69
+ auto dtype = iter.dtype(0);
70
+ if (isQIntType(dtype)) {
71
+ AT_DISPATCH_QINT_TYPES(dtype, "copy_", [&] {
72
+ gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; });
73
+ });
74
+ } else {
75
+ if (!same_conj && same_type) {
76
+ AT_DISPATCH_COMPLEX_TYPES(
77
+ dtype, "copy_conj_", [&] {
78
+ gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return std::conj(x); });
79
+ });
80
+ } else {
81
+ AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(
82
+ kHalf, kBool, kBFloat16, dtype, "copy_", [&] {
83
+ gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) { return x; });
84
+ });
85
+ }
86
+ }
87
+ }
88
+
89
+ if (src_device != dst_device) {
90
+ // dst waits on src barrier (dst already waits on dst). We cannot
91
+ // operate on dst's copy until the copy is complete.
92
+
93
+ // Still on src_device, record stream event
94
+ CUDAEvent src_ready;
95
+ src_ready.record(copy_stream);
96
+
97
+ device_guard.set_device(dst_device);
98
+ src_ready.block(getCurrentCUDAStream(dst_device.index()));
99
+ }
100
+
101
+ AT_CUDA_CHECK(cudaGetLastError());
102
+ }
103
+
104
+ static bool copy_requires_temporaries(TensorIterator& iter, bool p2p_enabled) {
105
+ Device dst_device = iter.device(0);
106
+ Device src_device = iter.device(1);
107
+
108
+ if (dst_device == src_device) {
109
+ // We never require temporaries for copies on the same GPU.
110
+ TORCH_INTERNAL_ASSERT(dst_device.is_cuda() && src_device.is_cuda());
111
+ return false;
112
+ }
113
+
114
+ bool same_dtype = iter.dtype(0) == iter.dtype(1);
115
+ if (same_dtype && iter.is_contiguous()) {
116
+ // Contiguous same-dtype copies can always use cudaMemcpyAsync
117
+ return false;
118
+ } else if (dst_device.is_cuda() && src_device.is_cuda()) {
119
+ // Copies between GPUs can use the copy kernel if P2P is supported
120
+ return !p2p_enabled;
121
+ } else {
122
+ // The remaining cases require temporaries. For example, this includes
123
+ // non-contiguous copies between CPU and GPU.
124
+ return true;
125
+ }
126
+ }
127
+
128
+ static bool maybe_enable_p2p_access(Device dst_device, Device src_device) {
129
+ if (dst_device.is_cpu() || src_device.is_cpu()) {
130
+ return false;
131
+ }
132
+ return THCState_getPeerToPeerAccess(
133
+ globalContext().getTHCState(), src_device.index(), dst_device.index());
134
+ }
135
+
136
+ static void copy_kernel_cuda(TensorIterator& iter, bool non_blocking) {
137
+ AT_ASSERT(iter.ntensors() == 2);
138
+
139
+ Device dst_device = iter.device(0);
140
+ Device src_device = iter.device(1);
141
+
142
+ // Enable p2p access between devices. (No-op if it involves the CPU)
143
+ bool p2p_enabled = maybe_enable_p2p_access(dst_device, src_device);
144
+
145
+ if (copy_requires_temporaries(iter, p2p_enabled)) {
146
+ // NB: this involves recursive calls to copy. Be careful that those copies
147
+ // don't require temporaries or you will cause an infinite recursion!
148
+ auto& dst = iter.tensor(0);
149
+ Tensor dst_contig;
150
+ Tensor src_contig;
151
+
152
+ // Type conversions are performed on the CPU for CPU-GPU copies and on
153
+ // the src device for GPU-GPU copies.
154
+ if (iter.device_type(0) == kCUDA) {
155
+ dst_contig = dst.is_contiguous() ? dst : at::empty_like(dst, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
156
+ src_contig = iter.tensor(1).to(iter.dtype(0)).expand_as(dst).contiguous();
157
+ } else {
158
+ bool same_type = iter.dtype(0) == iter.dtype(1);
159
+ dst_contig = (dst.is_contiguous() && same_type) ? dst : at::empty_like(dst, iter.dtype(1), LEGACY_CONTIGUOUS_MEMORY_FORMAT);
160
+ src_contig = iter.tensor(1).expand_as(dst).contiguous();
161
+ }
162
+
163
+ // propagate the correct conjugate bit
164
+ dst_contig._set_conj(dst.is_conj());
165
+ src_contig._set_conj(iter.tensor(1).is_conj());
166
+
167
+ // perform a same-dtype copy on contiguous tensors
168
+ TORCH_INTERNAL_ASSERT(dst_contig.sizes().equals(src_contig.sizes()));
169
+ TORCH_INTERNAL_ASSERT(dst_contig.scalar_type() == src_contig.scalar_type());
170
+ dst_contig.copy_(src_contig, non_blocking);
171
+
172
+ // if necessary, copy back into dst
173
+ if (!dst_contig.is_same(dst)) {
174
+ TORCH_INTERNAL_ASSERT(dst_contig.device() == dst.device());
175
+ dst.copy_(dst_contig, non_blocking);
176
+ }
177
+ return;
178
+ }
179
+
180
+ // Copy on GPU (or between GPUs)
181
+ if (dst_device.is_cuda() && src_device.is_cuda()) {
182
+ copy_device_to_device(iter, non_blocking);
183
+ return;
184
+ }
185
+
186
+ // Copy between CPU and GPU
187
+ cuda::OptionalCUDAGuard device_guard;
188
+ cudaMemcpyKind kind;
189
+ if (dst_device.is_cuda() && src_device.is_cpu()) {
190
+ device_guard.set_device(dst_device);
191
+ kind = cudaMemcpyHostToDevice;
192
+ } else if (dst_device.is_cpu() && src_device.is_cuda()) {
193
+ device_guard.set_device(src_device);
194
+ kind = cudaMemcpyDeviceToHost;
195
+ } else {
196
+ TORCH_INTERNAL_ASSERT(false, "unsupported devices in GPU copy_()");
197
+ }
198
+
199
+ void* dst = iter.data_ptr(0);
200
+ void* src = iter.data_ptr(1);
201
+ int64_t nbytes = iter.numel() * iter.element_size(0);
202
+ CUDAStream stream = getCurrentCUDAStream();
203
+
204
+ if (non_blocking) {
205
+ AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
206
+ void* ptr = (dst_device == kCPU ? dst : src);
207
+ AT_CUDA_CHECK(THCCachingHostAllocator_recordEvent(ptr, stream));
208
+ } else {
209
+ #if HIP_VERSION >= 301
210
+ AT_CUDA_CHECK(hipMemcpyWithStream(dst, src, nbytes, kind, stream));
211
+ #else
212
+ AT_CUDA_CHECK(cudaMemcpyAsync(dst, src, nbytes, kind, stream));
213
+ AT_CUDA_CHECK(cudaStreamSynchronize(stream));
214
+ #endif
215
+ }
216
+
217
+ if (iter.tensor(0).is_conj() != iter.tensor(1).is_conj()) {
218
+ iter.tensor(0).conj_physical_();
219
+ }
220
+ }
221
+
222
+ REGISTER_DISPATCH(copy_stub, &copy_kernel_cuda);
223
+
224
+ } // namespace native
225
+ } // namespace at
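For orientation, a hedged sketch of an ATen call that would reach copy_kernel_cuda; the shapes and dtypes are illustrative assumptions. Because the source is a CPU tensor and the dtypes differ, copy_requires_temporaries() is true, so the conversion happens on the CPU and the data then moves with a single contiguous cudaMemcpyAsync.

// Illustrative caller (shapes/dtypes are assumptions), showing the temporaries path above.
#include <ATen/ATen.h>

void copy_example() {
    at::Tensor src = at::rand({1024}, at::device(at::kCPU).dtype(at::kFloat));
    at::Tensor dst = at::empty({1024}, at::device(at::kCUDA).dtype(at::kHalf));
    dst.copy_(src, /*non_blocking=*/true);
}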
cuda_code/CudaAllocator_2.cu ADDED
@@ -0,0 +1,343 @@
1
+ //============================================================================
2
+ // Copyright (c) Kitware, Inc.
3
+ // All rights reserved.
4
+ // See LICENSE.txt for details.
5
+ //
6
+ // This software is distributed WITHOUT ANY WARRANTY; without even
7
+ // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
8
+ // PURPOSE. See the above copyright notice for more information.
9
+ //============================================================================
10
+
11
+ #include <cstdlib>
12
+ #include <mutex>
13
+ #include <vtkm/cont/Logging.h>
14
+ #include <vtkm/cont/RuntimeDeviceInformation.h>
15
+ #include <vtkm/cont/cuda/ErrorCuda.h>
16
+ #include <vtkm/cont/cuda/internal/CudaAllocator.h>
17
+ #include <vtkm/cont/cuda/internal/DeviceAdapterTagCuda.h>
18
+ #include <vtkm/cont/cuda/internal/RuntimeDeviceConfigurationCuda.h>
19
+ #define NO_VTKM_MANAGED_MEMORY "NO_VTKM_MANAGED_MEMORY"
20
+
21
+ #include <mutex>
22
+ #include <vector>
23
+
24
+ VTKM_THIRDPARTY_PRE_INCLUDE
25
+ #include <cuda_runtime.h>
26
+ VTKM_THIRDPARTY_POST_INCLUDE
27
+
28
+ // These static vars are in an anon namespace to work around MSVC linker issues.
29
+ namespace
30
+ {
31
+ #if CUDART_VERSION >= 8000
32
+ // Has CudaAllocator::Initialize been called by any thread?
33
+ static std::once_flag IsInitialized;
34
+ #endif
35
+
36
+ // Holds how VTK-m currently allocates memory.
37
+ // When VTK-m is initialized we set this based on the hardware support ( HardwareSupportsManagedMemory ).
38
+ // The user can explicitly disable managed memory through an environment variable
39
+ // or by calling a function on the CudaAllocator.
40
+ // Likewise managed memory can be re-enabled by calling a function on CudaAllocator
41
+ // if and only if the underlying hardware supports pageable managed memory
42
+ static bool ManagedMemoryEnabled = false;
43
+
44
+ // True if concurrent pageable managed memory is supported by the machine's hardware.
45
+ static bool HardwareSupportsManagedMemory = false;
46
+
47
+ // Avoid overhead of cudaMemAdvise and cudaMemPrefetchAsync for small buffers.
48
+ // This value should be > 0 or else these functions will error out.
49
+ static std::size_t Threshold = 1 << 20;
50
+ }
51
+
52
+ namespace vtkm
53
+ {
54
+ namespace cont
55
+ {
56
+ namespace cuda
57
+ {
58
+ namespace internal
59
+ {
60
+
61
+ bool CudaAllocator::UsingManagedMemory()
62
+ {
63
+ CudaAllocator::Initialize();
64
+ return ManagedMemoryEnabled;
65
+ }
66
+
67
+ void CudaAllocator::ForceManagedMemoryOff()
68
+ {
69
+ if (HardwareSupportsManagedMemory)
70
+ {
71
+ ManagedMemoryEnabled = false;
72
+ VTKM_LOG_F(vtkm::cont::LogLevel::Info, "CudaAllocator disabling managed memory");
73
+ }
74
+ else
75
+ {
76
+ VTKM_LOG_F(
77
+ vtkm::cont::LogLevel::Warn,
78
+ "CudaAllocator trying to disable managed memory on hardware that doesn't support it");
79
+ }
80
+ }
81
+
82
+ void CudaAllocator::ForceManagedMemoryOn()
83
+ {
84
+ if (HardwareSupportsManagedMemory)
85
+ {
86
+ ManagedMemoryEnabled = true;
87
+ VTKM_LOG_F(vtkm::cont::LogLevel::Info, "CudaAllocator enabling managed memory");
88
+ }
89
+ else
90
+ {
91
+ VTKM_LOG_F(vtkm::cont::LogLevel::Warn,
92
+ "CudaAllocator trying to enable managed memory on hardware that doesn't support it");
93
+ }
94
+ }
95
+
96
+ bool CudaAllocator::IsDevicePointer(const void* ptr)
97
+ {
98
+ CudaAllocator::Initialize();
99
+ if (!ptr)
100
+ {
101
+ return false;
102
+ }
103
+
104
+ cudaPointerAttributes attr;
105
+ cudaError_t err = cudaPointerGetAttributes(&attr, ptr);
106
+ // This function will return invalid value if the pointer is unknown to the
107
+ // cuda runtime. Manually catch this value since it's not really an error.
108
+ if (err == cudaErrorInvalidValue)
109
+ {
110
+ cudaGetLastError(); // Clear the error so we don't raise it later...
111
+ return false;
112
+ }
113
+ VTKM_CUDA_CALL(err /*= cudaPointerGetAttributes(&attr, ptr)*/);
114
+ return attr.devicePointer == ptr;
115
+ }
116
+
117
+ bool CudaAllocator::IsManagedPointer(const void* ptr)
118
+ {
119
+ if (!ptr || !ManagedMemoryEnabled)
120
+ {
121
+ return false;
122
+ }
123
+
124
+ cudaPointerAttributes attr;
125
+ cudaError_t err = cudaPointerGetAttributes(&attr, ptr);
126
+ // This function will return invalid value if the pointer is unknown to the
127
+ // cuda runtime. Manually catch this value since it's not really an error.
128
+ if (err == cudaErrorInvalidValue)
129
+ {
130
+ cudaGetLastError(); // Clear the error so we don't raise it later...
131
+ return false;
132
+ }
133
+ VTKM_CUDA_CALL(err /*= cudaPointerGetAttributes(&attr, ptr)*/);
134
+ #if CUDART_VERSION < 10000 // isManaged deprecated in CUDA 10.
135
+ return attr.isManaged != 0;
136
+ #else // attr.type doesn't exist before CUDA 10
137
+ return attr.type == cudaMemoryTypeManaged;
138
+ #endif
139
+ }
140
+
141
+ void* CudaAllocator::Allocate(std::size_t numBytes)
142
+ {
143
+ CudaAllocator::Initialize();
144
+ // When numBytes is zero cudaMallocManaged returns an error and the behavior
145
+ // of cudaMalloc is not documented. Just return nullptr.
146
+ if (numBytes == 0)
147
+ {
148
+ return nullptr;
149
+ }
150
+
151
+ void* ptr = nullptr;
152
+ if (ManagedMemoryEnabled)
153
+ {
154
+ VTKM_CUDA_CALL(cudaMallocManaged(&ptr, numBytes));
155
+ }
156
+ else
157
+ {
158
+ VTKM_CUDA_CALL(cudaMalloc(&ptr, numBytes));
159
+ }
160
+
161
+ {
162
+ VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
163
+ "Allocated CUDA array of %s at %p.",
164
+ vtkm::cont::GetSizeString(numBytes).c_str(),
165
+ ptr);
166
+ }
167
+
168
+ return ptr;
169
+ }
170
+
171
+ void* CudaAllocator::AllocateUnManaged(std::size_t numBytes)
172
+ {
173
+ void* ptr = nullptr;
174
+ VTKM_CUDA_CALL(cudaMalloc(&ptr, numBytes));
175
+ {
176
+ VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
177
+ "Allocated CUDA array of %s at %p.",
178
+ vtkm::cont::GetSizeString(numBytes).c_str(),
179
+ ptr);
180
+ }
181
+ return ptr;
182
+ }
183
+
184
+ void CudaAllocator::Free(void* ptr)
185
+ {
186
+ VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing CUDA allocation at %p.", ptr);
187
+ VTKM_CUDA_CALL(cudaFree(ptr));
188
+ }
189
+
190
+ void CudaAllocator::FreeDeferred(void* ptr, std::size_t numBytes)
191
+ {
192
+ static std::mutex deferredMutex;
193
+ static std::vector<void*> deferredPointers;
194
+ static std::size_t deferredSize = 0;
195
+ constexpr std::size_t bufferLimit = 2 << 24; //16MB buffer
196
+
197
+ {
198
+ VTKM_LOG_F(vtkm::cont::LogLevel::MemExec,
199
+ "Deferring free of CUDA allocation at %p of %s.",
200
+ ptr,
201
+ vtkm::cont::GetSizeString(numBytes).c_str());
202
+ }
203
+
204
+ std::vector<void*> toFree;
205
+ // critical section
206
+ {
207
+ std::lock_guard<std::mutex> lock(deferredMutex);
208
+ deferredPointers.push_back(ptr);
209
+ deferredSize += numBytes;
210
+ if (deferredSize >= bufferLimit)
211
+ {
212
+ toFree.swap(deferredPointers);
213
+ deferredSize = 0;
214
+ }
215
+ }
216
+
217
+ for (auto&& p : toFree)
218
+ {
219
+ VTKM_LOG_F(vtkm::cont::LogLevel::MemExec, "Freeing deferred CUDA allocation at %p.", p);
220
+ VTKM_CUDA_CALL(cudaFree(p));
221
+ }
222
+ }
223
+
224
+ void CudaAllocator::PrepareForControl(const void* ptr, std::size_t numBytes)
225
+ {
226
+ if (IsManagedPointer(ptr) && numBytes >= Threshold)
227
+ {
228
+ #if CUDART_VERSION >= 8000
229
+ // TODO these hints need to be benchmarked and adjusted once we start
230
+ // sharing the pointers between cont/exec
231
+ VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, cudaCpuDeviceId));
232
+ VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, cudaCpuDeviceId, cudaStreamPerThread));
233
+ #endif // CUDA >= 8.0
234
+ }
235
+ }
236
+
237
+ void CudaAllocator::PrepareForInput(const void* ptr, std::size_t numBytes)
238
+ {
239
+ if (IsManagedPointer(ptr) && numBytes >= Threshold)
240
+ {
241
+ #if CUDART_VERSION >= 8000
242
+ vtkm::Id dev;
243
+ vtkm::cont::RuntimeDeviceInformation()
244
+ .GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagCuda())
245
+ .GetDeviceInstance(dev);
246
+ // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev));
247
+ // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetReadMostly, dev));
248
+ VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev));
249
+ VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
250
+ #endif // CUDA >= 8.0
251
+ }
252
+ }
253
+
254
+ void CudaAllocator::PrepareForOutput(const void* ptr, std::size_t numBytes)
255
+ {
256
+ if (IsManagedPointer(ptr) && numBytes >= Threshold)
257
+ {
258
+ #if CUDART_VERSION >= 8000
259
+ vtkm::Id dev;
260
+ vtkm::cont::RuntimeDeviceInformation()
261
+ .GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagCuda())
262
+ .GetDeviceInstance(dev);
263
+ // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev));
264
+ // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseUnsetReadMostly, dev));
265
+ VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev));
266
+ VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
267
+ #endif // CUDA >= 8.0
268
+ }
269
+ }
270
+
271
+ void CudaAllocator::PrepareForInPlace(const void* ptr, std::size_t numBytes)
272
+ {
273
+ if (IsManagedPointer(ptr) && numBytes >= Threshold)
274
+ {
275
+ #if CUDART_VERSION >= 8000
276
+ vtkm::Id dev;
277
+ vtkm::cont::RuntimeDeviceInformation()
278
+ .GetRuntimeConfiguration(vtkm::cont::DeviceAdapterTagCuda())
279
+ .GetDeviceInstance(dev);
280
+ // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetPreferredLocation, dev));
281
+ // VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseUnsetReadMostly, dev));
282
+ VTKM_CUDA_CALL(cudaMemAdvise(ptr, numBytes, cudaMemAdviseSetAccessedBy, dev));
283
+ VTKM_CUDA_CALL(cudaMemPrefetchAsync(ptr, numBytes, dev, cudaStreamPerThread));
284
+ #endif // CUDA >= 8.0
285
+ }
286
+ }
287
+
288
+ void CudaAllocator::Initialize()
289
+ {
290
+ #if CUDART_VERSION >= 8000
291
+ std::call_once(IsInitialized, []() {
292
+ auto cudaDeviceConfig = dynamic_cast<
293
+ vtkm::cont::internal::RuntimeDeviceConfiguration<vtkm::cont::DeviceAdapterTagCuda>&>(
294
+ vtkm::cont::RuntimeDeviceInformation{}.GetRuntimeConfiguration(
295
+ vtkm::cont::DeviceAdapterTagCuda()));
296
+ vtkm::Id numDevices;
297
+ cudaDeviceConfig.GetMaxDevices(numDevices);
298
+
299
+ if (numDevices == 0)
300
+ {
301
+ return;
302
+ }
303
+
304
+ // Check all devices, use the feature set supported by all
305
+ bool managedMemorySupported = true;
306
+ std::vector<cudaDeviceProp> cudaProp;
307
+ cudaDeviceConfig.GetCudaDeviceProp(cudaProp);
308
+ for (int i = 0; i < numDevices && managedMemorySupported; ++i)
309
+ {
310
+ // We check for concurrentManagedAccess, as devices with only the
311
+ // managedAccess property have extra synchronization requirements.
312
+ managedMemorySupported = managedMemorySupported && cudaProp[i].concurrentManagedAccess;
313
+ }
314
+
315
+ HardwareSupportsManagedMemory = managedMemorySupported;
316
+ ManagedMemoryEnabled = managedMemorySupported;
317
+
318
+ VTKM_LOG_F(vtkm::cont::LogLevel::Info,
319
+ "CudaAllocator hardware %s managed memory",
320
+ HardwareSupportsManagedMemory ? "supports" : "doesn't support");
321
+
322
+ // Check if users want to disable managed memory
323
+ #pragma warning(push)
324
+ // getenv is not thread safe on windows but since it's inside a call_once block so
325
+ // it's fine to suppress the warning here.
326
+ #pragma warning(disable : 4996)
327
+ const char* buf = std::getenv(NO_VTKM_MANAGED_MEMORY);
328
+ #pragma warning(pop)
329
+ if (managedMemorySupported && buf != nullptr)
330
+ { //only makes sense to disable managed memory if the hardware supports it
331
+ //in the first place
332
+ ManagedMemoryEnabled = false;
333
+ VTKM_LOG_F(
334
+ vtkm::cont::LogLevel::Info,
335
+ "CudaAllocator disabling managed memory due to NO_VTKM_MANAGED_MEMORY env variable");
336
+ }
337
+ });
338
+ #endif
339
+ }
340
+ }
341
+ }
342
+ }
343
+ } // end namespace vtkm::cont::cuda::internal
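A hedged usage sketch against the allocator API above; the buffer size and the host-side prefetch step are assumptions, and error handling is omitted.

// Sketch only: allocate, optionally prefetch toward the host, then defer the free.
#include <vtkm/cont/cuda/internal/CudaAllocator.h>
#include <cstddef>

void allocator_example()
{
    using vtkm::cont::cuda::internal::CudaAllocator;

    const std::size_t numBytes = 16 * 1024 * 1024;             // assumed size
    void* buffer = CudaAllocator::Allocate(numBytes);           // managed if supported, else device
    if (CudaAllocator::IsManagedPointer(buffer))
    {
        CudaAllocator::PrepareForControl(buffer, numBytes);     // prefetch before CPU-side access
    }
    CudaAllocator::FreeDeferred(buffer, numBytes);              // batched cudaFree
}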
cuda_code/CudaKernel.cu ADDED
@@ -0,0 +1,12 @@
+ typedef struct KernelData
+ {
+     float a;
+     float b;
+     float result;
+ } KernelData;
+
+ __global__ void vectorAddition(KernelData* data)
+ {
+     int index = blockIdx.x * blockDim.x + threadIdx.x;
+     data[index].result = data[index].a + data[index].b;
+ }
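A hypothetical launcher for vectorAddition; the element count, block size, and fill values are assumptions for illustration.

#include <cuda_runtime.h>
#include <cstdio>

int main()
{
    const int N = 256, BLOCK = 64;                 // assumed sizes; N is a multiple of BLOCK
    KernelData host[N];
    for (int i = 0; i < N; ++i) { host[i].a = (float)i; host[i].b = 2.0f * i; host[i].result = 0.0f; }

    KernelData* dev = nullptr;
    cudaMalloc(&dev, N * sizeof(KernelData));
    cudaMemcpy(dev, host, N * sizeof(KernelData), cudaMemcpyHostToDevice);

    vectorAddition<<<N / BLOCK, BLOCK>>>(dev);

    cudaMemcpy(host, dev, N * sizeof(KernelData), cudaMemcpyDeviceToHost);
    printf("host[1].result = %f\n", host[1].result);   // expect 3.0
    cudaFree(dev);
    return 0;
}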
cuda_code/CudaKernel_11.cu ADDED
@@ -0,0 +1,105 @@
1
+ #include<cuda.h>
2
+ #include<iostream>
3
+ #include "CudaKernel.h"
4
+
5
+ using namespace std;
6
+
7
+ #define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
8
+ #define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
9
+ #define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
10
+
11
+
12
+ texture <float,2,cudaReadModeElementType> tex1;
13
+
14
+ static cudaArray *cuArray = NULL;
15
+
16
+ //Kernel for x direction sobel
17
+ __global__ void implement_x_sobel(float* output,int width,int height,int widthStep)
18
+ {
19
+ int x = blockIdx.x * blockDim.x + threadIdx.x;
20
+ int y = blockIdx.y * blockDim.y + threadIdx.y;
21
+
22
+ //Make sure that thread is inside image bounds
23
+ if(x<width && y<height)
24
+ {
25
+ float output_value = (-1*tex2D(tex1,x-1,y-1)) + (0*tex2D(tex1,x,y-1)) + (1*tex2D(tex1,x+1,y-1))
26
+ + (-2*tex2D(tex1,x-1,y)) + (0*tex2D(tex1,x,y)) + (2*tex2D(tex1,x+1,y))
27
+ + (-1*tex2D(tex1,x-1,y+1)) + (0*tex2D(tex1,x,y+1)) + (1*tex2D(tex1,x+1,y+1));
28
+
29
+ output[y*widthStep+x]=output_value;
30
+ }
31
+
32
+ }
33
+
34
+
35
+ inline void __checkCudaErrors( cudaError err, const char *file, const int line )
36
+ {
37
+ if( cudaSuccess != err) {
38
+ fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",
39
+ file, line, (int)err, cudaGetErrorString( err ) );
40
+ exit(-1);
41
+ }
42
+ }
43
+
44
+ //Host Code
45
+ inline void __cudaSafeCall( cudaError err, const char *file, const int line )
46
+ {
47
+ #ifdef CUDA_ERROR_CHECK
48
+ if ( cudaSuccess != err )
49
+ {
50
+ printf("cudaSafeCall() failed at %s:%i : %s\n",
51
+ file, line, cudaGetErrorString( err ) );
52
+ exit( -1 );
53
+ }
54
+ #endif
55
+
56
+ return;
57
+ }
58
+ inline void __cudaCheckError( const char *file, const int line )
59
+ {
60
+ #ifdef CUDA_ERROR_CHECK
61
+ cudaError err = cudaGetLastError();
62
+ if ( cudaSuccess != err )
63
+ {
64
+ printf("cudaCheckError() failed at %s:%i : %s\n",
65
+ file, line, cudaGetErrorString( err ) );
66
+ exit( -1 );
67
+ }
68
+ #endif
69
+
70
+ return;
71
+ }
72
+
73
+ void kernelcall(float* input,float* output,int width,int height,int widthStep)
74
+ {
75
+ cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
76
+
77
+ CudaSafeCall(cudaMallocArray(&cuArray,&channelDesc,width,height));
78
+
79
+ //Never use 1D memory copy if host and device pointers have different widthStep.
80
+ // You don't know the width step of the CUDA array, so it's better to use cudaMemcpy2D...
81
+ cudaMemcpy2DToArray(cuArray,0,0,input,widthStep,width * sizeof(float),height,cudaMemcpyHostToDevice);
82
+
83
+ cudaBindTextureToArray(tex1,cuArray,channelDesc);
84
+
85
+ float * D_output_x;
86
+ CudaSafeCall(cudaMalloc(&D_output_x,widthStep*height));
87
+
88
+ dim3 blocksize(16,16);
89
+ dim3 gridsize;
90
+ gridsize.x=(width+blocksize.x-1)/blocksize.x;
91
+ gridsize.y=(height+blocksize.y-1)/blocksize.y;
92
+
93
+ implement_x_sobel<<<gridsize,blocksize>>>(D_output_x,width,height,widthStep/sizeof(float));
94
+
95
+ cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
96
+ CudaCheckError();
97
+
98
+ //Don't forget to unbind the texture
99
+ cudaUnbindTexture(tex1);
100
+
101
+ CudaSafeCall(cudaMemcpy(output,D_output_x,height*widthStep,cudaMemcpyDeviceToHost));
102
+
103
+ cudaFree(D_output_x);
104
+ cudaFreeArray(cuArray);
105
+ }
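A hypothetical caller for kernelcall; the image size and the tightly packed widthStep (in bytes) are assumptions, and CudaKernel.h is assumed to declare the function.

#include <cstdlib>
#include "CudaKernel.h"   // assumed to declare kernelcall

int main()
{
    const int width = 640, height = 480;
    const int widthStep = width * (int)sizeof(float);   // tightly packed rows, in bytes

    float* input  = (float*)calloc((size_t)width * height, sizeof(float));
    float* output = (float*)malloc((size_t)widthStep * height);

    kernelcall(input, output, width, height, widthStep); // Sobel-x response written to output

    free(input);
    free(output);
    return 0;
}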
cuda_code/CudnnMaxPool_2.cu ADDED
@@ -0,0 +1,210 @@
1
+ #include "gpu_runtime.h"
2
+
3
+ int CuDNN_DLGpuMax_Pooling2d(const DLArrayHandle input,const size_t kernel_H, const size_t kernel_W, DLArrayHandle output, const size_t padding, const size_t stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
4
+ // create handle
5
+ // CUDNN_CALL(cudnnCreate(&cudnn));
6
+ int dev_id = (input->ctx).device_id;
7
+ cudnn_init(dev_id, stream_handle);
8
+
9
+ // input
10
+ size_t input_N = input->shape[0];
11
+ size_t input_C = input->shape[1];
12
+ size_t input_H = input->shape[2];
13
+ size_t input_W = input->shape[3];
14
+ const float * input_data = (const float*) input->data;
15
+
16
+ //output
17
+ size_t output_H = output->shape[2];
18
+ size_t output_W = output->shape[3];
19
+ float *output_data = (float *) output->data;
20
+ if(p != NULL){
21
+ int size_input = 1, size_output = 1;
22
+ for(int i = 0; i < input -> ndim; i++)
23
+ size_input *= input -> shape[i];
24
+ for(int i = 0; i < output -> ndim; i++)
25
+ size_output *= output -> shape[i];
26
+ p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
27
+ p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
28
+ p -> workspace_memory = 0;
29
+ cudaEvent_t start, stop;
30
+ cudaEventCreate(&start);
31
+ cudaEventRecord(start,0);
32
+ //pooling descriptor
33
+ cudnnPoolingDescriptor_t maxpool_desc;
34
+ CUDNN_CALL(cudnnCreatePoolingDescriptor(&maxpool_desc));
35
+ // std::cout<<"padding = "<<padding<<" stride = "<<stride<<std::endl;
36
+ // CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX,
37
+ // CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
38
+ CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX_DETERMINISTIC,
39
+ CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
40
+
41
+ //input descriptor
42
+ cudnnTensorDescriptor_t input_desc;
43
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
44
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
45
+
46
+ //output descriptor
47
+ cudnnTensorDescriptor_t output_desc;
48
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
49
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc,CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, output_H, output_W));
50
+
51
+ float alpha = 1.0f;
52
+ float beta = 0.0f;
53
+
54
+ CUDNN_CALL(cudnnPoolingForward(cudnn_map[dev_id], maxpool_desc,
55
+ &alpha, input_desc, input_data, &beta, output_desc, output_data));
56
+
57
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
58
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
59
+ CUDNN_CALL(cudnnDestroyPoolingDescriptor(maxpool_desc));
60
+
61
+ float elapsedTime;
62
+ cudaEventCreate(&stop);
63
+ cudaEventRecord(stop,0);
64
+ cudaEventSynchronize(stop);
65
+ cudaEventElapsedTime(&elapsedTime, start,stop);
66
+ cudaEventDestroy(start);
67
+ cudaEventDestroy(stop);
68
+ p->time = elapsedTime;
69
+ }
70
+ else{
71
+ //pooling descriptor
72
+ cudnnPoolingDescriptor_t maxpool_desc;
73
+ CUDNN_CALL(cudnnCreatePoolingDescriptor(&maxpool_desc));
74
+ // std::cout<<"padding = "<<padding<<" stride = "<<stride<<std::endl;
75
+ // CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX,
76
+ // CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
77
+ CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX_DETERMINISTIC,
78
+ CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
79
+
80
+ //input descriptor
81
+ cudnnTensorDescriptor_t input_desc;
82
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
83
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
84
+
85
+ //output descriptor
86
+ cudnnTensorDescriptor_t output_desc;
87
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
88
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc,CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, output_H, output_W));
89
+
90
+ float alpha = 1.0f;
91
+ float beta = 0.0f;
92
+
93
+ CUDNN_CALL(cudnnPoolingForward(cudnn_map[dev_id], maxpool_desc,
94
+ &alpha, input_desc, input_data, &beta, output_desc, output_data));
95
+
96
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
97
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
98
+ CUDNN_CALL(cudnnDestroyPoolingDescriptor(maxpool_desc));
99
+ }
100
+ return 0;
101
+ }
102
+
103
+ int CuDNN_DLGpuMax_Pooling2d_gradient(const DLArrayHandle output_Y,const DLArrayHandle gradient_Y,const DLArrayHandle input_X, const size_t kernel_H, const size_t kernel_W, DLArrayHandle gradient_X, const size_t padding, const size_t stride, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
104
+ // create handle
105
+ // CUDNN_CALL(cudnnCreate(&cudnn));
106
+ int dev_id = (input_X->ctx).device_id;
107
+ cudnn_init(dev_id, stream_handle);
108
+
109
+ // input
110
+ size_t input_N = input_X->shape[0];
111
+ size_t input_C = input_X->shape[1];
112
+ size_t input_H = input_X->shape[2];
113
+ size_t input_W = input_X->shape[3];
114
+ const float * input_data = (const float*) input_X->data;
115
+ float * gradient_x_data = (float *) gradient_X->data;
116
+ //output
117
+ size_t output_H = output_Y->shape[2];
118
+ size_t output_W = output_Y->shape[3];
119
+ const float *output_data = (const float *) output_Y->data;
120
+ const float *gradient_Y_data = (const float *) gradient_Y -> data;
121
+ if(p != NULL){
122
+ int size_input1 = 1, size_input2 = 1, size_input3 = 1, size_output = 1;
123
+ for(int i = 0; i < output_Y -> ndim; i++)
124
+ size_input1 *= output_Y -> shape[i];
125
+ for(int i = 0; i < gradient_Y -> ndim; i++)
126
+ size_input2 *= gradient_Y -> shape[i];
127
+ for(int i = 0; i < input_X -> ndim; i++)
128
+ size_input3 *= input_X -> shape[i];
129
+ for(int i = 0; i < gradient_X -> ndim; i++)
130
+ size_output *= gradient_X -> shape[i];
131
+ p -> input_memory = 1.0 * (size_input1 + size_input2 + size_input3) * sizeof(float) / 1024 / 1024;
132
+ p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
133
+ p -> workspace_memory = 0;
+ cudaEvent_t start, stop;
134
+ cudaEventCreate(&start);
135
+ cudaEventRecord(start,0);
136
+
137
+ //pooling descriptor
138
+ cudnnPoolingDescriptor_t maxpool_desc;
139
+ CUDNN_CALL(cudnnCreatePoolingDescriptor(&maxpool_desc));
140
+ // std::cout<<"padding = "<<padding<<" stride = "<<stride<<std::endl;
141
+ // CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX,
142
+ // CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
143
+ CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX_DETERMINISTIC,
144
+ CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
145
+
146
+ //input descriptor
147
+ cudnnTensorDescriptor_t input_desc;
148
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
149
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
150
+
151
+ //output descriptor
152
+ cudnnTensorDescriptor_t output_desc;
153
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
154
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc,CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, output_H, output_W));
155
+
156
+ float alpha = 1.0f;
157
+ float beta = 0.0f;
158
+
159
+ CUDNN_CALL(cudnnPoolingBackward(cudnn_map[dev_id], maxpool_desc,
160
+ &alpha, output_desc, output_data,
161
+ output_desc, gradient_Y_data,
162
+ input_desc, input_data,
163
+ &beta, input_desc,gradient_x_data));
164
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
165
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
166
+ CUDNN_CALL(cudnnDestroyPoolingDescriptor(maxpool_desc));
167
+
168
+ float elapsedTime;
169
+ cudaEventCreate(&stop);
170
+ cudaEventRecord(stop,0);
171
+ cudaEventSynchronize(stop);
172
+ cudaEventElapsedTime(&elapsedTime, start,stop);
173
+ cudaEventDestroy(start);
174
+ cudaEventDestroy(stop);
175
+ p->time = elapsedTime;
176
+ }
177
+ else{
178
+ //pooling descriptor
179
+ cudnnPoolingDescriptor_t maxpool_desc;
180
+ CUDNN_CALL(cudnnCreatePoolingDescriptor(&maxpool_desc));
181
+ // std::cout<<"padding = "<<padding<<" stride = "<<stride<<std::endl;
182
+ // CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX,
183
+ // CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
184
+ CUDNN_CALL(cudnnSetPooling2dDescriptor(maxpool_desc,CUDNN_POOLING_MAX_DETERMINISTIC,
185
+ CUDNN_PROPAGATE_NAN, kernel_H, kernel_W, padding, padding, stride, stride));
186
+
187
+ //input descriptor
188
+ cudnnTensorDescriptor_t input_desc;
189
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
190
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
191
+
192
+ //output descriptor
193
+ cudnnTensorDescriptor_t output_desc;
194
+ CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
195
+ CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc,CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, output_H, output_W));
196
+
197
+ float alpha = 1.0f;
198
+ float beta = 0.0f;
199
+
200
+ CUDNN_CALL(cudnnPoolingBackward(cudnn_map[dev_id], maxpool_desc,
201
+ &alpha, output_desc, output_data,
202
+ output_desc, gradient_Y_data,
203
+ input_desc, input_data,
204
+ &beta, input_desc,gradient_x_data));
205
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
206
+ CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
207
+ CUDNN_CALL(cudnnDestroyPoolingDescriptor(maxpool_desc));
208
+ }
209
+ return 0;
210
+ }
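Both entry points expect the caller to have sized the output and gradient tensors already; a hedged sketch of the standard floor-mode pooling shape arithmetic such a caller would use (this helper is an assumption, not part of the file).

// Assumed caller-side shape helper (standard floor-mode pooling arithmetic).
#include <cstddef>

std::size_t pooled_dim(std::size_t in, std::size_t kernel, std::size_t padding, std::size_t stride)
{
    return (in + 2 * padding - kernel) / stride + 1;
}
// e.g. output->shape[2] = pooled_dim(input->shape[2], kernel_H, padding, stride);
//      output->shape[3] = pooled_dim(input->shape[3], kernel_W, padding, stride);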
cuda_code/CustomPi.cu ADDED
@@ -0,0 +1,42 @@
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+
+ struct point{
+     float x;
+     float y;
+ };
+
+
+ __global__ void pi(const struct point* A, int* res, const int nbPoint, const float ray){
+     const int idx = 32*blockDim.x * blockIdx.x + threadIdx.x;
+     if (idx < nbPoint-32*blockDim.x) {
+         #pragma unroll 16
+         for (int j = 0; j < 32; j++) {
+             int i = idx + blockDim.x * j;
+             res[i] = (A[i].x*A[i].x + A[i].y*A[i].y <= ray);
+         }
+     }
+ }
+
+
+ struct point2{
+     double x;
+     double y;
+ };
+
+
+ __global__ void pi_double(const struct point2* A, int* res, const int nbPoint, const float ray){
+     const int idx = 32*blockDim.x * blockIdx.x + threadIdx.x;
+     // signed comparison: guards against nbPoint < 32*blockDim.x wrapping to a huge unsigned value
+     if (idx < (int)(nbPoint-32*blockDim.x)) {
+         #pragma unroll 16
+         for (int j = 0; j < 32; j++) {
+             int i = idx + blockDim.x * j;
+             res[i] = (A[i].x*A[i].x + A[i].y*A[i].y <= (double)ray);
+         }
+     }
+ }
+
+
+ #ifdef __cplusplus
+ }
+ #endif
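A hypothetical host-side Monte Carlo driver for the pi kernel; the point count, block size, and unit-square sampling are assumptions, and d_res is zeroed up front because the kernel's bounds check leaves the tail of the array untouched.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

int main()
{
    const int BLOCK = 256, UNROLL = 32;
    const int nbPoint = 1 << 20;                        // assumed multiple of BLOCK * UNROLL
    const int nbBlocks = nbPoint / (BLOCK * UNROLL);

    point* h_pts = (point*)malloc(nbPoint * sizeof(point));
    int*   h_res = (int*)malloc(nbPoint * sizeof(int));
    for (int i = 0; i < nbPoint; ++i) {
        h_pts[i].x = rand() / (float)RAND_MAX;          // uniform in the unit square
        h_pts[i].y = rand() / (float)RAND_MAX;
    }

    point* d_pts; int* d_res;
    cudaMalloc(&d_pts, nbPoint * sizeof(point));
    cudaMalloc(&d_res, nbPoint * sizeof(int));
    cudaMemcpy(d_pts, h_pts, nbPoint * sizeof(point), cudaMemcpyHostToDevice);
    cudaMemset(d_res, 0, nbPoint * sizeof(int));        // entries the kernel's guard skips stay 0

    pi<<<nbBlocks, BLOCK>>>(d_pts, d_res, nbPoint, 1.0f);

    cudaMemcpy(h_res, d_res, nbPoint * sizeof(int), cudaMemcpyDeviceToHost);
    long hits = 0;
    for (int i = 0; i < nbPoint; ++i) hits += h_res[i];
    printf("pi ~= %f\n", 4.0 * (double)hits / nbPoint);

    cudaFree(d_pts); cudaFree(d_res);
    free(h_pts); free(h_res);
    return 0;
}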
cuda_code/DReductor.cu ADDED
@@ -0,0 +1,130 @@
1
+ // Zhihua Ban all rights reserved.
2
+ // If you have questions, please contact me at [email protected]
3
+
4
+ #include "AMatrix.hpp"
5
+ #include <cuda.h>
6
+ #include "common.h"
7
+
8
+ // dim3(GS)
9
+ // dim3(BS)
10
+ // /*number of layers*/
11
+ template<unsigned int GS, unsigned int BS, unsigned int N, class T>
12
+ __global__ void kernel_reduction(
13
+ T *odata, T *idata,
14
+ const unsigned int width,
15
+ const unsigned int height,
16
+ const unsigned int steps,
17
+ const unsigned int layer /*layer size = steps*height*/
18
+ ){
19
+ __shared__ T smem[BS];
20
+ const unsigned int tid = threadIdx.x;
21
+ const unsigned int ty = blockIdx.x;
22
+ unsigned int i = 0;
23
+ unsigned int y = 0;
24
+ unsigned int x = 0;
25
+ T sumv = 0;
26
+
27
+ // for each layer
28
+ for (; i < N; i++){
29
+ sumv = 0;
30
+ for (y = ty; y < height; y+=GS){
31
+ for (x = tid; x < width; x+=BS){
32
+ sumv += idata[y*steps + x];
33
+ }
34
+ }
35
+
36
+ smem[tid] = sumv;
37
+ __syncthreads();
38
+
39
+ if (BS >= 512){ if (tid < 256) smem[tid] = sumv = sumv + smem[tid + 256]; __syncthreads(); }
40
+ if (BS >= 256){ if (tid < 128) smem[tid] = sumv = sumv + smem[tid + 128]; __syncthreads(); }
41
+ if (BS >= 128){ if (tid < 64) smem[tid] = sumv = sumv + smem[tid + 64]; __syncthreads(); }
42
+ if (BS >= 64){ if (tid < 32) smem[tid] = sumv = sumv + smem[tid + 32]; __syncthreads(); }
43
+ if (BS >= 32){ if (tid < 16) smem[tid] = sumv = sumv + smem[tid + 16]; __syncthreads(); }
44
+ if (BS >= 16){ if (tid < 8) smem[tid] = sumv = sumv + smem[tid + 8]; __syncthreads(); }
45
+ if (BS >= 8){ if (tid < 4) smem[tid] = sumv = sumv + smem[tid + 4]; __syncthreads(); }
46
+ if (BS >= 4){ if (tid < 2) smem[tid] = sumv = sumv + smem[tid + 2]; __syncthreads(); }
47
+ if (BS >= 2){ if (tid < 1){ odata[i*GS + ty] = sumv + smem[1]; } }
48
+ idata += layer;
49
+ }
50
+ }
51
+ // dim3(N) /*number of layers*/
52
+ // dim3(BS)
53
+ template<unsigned int BS, unsigned int N, class T>
54
+ __global__ void kernel_reduction(T* odata, const T* idata){
55
+
56
+ __shared__ T smem[BS];
57
+ T sumv = 0;
58
+ unsigned int tid = threadIdx.x;
59
+ unsigned int i = blockIdx.x;
60
+ smem[tid] = sumv = idata[i*BS + tid];
61
+ __syncthreads();
62
+ if (BS >= 512){ if (tid < 256) smem[tid] = sumv = sumv + smem[tid + 256]; __syncthreads(); }
63
+ if (BS >= 256){ if (tid < 128) smem[tid] = sumv = sumv + smem[tid + 128]; __syncthreads(); }
64
+ if (BS >= 128){ if (tid < 64) smem[tid] = sumv = sumv + smem[tid + 64]; __syncthreads(); }
65
+ if (BS >= 64){ if (tid < 32) smem[tid] = sumv = sumv + smem[tid + 32]; __syncthreads(); }
66
+ if (BS >= 32){ if (tid < 16) smem[tid] = sumv = sumv + smem[tid + 16]; __syncthreads(); }
67
+ if (BS >= 16){ if (tid < 8) smem[tid] = sumv = sumv + smem[tid + 8]; __syncthreads(); }
68
+ if (BS >= 8){ if (tid < 4) smem[tid] = sumv = sumv + smem[tid + 4]; __syncthreads(); }
69
+ if (BS >= 4){ if (tid < 2) smem[tid] = sumv = sumv + smem[tid + 2]; __syncthreads(); }
70
+ if (BS >= 2){ if (tid < 1) { odata[i] = sumv + smem[1]; } }
71
+ }
72
+
73
+ // dim3(N) /*number of layers*/
74
+ // dim3(BS)
75
+ template<unsigned int BS, unsigned int N, class T>
76
+ __global__ void kernel_reduction_factor(T* odata, const T* idata, const float factor){
77
+
78
+ __shared__ T smem[BS];
79
+ T sumv = 0;
80
+ unsigned int tid = threadIdx.x;
81
+ unsigned int i = blockIdx.x;
82
+ smem[tid] = sumv = idata[i*BS + tid];
83
+ __syncthreads();
84
+ if (BS >= 512){ if (tid < 256) smem[tid] = sumv = sumv + smem[tid + 256]; __syncthreads(); }
85
+ if (BS >= 256){ if (tid < 128) smem[tid] = sumv = sumv + smem[tid + 128]; __syncthreads(); }
86
+ if (BS >= 128){ if (tid < 64) smem[tid] = sumv = sumv + smem[tid + 64]; __syncthreads(); }
87
+ if (BS >= 64){ if (tid < 32) smem[tid] = sumv = sumv + smem[tid + 32]; __syncthreads(); }
88
+ if (BS >= 32){ if (tid < 16) smem[tid] = sumv = sumv + smem[tid + 16]; __syncthreads(); }
89
+ if (BS >= 16){ if (tid < 8) smem[tid] = sumv = sumv + smem[tid + 8]; __syncthreads(); }
90
+ if (BS >= 8){ if (tid < 4) smem[tid] = sumv = sumv + smem[tid + 4]; __syncthreads(); }
91
+ if (BS >= 4){ if (tid < 2) smem[tid] = sumv = sumv + smem[tid + 2]; __syncthreads(); }
92
+ if (BS >= 2){ if (tid < 1) { odata[i] = (sumv + smem[1])*factor; } }
93
+ }
94
+
95
+ template<unsigned int GS, unsigned int BS, unsigned int N, class T> void
96
+ run_sum_(T *odata, T *idata, const unsigned int width, const unsigned int height, const unsigned int steps)
97
+ {
98
+ if (GS > N){
99
+ kernel_reduction<GS, BS, N, T> <<<GS, BS>>>(odata + GS, idata, width, height, steps, steps*height);
100
+ kernel_reduction<GS, N, T> <<<N, GS>>>(odata, odata + GS);
101
+ }
102
+ else{
103
+ cerr << "not support yet." << endl;
104
+ exit(EXIT_FAILURE);
105
+ }
106
+ }
107
+
108
+ template void run_sum_<REDUCTOR_GS__, REDUCTOR_BS__, NN_FEATURES__, float>
109
+ (float *odata, float *idata, const unsigned int width, const unsigned int height, const unsigned int steps);
110
+ template void run_sum_<REDUCTOR_GS__, REDUCTOR_BS__, NN_FEATURES__, int >
111
+ (int *odata, int *idata, const unsigned int width, const unsigned int height, const unsigned int steps);
112
+
113
+
114
+ template<unsigned int GS, unsigned int BS, unsigned int N, class T> void
115
+ run_mean_(T *odata, T *idata, const unsigned int width, const unsigned int height, const unsigned int steps)
116
+ {
117
+ if (GS > N){
118
+ kernel_reduction<GS, BS, N, T> << <GS, BS >> >(odata + GS, idata, width, height, steps, steps*height);
119
+ kernel_reduction_factor<GS, N, T> << <N, GS >> >(odata, odata + GS, 1.f/float(width*height));
120
+ }
121
+ else{
122
+ cerr << "not support yet." << endl;
123
+ exit(EXIT_FAILURE);
124
+ }
125
+ }
126
+
127
+ template void run_mean_<REDUCTOR_GS__, REDUCTOR_BS__, NN_FEATURES__, float>
128
+ (float *odata, float *idata, const unsigned int width, const unsigned int height, const unsigned int steps);
129
+ template void run_mean_<REDUCTOR_GS__, REDUCTOR_BS__, NN_FEATURES__, int >
130
+ (int *odata, int *idata, const unsigned int width, const unsigned int height, const unsigned int steps);
cuda_code/DepthmapDenoiseWeightedHuber.cu ADDED
@@ -0,0 +1,942 @@
1
+ /*M///////////////////////////////////////////////////////////////////////////////////////
2
+ //
3
+ // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4
+ //
5
+ // By downloading, copying, installing or using the software you agree to this license.
6
+ // If you do not agree to this license, do not download, install,
7
+ // copy or use the software.
8
+ //
9
+ //
10
+ // License Agreement
11
+ // For Open Source Computer Vision Library
12
+ //
13
+ // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14
+ // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15
+ // Third party copyrights are property of their respective owners.
16
+ //
17
+ // Redistribution and use in source and binary forms, with or without modification,
18
+ // are permitted provided that the following conditions are met:
19
+ //
20
+ // * Redistribution's of source code must retain the above copyright notice,
21
+ // this list of conditions and the following disclaimer.
22
+ //
23
+ // * Redistribution's in binary form must reproduce the above copyright notice,
24
+ // this list of conditions and the following disclaimer in the documentation
25
+ // and/or other materials provided with the distribution.
26
+ //
27
+ // * The name of the copyright holders may not be used to endorse or promote products
28
+ // derived from this software without specific prior written permission.
29
+ //
30
+ // This software is provided by the copyright holders and contributors "as is" and
31
+ // any express or implied warranties, including, but not limited to, the implied
32
+ // warranties of merchantability and fitness for a particular purpose are disclaimed.
33
+ // In no event shall the Intel Corporation or contributors be liable for any direct,
34
+ // indirect, incidental, special, exemplary, or consequential damages
35
+ // (including, but not limited to, procurement of substitute goods or services;
36
+ // loss of use, data, or profits; or business interruption) however caused
37
+ // and on any theory of liability, whether in contract, strict liability,
38
+ // or tort (including negligence or otherwise) arising in any way out of
39
+ // the use of this software, even if advised of the possibility of such damage.
40
+ //
41
+ //M*/
42
+
43
+ //! OpenDTAM Variant of Chambolle & Pock denoising
44
+ //!
45
+ //! The complicated half of the DTAM algorithm's mapping core,
46
+ //! but can be used independently to refine depthmaps.
47
+ //!
48
+ //! Written by Paul Foster for GSoC 2014 OpenDTAM project.
49
+ //! High level algorithm described by Richard Newcombe, Steven J. Lovegrove, and Andrew J. Davison.
50
+ //! "DTAM: Dense tracking and mapping in real-time."
51
+ //! Which was in turn based on Chambolle & Pock's
52
+ //! "A first-order primal-dual algorithm for convex problems with applications to imaging."
53
+
54
+ #include <opencv2/core/cuda/common.hpp>//for cudaSafeCall,CV_Assert
55
+
56
+ #include "DepthmapDenoiseWeightedHuber.cuh"
57
+
58
+ namespace cv { namespace cuda { namespace device {
59
+ namespace dtam_denoise{
60
+
61
+
62
+ static unsigned int arows;//TODO:make sure this is still reentrant
63
+
64
+ void loadConstants(uint h_rows, uint, uint , uint ,
65
+ float* , float* , float* , float* , float* ,
66
+ float*) {
67
+
68
+ arows=h_rows;
69
+ }
70
+
71
+ cudaStream_t localStream=0;
72
+
73
+ const int BLOCKX2D=32;
74
+ const int BLOCKY2D=32;
75
+ #define GENERATE_CUDA_FUNC2D(funcName,arglist,notypes) \
76
+ static __global__ void funcName arglist; \
77
+ void funcName##Caller arglist{ \
78
+ dim3 dimBlock(BLOCKX2D,BLOCKY2D); \
79
+ dim3 dimGrid((acols + dimBlock.x - 1) / dimBlock.x, \
80
+ (arows + dimBlock.y - 1) / dimBlock.y); \
81
+ funcName<<<dimGrid, dimBlock,0,localStream>>>notypes; \
82
+ cudaSafeCall( cudaGetLastError() );\
83
+ };static __global__ void funcName arglist
84
+
85
+
86
+ #define GENERATE_CUDA_FUNC2DROWS(funcName,arglist,notypes) \
87
+ static __global__ void funcName arglist; \
88
+ void funcName##Caller arglist{ \
89
+ dim3 dimBlock(BLOCKX2D,BLOCKY2D); \
90
+ dim3 dimGrid(1, \
91
+ (arows + dimBlock.y - 1) / dimBlock.y); \
92
+ funcName<<<dimGrid, dimBlock,0,localStream>>>notypes; \
93
+ cudaSafeCall( cudaGetLastError() );\
94
+ };static __global__ void funcName arglist
95
+
96
+
97
+ static __global__ void computeG1 (float* pp, float* g1p, float* gxp, float* gyp, int cols);
98
+ static __global__ void computeG2 (float* pp, float* g1p, float* gxp, float* gyp, int cols);
99
+ void computeGCaller (float* pp, float* g1p, float* gxp, float* gyp, int cols){
100
+ // dim3 dimBlock(BLOCKX2D,BLOCKY2D);
101
+ dim3 dimBlock(BLOCKX2D,4);
102
+ dim3 dimGrid(1,
103
+ (arows + dimBlock.y - 1) / dimBlock.y);
104
+
105
+ computeG1<<<dimGrid, dimBlock,0,localStream>>>(pp, g1p, gxp, gyp, cols);
106
+ cudaDeviceSynchronize();
107
+ computeG2<<<dimGrid, dimBlock,0,localStream>>>(pp, g1p, gxp, gyp, cols);
108
+ cudaDeviceSynchronize();
109
+
110
+ cudaSafeCall( cudaGetLastError() );
111
+ };
112
+
113
+ GENERATE_CUDA_FUNC2DROWS(computeG1,
114
+ (float* pp, float* g1p, float* gxp, float* gyp, int cols),
115
+ (pp, g1p, gxp, gyp, cols)) {
116
+ #if __CUDA_ARCH__>300
117
+ //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch
118
+
119
+ //Original pseudocode for this function:
120
+ // //subscripts u,d,l,r mean up,down,left,right
121
+ // void computeG(){
122
+ // // g0 is the strongest nearby gradient (excluding point defects)
123
+ // g0x=fabsf(pr-pl);//|dx|
124
+ // g0y=fabsf(pd-pu);//|dy|
125
+ // g0=max(g0x,g0y);
126
+ // // g1 is the scaled g0 through the g function exp(-alpha*x^beta)
127
+ // g1=sqrt(g0); //beta=0.5
128
+ // alpha=3.5;
129
+ // g1=exp(-alpha*g1);
130
+ // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change
131
+ // gx=max(g1r,g1);
132
+ // gy=max(g1d,g1);
133
+ // gu=gyu; //upper spring is the lower spring of the pixel above
134
+ // gd=gy; //lower spring
135
+ // gr=gx; //right spring
136
+ // gl=gxl; //left spring is the right spring of the pixel to the left
137
+ // }
138
+ const float alpha=3.5f;
139
+ int x = threadIdx.x;
140
+ int y = blockIdx.y * blockDim.y + threadIdx.y;
141
+ int upoff=-(y!=0)*cols;
142
+ int dnoff=(y<gridDim.y*blockDim.y-1)*cols;
143
+ //itr0
144
+ int pt=x+y*cols;
145
+ float ph,pn,pu,pd,pl,pr;
146
+ float g0x,g0y,g0,g1,gt,gsav;
147
+ float tmp;
148
+ ph=pp[pt];
149
+ pn=pp[pt+blockDim.x];
150
+
151
+ pr=__shfl_down(ph,2);
152
+ tmp=__shfl_up(pn,30);
153
+ if(threadIdx.x>=30){
154
+ pr=tmp;
155
+ }
156
+ pl=ph;
157
+ pu=pp[pt+upoff];
158
+ pd=pp[pt+dnoff];
159
+
160
+
161
+ // g0 is the strongest nearby gradient (excluding point defects)
162
+ gt=fabsf(pr-pl);
163
+ g0x=__shfl_up(gt,1);//?xxxxxx no prior val
164
+ gsav=__shfl_down(gt,31);//x000000 for next time
165
+ g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx
166
+ g0y=fabsf(pd-pu);
167
+
168
+ g0=fmaxf(g0x,g0y);
169
+ // g1 is the scaled g0 through the g function
170
+ g1=sqrt(g0);
171
+ g1=exp(-alpha*g1);
172
+ //save
173
+ g1p[pt]=g1;
174
+
175
+ x+=32;
176
+ //itr 1:n-2
177
+ for(;x<cols-32;x+=32){
178
+ pt=x+y*cols;
179
+ ph=pn;
180
+ pn=pp[pt+blockDim.x];
181
+ pr=__shfl_down(ph,2);
182
+ tmp=__shfl_up(pn,30);
183
+ pr=threadIdx.x>=30?tmp:pr;
184
+
185
+ pl=ph;
186
+ pu=pp[pt+upoff];
187
+ pd=pp[pt+dnoff];
188
+
189
+ // g0 is the strongest nearby gradient (excluding point defects)
190
+ gt=fabsf(pr-pl);
191
+ g0x=__shfl_up(gt,1);//?xxxxxx
192
+ g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx
193
+ gsav=__shfl_down(gt,31);//x000000 for next time
194
+ g0y=fabsf(pd-pu);
195
+
196
+ g0=fmaxf(g0x,g0y);
197
+
198
+ // g1 is the scaled g0 through the g function
199
+ g1=sqrt(g0);
200
+ g1=exp(-alpha*g1);
201
+ //save
202
+ g1p[pt]=g1;
203
+ }
204
+
205
+ //itr n-1
206
+ pt=x+y*cols;
207
+ ph=pn;
208
+ pr=__shfl_down(ph,2);
209
+ pl=ph;
210
+ pu=pp[pt+upoff];
211
+ pd=pp[pt+dnoff];
212
+
213
+ // g0 is the strongest nearby gradient (excluding point defects)
214
+ gt=fabsf(pr-pl);
215
+ g0x=__shfl_up(gt,1);//?xxxxxx
216
+ g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx
217
+ g0y=fabsf(pd-pu);
218
+
219
+ g0=fmaxf(g0x,g0y);
220
+ // g1 is the scaled g0 through the g function
221
+ g1=sqrt(g0);
222
+ g1=exp(-alpha*g1);
223
+ //save
224
+ g1p[pt]=g1;
225
+ #endif
226
+ }
227
+ GENERATE_CUDA_FUNC2DROWS(computeG2,
228
+ (float* pp, float* g1p, float* gxp, float* gyp, int cols),
229
+ (pp, g1p, gxp, gyp, cols)) {
230
+ #if __CUDA_ARCH__>300
231
+ int x = threadIdx.x;
232
+ int y = blockIdx.y * blockDim.y + threadIdx.y;
233
+ int dnoff=(y<gridDim.y*blockDim.y-1)*cols;
234
+ //itr0
235
+ int pt=x+y*cols;
236
+ float g1h,g1n,g1u,g1d,g1r,g1l,gx,gy;
237
+ float tmp;
238
+ //part2, find gx,gy
239
+ x = threadIdx.x;
240
+ y = blockIdx.y * blockDim.y + threadIdx.y;
241
+ //itr0
242
+ pt=x+y*cols;
243
+
244
+ g1h=g1p[pt];
245
+ g1n=g1p[pt+blockDim.x];
246
+ g1r=__shfl_down(g1h,1);
247
+ tmp=__shfl_up(g1n,31);
248
+ if(threadIdx.x>=31){
249
+ g1r=tmp;
250
+ }
251
+ g1l=g1h;
252
+ g1u=g1h;
253
+ g1d=g1p[pt+dnoff];
254
+
255
+ gx=fmaxf(g1l,g1r);
256
+ gy=fmaxf(g1u,g1d);
257
+
258
+ //save
259
+ gxp[pt]=gx;
260
+ gyp[pt]=gy;
261
+ x+=32;
262
+ //itr 1:n-2
263
+ for(;x<cols-32;x+=32){
264
+ pt=x+y*cols;
265
+ g1h=g1n;
266
+ g1n=g1p[pt+blockDim.x];
267
+ g1r=__shfl_down(g1h,1);
268
+ tmp=__shfl_up(g1n,31);
269
+ g1r=threadIdx.x>=31?tmp:g1r;
270
+
271
+ g1l=g1h;
272
+ g1u=g1h;
273
+ g1d=g1p[pt+dnoff];
274
+
275
+ gx=fmaxf(g1l,g1r);
276
+ gy=fmaxf(g1u,g1d);
277
+ //save
278
+ gxp[pt]=gx;
279
+ gyp[pt]=gy;
280
+ }
281
+
282
+ //itr n-1
283
+ pt=x+y*cols;
284
+ g1h=g1n;
285
+ g1r=__shfl_down(g1h,1);
286
+ g1l=g1h;
287
+ g1u=g1h;
288
+ g1d=g1p[pt+dnoff];
289
+
290
+ gx=fmaxf(g1l,g1r);
291
+ gy=fmaxf(g1u,g1d);
292
+
293
+
294
+ //save
295
+ gxp[pt]=gx;
296
+ gyp[pt]=gy;
297
+ #endif
298
+ }
299
+
300
+
301
+ //This version is faster, but makes synchronization errors at the lines between parts 1 and 2.
302
+ //Could be fixed by a second pass for part 2 over the stitch lines, but I don't have time to figure that out
303
+ //right now.
304
+ GENERATE_CUDA_FUNC2DROWS(computeGunsafe,
305
+ (float* pp, float* g1p, float* gxp, float* gyp, int cols),
306
+ (pp, g1p, gxp, gyp, cols)) {
307
+ #if __CUDA_ARCH__>300
308
+ //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch
309
+ //TODO: rerun kernel on lines with y%32==31 or y%32==0 to fix stitch lines
310
+
311
+ //Original pseudocode for this function:
312
+ // //subscripts u,d,l,r mean up,down,left,right
313
+ // void computeG(){
314
+ // // g0 is the strongest nearby gradient (excluding point defects)
315
+ // g0x=fabsf(pr-pl);//|dx|
316
+ // g0y=fabsf(pd-pu);//|dy|
317
+ // g0=max(g0x,g0y);
318
+ // // g1 is the scaled g0 through the g function exp(-alpha*x^beta)
319
+ // g1=sqrt(g0); //beta=0.5
320
+ // alpha=3.5;
321
+ // g1=exp(-alpha*g1);
322
+ // //hard to explain this without a picture, but breaks are where both neighboring pixels are near a change
323
+ // gx=max(g1r,g1);
324
+ // gy=max(g1d,g1);
325
+ // gu=gyu; //upper spring is the lower spring of the pixel above
326
+ // gd=gy; //lower spring
327
+ // gr=gx; //right spring
328
+ // gl=gxl; //left spring is the right spring of the pixel to the left
329
+ // }
330
+ const float alpha=3.5f;
331
+ int x = threadIdx.x;
332
+ int y = blockIdx.y * blockDim.y + threadIdx.y;
333
+ int upoff=-(y!=0)*cols;
334
+ int dnoff=(y<gridDim.y*blockDim.y-1)*cols;
335
+ //itr0
336
+ int pt=x+y*cols;
337
+ float ph,pn,pu,pd,pl,pr;
338
+ float g0x,g0y,g0,g1,g1h,g1n,g1u,g1d,g1r,g1l,gx,gy,gt,gsav;
339
+ float tmp;
340
+ ph=pp[pt];
341
+ pn=pp[pt+blockDim.x];
342
+
343
+ pr=__shfl_down(ph,2);
344
+ tmp=__shfl_up(pn,30);
345
+ if(threadIdx.x>=30){
346
+ pr=tmp;
347
+ }
348
+ pl=ph;
349
+ pu=pp[pt+upoff];
350
+ pd=pp[pt+dnoff];
351
+
352
+
353
+ // g0 is the strongest nearby gradient (excluding point defects)
354
+ gt=fabsf(pr-pl);
355
+ g0x=__shfl_up(gt,1);//?xxxxxx no prior val
356
+ gsav=__shfl_down(gt,31);//x000000 for next time
357
+ g0x=threadIdx.x>0?g0x:0.0f;//0xxxxxx
358
+ g0y=fabsf(pd-pu);
359
+
360
+ g0=fmaxf(g0x,g0y);
361
+ // g1 is the scaled g0 through the g function
362
+ g1=sqrt(g0);
363
+ g1=exp(-alpha*g1);
364
+ //save
365
+ g1p[pt]=g1;
366
+
367
+ x+=32;
368
+ //itr 1:n-2
369
+ for(;x<cols-32;x+=32){
370
+ pt=x+y*cols;
371
+ ph=pn;
372
+ pn=pp[pt+blockDim.x];
373
+ pr=__shfl_down(ph,2);
374
+ tmp=__shfl_up(pn,30);
375
+ pr=threadIdx.x>=30?tmp:pr;
376
+
377
+ pl=ph;
378
+ pu=pp[pt+upoff];
379
+ pd=pp[pt+dnoff];
380
+
381
+ // g0 is the strongest nearby gradient (excluding point defects)
382
+ gt=fabsf(pr-pl);
383
+ g0x=__shfl_up(gt,1);//?xxxxxx
384
+ g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx
385
+ gsav=__shfl_down(gt,31);//x000000 for next time
386
+ g0y=fabsf(pd-pu);
387
+
388
+ g0=fmaxf(g0x,g0y);
389
+
390
+ // g1 is the scaled g0 through the g function
391
+ g1=sqrt(g0);
392
+ g1=exp(-alpha*g1);
393
+ //save
394
+ g1p[pt]=g1;
395
+ }
396
+
397
+ //itr n-1
398
+ pt=x+y*cols;
399
+ ph=pn;
400
+ pr=__shfl_down(ph,2);
401
+ pl=ph;
402
+ pu=pp[pt+upoff];
403
+ pd=pp[pt+dnoff];
404
+
405
+ // g0 is the strongest nearby gradient (excluding point defects)
406
+ gt=fabsf(pr-pl);
407
+ g0x=__shfl_up(gt,1);//?xxxxxx
408
+ g0x=threadIdx.x>0?g0x:gsav;//xxxxxxx
409
+ g0y=fabsf(pd-pu);
410
+
411
+ g0=fmaxf(g0x,g0y);
412
+ // g1 is the scaled g0 through the g function
413
+ g1=sqrt(g0);
414
+ g1=exp(-alpha*g1);
415
+ //save
416
+ g1p[pt]=g1;
417
+
418
+ //part2, find gx,gy
419
+ x = threadIdx.x;
420
+ y = blockIdx.y * blockDim.y + threadIdx.y;
421
+ //itr0
422
+ pt=x+y*cols;
423
+
424
+ g1h=g1p[pt];
425
+ g1n=g1p[pt+blockDim.x];
426
+ g1r=__shfl_down(g1h,1);
427
+ tmp=__shfl_up(g1n,31);
428
+ if(threadIdx.x>=31){
429
+ g1r=tmp;
430
+ }
431
+ g1l=g1h;
432
+ g1u=g1h;
433
+ g1d=g1p[pt+dnoff];
434
+
435
+ gx=fmaxf(g1l,g1r);
436
+ gy=fmaxf(g1u,g1d);
437
+
438
+ //save
439
+ gxp[pt]=gx;
440
+ gyp[pt]=gy;
441
+ x+=32;
442
+ //itr 1:n-2
443
+ for(;x<cols-32;x+=32){
444
+ pt=x+y*cols;
445
+ g1h=g1n;
446
+ g1n=g1p[pt+blockDim.x];
447
+ g1r=__shfl_down(g1h,1);
448
+ tmp=__shfl_up(g1n,31);
449
+ g1r=threadIdx.x>=31?tmp:g1r;
450
+
451
+ g1l=g1h;
452
+ g1u=g1h;
453
+ g1d=g1p[pt+dnoff];
454
+
455
+ gx=fmaxf(g1l,g1r);
456
+ gy=fmaxf(g1u,g1d);
457
+ //save
458
+ gxp[pt]=gx;
459
+ gyp[pt]=gy;
460
+ }
461
+
462
+ //itr n-1
463
+ pt=x+y*cols;
464
+ g1h=g1n;
465
+ g1r=__shfl_down(g1h,1);
466
+ g1l=g1h;
467
+ g1u=g1h;
468
+ g1d=g1p[pt+dnoff];
469
+
470
+ gx=fmaxf(g1l,g1r);
471
+ gy=fmaxf(g1u,g1d);
472
+
473
+
474
+ //save
475
+ gxp[pt]=gx;
476
+ gyp[pt]=gy;
477
+ #endif
478
+
479
+ }
480
+ __device__ inline float saturate(float x){
481
+ //return x;
482
+ return x/fmaxf(1.0f,fabsf(x));
483
+ }
484
+ // static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt,
485
+ // float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon,
486
+ // float theta);//DANGER, no interblock synchronization = weird instability
487
+ static __global__ void updateQ (float* gqxpt, float* gqypt, float *dpt, float * apt,
488
+ float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon,
489
+ float theta);
490
+ static __global__ void updateD (float* gqxpt, float* gqypt, float *dpt, float * apt,
491
+ float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon,
492
+ float theta);
493
+
494
+ void updateQDCaller(float* gqxpt, float* gqypt, float *dpt, float * apt,
495
+ float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon,
496
+ float theta) {
497
+
498
+ dim3 dimBlock(BLOCKX2D, BLOCKY2D);
499
+ dim3 dimGrid(1, (arows + dimBlock.y - 1) / dimBlock.y);
500
+ CV_Assert(dimGrid.y>0);
501
+ cudaSafeCall( cudaGetLastError() );
502
+ updateQ<<<dimGrid, dimBlock,0,localStream>>>( gqxpt, gqypt, dpt, apt,
503
+ gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta);
504
+ cudaSafeCall( cudaGetLastError() );
505
+ updateD<<<dimGrid, dimBlock,0,localStream>>>( gqxpt, gqypt, dpt, apt,
506
+ gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta);
507
+ cudaSafeCall( cudaGetLastError() );
508
+ };
509
+
510
+ // static __global__ void updateQD (float* gqxpt, float* gqypt, float *dpt, float * apt,
511
+ // float *gxpt, float *gypt, float sigma_q, float sigma_d, float epsilon,
512
+ // float theta) {
513
+ // //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch
514
+ //
515
+ // //Original pseudocode for this function:
516
+ // //void updateQD(){
517
+ // // //shifts are shuffles!
518
+ // // for (all x in blocks of warpsize;;){
519
+ // // //qx update
520
+ // // float dh,dn,qxh,gx,gqx,qyh,gy,gqy;
521
+ // // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply)
522
+ // // dr=dh<<1;
523
+ // // tmp=dn>>31;
524
+ // // if (rt)
525
+ // // dr=tmp;
526
+ // // qxh=gqx/gxh;
527
+ // // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0)
528
+ // // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge.
529
+ // // gqxpt[pt]=gqx;
530
+ // //
531
+ // // //qy update
532
+ // // s[bpt]=dn;
533
+ // // if(!btm){
534
+ // // dd=s[bpt+bdnoff];
535
+ // // }else{
536
+ // // dd=dpt[pt+dnoff];
537
+ // // }
538
+ // // qyh=gqy/gy;
539
+ // // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon);
540
+ // // gqy=saturate(gyh*qyh);
541
+ // // gqypt[pt]=gqy;
542
+ // //
543
+ // // //dx update
544
+ // // gqr=gqx;
545
+ // // gql=gqx>>1;
546
+ // // if (lf)
547
+ // // gql=gqsave;
548
+ // // gqsave=gqx<<31;//save for next iter
549
+ // // dacc = gqr - gql;//dx part
550
+ // //
551
+ // // //dy update and d store
552
+ // // gqd=gqy;
553
+ // // s[bpt]=gqy;
554
+ // // if(!top)
555
+ // // gqu=s[bpt+bupoff];
556
+ // // else
557
+ // // gqu=gqxpt[pt + upoff];
558
+ // // dacc += gqd-gqu; //dy part
559
+ // // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta);
560
+ // // dpt[pt]=d;
561
+ // // }
562
+ // //}
563
+ // __shared__ float s[32*BLOCKY2D];
564
+ // int x = threadIdx.x;
565
+ // int y = blockIdx.y * blockDim.y + threadIdx.y;
566
+ // bool rt=x==31;
567
+ // bool lf=x==0;
568
+ // bool top=y==0;
569
+ // bool btm=y==rows-1;
570
+ // bool btop=threadIdx.y==0;
571
+ // bool bbtm=threadIdx.y==blockDim.y-1;
572
+ // int pt, bpt,bdnoff ,dnoff, bupoff, upoff;
573
+ //
574
+ //
575
+ // float tmp,gqsave;
576
+ // gqsave=0;
577
+ // bpt = threadIdx.x+threadIdx.y*blockDim.x;
578
+ // bdnoff=blockDim.x;
579
+ // dnoff=(!btm)*cols;
580
+ // bupoff=-blockDim.x;
581
+ // upoff=-(!top)*cols;
582
+ //
583
+ // pt=x+y*cols;
584
+ //
585
+ // float dh,dn;
586
+ // dn=dpt[pt];
587
+ //
588
+ // for(;x<cols;x+=32){
589
+ // float qx,gx,gqx,qy,gy,gqy;
590
+ // pt=x+y*cols;
591
+ //
592
+ //
593
+ // //qx update
594
+ // {
595
+ // float dr;
596
+ // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply)
597
+ //
598
+ // //load
599
+ // {
600
+ // dh=dn;
601
+ // if(x<cols-32){
602
+ // dn=dpt[pt+32];
603
+ //
604
+ // }
605
+ // gqx=gqxpt[pt];
606
+ // gx=gxpt[pt];
607
+ // // gx=1.0f;
608
+ //
609
+ // }
610
+ //
611
+ // dr=__shfl_down(dh,1);
612
+ // tmp=__shfl_up(dn,31);
613
+ // if (rt && x<cols-32)
614
+ // dr=tmp;
615
+ // qx = gqx/gx;
616
+ // qx = (qx+sigma_q*gx*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0)
617
+ // gqx = saturate(gx*qx);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge.
618
+ // //gqxpt[pt]=gqx;
619
+ // }
620
+ // dpt[pt] = dh;
621
+ // //qy update
622
+ // {
623
+ // float dd;
624
+ // //load
625
+ // {
626
+ // gqy=gqypt[pt];
627
+ // gy=gypt[pt];
628
+ // // gy=1.0f;
629
+ // }
630
+ // s[bpt]=dh;
631
+ // __syncthreads();
632
+ // if(!bbtm){
633
+ // dd=s[bpt+bdnoff];
634
+ // }else{
635
+ // dd=dpt[pt+dnoff];
636
+ // }
637
+ // qy = gqy/gy;
638
+ // qy = (qy+sigma_q*gy*(dd-dh))/(1+sigma_q*epsilon);
639
+ // gqy = saturate(gy*qy);
640
+ // //gqypt[pt]=gqy;
641
+ // }
642
+ // float dacc;
643
+ // //dx update
644
+ // {
645
+ // float gqr,gql;
646
+ // gqr=gqx;
647
+ // gql=__shfl_up(gqx,1);
648
+ // if (lf)
649
+ // gql=gqsave;
650
+ // gqsave=__shfl_down(gqx,31);//save for next iter
651
+ // dacc = gqr - gql;//dx part
652
+ // }
653
+ // float d=dh;
654
+ // //dy update and d store
655
+ // {
656
+ // float a;
657
+ // //load
658
+ // {
659
+ // a=apt[pt];
660
+ // }
661
+ // float gqu,gqd;
662
+ //
663
+ // gqd=gqy;
664
+ // s[bpt]=gqy;
665
+ // __syncthreads();
666
+ // if(!btop)
667
+ // gqu=s[bpt+bupoff];
668
+ // else
669
+ // gqu=gqypt[pt + upoff];
670
+ // if(y==0)
671
+ // gqu=0.0f;
672
+ // dacc += gqd-gqu; //dy part
673
+ // d = ( d + sigma_d*(dacc + a/theta) ) / (1 + sigma_d/theta);
674
+ // //dpt[pt] = d;
675
+ // }
676
+ // __syncthreads();
677
+ // gqxpt[pt]=gqx;
678
+ // gqypt[pt]=gqy;
679
+ // dpt[pt] = d;
680
+ // __syncthreads();
681
+ // }
682
+ // }
683
+
684
+
685
+ GENERATE_CUDA_FUNC2DROWS(updateQ,
686
+ (float* gqxpt, float* gqypt, float *dpt, float * apt,
687
+ float *gxpt, float *gypt, int cols, float sigma_q, float sigma_d, float epsilon,
688
+ float theta),
689
+ ( gqxpt, gqypt, dpt, apt,
690
+ gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta)) {
691
+ //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch
692
+
693
+ //Original pseudocode for this function:
694
+ //void updateQD(){
695
+ // //shifts are shuffles!
696
+ // for (all x in blocks of warpsize;;){
697
+ // //qx update
698
+ // float dh,dn,qxh,gx,gqx,qyh,gy,gqy;
699
+ // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply)
700
+ // dr=dh<<1;
701
+ // tmp=dn>>31;
702
+ // if (rt)
703
+ // dr=tmp;
704
+ // qxh=gqx/gxh;
705
+ // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0)
706
+ // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge.
707
+ // gqxpt[pt]=gqx;
708
+ //
709
+ // //qy update
710
+ // s[bpt]=dn;
711
+ // if(!btm){
712
+ // dd=s[bpt+bdnoff];
713
+ // }else{
714
+ // dd=dpt[pt+dnoff];
715
+ // }
716
+ // qyh=gqy/gy;
717
+ // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon);
718
+ // gqy=saturate(gyh*qyh);
719
+ // gqypt[pt]=gqy;
720
+ //
721
+ // //dx update
722
+ // gqr=gqx;
723
+ // gql=gqx>>1;
724
+ // if (lf)
725
+ // gql=gqsave;
726
+ // gqsave=gqx<<31;//save for next iter
727
+ // dacc = gqr - gql;//dx part
728
+ //
729
+ // //dy update and d store
730
+ // gqd=gqy;
731
+ // s[bpt]=gqy;
732
+ // if(!top)
733
+ // gqu=s[bpt+bupoff];
734
+ // else
735
+ // gqu=gqxpt[pt + upoff];
736
+ // dacc += gqd-gqu; //dy part
737
+ // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta);
738
+ // dpt[pt]=d;
739
+ // }
740
+ //}
741
+ #if __CUDA_ARCH__>300
742
+ __shared__ float s[32*BLOCKY2D];
743
+ int x = threadIdx.x;
744
+ int y = blockIdx.y * blockDim.y + threadIdx.y;
745
+ bool rt=x==31;
746
+
747
+ bool bbtm=threadIdx.y==blockDim.y-1;
748
+ int pt, bpt,bdnoff ,dnoff;
749
+
750
+ float tmp;
751
+ bpt = threadIdx.x+threadIdx.y*blockDim.x;
752
+ bdnoff=blockDim.x;
753
+ dnoff=(y<gridDim.y*blockDim.y-1)*cols;
754
+
755
+ pt=x+y*cols;
756
+
757
+ float dh,dn;
758
+ dn=dpt[pt];
759
+
760
+
761
+ for(;x<cols;x+=32){
762
+ float qx,gx,gqx,qy,gy,gqy;
763
+ pt=x+y*cols;
764
+
765
+
766
+ //qx update
767
+ {
768
+ float dr;
769
+ //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply)
770
+
771
+ //load
772
+ {
773
+ dh=dn;
774
+ if(x<cols-32){
775
+ dn=dpt[pt+32];
776
+
777
+ }
778
+ gqx=gqxpt[pt];
779
+ gx=gxpt[pt]+.01f;
780
+ // gx=1.0f;
781
+ }
782
+
783
+ dr=__shfl_down(dh,1);
784
+ tmp=__shfl_up(dn,31);
785
+ if (rt && x<cols-32)
786
+ dr=tmp;
787
+ qx = gqx/gx;
788
+ //qx+=(gx*(dr-dh)-epsilon*qx)*.5f;//simplified step
789
+ qx = (qx+sigma_q*gx*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0)
790
+ gqx = saturate(gx*qx);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge.
791
+ gqxpt[pt]=gqx;
792
+ }
793
+
794
+ //qy update
795
+ {
796
+ float dd;
797
+ //load
798
+ {
799
+ gqy=gqypt[pt];
800
+ gy=gypt[pt]+.01f;
801
+ // gy=1.0f;
802
+ }
803
+ s[bpt]=dh;
804
+ __syncthreads();
805
+ if(!bbtm)
806
+ dd=s[bpt+bdnoff];
807
+ else
808
+ dd=dpt[pt+dnoff];
809
+ __syncthreads();
810
+ qy = gqy/gy;
811
+ //qy+=(gy*(dd-dh)-epsilon*qy)*.5f;//simplified step
812
+ qy = (qy+sigma_q*gy*(dd-dh))/(1+sigma_q*epsilon);
813
+ gqy = saturate(gy*qy);
814
+
815
+ gqypt[pt]=gqy;
816
+ }
817
+ //__syncthreads();
818
+ }
819
+ #endif
820
+ }
821
+
822
+ GENERATE_CUDA_FUNC2DROWS(updateD,
823
+ (float* gqxpt, float* gqypt, float *dpt, float * apt,
824
+ float *gxpt, float *gypt,int cols, float sigma_q, float sigma_d, float epsilon,
825
+ float theta),
826
+ ( gqxpt, gqypt, dpt, apt,
827
+ gxpt, gypt, cols, sigma_q, sigma_d, epsilon, theta)) {
828
+ #if __CUDA_ARCH__>300
829
+ //TODO: make compatible with cuda 2.0 and lower (remove shuffles). Probably through texture fetch
830
+
831
+ //Original pseudocode for this function:
832
+ //void updateQD(){
833
+ // //shifts are shuffles!
834
+ // for (all x in blocks of warpsize){
835
+ // //qx update
836
+ // float dh,dn,qxh,gx,gqx,qyh,gy,gqy;
837
+ // //load(dh,dn,gxh,gqx);//load here, next(the block to the right), local constant, old x force(with cached multiply)
838
+ // dr=dh<<1;
839
+ // tmp=dn>>31;
840
+ // if (rt)
841
+ // dr=tmp;
842
+ // qxh=gqx/gxh;
843
+ // qxh = (qxh+sigma_q*gxh*(dr-dh))/(1+sigma_q*epsilon);//basic spring force equation f=k(x-x0)
844
+ // gqx = saturate(gxh*qxh);//spring saturates (with cached multiply), saturation force proportional to prob. of not an edge.
845
+ // gqxpt[pt]=gqx;
846
+ //
847
+ // //qy update
848
+ // s[bpt]=dn;
849
+ // if(!btm){
850
+ // dd=s[bpt+bdnoff];
851
+ // }else{
852
+ // dd=dpt[pt+dnoff];
853
+ // }
854
+ // qyh=gqy/gy;
855
+ // qyh=(qyh+sigma_q*gyh*(dd-dh))/(1+sigma_q*epsilon);
856
+ // gqy=saturate(gyh*qyh);
857
+ // gqypt[pt]=gqy;
858
+ //
859
+ // //dx update
860
+ // gqr=gqx;
861
+ // gql=gqx>>1;
862
+ // if (lf)
863
+ // gql=gqsave;
864
+ // gqsave=gqx<<31;//save for next iter
865
+ // dacc = gqr - gql;//dx part
866
+ //
867
+ // //dy update and d store
868
+ // gqd=gqy;
869
+ // s[bpt]=gqy;
870
+ // if(!top)
871
+ // gqu=s[bpt+bupoff];
872
+ // else
873
+ // gqu=gqxpt[pt + upoff];
874
+ // dacc += gqd-gqu; //dy part
875
+ // d = (d + sigma_d*(dacc+1/theta*ah))/(1+sigma_d/theta);
876
+ // dpt[pt]=d;
877
+ // }
878
+ //}
879
+ __shared__ float s[32*BLOCKY2D];
880
+ int x = threadIdx.x;
881
+ int y = blockIdx.y * blockDim.y + threadIdx.y;
882
+ bool lf=x==0;
883
+ bool top=y==0;
884
+ bool btop=threadIdx.y==0;
885
+ int pt, bpt, bupoff, upoff;
886
+
887
+
888
+ float gqsave=0;
889
+ bpt = threadIdx.x+threadIdx.y*blockDim.x;
890
+
891
+ bupoff=-blockDim.x;
892
+ upoff=-(!top)*cols;
893
+
894
+ pt=x+y*cols;
895
+
896
+ for(;x<cols;x+=32){
897
+ float gqx,gqy;
898
+ pt=x+y*cols;
899
+
900
+
901
+ float dacc;
902
+ //dx update
903
+ {
904
+ float gqr,gql;
905
+ gqr=gqx=gqxpt[pt];
906
+ gql=__shfl_up(gqx,1);
907
+ if (lf)
908
+ gql=gqsave;
909
+ gqsave=__shfl_down(gqx,31);//save for next iter
910
+ dacc = gqr - gql;//dx part
911
+ }
912
+ //dy update and d store
913
+ {
914
+ float a;
915
+ //load
916
+ {
917
+ a=apt[pt];
918
+ }
919
+ float gqu,gqd;
920
+ float d=dpt[pt];
921
+ gqd=gqy=gqypt[pt];
922
+ s[bpt]=gqy;
923
+ __syncthreads();
924
+ if(!btop)
925
+ gqu=s[bpt+bupoff];
926
+ else
927
+ gqu=gqypt[pt + upoff];
928
+ if(y==0)
929
+ gqu=0.0f;
930
+ dacc += gqd-gqu; //dy part
931
+ //d += dacc*.5f;//simplified step
932
+ d = ( d + sigma_d*(dacc + a/theta) ) / (1 + sigma_d/theta);
933
+
934
+ dpt[pt] = d;
935
+ }
936
+ __syncthreads();//can't figure out why this is needed, but it is to avoid subtle errors in Qy at the ends of the warp
937
+ }
938
+ #endif
939
+ }
940
+
941
+
942
+ }}}}
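
Editor's note: read as a whole, computeG/updateQ/updateD above implement the following per-pixel updates (a hedged sketch assembled from the kernels and their pseudocode comments, not text from the upload); here g is the edge weight, q the dual variable, d the depth, a the auxiliary input, and sat(x) = x / max(1, |x|) is the saturate() helper:

    \begin{aligned}
    g &= \exp\!\bigl(-\alpha\,\lvert\nabla p\rvert^{\beta}\bigr), \qquad \alpha = 3.5,\ \beta = 0.5 && \text{(computeG)} \\
    q &\leftarrow \frac{q + \sigma_q\, g\, \nabla d}{1 + \sigma_q\,\varepsilon}, \qquad gq \leftarrow \operatorname{sat}(g\,q) && \text{(updateQ)} \\
    d &\leftarrow \frac{d + \sigma_d\,\bigl(\nabla\!\cdot\!(gq) + a/\theta\bigr)}{1 + \sigma_d/\theta} && \text{(updateD, with } \nabla\!\cdot\!(gq) \text{ accumulated in dacc)}
    \end{aligned}
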
cuda_code/DepthwiseGEMM_64.cu ADDED
@@ -0,0 +1,38 @@
1
+ /********************************************************
2
+ * Author: Zhao Mingxin
3
+ * Date: 2018/12/11
4
+ * Description: CUDA kernel for DepthwiseGEMM. GPUs handle 32-bit
5
+ * arithmetic efficiently but degrade once the bit-width exceeds 32,
6
+ * so the current DepthwiseGEMM version cannot produce correct
7
+ * outputs when the fixed-point representation is wider than 32 bits.
8
+ *
9
+ * NOTE: If you run into any issues with this code, please send
10
+ * feedback.
11
+ * Homepage: https://jackgittes.github.io
12
+ *********************************************************/
13
+ __global__ void DepthwiseGEMM_64(const long long *A,const long long *B,
14
+ const int Aheight,const int Awidth,const int Bwidth,
15
+ const long long up_bound,const long long low_bound,long long *C)
16
+ {
17
+ long long Cvalue = 0;
18
+ long long prod_tmp;
19
+ int Bheight = Awidth;
20
+
21
+ int chn = blockIdx.z;
22
+ int row = blockIdx.x * blockDim.x + threadIdx.x;
23
+ int col = blockIdx.y * blockDim.y + threadIdx.y;
24
+
25
+ for (int e = 0; e < Awidth; ++e){
26
+ prod_tmp = A[chn * Aheight * Awidth + Aheight * e + row]*B[chn*Bheight*Bwidth + col * Bheight+ e];
27
+ if(prod_tmp>up_bound)
28
+ prod_tmp = up_bound;
29
+ if(prod_tmp<low_bound)
30
+ prod_tmp = low_bound;
31
+ Cvalue += prod_tmp;
32
+ if(Cvalue>up_bound)
33
+ Cvalue=up_bound;
34
+ if(Cvalue<low_bound)
35
+ Cvalue=low_bound;
36
+ }
37
+ C[chn*Aheight*Bwidth + Aheight*col + row] = Cvalue;
38
+ }
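
Editor's note: the kernel above maps blockIdx.z to the channel, x to rows of A, and y to columns of B, with no bounds checks, so a launch has to tile C exactly. A minimal host-side launch sketch, assuming device buffers dA/dB/dC are already allocated and the sizes divide the block dimensions (the function name and the 16x16 block are illustrative assumptions, not part of this file):

    #include <cuda_runtime.h>

    // Hypothetical launcher for DepthwiseGEMM_64: one thread per output element,
    // one grid z-slice per channel. Aheight and Bwidth must be multiples of the
    // block dimensions because the kernel performs no bounds checking.
    void launchDepthwiseGEMM(const long long* dA, const long long* dB, long long* dC,
                             int Aheight, int Awidth, int Bwidth, int channels,
                             long long up_bound, long long low_bound)
    {
        dim3 block(16, 16);                                        // (row, col) threads
        dim3 grid(Aheight / block.x, Bwidth / block.y, channels);  // z = channel
        DepthwiseGEMM_64<<<grid, block>>>(dA, dB, Aheight, Awidth, Bwidth,
                                          up_bound, low_bound, dC);
        cudaDeviceSynchronize();                                   // wait for the result
    }
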
cuda_code/DeviceMemory_2.cu ADDED
@@ -0,0 +1,650 @@
1
+ #include <cassert>
2
+ #include <math.h>
3
+ #include <stdlib.h>
4
+ #include <stdio.h>
5
+ #include <cuda.h>
6
+ #include <cuda_runtime_api.h>
7
+ #include "cudacommon.h"
8
+ #include "OptionParser.h"
9
+ #include "ResultDatabase.h"
10
+ #include "Timer.h"
11
+ #include "support.h"
12
+
13
+ // Forward declarations for texture memory test and benchmark kernels
14
+ void TestTextureMem(ResultDatabase &resultDB, OptionParser &op, double scalet);
15
+ __global__ void
16
+ readGlobalMemoryCoalesced(float *data, float *output, int size, int repeat);
17
+ __global__ void readGlobalMemoryUnit(float *data, float *output, int size, int repeat);
18
+ __global__ void readLocalMemory(const float *data, float *output, int size, int repeat);
19
+ __global__ void writeGlobalMemoryCoalesced(float *output, int size, int repeat);
20
+ __global__ void writeGlobalMemoryUnit(float *output, int size, int repeat);
21
+ __global__ void writeLocalMemory(float *output, int size, int repeat);
22
+ __device__ int getRand(int seed, int mod);
23
+ __global__ void readTexels(int n, float *d_out, int width);
24
+ __global__ void readTexelsInCache(int n, float *d_out);
25
+ __global__ void readTexelsRandom(int n, float *d_out, int width, int height);
26
+ // Texture to use for the benchmarks
27
+ texture<float4, 2, cudaReadModeElementType> texA;
28
+
29
+ // ****************************************************************************
30
+ // Function: addBenchmarkSpecOptions
31
+ //
32
+ // Purpose:
33
+ // Add benchmark specific options parsing. Note that device memory has no
34
+ // benchmark specific options, so this is just a stub.
35
+ //
36
+ // Arguments:
37
+ // op: the options parser / parameter database
38
+ //
39
+ // Returns: nothing
40
+ //
41
+ // Programmer: Kyle Spafford
42
+ // Creation: December 11, 2009
43
+ //
44
+ // Modifications:
45
+ //
46
+ // ****************************************************************************
47
+ void addBenchmarkSpecOptions(OptionParser &op)
48
+ {
49
+ ;
50
+ }
51
+
52
+ // ****************************************************************************
53
+ // Function: runBenchmark
54
+ //
55
+ // Purpose:
56
+ // This benchmark measures the device memory bandwidth for several areas
57
+ // of memory including global, shared, and texture memories for several
58
+ // types of access patterns.
59
+ //
60
+ // Arguments:
61
+ // resultDB: the benchmark stores its results in this ResultDatabase
62
+ // op: the options parser / parameter database
63
+ //
64
+ // Returns: nothing
65
+ //
66
+ // Programmer: Kyle Spafford
67
+ // Creation: September 08, 2009
68
+ //
69
+ // Modifications:
70
+ // Gabriel Marin, 06/09/2010: Change memory access patterns to eliminate
71
+ // data reuse. Add auto-scaling factor.
72
+ //
73
+ // ****************************************************************************
74
+ void RunBenchmark(ResultDatabase &resultDB,
75
+ OptionParser &op)
76
+ {
77
+ int npasses = op.getOptionInt("passes");
78
+ size_t minGroupSize = 32;
79
+ size_t maxGroupSize = 512;
80
+ size_t globalWorkSize = 32768; // 64 * maxGroupSize = 64 * 512;
81
+ unsigned int memSize = 64*1024*1024; // 64MB buffer
82
+ const long availMem = findAvailBytes();
83
+ while (memSize*2 > availMem)
84
+ memSize >>= 1; // keep it a power of 2
85
+
86
+ const unsigned int numWordsFloat = memSize / sizeof(float);
87
+
88
+ // Initialize host memory
89
+ float *h_in = new float[numWordsFloat];
90
+ float *h_out = new float[numWordsFloat];
91
+ srand48(8650341L);
92
+ for (int i = 0; i < numWordsFloat; ++i)
93
+ {
94
+ h_in[i] = (float)(drand48()*numWordsFloat);
95
+ }
96
+
97
+ // Allocate some device memory
98
+ float *d_mem1, *d_mem2;
99
+ char sizeStr[128];
100
+
101
+ cudaMalloc((void**)&d_mem1, sizeof(float)*(numWordsFloat));
102
+ CHECK_CUDA_ERROR();
103
+ cudaMalloc((void**)&d_mem2, sizeof(float)*(numWordsFloat));
104
+ CHECK_CUDA_ERROR();
105
+
106
+ cudaEvent_t start, stop;
107
+ cudaEventCreate(&start);
108
+ cudaEventCreate(&stop);
109
+ CHECK_CUDA_ERROR();
110
+
111
+ cudaEventRecord(start, 0);
112
+ readGlobalMemoryCoalesced<<<512, 64>>>
113
+ (d_mem1, d_mem2, numWordsFloat, 256);
114
+ cudaEventRecord(stop, 0);
115
+ cudaEventSynchronize(stop);
116
+ CHECK_CUDA_ERROR();
117
+ float t = 0.0f;
118
+ cudaEventElapsedTime(&t, start, stop);
119
+ t /= 1.e3;
120
+ double scalet = 0.15 / t;
121
+
122
+ const unsigned int maxRepeatsCoal = 256*scalet;
123
+ const unsigned int maxRepeatsUnit = 16*scalet;
124
+ const unsigned int maxRepeatsLocal = 300*scalet;
125
+
126
+ for (int p = 0; p < npasses; p++)
127
+ {
128
+ // Run the kernel for each group size
129
+ cout << "Running benchmarks, pass: " << p << "\n";
130
+ for (int threads=minGroupSize; threads<=maxGroupSize ; threads*=2)
131
+ {
132
+ const unsigned int blocks = globalWorkSize / threads;
133
+ double bdwth;
134
+ sprintf (sizeStr, "blockSize:%03d", threads);
135
+
136
+ // Test 1
137
+ cudaEventRecord(start, 0);
138
+ readGlobalMemoryCoalesced<<<blocks, threads>>>
139
+ (d_mem1, d_mem2, numWordsFloat, maxRepeatsCoal);
140
+ cudaEventRecord(stop, 0);
141
+ cudaEventSynchronize(stop);
142
+ CHECK_CUDA_ERROR();
143
+ t = 0.0f;
144
+ cudaEventElapsedTime(&t, start, stop);
145
+ t /= 1.e3;
146
+ bdwth = ((double) globalWorkSize * maxRepeatsCoal * 16 * sizeof(float))
147
+ / (t * 1000. * 1000. * 1000.);
148
+ resultDB.AddResult("readGlobalMemoryCoalesced", sizeStr, "GB/s",
149
+ bdwth);
150
+
151
+ // Test 2
152
+ cudaEventRecord(start, 0);
153
+ readGlobalMemoryUnit<<<blocks, threads>>>
154
+ (d_mem1, d_mem2, numWordsFloat, maxRepeatsUnit);
155
+ cudaEventRecord(stop, 0);
156
+ cudaEventSynchronize(stop);
157
+ CHECK_CUDA_ERROR();
158
+ cudaEventElapsedTime(&t, start, stop);
159
+ t /= 1.e3;
160
+ bdwth = ((double) globalWorkSize * maxRepeatsUnit * 16 * sizeof(float))
161
+ / (t * 1000. * 1000. * 1000.);
162
+ resultDB.AddResult("readGlobalMemoryUnit", sizeStr, "GB/s", bdwth);
163
+
164
+ // Test 3
165
+ cudaEventRecord(start, 0);
166
+ readLocalMemory<<<blocks, threads>>>
167
+ (d_mem1, d_mem2, numWordsFloat, maxRepeatsLocal);
168
+ cudaEventRecord(stop, 0);
169
+ cudaEventSynchronize(stop);
170
+ CHECK_CUDA_ERROR();
171
+ cudaEventElapsedTime(&t, start, stop);
172
+ t /= 1.e3;
173
+ bdwth = ((double) globalWorkSize * maxRepeatsLocal * 16 * sizeof(float))
174
+ / (t * 1000. * 1000. * 1000.);
175
+ resultDB.AddResult("readLocalMemory", sizeStr, "GB/s", bdwth);
176
+
177
+ // Test 4
178
+ cudaEventRecord(start, 0);
179
+ writeGlobalMemoryCoalesced<<<blocks, threads>>>
180
+ (d_mem2, numWordsFloat, maxRepeatsCoal);
181
+ cudaEventRecord(stop, 0);
182
+ cudaEventSynchronize(stop);
183
+ CHECK_CUDA_ERROR();
184
+ cudaEventElapsedTime(&t, start, stop);
185
+ t /= 1.e3;
186
+ bdwth = ((double) globalWorkSize * maxRepeatsCoal * 16 * sizeof(float))
187
+ / (t * 1000. * 1000. * 1000.);
188
+ resultDB.AddResult("writeGlobalMemoryCoalesced", sizeStr, "GB/s",
189
+ bdwth);
190
+
191
+ // Test 5
192
+ cudaEventRecord(start, 0);
193
+ writeGlobalMemoryUnit<<<blocks, threads>>>
194
+ (d_mem2, numWordsFloat, maxRepeatsUnit);
195
+ cudaEventRecord(stop, 0);
196
+ cudaEventSynchronize(stop);
197
+ CHECK_CUDA_ERROR();
198
+ cudaEventElapsedTime(&t, start, stop);
199
+ t /= 1.e3;
200
+ bdwth = ((double) globalWorkSize * maxRepeatsUnit * 16 * sizeof(float))
201
+ / (t * 1000. * 1000. * 1000.);
202
+ resultDB.AddResult("writeGlobalMemoryUnit", sizeStr, "GB/s",
203
+ bdwth);
204
+
205
+ // Test 6
206
+ cudaEventRecord(start, 0);
207
+ writeLocalMemory<<<blocks, threads>>>
208
+ (d_mem2, numWordsFloat, maxRepeatsLocal);
209
+ cudaEventRecord(stop, 0);
210
+ cudaEventSynchronize(stop);
211
+ CHECK_CUDA_ERROR();
212
+ cudaEventElapsedTime(&t, start, stop);
213
+ t /= 1.e3;
214
+ bdwth = ((double) globalWorkSize * maxRepeatsLocal * 16 * sizeof(float))
215
+ / (t * 1000. * 1000. * 1000.);
216
+ resultDB.AddResult("writeLocalMemory", sizeStr, "GB/s", bdwth);
217
+ }
218
+ }
219
+ cudaFree(d_mem1);
220
+ cudaFree(d_mem2);
221
+ delete[] h_in;
222
+ delete[] h_out;
223
+ cudaEventDestroy(start);
224
+ cudaEventDestroy(stop);
225
+ TestTextureMem(resultDB, op, scalet);
226
+ }
227
+
228
+ // ****************************************************************************
229
+ // Function: TestTextureMem
230
+ //
231
+ // Purpose:
232
+ // Measures the bandwidth of texture memory for several access patterns
233
+ // using a 2D texture including sequential, "random", and repeated access to
234
+ // texture cache. Texture memory is often a viable alternative to global
235
+ // memory, especially when data access patterns prevent good coalescing.
236
+ //
237
+ // Arguments:
238
+ // resultDB: results from the benchmark are stored in this result database
239
+ // op: the options parser / parameter database
240
+ // scalet: auto-scaling factor for the number of repetitions
241
+ //
242
+ // Returns: nothing
243
+ //
244
+ // Programmer: Kyle Spafford
245
+ // Creation: December 11, 2009
246
+ //
247
+ // Modifications:
248
+ // Gabriel Marin 06/09/2010: add auto-scaling factor
249
+ //
250
+ // Jeremy Meredith, Tue Nov 23 13:45:54 EST 2010
251
+ // Change data sizes to be larger, and textures to be 2D to match OpenCL
252
+ // variant. Dropped #iterations to compensate. Had to remove validation
253
+ // for now, which also matches the current OpenCL variant's behavior.
254
+ //
255
+ // ****************************************************************************
256
+ void TestTextureMem(ResultDatabase &resultDB, OptionParser &op, double scalet)
257
+ {
258
+ // Number of times to repeat each test
259
+ const unsigned int passes = op.getOptionInt("passes");
260
+ // Sizes of textures tested (in kb)
261
+ const unsigned int nsizes = 5;
262
+ const unsigned int sizes[] = { 16, 64, 256, 1024, 4096 };
263
+ // Number of texel accesses by each kernel
264
+ const unsigned int kernelRepFactor = 1024;
265
+ // Number of times to repeat each kernel per test
266
+ const unsigned int iterations = 1*scalet;
267
+
268
+ cudaEvent_t start, stop;
269
+ cudaEventCreate(&start);
270
+ cudaEventCreate(&stop);
271
+ CHECK_CUDA_ERROR();
272
+
273
+ // make sure our texture behaves like we want....
274
+ texA.normalized = false;
275
+ texA.addressMode[0] = cudaAddressModeClamp;
276
+ texA.addressMode[1] = cudaAddressModeClamp;
277
+ texA.filterMode = cudaFilterModePoint;
278
+
279
+ for (int j = 0; j < nsizes; j++)
280
+ {
281
+ cout << "Benchmarking Texture Memory, Test: " << j+1 << " / 5\n";
282
+ const unsigned int size = 1024 * sizes[j];
283
+ const unsigned int numFloat = size / sizeof(float);
284
+ const unsigned int numFloat4 = size / sizeof(float4);
285
+ size_t width, height;
286
+
287
+ // Image memory sizes should be power of 2.
288
+ size_t sizeLog = lround(log2(double(numFloat4)));
289
+ height = 1 << (sizeLog >> 1); // height is the smaller size
290
+ width = numFloat4 / height;
291
+
292
+ const dim3 blockSize(16, 8);
293
+ const dim3 gridSize(width/blockSize.x, height/blockSize.y);
294
+
295
+ float *h_in = new float[numFloat];
296
+ float *h_out = new float[numFloat4];
297
+ float *d_out;
298
+ cudaMalloc((void**) &d_out, numFloat4 * sizeof(float));
299
+ CHECK_CUDA_ERROR();
300
+
301
+ // Fill input data with some pattern
302
+ for (unsigned int i = 0; i < numFloat; i++)
303
+ {
304
+ h_in[i] = (float) i;
305
+ if (i < numFloat4)
306
+ {
307
+ h_out[i] = 0.0f;
308
+ }
309
+ }
310
+
311
+ // Allocate a cuda array
312
+ cudaArray* cuArray;
313
+ cudaMallocArray(&cuArray, &texA.channelDesc, width, height);
314
+ CHECK_CUDA_ERROR();
315
+
316
+ // Copy in source data
317
+ cudaMemcpyToArray(cuArray, 0, 0, h_in, size, cudaMemcpyHostToDevice);
318
+ CHECK_CUDA_ERROR();
319
+
320
+ // Bind texture to the array
321
+ cudaBindTextureToArray(texA, cuArray);
322
+ CHECK_CUDA_ERROR();
323
+
324
+ for (int p = 0; p < passes; p++)
325
+ {
326
+ // Test 1: Repeated Linear Access
327
+ float t = 0.0f;
328
+
329
+ cudaEventRecord(start, 0);
330
+ // read texels from texture
331
+ for (int iter = 0; iter < iterations; iter++)
332
+ {
333
+ readTexels<<<gridSize, blockSize>>>(kernelRepFactor, d_out,
334
+ width);
335
+ }
336
+ cudaEventRecord(stop, 0);
337
+ CHECK_CUDA_ERROR();
338
+ cudaEventSynchronize(stop);
339
+ CHECK_CUDA_ERROR();
340
+ cudaEventElapsedTime(&t, start, stop);
341
+ t /= 1.e3;
342
+
343
+ // Calculate speed in GB/s
344
+ double speed = (double)kernelRepFactor * (double)iterations *
345
+ (double)(size/(1000.*1000.*1000.)) / (t);
346
+
347
+ char sizeStr[256];
348
+ sprintf(sizeStr, "% 6dkB", size / 1024);
349
+ resultDB.AddResult("TextureRepeatedLinearAccess", sizeStr, "GB/sec",
350
+ speed);
351
+
352
+ // Verify results
353
+ cudaMemcpy(h_out, d_out, numFloat4*sizeof(float),
354
+ cudaMemcpyDeviceToHost);
355
+
356
+ // Test 2 Repeated Cache Access
357
+ cudaEventRecord(start, 0);
358
+ for (int iter = 0; iter < iterations; iter++)
359
+ {
360
+ readTexelsInCache<<<gridSize, blockSize>>>
361
+ (kernelRepFactor, d_out);
362
+ }
363
+ cudaEventRecord(stop, 0);
364
+ cudaEventSynchronize(stop);
365
+ CHECK_CUDA_ERROR();
366
+ cudaEventElapsedTime(&t, start, stop);
367
+ t /= 1.e3;
368
+
369
+ // Calculate speed in GB/s
370
+ speed = (double)kernelRepFactor * (double)iterations *
371
+ ((double)size/(1000.*1000.*1000.)) / (t);
372
+
373
+ sprintf(sizeStr, "% 6dkB", size / 1024);
374
+ resultDB.AddResult("TextureRepeatedCacheHit", sizeStr, "GB/sec",
375
+ speed);
376
+
377
+ // Verify results
378
+ cudaMemcpy(h_out, d_out, numFloat4*sizeof(float),
379
+ cudaMemcpyDeviceToHost);
380
+
381
+ // Test 3 Repeated "Random" Access
382
+ cudaEventRecord(start, 0);
383
+
384
+ // read texels from texture
385
+ for (int iter = 0; iter < iterations; iter++)
386
+ {
387
+ readTexelsRandom<<<gridSize, blockSize>>>
388
+ (kernelRepFactor, d_out, width, height);
389
+ }
390
+
391
+ cudaEventRecord(stop, 0);
392
+ cudaEventSynchronize(stop);
393
+ CHECK_CUDA_ERROR();
394
+ cudaEventElapsedTime(&t, start, stop);
395
+ t /= 1.e3;
396
+
397
+ // Calculate speed in GB/s
398
+ speed = (double)kernelRepFactor * (double)iterations *
399
+ ((double)size/(1000.*1000.*1000.)) / (t);
400
+
401
+ sprintf(sizeStr, "% 6dkB", size / 1024);
402
+ resultDB.AddResult("TextureRepeatedRandomAccess", sizeStr,
403
+ "GB/sec", speed);
404
+ }
405
+ delete[] h_in;
406
+ delete[] h_out;
407
+ cudaFree(d_out);
408
+ cudaFreeArray(cuArray);
409
+ cudaUnbindTexture(texA);
410
+ }
411
+ cudaEventDestroy(start);
412
+ cudaEventDestroy(stop);
413
+ }
414
+
415
+ // Begin benchmark kernels
416
+ __global__ void
417
+ readGlobalMemoryCoalesced(float *data, float *output, int size, int repeat)
418
+ {
419
+ int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
420
+ float sum = 0;
421
+ int s = gid;
422
+ for (j=0 ; j<repeat ; ++j)
423
+ {
424
+ float a0 = data[(s+0)&(size-1)];
425
+ float a1 = data[(s+32768)&(size-1)];
426
+ float a2 = data[(s+65536)&(size-1)];
427
+ float a3 = data[(s+98304)&(size-1)];
428
+ float a4 = data[(s+131072)&(size-1)];
429
+ float a5 = data[(s+163840)&(size-1)];
430
+ float a6 = data[(s+196608)&(size-1)];
431
+ float a7 = data[(s+229376)&(size-1)];
432
+ float a8 = data[(s+262144)&(size-1)];
433
+ float a9 = data[(s+294912)&(size-1)];
434
+ float a10 = data[(s+327680)&(size-1)];
435
+ float a11 = data[(s+360448)&(size-1)];
436
+ float a12 = data[(s+393216)&(size-1)];
437
+ float a13 = data[(s+425984)&(size-1)];
438
+ float a14 = data[(s+458752)&(size-1)];
439
+ float a15 = data[(s+491520)&(size-1)];
440
+ sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15;
441
+ s = (s+524288)&(size-1);
442
+ }
443
+ output[gid] = sum;
444
+ }
445
+
446
+ __global__ void
447
+ readGlobalMemoryUnit(float *data, float *output, int size, int repeat)
448
+ {
449
+ int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
450
+ float sum = 0;
451
+ int s = gid*512;
452
+ for (j=0 ; j<repeat ; ++j)
453
+ {
454
+ float a0 = data[(s+0)&(size-1)];
455
+ float a1 = data[(s+1)&(size-1)];
456
+ float a2 = data[(s+2)&(size-1)];
457
+ float a3 = data[(s+3)&(size-1)];
458
+ float a4 = data[(s+4)&(size-1)];
459
+ float a5 = data[(s+5)&(size-1)];
460
+ float a6 = data[(s+6)&(size-1)];
461
+ float a7 = data[(s+7)&(size-1)];
462
+ float a8 = data[(s+8)&(size-1)];
463
+ float a9 = data[(s+9)&(size-1)];
464
+ float a10 = data[(s+10)&(size-1)];
465
+ float a11 = data[(s+11)&(size-1)];
466
+ float a12 = data[(s+12)&(size-1)];
467
+ float a13 = data[(s+13)&(size-1)];
468
+ float a14 = data[(s+14)&(size-1)];
469
+ float a15 = data[(s+15)&(size-1)];
470
+ sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15;
471
+ s = (s+16)&(size-1);
472
+ }
473
+ output[gid] = sum;
474
+ }
475
+
476
+ __global__ void
477
+ readLocalMemory(const float *data, float *output, int size, int repeat)
478
+ {
479
+ int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
480
+ float sum = 0;
481
+ int tid=threadIdx.x, localSize=blockDim.x, grpid=blockIdx.x,
482
+ litems=2048/localSize, goffset=localSize*grpid+tid*litems;
483
+ int s = tid;
484
+ __shared__ float lbuf[2048];
485
+ for ( ; j<litems && j<(size-goffset) ; ++j)
486
+ lbuf[tid*litems+j] = data[goffset+j];
487
+ for (int i=0 ; j<litems ; ++j,++i)
488
+ lbuf[tid*litems+j] = data[i];
489
+ __syncthreads();
490
+ for (j=0 ; j<repeat ; ++j)
491
+ {
492
+ float a0 = lbuf[(s+0)&(2047)];
493
+ float a1 = lbuf[(s+1)&(2047)];
494
+ float a2 = lbuf[(s+2)&(2047)];
495
+ float a3 = lbuf[(s+3)&(2047)];
496
+ float a4 = lbuf[(s+4)&(2047)];
497
+ float a5 = lbuf[(s+5)&(2047)];
498
+ float a6 = lbuf[(s+6)&(2047)];
499
+ float a7 = lbuf[(s+7)&(2047)];
500
+ float a8 = lbuf[(s+8)&(2047)];
501
+ float a9 = lbuf[(s+9)&(2047)];
502
+ float a10 = lbuf[(s+10)&(2047)];
503
+ float a11 = lbuf[(s+11)&(2047)];
504
+ float a12 = lbuf[(s+12)&(2047)];
505
+ float a13 = lbuf[(s+13)&(2047)];
506
+ float a14 = lbuf[(s+14)&(2047)];
507
+ float a15 = lbuf[(s+15)&(2047)];
508
+ sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15;
509
+ s = (s+16)&(2047);
510
+ }
511
+ output[gid] = sum;
512
+ }
513
+
514
+ __global__ void
515
+ writeGlobalMemoryCoalesced(float *output, int size, int repeat)
516
+ {
517
+ int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
518
+ int s = gid;
519
+ for (j=0 ; j<repeat ; ++j)
520
+ {
521
+ output[(s+0)&(size-1)] = gid;
522
+ output[(s+32768)&(size-1)] = gid;
523
+ output[(s+65536)&(size-1)] = gid;
524
+ output[(s+98304)&(size-1)] = gid;
525
+ output[(s+131072)&(size-1)] = gid;
526
+ output[(s+163840)&(size-1)] = gid;
527
+ output[(s+196608)&(size-1)] = gid;
528
+ output[(s+229376)&(size-1)] = gid;
529
+ output[(s+262144)&(size-1)] = gid;
530
+ output[(s+294912)&(size-1)] = gid;
531
+ output[(s+327680)&(size-1)] = gid;
532
+ output[(s+360448)&(size-1)] = gid;
533
+ output[(s+393216)&(size-1)] = gid;
534
+ output[(s+425984)&(size-1)] = gid;
535
+ output[(s+458752)&(size-1)] = gid;
536
+ output[(s+491520)&(size-1)] = gid;
537
+ s = (s+524288)&(size-1);
538
+ }
539
+ }
540
+
541
+ __global__ void
542
+ writeGlobalMemoryUnit(float *output, int size, int repeat)
543
+ {
544
+ int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
545
+ int s = gid*512;
546
+ for (j=0 ; j<repeat ; ++j)
547
+ {
548
+ output[(s+0)&(size-1)] = gid;
549
+ output[(s+1)&(size-1)] = gid;
550
+ output[(s+2)&(size-1)] = gid;
551
+ output[(s+3)&(size-1)] = gid;
552
+ output[(s+4)&(size-1)] = gid;
553
+ output[(s+5)&(size-1)] = gid;
554
+ output[(s+6)&(size-1)] = gid;
555
+ output[(s+7)&(size-1)] = gid;
556
+ output[(s+8)&(size-1)] = gid;
557
+ output[(s+9)&(size-1)] = gid;
558
+ output[(s+10)&(size-1)] = gid;
559
+ output[(s+11)&(size-1)] = gid;
560
+ output[(s+12)&(size-1)] = gid;
561
+ output[(s+13)&(size-1)] = gid;
562
+ output[(s+14)&(size-1)] = gid;
563
+ output[(s+15)&(size-1)] = gid;
564
+ s = (s+16)&(size-1);
565
+ }
566
+ }
567
+
568
+ __global__ void
569
+ writeLocalMemory(float *output, int size, int repeat)
570
+ {
571
+ int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
572
+ int tid=threadIdx.x, localSize=blockDim.x, litems=2048/localSize;
573
+ int s = tid;
574
+ __shared__ float lbuf[2048];
575
+ for (j=0 ; j<repeat ; ++j)
576
+ {
577
+ lbuf[(s+0)&(2047)] = gid;
578
+ lbuf[(s+1)&(2047)] = gid;
579
+ lbuf[(s+2)&(2047)] = gid;
580
+ lbuf[(s+3)&(2047)] = gid;
581
+ lbuf[(s+4)&(2047)] = gid;
582
+ lbuf[(s+5)&(2047)] = gid;
583
+ lbuf[(s+6)&(2047)] = gid;
584
+ lbuf[(s+7)&(2047)] = gid;
585
+ lbuf[(s+8)&(2047)] = gid;
586
+ lbuf[(s+9)&(2047)] = gid;
587
+ lbuf[(s+10)&(2047)] = gid;
588
+ lbuf[(s+11)&(2047)] = gid;
589
+ lbuf[(s+12)&(2047)] = gid;
590
+ lbuf[(s+13)&(2047)] = gid;
591
+ lbuf[(s+14)&(2047)] = gid;
592
+ lbuf[(s+15)&(2047)] = gid;
593
+ s = (s+16)&(2047);
594
+ }
595
+ __syncthreads();
596
+ for (j=0 ; j<litems ; ++j)
597
+ output[gid] = lbuf[tid];
598
+ }
599
+
600
+ // Simple Repeated Linear Read from texture memory
601
+ __global__ void readTexels(int n, float *d_out, int width)
602
+ {
603
+ int idx_x = (blockIdx.x * blockDim.x) + threadIdx.x;
604
+ int idx_y = (blockIdx.y * blockDim.y) + threadIdx.y;
605
+ int out_idx = idx_y * gridDim.x + idx_x;
606
+ float sum = 0.0f;
607
+ int width_bits = width-1;
608
+ for (int i = 0; i < n; i++)
609
+ {
610
+ float4 v = tex2D(texA, float(idx_x), float(idx_y));
611
+ idx_x = (idx_x+1) & width_bits;
612
+ sum += v.x;
613
+ }
614
+ d_out[out_idx] = sum;
615
+ }
616
+
617
+ // Repeated read of only 4kb of texels (should fit in texture cache)
618
+ __global__ void readTexelsInCache(int n, float *d_out)
619
+ {
620
+ int idx_x = (blockIdx.x * blockDim.x) + threadIdx.x;
621
+ int idx_y = (blockIdx.y * blockDim.y) + threadIdx.y;
622
+ int out_idx = idx_y * gridDim.x + idx_x;
623
+ float sum = 0.0f;
624
+ for (int i = 0; i < n; i++)
625
+ {
626
+ float4 v = tex2D(texA, float(idx_x), float(idx_y));
627
+ sum += v.x;
628
+ }
629
+ d_out[out_idx] = sum;
630
+ }
631
+
632
+ // Read "random" texels
633
+ __global__ void readTexelsRandom(int n, float *d_out, int width, int height)
634
+ {
635
+ int idx_x = (blockIdx.x * blockDim.x) + threadIdx.x;
636
+ int idx_y = (blockIdx.y * blockDim.y) + threadIdx.y;
637
+ int out_idx = idx_y * gridDim.x + idx_x;
638
+ float sum = 0.0f;
639
+ int width_bits = width-1;
640
+ int height_bits = height-1;
641
+ for (int i = 0; i < n; i++)
642
+ {
643
+ float4 v = tex2D(texA, float(idx_x), float(idx_y));
644
+ idx_x = (idx_x*3+29)&(width_bits);
645
+ idx_y = (idx_y*5+11)&(height_bits);
646
+ sum += v.x;
647
+ }
648
+ d_out[out_idx] = sum;
649
+ }
650
+
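
Editor's note: the GB/s figures reported above follow directly from the kernel structure — every global/local-memory thread touches 16 floats per repeat, and each texture-kernel pass reads kernelRepFactor texels per thread, i.e. the whole bound image kernelRepFactor times. Restating the arithmetic already in RunBenchmark and TestTextureMem:

    \mathrm{BW}_{\text{global/local}} = \frac{\texttt{globalWorkSize}\cdot\texttt{maxRepeats}\cdot 16\cdot\texttt{sizeof(float)}}{t\cdot 10^{9}},
    \qquad
    \mathrm{BW}_{\text{texture}} = \frac{\texttt{kernelRepFactor}\cdot\texttt{iterations}\cdot\texttt{size}}{t\cdot 10^{9}}

with t the elapsed kernel time in seconds measured via CUDA events.
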
cuda_code/Distance_14.cu ADDED
@@ -0,0 +1,472 @@
1
+ /**
2
+ * Copyright (c) Facebook, Inc. and its affiliates.
3
+ *
4
+ * This source code is licensed under the MIT license found in the
5
+ * LICENSE file in the root directory of this source tree.
6
+ */
7
+
8
+ #include <faiss/gpu/GpuResources.h>
9
+ #include <faiss/gpu/utils/DeviceUtils.h>
10
+ #include <faiss/impl/AuxIndexStructures.h>
11
+ #include <faiss/impl/FaissAssert.h>
12
+ #include <faiss/gpu/impl/BroadcastSum.cuh>
13
+ #include <faiss/gpu/impl/Distance.cuh>
14
+ #include <faiss/gpu/impl/DistanceUtils.cuh>
15
+ #include <faiss/gpu/impl/L2Norm.cuh>
16
+ #include <faiss/gpu/impl/L2Select.cuh>
17
+ #include <faiss/gpu/utils/BlockSelectKernel.cuh>
18
+ #include <faiss/gpu/utils/DeviceDefs.cuh>
19
+ #include <faiss/gpu/utils/Limits.cuh>
20
+ #include <faiss/gpu/utils/MatrixMult.cuh>
21
+
22
+ #include <thrust/device_ptr.h>
23
+ #include <thrust/execution_policy.h>
24
+ #include <thrust/fill.h>
25
+ #include <thrust/for_each.h>
26
+ #include <algorithm>
27
+ #include <memory>
28
+
29
+ namespace faiss {
30
+ namespace gpu {
31
+
32
+ template <typename T>
33
+ void runDistance(
34
+ bool computeL2,
35
+ GpuResources* res,
36
+ Tensor<T, 2, true>& centroids,
37
+ bool centroidsRowMajor,
38
+ Tensor<float, 1, true>* centroidNorms,
39
+ Tensor<T, 2, true>& queries,
40
+ bool queriesRowMajor,
41
+ int k,
42
+ Tensor<float, 2, true>& outDistances,
43
+ Tensor<int, 2, true>& outIndices,
44
+ bool ignoreOutDistances) {
45
+ // The # of centroids in `centroids` based on memory layout
46
+ auto numCentroids = centroids.getSize(centroidsRowMajor ? 0 : 1);
47
+
48
+ // The # of queries in `queries` based on memory layout
49
+ auto numQueries = queries.getSize(queriesRowMajor ? 0 : 1);
50
+
51
+ // The dimensions of the vectors to consider
52
+ auto dim = queries.getSize(queriesRowMajor ? 1 : 0);
53
+ FAISS_ASSERT(
54
+ (numQueries == 0 || numCentroids == 0) ||
55
+ dim == centroids.getSize(centroidsRowMajor ? 1 : 0));
56
+
57
+ FAISS_ASSERT(outDistances.getSize(0) == numQueries);
58
+ FAISS_ASSERT(outIndices.getSize(0) == numQueries);
59
+ FAISS_ASSERT(outDistances.getSize(1) == k);
60
+ FAISS_ASSERT(outIndices.getSize(1) == k);
61
+
62
+ auto defaultStream = res->getDefaultStreamCurrentDevice();
63
+
64
+ // If we're querying against a 0 sized set, just return empty results
65
+ if (centroids.numElements() == 0) {
66
+ thrust::fill(
67
+ thrust::cuda::par.on(defaultStream),
68
+ outDistances.data(),
69
+ outDistances.end(),
70
+ Limits<float>::getMax());
71
+
72
+ thrust::fill(
73
+ thrust::cuda::par.on(defaultStream),
74
+ outIndices.data(),
75
+ outIndices.end(),
76
+ -1);
77
+
78
+ return;
79
+ }
80
+
81
+ // L2: If ||c||^2 is not pre-computed, calculate it
82
+ DeviceTensor<float, 1, true> cNorms;
83
+ if (computeL2 && !centroidNorms) {
84
+ cNorms = DeviceTensor<float, 1, true>(
85
+ res,
86
+ makeTempAlloc(AllocType::Other, defaultStream),
87
+ {numCentroids});
88
+ runL2Norm(centroids, centroidsRowMajor, cNorms, true, defaultStream);
89
+ centroidNorms = &cNorms;
90
+ }
91
+
92
+ //
93
+ // Prepare norm vector ||q||^2; ||c||^2 is already pre-computed
94
+ //
95
+ DeviceTensor<float, 1, true> queryNorms(
96
+ res,
97
+ makeTempAlloc(AllocType::Other, defaultStream),
98
+ {(int)numQueries});
99
+
100
+ // ||q||^2
101
+ if (computeL2) {
102
+ runL2Norm(queries, queriesRowMajor, queryNorms, true, defaultStream);
103
+ }
104
+
105
+ // By default, aim to use up to 512 MB of memory for the processing, with
106
+ // both number of queries and number of centroids being at least 512.
107
+ int tileRows = 0;
108
+ int tileCols = 0;
109
+ chooseTileSize(
110
+ numQueries,
111
+ numCentroids,
112
+ dim,
113
+ sizeof(T),
114
+ res->getTempMemoryAvailableCurrentDevice(),
115
+ tileRows,
116
+ tileCols);
117
+
118
+ int numColTiles = utils::divUp(numCentroids, tileCols);
119
+
120
+ // We can have any number of vectors to query against, even less than k, in
121
+ // which case we'll return -1 for the index
122
+ FAISS_ASSERT(k <= GPU_MAX_SELECTION_K); // select limitation
123
+
124
+ // Temporary output memory space we'll use
125
+ DeviceTensor<float, 2, true> distanceBuf1(
126
+ res,
127
+ makeTempAlloc(AllocType::Other, defaultStream),
128
+ {tileRows, tileCols});
129
+ DeviceTensor<float, 2, true> distanceBuf2(
130
+ res,
131
+ makeTempAlloc(AllocType::Other, defaultStream),
132
+ {tileRows, tileCols});
133
+ DeviceTensor<float, 2, true>* distanceBufs[2] = {
134
+ &distanceBuf1, &distanceBuf2};
135
+
136
+ DeviceTensor<float, 2, true> outDistanceBuf1(
137
+ res,
138
+ makeTempAlloc(AllocType::Other, defaultStream),
139
+ {tileRows, numColTiles * k});
140
+ DeviceTensor<float, 2, true> outDistanceBuf2(
141
+ res,
142
+ makeTempAlloc(AllocType::Other, defaultStream),
143
+ {tileRows, numColTiles * k});
144
+ DeviceTensor<float, 2, true>* outDistanceBufs[2] = {
145
+ &outDistanceBuf1, &outDistanceBuf2};
146
+
147
+ DeviceTensor<int, 2, true> outIndexBuf1(
148
+ res,
149
+ makeTempAlloc(AllocType::Other, defaultStream),
150
+ {tileRows, numColTiles * k});
151
+ DeviceTensor<int, 2, true> outIndexBuf2(
152
+ res,
153
+ makeTempAlloc(AllocType::Other, defaultStream),
154
+ {tileRows, numColTiles * k});
155
+ DeviceTensor<int, 2, true>* outIndexBufs[2] = {
156
+ &outIndexBuf1, &outIndexBuf2};
157
+
158
+ auto streams = res->getAlternateStreamsCurrentDevice();
159
+ streamWait(streams, {defaultStream});
160
+
161
+ int curStream = 0;
162
+ bool interrupt = false;
163
+
164
+ // Tile over the input queries
165
+ for (int i = 0; i < numQueries; i += tileRows) {
166
+ if (interrupt || InterruptCallback::is_interrupted()) {
167
+ interrupt = true;
168
+ break;
169
+ }
170
+
171
+ int curQuerySize = std::min(tileRows, numQueries - i);
172
+
173
+ auto outDistanceView = outDistances.narrow(0, i, curQuerySize);
174
+ auto outIndexView = outIndices.narrow(0, i, curQuerySize);
175
+
176
+ auto queryView =
177
+ queries.narrow(queriesRowMajor ? 0 : 1, i, curQuerySize);
178
+ auto queryNormNiew = queryNorms.narrow(0, i, curQuerySize);
179
+
180
+ auto outDistanceBufRowView =
181
+ outDistanceBufs[curStream]->narrow(0, 0, curQuerySize);
182
+ auto outIndexBufRowView =
183
+ outIndexBufs[curStream]->narrow(0, 0, curQuerySize);
184
+
185
+ // Tile over the centroids
186
+ for (int j = 0; j < numCentroids; j += tileCols) {
187
+ if (InterruptCallback::is_interrupted()) {
188
+ interrupt = true;
189
+ break;
190
+ }
191
+
192
+ int curCentroidSize = std::min(tileCols, numCentroids - j);
193
+ int curColTile = j / tileCols;
194
+
195
+ auto centroidsView = sliceCentroids(
196
+ centroids, centroidsRowMajor, j, curCentroidSize);
197
+
198
+ auto distanceBufView = distanceBufs[curStream]
199
+ ->narrow(0, 0, curQuerySize)
200
+ .narrow(1, 0, curCentroidSize);
201
+
202
+ auto outDistanceBufColView =
203
+ outDistanceBufRowView.narrow(1, k * curColTile, k);
204
+ auto outIndexBufColView =
205
+ outIndexBufRowView.narrow(1, k * curColTile, k);
206
+
207
+ // L2: distance is ||c||^2 - 2qc + ||q||^2, we compute -2qc
208
+ // IP: just compute qc
209
+ // (query id x dim) x (centroid id, dim)' = (query id, centroid id)
210
+ runMatrixMult(
211
+ distanceBufView,
212
+ false, // not transposed
213
+ queryView,
214
+ !queriesRowMajor, // transposed MM if col major
215
+ centroidsView,
216
+ centroidsRowMajor, // transposed MM if row major
217
+ computeL2 ? -2.0f : 1.0f,
218
+ 0.0f,
219
+ res->getBlasHandleCurrentDevice(),
220
+ streams[curStream]);
221
+
222
+ if (computeL2) {
223
+ // For L2 distance, we use this fused kernel that performs both
224
+ // adding ||c||^2 to -2qc and k-selection, so we only need two
225
+ // passes (one write by the gemm, one read here) over the huge
226
+ // region of output memory
227
+ //
228
+ // If we aren't tiling along the number of centroids, we can
229
+ // perform the output work directly
230
+ if (tileCols == numCentroids) {
231
+ // Write into the final output
232
+ runL2SelectMin(
233
+ distanceBufView,
234
+ *centroidNorms,
235
+ outDistanceView,
236
+ outIndexView,
237
+ k,
238
+ streams[curStream]);
239
+
240
+ if (!ignoreOutDistances) {
241
+ // expand (query id) to (query id, k) by duplicating
242
+ // along rows top-k ||c||^2 - 2qc + ||q||^2 in the form
243
+ // (query id, k)
244
+ runSumAlongRows(
245
+ queryNormNiew,
246
+ outDistanceView,
247
+ true, // L2 distances should not go below zero
248
+ // due to roundoff error
249
+ streams[curStream]);
250
+ }
251
+ } else {
252
+ auto centroidNormsView =
253
+ centroidNorms->narrow(0, j, curCentroidSize);
254
+
255
+ // Write into our intermediate output
256
+ runL2SelectMin(
257
+ distanceBufView,
258
+ centroidNormsView,
259
+ outDistanceBufColView,
260
+ outIndexBufColView,
261
+ k,
262
+ streams[curStream]);
263
+
264
+ if (!ignoreOutDistances) {
265
+ // expand (query id) to (query id, k) by duplicating
266
+ // along rows top-k ||c||^2 - 2qc + ||q||^2 in the form
267
+ // (query id, k)
268
+ runSumAlongRows(
269
+ queryNormNiew,
270
+ outDistanceBufColView,
271
+ true, // L2 distances should not go below zero
272
+ // due to roundoff error
273
+ streams[curStream]);
274
+ }
275
+ }
276
+ } else {
277
+ // For IP, just k-select the output for this tile
278
+ if (tileCols == numCentroids) {
279
+ // Write into the final output
280
+ runBlockSelect(
281
+ distanceBufView,
282
+ outDistanceView,
283
+ outIndexView,
284
+ true,
285
+ k,
286
+ streams[curStream]);
287
+ } else {
288
+ // Write into the intermediate output
289
+ runBlockSelect(
290
+ distanceBufView,
291
+ outDistanceBufColView,
292
+ outIndexBufColView,
293
+ true,
294
+ k,
295
+ streams[curStream]);
296
+ }
297
+ }
298
+ }
299
+
300
+ // As we're finished with processing a full set of centroids, perform
301
+ // the final k-selection
302
+ if (tileCols != numCentroids) {
303
+ // The indices are tile-relative; for each tile of k, we need to add
304
+ // tileCols to the index
305
+ runIncrementIndex(
306
+ outIndexBufRowView, k, tileCols, streams[curStream]);
307
+
308
+ runBlockSelectPair(
309
+ outDistanceBufRowView,
310
+ outIndexBufRowView,
311
+ outDistanceView,
312
+ outIndexView,
313
+ computeL2 ? false : true,
314
+ k,
315
+ streams[curStream]);
316
+ }
317
+
318
+ curStream = (curStream + 1) % 2;
319
+ }
320
+
321
+ // Have the desired ordering stream wait on the multi-stream
322
+ streamWait({defaultStream}, streams);
323
+
324
+ if (interrupt) {
325
+ FAISS_THROW_MSG("interrupted");
326
+ }
327
+ }
328
+
329
+ template <typename T>
330
+ void runL2Distance(
331
+ GpuResources* res,
332
+ Tensor<T, 2, true>& centroids,
333
+ bool centroidsRowMajor,
334
+ Tensor<float, 1, true>* centroidNorms,
335
+ Tensor<T, 2, true>& queries,
336
+ bool queriesRowMajor,
337
+ int k,
338
+ Tensor<float, 2, true>& outDistances,
339
+ Tensor<int, 2, true>& outIndices,
340
+ bool ignoreOutDistances = false) {
341
+ runDistance<T>(
342
+ true, // L2
343
+ res,
344
+ centroids,
345
+ centroidsRowMajor,
346
+ centroidNorms,
347
+ queries,
348
+ queriesRowMajor,
349
+ k,
350
+ outDistances,
351
+ outIndices,
352
+ ignoreOutDistances);
353
+ }
354
+
355
+ template <typename T>
356
+ void runIPDistance(
357
+ GpuResources* res,
358
+ Tensor<T, 2, true>& centroids,
359
+ bool centroidsRowMajor,
360
+ Tensor<T, 2, true>& queries,
361
+ bool queriesRowMajor,
362
+ int k,
363
+ Tensor<float, 2, true>& outDistances,
364
+ Tensor<int, 2, true>& outIndices) {
365
+ runDistance<T>(
366
+ false, // IP
367
+ res,
368
+ centroids,
369
+ centroidsRowMajor,
370
+ nullptr, // no centroid norms provided
371
+ queries,
372
+ queriesRowMajor,
373
+ k,
374
+ outDistances,
375
+ outIndices,
376
+ false);
377
+ }
378
+
379
+ //
380
+ // Instantiations of the distance templates
381
+ //
382
+
383
+ void runIPDistance(
384
+ GpuResources* res,
385
+ Tensor<float, 2, true>& vectors,
386
+ bool vectorsRowMajor,
387
+ Tensor<float, 2, true>& queries,
388
+ bool queriesRowMajor,
389
+ int k,
390
+ Tensor<float, 2, true>& outDistances,
391
+ Tensor<int, 2, true>& outIndices) {
392
+ runIPDistance<float>(
393
+ res,
394
+ vectors,
395
+ vectorsRowMajor,
396
+ queries,
397
+ queriesRowMajor,
398
+ k,
399
+ outDistances,
400
+ outIndices);
401
+ }
402
+
403
+ void runIPDistance(
404
+ GpuResources* res,
405
+ Tensor<half, 2, true>& vectors,
406
+ bool vectorsRowMajor,
407
+ Tensor<half, 2, true>& queries,
408
+ bool queriesRowMajor,
409
+ int k,
410
+ Tensor<float, 2, true>& outDistances,
411
+ Tensor<int, 2, true>& outIndices) {
412
+ runIPDistance<half>(
413
+ res,
414
+ vectors,
415
+ vectorsRowMajor,
416
+ queries,
417
+ queriesRowMajor,
418
+ k,
419
+ outDistances,
420
+ outIndices);
421
+ }
422
+
423
+ void runL2Distance(
424
+ GpuResources* res,
425
+ Tensor<float, 2, true>& vectors,
426
+ bool vectorsRowMajor,
427
+ Tensor<float, 1, true>* vectorNorms,
428
+ Tensor<float, 2, true>& queries,
429
+ bool queriesRowMajor,
430
+ int k,
431
+ Tensor<float, 2, true>& outDistances,
432
+ Tensor<int, 2, true>& outIndices,
433
+ bool ignoreOutDistances) {
434
+ runL2Distance<float>(
435
+ res,
436
+ vectors,
437
+ vectorsRowMajor,
438
+ vectorNorms,
439
+ queries,
440
+ queriesRowMajor,
441
+ k,
442
+ outDistances,
443
+ outIndices,
444
+ ignoreOutDistances);
445
+ }
446
+
447
+ void runL2Distance(
448
+ GpuResources* res,
449
+ Tensor<half, 2, true>& vectors,
450
+ bool vectorsRowMajor,
451
+ Tensor<float, 1, true>* vectorNorms,
452
+ Tensor<half, 2, true>& queries,
453
+ bool queriesRowMajor,
454
+ int k,
455
+ Tensor<float, 2, true>& outDistances,
456
+ Tensor<int, 2, true>& outIndices,
457
+ bool ignoreOutDistances) {
458
+ runL2Distance<half>(
459
+ res,
460
+ vectors,
461
+ vectorsRowMajor,
462
+ vectorNorms,
463
+ queries,
464
+ queriesRowMajor,
465
+ k,
466
+ outDistances,
467
+ outIndices,
468
+ ignoreOutDistances);
469
+ }
470
+
471
+ } // namespace gpu
472
+ } // namespace faiss
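
Editor's note: the L2 path of runDistance above uses the standard expansion of the squared Euclidean distance (this only restates the comments in the code):

    \lVert q - c \rVert_2^{2} \;=\; \lVert q\rVert^{2} \;-\; 2\,q^{\top}c \;+\; \lVert c\rVert^{2}

runMatrixMult produces the cross term with a GEMM scaled by -2, runL2SelectMin folds \lVert c\rVert^{2} into the fused k-selection, and runSumAlongRows broadcasts \lVert q\rVert^{2} across each query row.
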
cuda_code/DistributionExponentialKernel.cu ADDED
@@ -0,0 +1,37 @@
+ #include <ATen/Dispatch.h>
+ #include <ATen/ExpandUtils.h>
+ #include <ATen/NativeFunctions.h>
+ #include <ATen/cuda/CUDAApplyUtils.cuh>
+ #include <ATen/AccumulateType.h>
+ #include <ATen/CUDAGeneratorImpl.h>
+ #include <ATen/native/UnaryOps.h>
+ #include <ATen/native/cuda/DistributionTemplates.h>
+ 
+ #include <curand.h>
+ #include <curand_kernel.h>
+ #include <curand_philox4x32_x.h>
+ #include <utility>
+ #include <functional>
+ 
+ #include <ATen/native/Distributions.h>
+ #include <ATen/native/cuda/Loops.cuh>
+ #include <ATen/native/TensorIterator.h>
+ 
+ #include <THC/THCGeneral.h>
+ #include <THC/THCDeviceUtils.cuh>
+ 
+ #include <cstdint>
+ #include <limits>
+ #include <utility>
+ #include <type_traits>
+ 
+ namespace at { namespace native {
+ 
+ void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
+   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
+   at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
+ }
+ 
+ REGISTER_DISPATCH(exponential_stub, &exponential_kernel);
+ 
+ }} // namespace at::native
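
Both this file and the trimmed _1 variant below only register a thin wrapper: the sampling itself lives in the templated exponential_kernel of DistributionTemplates.h, which, as in most exponential samplers, transforms uniform random numbers through the inverse CDF, x = -log(1 - u) / lambda. The standalone CUDA sketch below shows that transform with raw cuRAND; it is an editorial illustration and deliberately ignores the generator-offset bookkeeping the ATen kernel performs.

    // Standalone sketch of exponential sampling by inverse CDF.
    // Since u is uniform on (0, 1], -log(u)/lambda has the Exp(lambda) distribution.
    #include <cstdio>
    #include <curand_kernel.h>

    __global__ void exponential_sketch(float* out, int n, float lambda, unsigned long long seed) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i >= n) return;
        curandStatePhilox4_32_10_t state;
        curand_init(seed, i, /*offset=*/0, &state);
        float u = curand_uniform(&state);   // u in (0, 1], so log(u) is finite
        out[i] = -logf(u) / lambda;
    }

    int main() {
        const int n = 8;
        float *d_out, h_out[n];
        cudaMalloc(&d_out, n * sizeof(float));
        exponential_sketch<<<1, n>>>(d_out, n, /*lambda=*/2.0f, /*seed=*/42ULL);
        cudaMemcpy(h_out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
        for (int i = 0; i < n; ++i) printf("%f\n", h_out[i]);
        cudaFree(d_out);
        return 0;
    }
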
cuda_code/DistributionExponentialKernel_1.cu ADDED
@@ -0,0 +1,15 @@
+ #define TORCH_ASSERT_NO_OPERATORS
+ #include <ATen/cuda/CUDAGeneratorImpl.h>
+ #include <ATen/native/UnaryOps.h>
+ #include <ATen/native/cuda/DistributionTemplates.h>
+ 
+ namespace at { namespace native {
+ 
+ void exponential_kernel(TensorIteratorBase& iter, double lambda, c10::optional<Generator> gen) {
+   auto generator = get_generator_or_default<CUDAGeneratorImpl>(gen, cuda::detail::getDefaultCUDAGenerator());
+   at::native::templates::cuda::exponential_kernel(iter, lambda, generator);
+ }
+ 
+ REGISTER_DISPATCH(exponential_stub, &exponential_kernel);
+ 
+ }} // namespace at::native
cuda_code/DnDHgels.cu ADDED
@@ -0,0 +1,67 @@
+ #include <cuda_runtime_api.h> // cudaMalloc, cudaMemcpy, etc.
+ #include <cusolverDn.h>       // cusolverDn
+ #include "../../cusolver_utils.h"
+ #include <math.h>             // fabs
+ #include <stdio.h>            // printf
+ #include <stdlib.h>           // EXIT_FAILURE
+ 
+ int main(void) {
+ 
+     const int m = 3;
+     const int n = 3;
+     const int nrhs = 3;
+     int lda = n;
+     int ldb = n;
+     int ldx = n;
+     double hA[] = {1, 2, 3, 2, 5, 5, 3, 5, 12};
+     double hB[] = {1, 2, 3, 2, 5, 5, 3, 5, 12};
+     double hX[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+ 
+     // B equals A, so the expected least-squares solution is the identity matrix.
+     double hX_result[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
+ 
+     double *dA, *dB, *dX;
+     CUDA_CHECK( cudaMalloc((void**) &dA, m * n * sizeof(double)));
+     CUDA_CHECK( cudaMalloc((void**) &dB, m * nrhs * sizeof(double)));
+     CUDA_CHECK( cudaMalloc((void**) &dX, m * nrhs * sizeof(double)));
+     CUDA_CHECK( cudaMemcpy(dA, hA, m * n * sizeof(double), cudaMemcpyHostToDevice) );
+     CUDA_CHECK( cudaMemcpy(dB, hB, m * nrhs * sizeof(double), cudaMemcpyHostToDevice) );
+     CUDA_CHECK( cudaMemcpy(dX, hX, m * nrhs * sizeof(double), cudaMemcpyHostToDevice) );
+ 
+     cusolverDnHandle_t handle = NULL;
+     CUSOLVER_CHECK(cusolverDnCreate(&handle));
+ 
+     // Query the workspace size required by the solver.
+     size_t lwork_bytes;
+     CUSOLVER_CHECK(cusolverDnDHgels_bufferSize(handle, m, n, nrhs, NULL, lda, NULL, ldb, NULL, ldx, NULL, &lwork_bytes));
+     //printf("%zu\n", lwork_bytes);
+ 
+     void *dWorkspace;
+     CUDA_CHECK( cudaMalloc((void**) &dWorkspace, lwork_bytes));
+ 
+     // Solve the least-squares system A * X = B.
+     int *devInfo;
+     int niter;
+     CUDA_CHECK( cudaMalloc((void**) &devInfo, sizeof(int)));
+     CUSOLVER_CHECK(cusolverDnDHgels(handle, m, n, nrhs, dA, lda, dB, ldb, dX, ldx, dWorkspace, lwork_bytes, &niter, devInfo));
+     int hdevInfo;
+     CUDA_CHECK( cudaMemcpy(&hdevInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost) );
+     double values[n * nrhs];
+     CUDA_CHECK( cudaMemcpy(values, dX, n * nrhs * sizeof(double), cudaMemcpyDeviceToHost) );
+ 
+     int correct = (hdevInfo == 0);
+     for (int i = 0; i < n * nrhs; i++) {
+         printf("%f == %f\n", values[i], hX_result[i]);
+         if (fabs(values[i] - hX_result[i]) > 0.001) {
+             correct = 0;
+             //break;
+         }
+     }
+ 
+     if (correct == 1) {
+         printf("DnDHgels test PASSED\n");
+     } else {
+         printf("DnDHgels test FAILED\n");
+     }
+ 
+     CUDA_CHECK( cudaFree(dA));
+     CUDA_CHECK( cudaFree(dB));
+     CUDA_CHECK( cudaFree(dX));
+     CUDA_CHECK( cudaFree(dWorkspace));
+     CUDA_CHECK( cudaFree(devInfo));
+     CUSOLVER_CHECK(cusolverDnDestroy(handle));
+ 
+     return EXIT_SUCCESS;
+ }
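
In the test above, B is set equal to A, so the expected least-squares solution X is the 3x3 identity, which is what hX_result encodes; cusolverDnDHgels computes it with cuSOLVER's mixed-precision iterative-refinement least-squares solver, and niter reports the refinement iterations performed. A natural extra check one might add is a host-side residual test A * X == B; the helper below is a hypothetical editorial addition, not part of the sample.

    // Hypothetical host-side residual check: verifies A * X == B for the
    // column-major 3x3 system used in the test above.
    #include <math.h>
    #include <stdio.h>

    static int residual_ok(const double* A, const double* X, const double* B,
                           int m, int n, int nrhs, double tol) {
        for (int col = 0; col < nrhs; ++col) {
            for (int row = 0; row < m; ++row) {
                double acc = 0.0;
                for (int k = 0; k < n; ++k)
                    acc += A[k * m + row] * X[col * n + k];   // column-major A and X
                if (fabs(acc - B[col * m + row]) > tol) return 0;
            }
        }
        return 1;
    }

    int main(void) {
        const double A[] = {1, 2, 3, 2, 5, 5, 3, 5, 12};
        const double X[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};   // expected solution: identity
        const double B[] = {1, 2, 3, 2, 5, 5, 3, 5, 12};  // B == A, hence X == I
        printf("residual check %s\n", residual_ok(A, X, B, 3, 3, 3, 1e-9) ? "PASSED" : "FAILED");
        return 0;
    }
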
cuda_code/DnSXgels.cu ADDED
@@ -0,0 +1,67 @@
+ #include <cuda_runtime_api.h> // cudaMalloc, cudaMemcpy, etc.
+ #include <cusolverDn.h>       // cusolverDn
+ #include "../../cusolver_utils.h"
+ #include <math.h>             // fabsf
+ #include <stdio.h>            // printf
+ #include <stdlib.h>           // EXIT_FAILURE
+ 
+ int main(void) {
+ 
+     const int m = 3;
+     const int n = 3;
+     const int nrhs = 3;
+     int lda = n;
+     int ldb = n;
+     int ldx = n;
+     float hA[] = {1, 2, 3, 2, 5, 5, 3, 5, 12};
+     float hB[] = {1, 2, 3, 2, 5, 5, 3, 5, 12};
+     float hX[] = {0, 0, 0, 0, 0, 0, 0, 0, 0};
+ 
+     // B equals A, so the expected least-squares solution is the identity matrix.
+     float hX_result[] = {1, 0, 0, 0, 1, 0, 0, 0, 1};
+ 
+     float *dA, *dB, *dX;
+     CUDA_CHECK( cudaMalloc((void**) &dA, m * n * sizeof(float)));
+     CUDA_CHECK( cudaMalloc((void**) &dB, m * nrhs * sizeof(float)));
+     CUDA_CHECK( cudaMalloc((void**) &dX, m * nrhs * sizeof(float)));
+     CUDA_CHECK( cudaMemcpy(dA, hA, m * n * sizeof(float), cudaMemcpyHostToDevice) );
+     CUDA_CHECK( cudaMemcpy(dB, hB, m * nrhs * sizeof(float), cudaMemcpyHostToDevice) );
+     CUDA_CHECK( cudaMemcpy(dX, hX, m * nrhs * sizeof(float), cudaMemcpyHostToDevice) );
+ 
+     cusolverDnHandle_t handle = NULL;
+     CUSOLVER_CHECK(cusolverDnCreate(&handle));
+ 
+     // Query the workspace size required by the solver.
+     size_t lwork_bytes;
+     CUSOLVER_CHECK(cusolverDnSXgels_bufferSize(handle, m, n, nrhs, NULL, lda, NULL, ldb, NULL, ldx, NULL, &lwork_bytes));
+     //printf("%zu\n", lwork_bytes);
+ 
+     void *dWorkspace;
+     CUDA_CHECK( cudaMalloc((void**) &dWorkspace, lwork_bytes));
+ 
+     // Solve the least-squares system A * X = B.
+     int *devInfo;
+     int niter;
+     CUDA_CHECK( cudaMalloc((void**) &devInfo, sizeof(int)));
+     CUSOLVER_CHECK(cusolverDnSXgels(handle, m, n, nrhs, dA, lda, dB, ldb, dX, ldx, dWorkspace, lwork_bytes, &niter, devInfo));
+     int hdevInfo;
+     CUDA_CHECK( cudaMemcpy(&hdevInfo, devInfo, sizeof(int), cudaMemcpyDeviceToHost) );
+     float values[n * nrhs];
+     CUDA_CHECK( cudaMemcpy(values, dX, n * nrhs * sizeof(float), cudaMemcpyDeviceToHost) );
+ 
+     int correct = (hdevInfo == 0);
+     for (int i = 0; i < n * nrhs; i++) {
+         printf("%f == %f\n", values[i], hX_result[i]);
+         if (fabsf(values[i] - hX_result[i]) > 0.001) {
+             correct = 0;
+             //break;
+         }
+     }
+ 
+     if (correct == 1) {
+         printf("DnSXgels test PASSED\n");
+     } else {
+         printf("DnSXgels test FAILED\n");
+     }
+ 
+     CUDA_CHECK( cudaFree(dA));
+     CUDA_CHECK( cudaFree(dB));
+     CUDA_CHECK( cudaFree(dX));
+     CUDA_CHECK( cudaFree(dWorkspace));
+     CUDA_CHECK( cudaFree(devInfo));
+     CUSOLVER_CHECK(cusolverDnDestroy(handle));
+ 
+     return EXIT_SUCCESS;
+ }