Dataset columns (each record pairs a hipify-generated HIP file with its original CUDA source):
  hip_filename   string, length 5 to 84
  hip_content    string, length 79 to 9.69M
  cuda_filename  string, length 4 to 83
  cuda_content   string, length 19 to 9.69M
f66723dadab304a7f1db330bea56b2cb2bc7609f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" #include "sampling_gpu.h" __global__ void gather_points_kernel_fast(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, M) // output: // out: (B, C, M) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; points += bs_idx * c * n + c_idx * n; out[0] = points[idx[0]]; } void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, const float *points, const int *idx, float *out) { // points: (B, C, N) // idx: (B, npoints) // output: // out: (B, C, npoints) hipError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( gather_points_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, c, n, npoints, points, idx, out); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __global__ void gather_points_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { // grad_out: (B, C, M) // idx: (B, M) // output: // grad_points: (B, C, N) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; grad_out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; grad_points += bs_idx * c * n + c_idx * n; atomicAdd(grad_points + idx[0], grad_out[0]); } void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, const float *grad_out, const int *idx, float *grad_points) { // grad_out: (B, C, npoints) // idx: (B, npoints) // output: // grad_points: (B, C, N) hipError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); hipLaunchKernelGGL(( gather_points_grad_kernel_fast), dim3(blocks), dim3(threads), 0, 0, b, c, n, npoints, grad_out, idx, grad_points); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? 
i2 : i1; } template <unsigned int block_size> __global__ void furthest_point_sampling_kernel(int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); // if (mag <= 1e-3) // continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) hipError_t err; unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: hipLaunchKernelGGL(( furthest_point_sampling_kernel<1024>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 512: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 256: hipLaunchKernelGGL(( furthest_point_sampling_kernel<256>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 128: hipLaunchKernelGGL(( furthest_point_sampling_kernel<128>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 64: hipLaunchKernelGGL(( furthest_point_sampling_kernel<64>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 32: hipLaunchKernelGGL(( furthest_point_sampling_kernel<32>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 16: hipLaunchKernelGGL(( furthest_point_sampling_kernel<16>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 8: hipLaunchKernelGGL(( 
furthest_point_sampling_kernel<8>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 4: hipLaunchKernelGGL(( furthest_point_sampling_kernel<4>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 2: hipLaunchKernelGGL(( furthest_point_sampling_kernel<2>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 1: hipLaunchKernelGGL(( furthest_point_sampling_kernel<1>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; default: hipLaunchKernelGGL(( furthest_point_sampling_kernel<512>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); } err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } } template <unsigned int block_size> __global__ void furthest_point_sampling_with_dist_kernel(int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, N) // tmp: (B, N) // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * n; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; // float x1 = dataset[old * 3 + 0]; // float y1 = dataset[old * 3 + 1]; // float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { // float x2, y2, z2; // x2 = dataset[k * 3 + 0]; // y2 = dataset[k * 3 + 1]; // z2 = dataset[k * 3 + 2]; // float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d = dataset[old * n + k]; float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? 
d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { // dataset: (B, N, N) // tmp: (B, N) // output: // idx: (B, M) hipError_t err; unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<1024>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 512: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<512>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 256: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<256>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 128: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<128>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 64: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<64>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 32: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<32>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 16: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<16>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 8: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<8>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 4: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<4>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 2: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<2>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; case 1: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<1>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); break; default: hipLaunchKernelGGL(( furthest_point_sampling_with_dist_kernel<512>), dim3(b), dim3(n_threads), 0, 0, b, n, m, dataset, temp, idxs); } err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } }
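The shared-memory reduction that furthest_point_sampling_kernel runs after each candidate scan is easier to see in isolation. The stripped-down sketch below is illustrative only (it belongs to neither file in this record; block_argmax and its power-of-two block_size requirement are editorial assumptions): every thread contributes its local best (value, index) pair, then log2(block_size) halving steps, each followed by __syncthreads(), leave the winner in slot 0, which is what the chain of __update calls above does.

// Block-wide argmax over nonnegative values, mirroring the __update reduction.
template <unsigned int block_size>
__global__ void block_argmax(const float *vals, int n, int *out_idx) {
    __shared__ float dists[block_size];
    __shared__ int dists_i[block_size];
    int tid = threadIdx.x;

    // Strided pass: each thread keeps its own best candidate.
    float best = -1.0f;  // valid because vals are assumed nonnegative, like squared distances
    int besti = 0;
    for (int k = tid; k < n; k += block_size) {
        if (vals[k] > best) { best = vals[k]; besti = k; }
    }
    dists[tid] = best;
    dists_i[tid] = besti;
    __syncthreads();

    // Tree reduction: halve the active range each step, as __update does above.
    for (unsigned int s = block_size / 2; s > 0; s >>= 1) {
        if (tid < s && dists[tid + s] > dists[tid]) {
            dists[tid]   = dists[tid + s];
            dists_i[tid] = dists_i[tid + s];
        }
        __syncthreads();
    }
    if (tid == 0) *out_idx = dists_i[0];
}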
f66723dadab304a7f1db330bea56b2cb2bc7609f.cu
#include <stdio.h> #include <stdlib.h> #include "cuda_utils.h" #include "sampling_gpu.h" __global__ void gather_points_kernel_fast(int b, int c, int n, int m, const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { // points: (B, C, N) // idx: (B, M) // output: // out: (B, C, M) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; points += bs_idx * c * n + c_idx * n; out[0] = points[idx[0]]; } void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, const float *points, const int *idx, float *out) { // points: (B, C, N) // idx: (B, npoints) // output: // out: (B, C, npoints) cudaError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); gather_points_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, points, idx, out); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __global__ void gather_points_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { // grad_out: (B, C, M) // idx: (B, M) // output: // grad_points: (B, C, N) int bs_idx = blockIdx.z; int c_idx = blockIdx.y; int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; grad_out += bs_idx * c * m + c_idx * m + pt_idx; idx += bs_idx * m + pt_idx; grad_points += bs_idx * c * n + c_idx * n; atomicAdd(grad_points + idx[0], grad_out[0]); } void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, const float *grad_out, const int *idx, float *grad_points) { // grad_out: (B, C, npoints) // idx: (B, npoints) // output: // grad_points: (B, C, N) cudaError_t err; dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) dim3 threads(THREADS_PER_BLOCK); gather_points_grad_kernel_fast<<<blocks, threads>>>(b, c, n, npoints, grad_out, idx, grad_points); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } __device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ const float v1 = dists[idx1], v2 = dists[idx2]; const int i1 = dists_i[idx1], i2 = dists_i[idx2]; dists[idx1] = max(v1, v2); dists_i[idx1] = v2 > v1 ? 
i2 : i1; } template <unsigned int block_size> __global__ void furthest_point_sampling_kernel(int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * 3; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; float x1 = dataset[old * 3 + 0]; float y1 = dataset[old * 3 + 1]; float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { float x2, y2, z2; x2 = dataset[k * 3 + 0]; y2 = dataset[k * 3 + 1]; z2 = dataset[k * 3 + 2]; // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); // if (mag <= 1e-3) // continue; float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { // dataset: (B, N, 3) // tmp: (B, N) // output: // idx: (B, M) cudaError_t err; unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: furthest_point_sampling_kernel<1024><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 512: furthest_point_sampling_kernel<512><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 256: furthest_point_sampling_kernel<256><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 128: furthest_point_sampling_kernel<128><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 64: furthest_point_sampling_kernel<64><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 32: furthest_point_sampling_kernel<32><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 16: furthest_point_sampling_kernel<16><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 8: furthest_point_sampling_kernel<8><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 4: furthest_point_sampling_kernel<4><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 2: furthest_point_sampling_kernel<2><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 1: 
furthest_point_sampling_kernel<1><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; default: furthest_point_sampling_kernel<512><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); } err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } } template <unsigned int block_size> __global__ void furthest_point_sampling_with_dist_kernel(int b, int n, int m, const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { // dataset: (B, N, N) // tmp: (B, N) // output: // idx: (B, M) if (m <= 0) return; __shared__ float dists[block_size]; __shared__ int dists_i[block_size]; int batch_index = blockIdx.x; dataset += batch_index * n * n; temp += batch_index * n; idxs += batch_index * m; int tid = threadIdx.x; const int stride = block_size; int old = 0; if (threadIdx.x == 0) idxs[0] = old; __syncthreads(); for (int j = 1; j < m; j++) { int besti = 0; float best = -1; // float x1 = dataset[old * 3 + 0]; // float y1 = dataset[old * 3 + 1]; // float z1 = dataset[old * 3 + 2]; for (int k = tid; k < n; k += stride) { // float x2, y2, z2; // x2 = dataset[k * 3 + 0]; // y2 = dataset[k * 3 + 1]; // z2 = dataset[k * 3 + 2]; // float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); float d = dataset[old * n + k]; float d2 = min(d, temp[k]); temp[k] = d2; besti = d2 > best ? k : besti; best = d2 > best ? d2 : best; } dists[tid] = best; dists_i[tid] = besti; __syncthreads(); if (block_size >= 1024) { if (tid < 512) { __update(dists, dists_i, tid, tid + 512); } __syncthreads(); } if (block_size >= 512) { if (tid < 256) { __update(dists, dists_i, tid, tid + 256); } __syncthreads(); } if (block_size >= 256) { if (tid < 128) { __update(dists, dists_i, tid, tid + 128); } __syncthreads(); } if (block_size >= 128) { if (tid < 64) { __update(dists, dists_i, tid, tid + 64); } __syncthreads(); } if (block_size >= 64) { if (tid < 32) { __update(dists, dists_i, tid, tid + 32); } __syncthreads(); } if (block_size >= 32) { if (tid < 16) { __update(dists, dists_i, tid, tid + 16); } __syncthreads(); } if (block_size >= 16) { if (tid < 8) { __update(dists, dists_i, tid, tid + 8); } __syncthreads(); } if (block_size >= 8) { if (tid < 4) { __update(dists, dists_i, tid, tid + 4); } __syncthreads(); } if (block_size >= 4) { if (tid < 2) { __update(dists, dists_i, tid, tid + 2); } __syncthreads(); } if (block_size >= 2) { if (tid < 1) { __update(dists, dists_i, tid, tid + 1); } __syncthreads(); } old = dists_i[0]; if (tid == 0) idxs[j] = old; } } void furthest_point_sampling_with_dist_kernel_launcher(int b, int n, int m, const float *dataset, float *temp, int *idxs) { // dataset: (B, N, N) // tmp: (B, N) // output: // idx: (B, M) cudaError_t err; unsigned int n_threads = opt_n_threads(n); switch (n_threads) { case 1024: furthest_point_sampling_with_dist_kernel<1024><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 512: furthest_point_sampling_with_dist_kernel<512><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 256: furthest_point_sampling_with_dist_kernel<256><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 128: furthest_point_sampling_with_dist_kernel<128><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 64: furthest_point_sampling_with_dist_kernel<64><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 32: furthest_point_sampling_with_dist_kernel<32><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 16: 
furthest_point_sampling_with_dist_kernel<16><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 8: furthest_point_sampling_with_dist_kernel<8><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 4: furthest_point_sampling_with_dist_kernel<4><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 2: furthest_point_sampling_with_dist_kernel<2><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; case 1: furthest_point_sampling_with_dist_kernel<1><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); break; default: furthest_point_sampling_with_dist_kernel<512><<<b, n_threads>>>(b, n, m, dataset, temp, idxs); } err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
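A minimal host-side sketch of how the two launchers in this record are typically chained (sampling, then gathering). Only the launcher prototypes, assumed to be declared in sampling_gpu.h, come from the files above; the function name, the separate xyz/feature buffers, and the 1e10 initialization of temp are illustrative assumptions.

#include <cuda_runtime.h>
#include <vector>
#include "sampling_gpu.h"   // assumed to declare the two launchers used below

// Pick m farthest points from (B, N, 3) coordinates, then gather their
// (B, C, N) features into a (B, C, M) output.
void sample_and_gather_example(const float *d_xyz,      // (B, N, 3), device
                               const float *d_features, // (B, C, N), device
                               float *d_out,            // (B, C, M), device
                               int b, int c, int n, int m) {
    float *d_temp = nullptr;  // running min squared distance per point, (B, N)
    int   *d_idx  = nullptr;  // indices of the M sampled points, (B, M)
    cudaMalloc(&d_temp, sizeof(float) * b * n);
    cudaMalloc(&d_idx,  sizeof(int)   * b * m);

    // The kernel keeps min(d, temp[k]) per point, so temp must start larger
    // than any squared distance in the cloud; 1e10 is a common choice.
    std::vector<float> init(static_cast<size_t>(b) * n, 1e10f);
    cudaMemcpy(d_temp, init.data(), sizeof(float) * b * n, cudaMemcpyHostToDevice);

    furthest_point_sampling_kernel_launcher(b, n, m, d_xyz, d_temp, d_idx);
    gather_points_kernel_launcher_fast(b, c, n, m, d_features, d_idx, d_out);

    cudaFree(d_temp);
    cudaFree(d_idx);
}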
34720fe5f986e690e317ebff8ff0e13832f11432.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include "caffe/common.hpp" #include "caffe/util/im2col.hpp" namespace caffe { template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int conv_out_height, const int conv_out_width, Dtype* data_col) { // for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) where n = conv_in_channels * conv_out_height * conv_out_width // ideally if cuda kernel is large enough (kernel_size=blockDim.x * gridDim.x), so that there's 1 thread to fill each of the n conv patches (each with 2D conv kernel size) // when cuda kernel is not large enough (kernel_size<n). n patches are (almost) evenly distributed to blockDim.x * gridDim.x threads // each index_n of the n patches, will be handled by the index_n % kernel_size thread in cuda kernel CUDA_KERNEL_LOOP(index, n) { const int h_index = index / conv_out_width; // h_index: conv_out line index const int h_col = h_index % conv_out_height; // h_col: 2D vertical index in output data_col const int w_col = index % conv_out_width; // w_col: 2D horizontal index in output data_col. (thread) index ele by ele goes through data_col and fill in conv out w->h->c const int c_im = h_index / conv_out_height; // c_im: channel index for each patch in data_col. index / conv_out_width / conv_out_height const int c_col = c_im * kernel_h * kernel_w; // c_col: each patch shall have kernel_h * kernel_w rows in data_col, and each is conv_out_height * conv_out_width wide const int h_offset = h_col * stride_h - pad_h; // 2D veritical start point of the input image to be multiplied by kernel const int w_offset = w_col * stride_w - pad_w; // 2D horizontal start point of the input image to be multiplied by kernel Dtype* data_col_ptr = data_col; data_col_ptr += (c_col * conv_out_height + h_col) * conv_out_width + w_col; // = c_col * conv_out_height * conv_out_width + h_col * conv_out_width + w_col const Dtype* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_offset) * width + w_offset; // = c_im * height * width + h_offset * width + w_offset. start pointer to each small patch of 2D input image to be multiplied by kernel for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h_im = h_offset + i * dilation_h; int w_im = w_offset + j * dilation_w; *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += conv_out_height * conv_out_width; // step 1 row in data_col that's conv_out_width * conv_out_height } } } } template <typename Dtype> void im2col_gpu(const Dtype* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( im2col_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void im2col_gpu<float>(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float* data_col); template void im2col_gpu<double>(const double* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double* data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col) { int d_temp[num_axes]; // NOLINT(runtime/arrays) int d_iter[num_axes]; // NOLINT(runtime/arrays) __shared__ int shared_dilation[num_axes]; __shared__ int shared_kernel_shape[num_axes]; __shared__ int shared_pad[num_axes]; __shared__ int shared_stride[num_axes]; __shared__ int shared_col_shape[num_axes + 1]; __shared__ int shared_im_shape[num_axes + 1]; if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x]; shared_pad[threadIdx.x] = pad[threadIdx.x]; shared_stride[threadIdx.x] = stride[threadIdx.x]; } if (threadIdx.x < num_axes + 1) { shared_col_shape[threadIdx.x] = col_shape[threadIdx.x]; shared_im_shape[threadIdx.x] = im_shape[threadIdx.x]; } __syncthreads(); int i; CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. 
int channel_in = index; int channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { d_temp[i] = channel_in % shared_col_shape[i + 1]; channel_in /= shared_col_shape[i + 1]; channel_out *= shared_kernel_shape[i]; } channel_out *= channel_in; int data_col_inc = 1; for (i = 0; i < num_axes; ++i) { channel_out *= shared_col_shape[i + 1]; channel_out += d_temp[i]; d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i]; channel_in *= shared_im_shape[i + 1]; channel_in += d_temp[i]; data_col_inc *= shared_col_shape[i + 1]; d_iter[i] = 0; } Dtype* data_col_ptr = data_col + channel_out; const Dtype* data_im_ptr = data_im + channel_in; bool incremented; do { bool in_range = true; for (i = 0; i < num_axes; ++i) { const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1]; if (!in_range) { break; } } if (in_range) { int data_im_offset = d_iter[0] * shared_dilation[0]; for (i = 1; i < num_axes; ++i) { data_im_offset *= shared_im_shape[i + 1]; data_im_offset += d_iter[i] * shared_dilation[i]; } *data_col_ptr = data_im_ptr[data_im_offset]; } else { *data_col_ptr = 0; } data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { const int d_max = shared_kernel_shape[i]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 ++d_iter[i]; incremented = true; break; } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); // do } // CUDA_KERNEL_LOOP(index, n) } template <typename Dtype> void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col) { // num_axes should be smaller than block size DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS); switch (num_spatial_axes) { case 1: im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 2: im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 3: im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 4: im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 5: im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 6: im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 7: 
im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 8: im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 9: im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 10: im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; default: LOG(FATAL) << "im2col_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void im2col_nd_gpu<float>(const float* data_im, const int num_spatial_axes, const int col_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, float* data_col); template void im2col_nd_gpu<double>(const double* data_im, const int num_spatial_axes, const int col_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, double* data_col); template <typename Dtype> __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; const int w_im = index % width + pad_w; const int h_im = (index / width) % height + pad_h; const int c_im = index / (width * height); int kernel_extent_w = (kernel_w - 1) * dilation_w + 1; int kernel_extent_h = (kernel_h - 1) * dilation_h + 1; // compute the start and end of the output const int w_col_start = (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1; const int w_col_end = min(w_im / stride_w + 1, width_col); const int h_col_start = (h_im < kernel_extent_h) ? 
0 : (h_im - kernel_extent_h) / stride_h + 1; const int h_col_end = min(h_im / stride_h + 1, height_col); // TODO: use LCM of stride and dilation to avoid unnecessary loops for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) { for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) { int h_k = (h_im - h_col * stride_h); int w_k = (w_im - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col + h_col) * width_col + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } template <typename Dtype> void col2im_gpu(const Dtype* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype* data_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( col2im_gpu_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_kernels, data_col, height, width, channels, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, data_im); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void col2im_gpu<float>(const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float* data_im); template void col2im_gpu<double>(const double* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double* data_im); template <typename Dtype, int num_axes> __global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_im) { int d_im[num_axes]; // NOLINT(runtime/arrays) int d_col_iter[num_axes]; // NOLINT(runtime/arrays) int d_col_start[num_axes]; // NOLINT(runtime/arrays) int d_col_end[num_axes]; // NOLINT(runtime/arrays) __shared__ int shared_dilation[num_axes]; __shared__ int shared_kernel_shape[num_axes]; __shared__ int shared_pad[num_axes]; __shared__ int shared_stride[num_axes]; __shared__ int shared_col_shape[num_axes + 1]; __shared__ int shared_im_shape[num_axes + 1]; if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x]; shared_pad[threadIdx.x] = pad[threadIdx.x]; shared_stride[threadIdx.x] = stride[threadIdx.x]; } if (threadIdx.x < num_axes + 1) { shared_col_shape[threadIdx.x] = col_shape[threadIdx.x]; shared_im_shape[threadIdx.x] = im_shape[threadIdx.x]; } __syncthreads(); CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. 
int c_im = index; // Calculate d_im (image dimensions). for (int i = num_axes - 1; i >= 0; --i) { d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i]; c_im /= shared_im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; for (int i = 0; i < num_axes; ++i) { const int kernel_extent = shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1; d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_extent) ? 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1; d_col_end[i] = min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]); if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; done = true; break; // for (int i = 0; i < num_axes; ++i) } } if (done) { continue; // CUDA_KERNEL_LOOP(index, n) } // Loop over the col to compute the output val. Dtype val = 0; bool incremented = true; bool skip = false; do { // Compute the final offset. int final_offset = 0; int kernel_shape_prod = 1; int kernel_index; for (int i = num_axes - 1; i >= 0; --i) { kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i]; if (kernel_index % shared_dilation[i]) { skip = true; break; } else { kernel_index /= shared_dilation[i]; final_offset += kernel_index * kernel_shape_prod; kernel_shape_prod *= shared_kernel_shape[i]; } } if (!skip) { final_offset += kernel_shape_prod * c_im; for (int i = 0; i < num_axes; ++i) { final_offset *= shared_col_shape[i + 1]; final_offset += d_col_iter[i]; } val += data_col[final_offset]; } skip = false; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { const int d_max = d_col_end[i]; if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; } else { // d_col_iter[i] < d_max - 1 ++d_col_iter[i]; incremented = true; break; // for (int i = num_axes - 1; i >= 0; --i) } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; } // CUDA_KERNEL_LOOP(index, n) } template <typename Dtype> void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_im) { // num_axes should be smaller than block size DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS); switch (num_spatial_axes) { case 1: col2im_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 2: col2im_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 3: col2im_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 4: col2im_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 5: col2im_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 6: col2im_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 7: col2im_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 8: col2im_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 9: col2im_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 10: col2im_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(CAFFE_GET_BLOCKS(im_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; default: LOG(FATAL) << "col2im_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void col2im_nd_gpu<float>(const float* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, float* data_im); template void col2im_nd_gpu<double>(const double* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, double* data_im); } // namespace caffe
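The grid sizes in the launchers above come from the output-shape arithmetic height_col = (height + 2*pad_h - (dilation_h*(kernel_h-1)+1)) / stride_h + 1, and likewise for width_col. A small standalone sketch of that arithmetic follows; the Im2ColShape helper and the 3x224x224 example are editorial, only the formula itself is taken from im2col_gpu/col2im_gpu above.

#include <cstdio>

struct Im2ColShape {
    int height_col;        // number of vertical output positions
    int width_col;         // number of horizontal output positions
    long long col_elems;   // channels * kernel_h * kernel_w * height_col * width_col
};

Im2ColShape im2col_shape(int channels, int height, int width,
                         int kernel_h, int kernel_w,
                         int pad_h, int pad_w,
                         int stride_h, int stride_w,
                         int dilation_h, int dilation_w) {
    // Effective (dilated) kernel extent, exactly as in the launchers above.
    int ext_h = dilation_h * (kernel_h - 1) + 1;
    int ext_w = dilation_w * (kernel_w - 1) + 1;
    Im2ColShape s;
    s.height_col = (height + 2 * pad_h - ext_h) / stride_h + 1;
    s.width_col  = (width  + 2 * pad_w - ext_w) / stride_w + 1;
    s.col_elems  = 1LL * channels * kernel_h * kernel_w * s.height_col * s.width_col;
    return s;
}

int main() {
    // 3x224x224 input, 3x3 kernel, pad 1, stride 1, no dilation:
    // 224x224 output positions and 3*3*3 = 27 rows in data_col.
    Im2ColShape s = im2col_shape(3, 224, 224, 3, 3, 1, 1, 1, 1, 1, 1);
    printf("height_col=%d width_col=%d col_elems=%lld\n",
           s.height_col, s.width_col, s.col_elems);
    return 0;
}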
34720fe5f986e690e317ebff8ff0e13832f11432.cu
#include <algorithm> #include "caffe/common.hpp" #include "caffe/util/im2col.hpp" namespace caffe { template <typename Dtype> __global__ void im2col_gpu_kernel(const int n, const Dtype* data_im, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int conv_out_height, const int conv_out_width, Dtype* data_col) { // for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) where n = conv_in_channels * conv_out_height * conv_out_width // ideally if cuda kernel is large enough (kernel_size=blockDim.x * gridDim.x), so that there's 1 thread to fill each of the n conv patches (each with 2D conv kernel size) // when cuda kernel is not large enough (kernel_size<n). n patches are (almost) evenly distributed to blockDim.x * gridDim.x threads // each index_n of the n patches, will be handled by the index_n % kernel_size thread in cuda kernel CUDA_KERNEL_LOOP(index, n) { const int h_index = index / conv_out_width; // h_index: conv_out line index const int h_col = h_index % conv_out_height; // h_col: 2D vertical index in output data_col const int w_col = index % conv_out_width; // w_col: 2D horizontal index in output data_col. (thread) index ele by ele goes through data_col and fill in conv out w->h->c const int c_im = h_index / conv_out_height; // c_im: channel index for each patch in data_col. index / conv_out_width / conv_out_height const int c_col = c_im * kernel_h * kernel_w; // c_col: each patch shall have kernel_h * kernel_w rows in data_col, and each is conv_out_height * conv_out_width wide const int h_offset = h_col * stride_h - pad_h; // 2D veritical start point of the input image to be multiplied by kernel const int w_offset = w_col * stride_w - pad_w; // 2D horizontal start point of the input image to be multiplied by kernel Dtype* data_col_ptr = data_col; data_col_ptr += (c_col * conv_out_height + h_col) * conv_out_width + w_col; // = c_col * conv_out_height * conv_out_width + h_col * conv_out_width + w_col const Dtype* data_im_ptr = data_im; data_im_ptr += (c_im * height + h_offset) * width + w_offset; // = c_im * height * width + h_offset * width + w_offset. start pointer to each small patch of 2D input image to be multiplied by kernel for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { int h_im = h_offset + i * dilation_h; int w_im = w_offset + j * dilation_w; *data_col_ptr = (h_im >= 0 && w_im >= 0 && h_im < height && w_im < width) ? data_im_ptr[i * dilation_h * width + j * dilation_w] : 0; data_col_ptr += conv_out_height * conv_out_width; // step 1 row in data_col that's conv_out_width * conv_out_height } } } } template <typename Dtype> void im2col_gpu(const Dtype* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype* data_col) { // We are going to launch channels * height_col * width_col kernels, each // kernel responsible for copying a single-channel grid. 
int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height_col * width_col; // NOLINT_NEXT_LINE(whitespace/operators) im2col_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, height, width, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, data_col); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void im2col_gpu<float>(const float* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float* data_col); template void im2col_gpu<double>(const double* data_im, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double* data_col); template <typename Dtype, int num_axes> __global__ void im2col_nd_gpu_kernel(const int n, const Dtype* data_im, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col) { int d_temp[num_axes]; // NOLINT(runtime/arrays) int d_iter[num_axes]; // NOLINT(runtime/arrays) __shared__ int shared_dilation[num_axes]; __shared__ int shared_kernel_shape[num_axes]; __shared__ int shared_pad[num_axes]; __shared__ int shared_stride[num_axes]; __shared__ int shared_col_shape[num_axes + 1]; __shared__ int shared_im_shape[num_axes + 1]; if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x]; shared_pad[threadIdx.x] = pad[threadIdx.x]; shared_stride[threadIdx.x] = stride[threadIdx.x]; } if (threadIdx.x < num_axes + 1) { shared_col_shape[threadIdx.x] = col_shape[threadIdx.x]; shared_im_shape[threadIdx.x] = im_shape[threadIdx.x]; } __syncthreads(); int i; CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. 
int channel_in = index; int channel_out = 1; for (i = num_axes - 1; i >= 0; --i) { d_temp[i] = channel_in % shared_col_shape[i + 1]; channel_in /= shared_col_shape[i + 1]; channel_out *= shared_kernel_shape[i]; } channel_out *= channel_in; int data_col_inc = 1; for (i = 0; i < num_axes; ++i) { channel_out *= shared_col_shape[i + 1]; channel_out += d_temp[i]; d_temp[i] = d_temp[i] * shared_stride[i] - shared_pad[i]; channel_in *= shared_im_shape[i + 1]; channel_in += d_temp[i]; data_col_inc *= shared_col_shape[i + 1]; d_iter[i] = 0; } Dtype* data_col_ptr = data_col + channel_out; const Dtype* data_im_ptr = data_im + channel_in; bool incremented; do { bool in_range = true; for (i = 0; i < num_axes; ++i) { const int d_iter_im = d_iter[i] * shared_dilation[i] + d_temp[i]; in_range &= d_iter_im >= 0 && d_iter_im < shared_im_shape[i + 1]; if (!in_range) { break; } } if (in_range) { int data_im_offset = d_iter[0] * shared_dilation[0]; for (i = 1; i < num_axes; ++i) { data_im_offset *= shared_im_shape[i + 1]; data_im_offset += d_iter[i] * shared_dilation[i]; } *data_col_ptr = data_im_ptr[data_im_offset]; } else { *data_col_ptr = 0; } data_col_ptr += data_col_inc; incremented = false; for (i = num_axes - 1; i >= 0; --i) { const int d_max = shared_kernel_shape[i]; if (d_iter[i] == d_max - 1) { d_iter[i] = 0; } else { // d_iter[i] < d_max - 1 ++d_iter[i]; incremented = true; break; } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); // do } // CUDA_KERNEL_LOOP(index, n) } template <typename Dtype> void im2col_nd_gpu(const Dtype* data_im, const int num_spatial_axes, const int num_kernels, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_col) { // num_axes should be smaller than block size DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS); switch (num_spatial_axes) { case 1: im2col_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 2: im2col_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 3: im2col_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 4: im2col_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 5: im2col_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 6: im2col_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 7: im2col_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, 
data_col); break; case 8: im2col_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 9: im2col_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; case 10: im2col_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_im, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_col); break; default: LOG(FATAL) << "im2col_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void im2col_nd_gpu<float>(const float* data_im, const int num_spatial_axes, const int col_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, float* data_col); template void im2col_nd_gpu<double>(const double* data_im, const int num_spatial_axes, const int col_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, double* data_col); template <typename Dtype> __global__ void col2im_gpu_kernel(const int n, const Dtype* data_col, const int height, const int width, const int channels, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int height_col, const int width_col, Dtype* data_im) { CUDA_KERNEL_LOOP(index, n) { Dtype val = 0; const int w_im = index % width + pad_w; const int h_im = (index / width) % height + pad_h; const int c_im = index / (width * height); int kernel_extent_w = (kernel_w - 1) * dilation_w + 1; int kernel_extent_h = (kernel_h - 1) * dilation_h + 1; // compute the start and end of the output const int w_col_start = (w_im < kernel_extent_w) ? 0 : (w_im - kernel_extent_w) / stride_w + 1; const int w_col_end = min(w_im / stride_w + 1, width_col); const int h_col_start = (h_im < kernel_extent_h) ? 
0 : (h_im - kernel_extent_h) / stride_h + 1; const int h_col_end = min(h_im / stride_h + 1, height_col); // TODO: use LCM of stride and dilation to avoid unnecessary loops for (int h_col = h_col_start; h_col < h_col_end; h_col += 1) { for (int w_col = w_col_start; w_col < w_col_end; w_col += 1) { int h_k = (h_im - h_col * stride_h); int w_k = (w_im - w_col * stride_w); if (h_k % dilation_h == 0 && w_k % dilation_w == 0) { h_k /= dilation_h; w_k /= dilation_w; int data_col_index = (((c_im * kernel_h + h_k) * kernel_w + w_k) * height_col + h_col) * width_col + w_col; val += data_col[data_col_index]; } } } data_im[index] = val; } } template <typename Dtype> void col2im_gpu(const Dtype* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, Dtype* data_im) { int height_col = (height + 2 * pad_h - (dilation_h * (kernel_h - 1) + 1)) / stride_h + 1; int width_col = (width + 2 * pad_w - (dilation_w * (kernel_w - 1) + 1)) / stride_w + 1; int num_kernels = channels * height * width; // To avoid involving atomic operations, we will launch one kernel per // bottom dimension, and then in the kernel add up the top dimensions. // NOLINT_NEXT_LINE(whitespace/operators) col2im_gpu_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels), CAFFE_CUDA_NUM_THREADS>>>( num_kernels, data_col, height, width, channels, kernel_h, kernel_w, pad_h, pad_w, stride_h, stride_w, dilation_h, dilation_w, height_col, width_col, data_im); CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void col2im_gpu<float>(const float* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, float* data_im); template void col2im_gpu<double>(const double* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, double* data_im); template <typename Dtype, int num_axes> __global__ void col2im_nd_gpu_kernel(const int n, const Dtype* data_col, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_im) { int d_im[num_axes]; // NOLINT(runtime/arrays) int d_col_iter[num_axes]; // NOLINT(runtime/arrays) int d_col_start[num_axes]; // NOLINT(runtime/arrays) int d_col_end[num_axes]; // NOLINT(runtime/arrays) __shared__ int shared_dilation[num_axes]; __shared__ int shared_kernel_shape[num_axes]; __shared__ int shared_pad[num_axes]; __shared__ int shared_stride[num_axes]; __shared__ int shared_col_shape[num_axes + 1]; __shared__ int shared_im_shape[num_axes + 1]; if (threadIdx.x < num_axes) { shared_dilation[threadIdx.x] = dilation[threadIdx.x]; shared_kernel_shape[threadIdx.x] = kernel_shape[threadIdx.x]; shared_pad[threadIdx.x] = pad[threadIdx.x]; shared_stride[threadIdx.x] = stride[threadIdx.x]; } if (threadIdx.x < num_axes + 1) { shared_col_shape[threadIdx.x] = col_shape[threadIdx.x]; shared_im_shape[threadIdx.x] = im_shape[threadIdx.x]; } __syncthreads(); CUDA_KERNEL_LOOP(index, n) { // Initialize channel_in, computed in the loop below, with intermediate // computations used to compute the spatial indices. int c_im = index; // Calculate d_im (image dimensions). 
for (int i = num_axes - 1; i >= 0; --i) { d_im[i] = c_im % shared_im_shape[i + 1] + shared_pad[i]; c_im /= shared_im_shape[i + 1]; } // Calculate col start/end indices. bool done = false; for (int i = 0; i < num_axes; ++i) { const int kernel_extent = shared_dilation[i] * (shared_kernel_shape[i] - 1) + 1; d_col_start[i] = d_col_iter[i] = (d_im[i] < kernel_extent) ? 0 : (d_im[i] - kernel_extent) / shared_stride[i] + 1; d_col_end[i] = min(d_im[i] / shared_stride[i] + 1, shared_col_shape[i + 1]); if (d_col_start[i] >= d_col_end[i]) { // Skip computation if the dimension is 0 at any spatial axis -- // final val will be 0. data_im[index] = 0; done = true; break; // for (int i = 0; i < num_axes; ++i) } } if (done) { continue; // CUDA_KERNEL_LOOP(index, n) } // Loop over the col to compute the output val. Dtype val = 0; bool incremented = true; bool skip = false; do { // Compute the final offset. int final_offset = 0; int kernel_shape_prod = 1; int kernel_index; for (int i = num_axes - 1; i >= 0; --i) { kernel_index = d_im[i] - d_col_iter[i] * shared_stride[i]; if (kernel_index % shared_dilation[i]) { skip = true; break; } else { kernel_index /= shared_dilation[i]; final_offset += kernel_index * kernel_shape_prod; kernel_shape_prod *= shared_kernel_shape[i]; } } if (!skip) { final_offset += kernel_shape_prod * c_im; for (int i = 0; i < num_axes; ++i) { final_offset *= shared_col_shape[i + 1]; final_offset += d_col_iter[i]; } val += data_col[final_offset]; } skip = false; incremented = false; for (int i = num_axes - 1; i >= 0; --i) { const int d_max = d_col_end[i]; if (d_col_iter[i] == d_max - 1) { d_col_iter[i] = d_col_start[i]; } else { // d_col_iter[i] < d_max - 1 ++d_col_iter[i]; incremented = true; break; // for (int i = num_axes - 1; i >= 0; --i) } } // for (int i = num_axes - 1; i >= 0; --i) } while (incremented); data_im[index] = val; } // CUDA_KERNEL_LOOP(index, n) } template <typename Dtype> void col2im_nd_gpu(const Dtype* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, Dtype* data_im) { // num_axes should be smaller than block size DCHECK_LT(num_spatial_axes, CAFFE_CUDA_NUM_THREADS); switch (num_spatial_axes) { case 1: col2im_nd_gpu_kernel<Dtype, 1> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 2: col2im_nd_gpu_kernel<Dtype, 2> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 3: col2im_nd_gpu_kernel<Dtype, 3> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 4: col2im_nd_gpu_kernel<Dtype, 4> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 5: col2im_nd_gpu_kernel<Dtype, 5> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 6: col2im_nd_gpu_kernel<Dtype, 6> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), 
CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 7: col2im_nd_gpu_kernel<Dtype, 7> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 8: col2im_nd_gpu_kernel<Dtype, 8> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 9: col2im_nd_gpu_kernel<Dtype, 9> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; case 10: col2im_nd_gpu_kernel<Dtype, 10> // NOLINT_NEXT_LINE(whitespace/operators) <<<CAFFE_GET_BLOCKS(im_size), CAFFE_CUDA_NUM_THREADS>>>( im_size, data_col, im_shape, col_shape, kernel_shape, pad, stride, dilation, data_im); break; default: LOG(FATAL) << "col2im_nd_gpu does not support computation with " << num_spatial_axes << " spatial axes"; } CUDA_POST_KERNEL_CHECK; } // Explicit instantiation template void col2im_nd_gpu<float>(const float* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, float* data_im); template void col2im_nd_gpu<double>(const double* data_col, const int num_spatial_axes, const int im_size, const int* im_shape, const int* col_shape, const int* kernel_shape, const int* pad, const int* stride, const int* dilation, double* data_im); } // namespace caffe
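A hedged, illustrative host-side sketch (not part of the Caffe sources above): one plausible way to invoke col2im_gpu<float> as declared in this file. The header path, buffer names, and the 3x32x32 / 3x3 / pad 1 / stride 1 geometry are assumptions chosen only for illustration; d_col and d_im must already be device allocations of the sizes noted in the comments.

#include <cuda_runtime.h>
#include "caffe/util/im2col.hpp"   // assumed location of the col2im_gpu declaration

void col2im_example(const float* d_col, float* d_im) {
  const int channels = 3, height = 32, width = 32;
  const int k = 3, pad = 1, stride = 1, dilation = 1;
  // With this geometry height_col = width_col = 32, so d_col holds
  // channels*k*k*32*32 floats and d_im holds channels*32*32 floats (both device pointers).
  caffe::col2im_gpu<float>(d_col, channels, height, width,
                           k, k, pad, pad, stride, stride, dilation, dilation, d_im);
  cudaDeviceSynchronize();   // surface any asynchronous launch error before reusing d_im
}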
a1df5b1b1169252e1d6f44d20b70964ab6a6b187.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<cuda.h> #include<cuda_runtime.h> #include<stdio.h> #include<stdlib.h> #include<cmath> #define TILE_SIZE 2 __device__ void store_full(float*,float*,int); __device__ void load_full(float*,float*,int); __device__ void potrf_tile(float*,int,int); __device__ void trsm_tile(float*,int,int,int); __device__ void syrk_tile(float*,int,int,int,int); __global__ void right_looking_launch_kernel(float*,int); __device__ void store_zeros(float*,int); __device__ void store_zeros_diagonal(float*,int,int); __device__ void store_zeros_last(float*,int); __device__ void store_full(float* read_data,float* write_data,int N) { int i,j,ID; for(i=0;i<N/TILE_SIZE;i++) { for(j=0;j<N/TILE_SIZE;j++) { ID = (i*TILE_SIZE + threadIdx.y)*N + j*TILE_SIZE + threadIdx.x; write_data[ID + N*N*blockIdx.x] = read_data[ID]; } } __syncthreads(); } __device__ void load_full(float* read_data,float* write_data,int N) { int i,j,ID; for(i=0;i<N/TILE_SIZE;i++) { for(j=0;j<N/TILE_SIZE;j++) { ID = (i*TILE_SIZE + threadIdx.y)*N + j*TILE_SIZE + threadIdx.x; write_data[ID] = read_data[ID + N*N*blockIdx.x]; } } __syncthreads(); } __device__ void potrf_tile(float* t_A,int i,int N) { int t_x = threadIdx.x; int t_y = threadIdx.y; for(int k=0;k<TILE_SIZE;k++) { if(t_x==t_y && t_x==k) { t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x] = sqrtf(t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x]); } __syncthreads(); if(t_x<t_y && t_x == k) { t_A[i*TILE_SIZE*(1+N) + t_y*N + t_x]/= t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x]; } __syncthreads(); if(k<t_y && k<t_x && t_x<=t_y) { t_A[i*TILE_SIZE*(1+N) + t_y*N + t_x]-= t_A[i*TILE_SIZE*(1+N) + t_x*N + k]*t_A[i*TILE_SIZE*(1+N) + t_y*N + k]; } __syncthreads(); } } __device__ void trsm_tile(float *row_data,int i,int j,int N) { int t_x = threadIdx.x; int t_y = threadIdx.y; for(int s=0;s<TILE_SIZE;s++) { if(t_x==s) { row_data[(t_y + j*TILE_SIZE)*N + t_x + i*TILE_SIZE]/= row_data[i*TILE_SIZE*(1+N) + t_x*(1+N)]; } __syncthreads(); if(t_x > s) { row_data[(t_y + j*TILE_SIZE)*N + t_x + i*TILE_SIZE]-= row_data[(t_x + i*TILE_SIZE)*N + s]*row_data[(t_y + j*TILE_SIZE)*N + s]; } __syncthreads(); } } __device__ void syrk_tile(float* row_data,int i,int j,int k,int N) { int t_y = threadIdx.y; int t_x = threadIdx.x; float valueToSubtract = 0.0; for(int r=0;r<TILE_SIZE;r++) { valueToSubtract+= row_data[(t_x + k*TILE_SIZE)*N + i*TILE_SIZE + r]*row_data[(t_y + j*TILE_SIZE)*N + i*TILE_SIZE + r]; } row_data[(t_y + j*TILE_SIZE)*N + t_x + k*TILE_SIZE]-= valueToSubtract; __syncthreads(); } __device__ void store_zeros(float* A,int N) { int t_y = threadIdx.y; int t_x = threadIdx.x; int i,j; for(i=0;i<N/TILE_SIZE-1;i++) { for(j=i+1;j<N/TILE_SIZE;j++) A[j*blockDim.x + t_x + (i*blockDim.y + t_y)*N] = 0.0; } __syncthreads(); } __device__ void store_zeros_diagonal(float* A,int N,int b) // Will only work if (N/TILE_SIZE) is even { int t_y = threadIdx.y; int t_x = threadIdx.x; int i; for(i=0;i<N/TILE_SIZE-b;i+=2) { if(t_x>t_y) A[i*blockDim.x + t_x + (i*blockDim.y + t_y)*N] = 0.0; if(t_x<t_y) A[(i+1)*blockDim.x*(1+N) + (blockDim.x-t_x-1) + (blockDim.y-t_y-1)*N] = 0.0; } __syncthreads(); } __device__ void store_zeros_last(float* A,int N) // Will only work if (N/TILE_SIZE) is even { int t_y = threadIdx.y; int t_x = threadIdx.x; if(t_x>t_y) A[(N/TILE_SIZE-1)*blockDim.x*(1+N) + t_x + t_y*N] = 0.0; __syncthreads(); } __global__ void right_looking_launch_kernel(float* read_data,int N) { extern __shared__ float data[]; int i,j,k; load_full(read_data,data,N); for(i=0;i<N/TILE_SIZE;i++) { 
potrf_tile(data,i,N); for(j=i+1;j<N/TILE_SIZE;j++) { trsm_tile(data,i,j,N); for(k=i+1;k<=j;k++) { syrk_tile(data,i,j,k,N); } } } store_zeros(data,N); if((N/TILE_SIZE)%2==0) store_zeros_diagonal(data,N,0); else { store_zeros_diagonal(data,N,1); store_zeros_last(data,N); } store_full(data,read_data,N); }
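A hedged sketch of a host driver (not present in the file above) for right_looking_launch_kernel: one block of TILE_SIZE x TILE_SIZE threads per matrix, with the whole N x N matrix staged in dynamic shared memory. The matrix values, N = 4, and num_matrices = 1 are assumptions for illustration; N must be a multiple of TILE_SIZE and small enough that N*N floats fit in shared memory, and the sketch assumes it is compiled together with the kernel definitions above.

int main() {
    const int N = 4, num_matrices = 1;
    float h_A[N * N] = { 4, 1, 0, 0,
                         1, 4, 1, 0,
                         0, 1, 4, 1,
                         0, 0, 1, 4 };        // tridiagonal SPD example input, row-major
    float *d_A;
    hipMalloc((void **) &d_A, num_matrices * N * N * sizeof(float));
    hipMemcpy(d_A, h_A, N * N * sizeof(float), hipMemcpyHostToDevice);

    dim3 block(TILE_SIZE, TILE_SIZE);          // kernel indexes threadIdx.x / threadIdx.y inside a tile
    dim3 grid(num_matrices);                   // blockIdx.x selects the matrix within the batch
    size_t shmem = N * N * sizeof(float);      // backs extern __shared__ float data[]
    hipLaunchKernelGGL(right_looking_launch_kernel, grid, block, shmem, 0, d_A, N);

    hipMemcpy(h_A, d_A, N * N * sizeof(float), hipMemcpyDeviceToHost);
    printf("L[0][0] = %f (expect 2.0 for this input)\n", h_A[0]);
    hipFree(d_A);
    return 0;
}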
a1df5b1b1169252e1d6f44d20b70964ab6a6b187.cu
#include<cuda.h> #include<cuda_runtime.h> #include<stdio.h> #include<stdlib.h> #include<cmath> #define TILE_SIZE 2 __device__ void store_full(float*,float*,int); __device__ void load_full(float*,float*,int); __device__ void potrf_tile(float*,int,int); __device__ void trsm_tile(float*,int,int,int); __device__ void syrk_tile(float*,int,int,int,int); __global__ void right_looking_launch_kernel(float*,int); __device__ void store_zeros(float*,int); __device__ void store_zeros_diagonal(float*,int,int); __device__ void store_zeros_last(float*,int); __device__ void store_full(float* read_data,float* write_data,int N) { int i,j,ID; for(i=0;i<N/TILE_SIZE;i++) { for(j=0;j<N/TILE_SIZE;j++) { ID = (i*TILE_SIZE + threadIdx.y)*N + j*TILE_SIZE + threadIdx.x; write_data[ID + N*N*blockIdx.x] = read_data[ID]; } } __syncthreads(); } __device__ void load_full(float* read_data,float* write_data,int N) { int i,j,ID; for(i=0;i<N/TILE_SIZE;i++) { for(j=0;j<N/TILE_SIZE;j++) { ID = (i*TILE_SIZE + threadIdx.y)*N + j*TILE_SIZE + threadIdx.x; write_data[ID] = read_data[ID + N*N*blockIdx.x]; } } __syncthreads(); } __device__ void potrf_tile(float* t_A,int i,int N) { int t_x = threadIdx.x; int t_y = threadIdx.y; for(int k=0;k<TILE_SIZE;k++) { if(t_x==t_y && t_x==k) { t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x] = sqrtf(t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x]); } __syncthreads(); if(t_x<t_y && t_x == k) { t_A[i*TILE_SIZE*(1+N) + t_y*N + t_x]/= t_A[i*TILE_SIZE*(1+N) + t_x*N + t_x]; } __syncthreads(); if(k<t_y && k<t_x && t_x<=t_y) { t_A[i*TILE_SIZE*(1+N) + t_y*N + t_x]-= t_A[i*TILE_SIZE*(1+N) + t_x*N + k]*t_A[i*TILE_SIZE*(1+N) + t_y*N + k]; } __syncthreads(); } } __device__ void trsm_tile(float *row_data,int i,int j,int N) { int t_x = threadIdx.x; int t_y = threadIdx.y; for(int s=0;s<TILE_SIZE;s++) { if(t_x==s) { row_data[(t_y + j*TILE_SIZE)*N + t_x + i*TILE_SIZE]/= row_data[i*TILE_SIZE*(1+N) + t_x*(1+N)]; } __syncthreads(); if(t_x > s) { row_data[(t_y + j*TILE_SIZE)*N + t_x + i*TILE_SIZE]-= row_data[(t_x + i*TILE_SIZE)*N + s]*row_data[(t_y + j*TILE_SIZE)*N + s]; } __syncthreads(); } } __device__ void syrk_tile(float* row_data,int i,int j,int k,int N) { int t_y = threadIdx.y; int t_x = threadIdx.x; float valueToSubtract = 0.0; for(int r=0;r<TILE_SIZE;r++) { valueToSubtract+= row_data[(t_x + k*TILE_SIZE)*N + i*TILE_SIZE + r]*row_data[(t_y + j*TILE_SIZE)*N + i*TILE_SIZE + r]; } row_data[(t_y + j*TILE_SIZE)*N + t_x + k*TILE_SIZE]-= valueToSubtract; __syncthreads(); } __device__ void store_zeros(float* A,int N) { int t_y = threadIdx.y; int t_x = threadIdx.x; int i,j; for(i=0;i<N/TILE_SIZE-1;i++) { for(j=i+1;j<N/TILE_SIZE;j++) A[j*blockDim.x + t_x + (i*blockDim.y + t_y)*N] = 0.0; } __syncthreads(); } __device__ void store_zeros_diagonal(float* A,int N,int b) // Will only work if (N/TILE_SIZE) is even { int t_y = threadIdx.y; int t_x = threadIdx.x; int i; for(i=0;i<N/TILE_SIZE-b;i+=2) { if(t_x>t_y) A[i*blockDim.x + t_x + (i*blockDim.y + t_y)*N] = 0.0; if(t_x<t_y) A[(i+1)*blockDim.x*(1+N) + (blockDim.x-t_x-1) + (blockDim.y-t_y-1)*N] = 0.0; } __syncthreads(); } __device__ void store_zeros_last(float* A,int N) // Will only work if (N/TILE_SIZE) is even { int t_y = threadIdx.y; int t_x = threadIdx.x; if(t_x>t_y) A[(N/TILE_SIZE-1)*blockDim.x*(1+N) + t_x + t_y*N] = 0.0; __syncthreads(); } __global__ void right_looking_launch_kernel(float* read_data,int N) { extern __shared__ float data[]; int i,j,k; load_full(read_data,data,N); for(i=0;i<N/TILE_SIZE;i++) { potrf_tile(data,i,N); for(j=i+1;j<N/TILE_SIZE;j++) { trsm_tile(data,i,j,N); 
for(k=i+1;k<=j;k++) { syrk_tile(data,i,j,k,N); } } } store_zeros(data,N); if((N/TILE_SIZE)%2==0) store_zeros_diagonal(data,N,0); else { store_zeros_diagonal(data,N,1); store_zeros_last(data,N); } store_full(data,read_data,N); }
7dbe4d9aa7313a6ecded941b8e65cbd0af4cfbff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @author Mark Gates @generated from zgemv_fermi.cu normal z -> c, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #include "commonblas_c.h" #include "magma_templates.h" #define PRECISION_c #define BLK_X 128 #define BLK_Y 128 /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This simple implementation loads x directly, relying on the cache, * without using shared memory. */ __global__ void cgemvn_kernel1_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; if ( ind < m ) { A += ind; magmaFloatComplex res = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < n; j++) { res += A[j*lda] * x[j*incx]; } y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This implementation loads BLK_Y elements into sx, then multiplies * BLK_Y columns of A*sx. */ __global__ void cgemvn_kernel2_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; // threads past last row redundantly work on last row A += min( ind, m-1 ); x += threadIdx.x*incx; magmaFloatComplex res = MAGMA_C_ZERO; __shared__ magmaFloatComplex sx[BLK_Y]; // full block-columns int nfull = (n / BLK_Y) * BLK_Y; for( int j=0; j < nfull; j += BLK_Y ) { // load BLK_Y elements of x into sx sx[threadIdx.x] = x[0]; x += BLK_Y*incx; __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < BLK_Y; j2++) { res += A[0] * sx[j2]; A += lda; } __syncthreads(); } // last, partial block-column // load remaining npart elements of x into sx int npart = n % BLK_Y; if ( threadIdx.x < npart ) { sx[threadIdx.x] = x[0]; } else { sx[threadIdx.x] = MAGMA_C_ZERO; } __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < npart; j2++) { res += A[0]*sx[j2]; A += lda; } if ( ind < m ) { y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^T * x + beta*y. * Each thread block does one column of A (i.e., one row of A^T). * Each thread does a partial sum, then collectively they do a reduction. 
*/ __global__ void cgemvt_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += A[i] * x[tx + i]; } if ( tx + mfull < m ) { res += A[mfull] * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^H * x + beta*y. * Same as cgemvt_kernel_fermi but conjugates entries of A. */ __global__ void cgemvc_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += conj(A[i]) * x[tx + i]; } if ( tx + mfull < m ) { res += conj(A[mfull]) * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] dx COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy REAL array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. 
@ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_cgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dx, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t incy) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. #if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemv( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #else magmablas_cgemv_tesla( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { dim3 grid( (m - 1)/BLK_X + 1 ); dim3 threads( BLK_X, 1, 1 ); hipLaunchKernelGGL(( cgemvn_kernel1_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); hipLaunchKernelGGL(( cgemvt_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaConjTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); hipLaunchKernelGGL(( cgemvc_kernel_fermi), dim3(grid), dim3(threads), 0, magma_stream , m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } }
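A brief illustrative note (an addition, not part of MAGMA): the launch shapes chosen in magmablas_cgemv above scale with the problem size as sketched below, using the BLK_X = 128 defined in this file; the concrete m and n are arbitrary example values.

#include <stdio.h>
int main(void) {
    int m = 1000, n = 300;
    const int BLK_X = 128;
    int grid_notrans = (m - 1) / BLK_X + 1;   /* NoTrans: 999/128 + 1 = 8 blocks, one row per thread */
    printf("NoTrans     : grid(%d)    x %d threads\n", grid_notrans, BLK_X);
    printf("Trans/ConjT : grid(1, %d) x %d threads (one block reduces each column)\n", n, BLK_X);
    return 0;
}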
7dbe4d9aa7313a6ecded941b8e65cbd0af4cfbff.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @author Mark Gates @generated from zgemv_fermi.cu normal z -> c, Sat Nov 15 19:53:59 2014 */ #include "common_magma.h" #include "commonblas_c.h" #include "magma_templates.h" #define PRECISION_c #define BLK_X 128 #define BLK_Y 128 /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This simple implementation loads x directly, relying on the cache, * without using shared memory. */ __global__ void cgemvn_kernel1_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; if ( ind < m ) { A += ind; magmaFloatComplex res = MAGMA_C_ZERO; #pragma unroll for(int j=0; j < n; j++) { res += A[j*lda] * x[j*incx]; } y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha*A*x + beta*y. * Each thread block does a BLK_X x N block row of A. * Each thread goes across one row, accumulating dot product of row ind and x into res. * This implementation loads BLK_Y elements into sx, then multiplies * BLK_Y columns of A*sx. */ __global__ void cgemvn_kernel2_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int ind = blockIdx.x*BLK_X + threadIdx.x; // threads past last row redundantly work on last row A += min( ind, m-1 ); x += threadIdx.x*incx; magmaFloatComplex res = MAGMA_C_ZERO; __shared__ magmaFloatComplex sx[BLK_Y]; // full block-columns int nfull = (n / BLK_Y) * BLK_Y; for( int j=0; j < nfull; j += BLK_Y ) { // load BLK_Y elements of x into sx sx[threadIdx.x] = x[0]; x += BLK_Y*incx; __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < BLK_Y; j2++) { res += A[0] * sx[j2]; A += lda; } __syncthreads(); } // last, partial block-column // load remaining npart elements of x into sx int npart = n % BLK_Y; if ( threadIdx.x < npart ) { sx[threadIdx.x] = x[0]; } else { sx[threadIdx.x] = MAGMA_C_ZERO; } __syncthreads(); // multiply A*sx #pragma unroll for(int j2=0; j2 < npart; j2++) { res += A[0]*sx[j2]; A += lda; } if ( ind < m ) { y[ind*incy] = alpha*res + beta*y[ind*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^T * x + beta*y. * Each thread block does one column of A (i.e., one row of A^T). * Each thread does a partial sum, then collectively they do a reduction. */ __global__ void cgemvt_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += A[i] * x[tx + i]; } if ( tx + mfull < m ) { res += A[mfull] * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... 
to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /* Compute y = alpha * A^H * x + beta*y. * Same as cgemvt_kernel_fermi but conjugates entries of A. */ __global__ void cgemvc_kernel_fermi( int m, int n, magmaFloatComplex alpha, const magmaFloatComplex * __restrict__ A, int lda, const magmaFloatComplex * __restrict__ x, int incx, magmaFloatComplex beta, magmaFloatComplex * __restrict__ y, int incy) { #if (__CUDA_ARCH__ >= 200) int tx = threadIdx.x; __shared__ magmaFloatComplex sdata[BLK_X]; magmaFloatComplex res = MAGMA_C_ZERO; A += blockIdx.y*lda + threadIdx.x; // partial sums int mfull = (m / BLK_X) * BLK_X; for(int i=0; i < mfull; i += BLK_X) { res += conj(A[i]) * x[tx + i]; } if ( tx + mfull < m ) { res += conj(A[mfull]) * x[tx + mfull]; } sdata[tx] = res; // tree reduction of partial sums, // from BLK_X sums to ... 128 to 64 to 32 ... to 1 sum in sdata[0] magma_sum_reduce< BLK_X >( tx, sdata ); if ( tx == 0 ) { y[blockIdx.y*incy] = alpha*sdata[0] + beta*y[blockIdx.y*incy]; } #endif /* (__CUDA_ARCH__ >= 200) */ } /** Purpose ------- CGEMV performs one of the matrix-vector operations y := alpha*A*x + beta*y, or y := alpha*A**T*x + beta*y, or y := alpha*A**H*x + beta*y, where alpha and beta are scalars, x and y are vectors and A is an m by n matrix. Arguments ---------- @param[in] trans magma_trans_t On entry, TRANS specifies the operation to be performed as follows: - = MagmaNoTrans: y := alpha*A *x + beta*y - = MagmaTrans: y := alpha*A^T*x + beta*y - = MagmaConjTrans: y := alpha*A^H*x + beta*y @param[in] m INTEGER On entry, m specifies the number of rows of the matrix A. @param[in] n INTEGER On entry, n specifies the number of columns of the matrix A @param[in] alpha COMPLEX On entry, ALPHA specifies the scalar alpha. @param[in] dA COMPLEX array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER LDA specifies the leading dimension of A. @param[in] dx COMPLEX array of dimension n if trans == MagmaNoTrans m if trans == MagmaTrans or MagmaConjTrans @param[in] incx Specifies the increment for the elements of X. INCX must not be zero. @param[in] beta DOUBLE REAL On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[out] dy REAL array of dimension m if trans == MagmaNoTrans n if trans == MagmaTrans or MagmaConjTrans @param[in] incy Specifies the increment for the elements of Y. INCY must not be zero. @ingroup magma_dblas2 ********************************************************************/ extern "C" void magmablas_cgemv( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaFloatComplex alpha, magmaFloatComplex_const_ptr dA, magma_int_t ldda, magmaFloatComplex_const_ptr dx, magma_int_t incx, magmaFloatComplex beta, magmaFloatComplex_ptr dy, magma_int_t incy) { magma_int_t info = 0; if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans ) info = -1; else if ( m < 0 ) info = -2; else if ( n < 0 ) info = -3; else if ( ldda < m ) info = -6; else if ( incx == 0 ) info = -8; else if ( incy == 0 ) info = -11; if (info != 0) { magma_xerbla( __func__, -(info) ); return; //info; } magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { // -------------------- // call CUDA ARCH 1.x version // magmablas for [sd] precisions, cublas for [zc] precisions. 
#if defined(PRECISION_z) || defined(PRECISION_c) magma_cgemv( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #else magmablas_cgemv_tesla( trans, m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); #endif return; } // -------------------- // CUDA ARCH 2.x (Fermi) version if ( trans == MagmaNoTrans ) { dim3 grid( (m - 1)/BLK_X + 1 ); dim3 threads( BLK_X, 1, 1 ); cgemvn_kernel1_fermi<<< grid, threads, 0, magma_stream >>> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); cgemvt_kernel_fermi<<< grid, threads, 0, magma_stream >>> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } else if ( trans == MagmaConjTrans ) { dim3 grid ( 1, n, 1 ); dim3 threads ( BLK_X, 1, 1 ); cgemvc_kernel_fermi<<< grid, threads, 0, magma_stream >>> ( m, n, alpha, dA, ldda, dx, incx, beta, dy, incy ); } }
76ac0c0cca4b2c1d46b29b293b546a0285e354c8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void mykernel(float *d1, float *d2, float *d3, float *d4, float *d5) { if(threadIdx.x == 0) { d1[0] = 123.0f; d2[0] = 123.0f; d3[0] = 123.0f; d4[0] = 123.0f; d5[0] = 123.0f; } }
76ac0c0cca4b2c1d46b29b293b546a0285e354c8.cu
#include "includes.h" __global__ void mykernel(float *d1, float *d2, float *d3, float *d4, float *d5) { if(threadIdx.x == 0) { d1[0] = 123.0f; d2[0] = 123.0f; d3[0] = 123.0f; d4[0] = 123.0f; d5[0] = 123.0f; } }
bc22444a06d2c2bf78ab26dc6daa605b44ce069f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //============================================================================================= // Name : thread1d.cu // Author : Jose Refojo // Version : 26-06-2012 // Creation date : 18-06-2010 // Copyright : Copyright belongs to Trinity Centre for High Performance Computing // Description : This program will initialize a number of arrays, // then it will grab data from each thread (such as thread position inside the block and block), // save it, send it back into the main memory, and print it //============================================================================================= #include "stdio.h" __global__ void scanTheadInformationGPU(int *threadIdsGPU, int *blockIdsGPU,int Ntot) { int idx=blockIdx.x*blockDim.x+threadIdx.x; if ( idx <Ntot ) { threadIdsGPU[idx]=threadIdx.x; blockIdsGPU[idx]=blockIdx.x; } } int main() { // pointers to host memory int *threadIds, *blockIds; // pointers to device memory int *threadIdsGPU, *blockIdsGPU; // N is the total size that we want int N=18; int i; // Allocate arrays threadIds and blockIds on host threadIds = (int*) malloc(N*sizeof(int)); blockIds = (int*) malloc(N*sizeof(int)); // Allocate arrays threadIdsGPU and blockIdsGPU on device hipMalloc ((void **) &threadIdsGPU, sizeof(int)*N); hipMalloc ((void **) &blockIdsGPU, sizeof(int)*N); /* // Copy data from host memory to device memory (not needed, but this is how you do it) hipMemcpy(threadIdsGPU, threadIds, sizeof(int)*N, hipMemcpyHostToDevice); hipMemcpy(blockIdsGPU, blockIds, sizeof(int)*N, hipMemcpyHostToDevice); */ // Compute the execution configuration int block_size=8; dim3 dimBlock(block_size); dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); // Scan information from the threads hipLaunchKernelGGL(( scanTheadInformationGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, threadIdsGPU, blockIdsGPU, N); // Copy data from device memory to host memory hipMemcpy(threadIds, threadIdsGPU, sizeof(int)*N, hipMemcpyDeviceToHost); hipMemcpy(blockIds, blockIdsGPU, sizeof(int)*N, hipMemcpyDeviceToHost); // Print all the data about the threads printf(" dimGrid=%d\n",dimGrid.x); for (i=0; i<N; i++) { printf(" threadIds[%d]=%d\n",i,threadIds[i]); } for (i=0; i<N; i++) { printf(" blockIds[%d]=%d\n",i,blockIds[i]); } // Free the memory free(threadIds); free(blockIds); hipFree(threadIdsGPU); hipFree(blockIdsGPU); }
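An illustrative addition (not in the original program above): the kernel launch is not followed by any error check. One common HIP pattern is to rewrite that single launch line, inside main and before the hipMemcpy calls read the results, as a hedged sketch:

// Scan information from the threads, then check that the launch actually succeeded
hipLaunchKernelGGL(scanTheadInformationGPU, dimGrid, dimBlock, 0, 0, threadIdsGPU, blockIdsGPU, N);
hipError_t err = hipGetLastError();
if (err != hipSuccess) {
    fprintf(stderr, "kernel launch failed: %s\n", hipGetErrorString(err));
    return 1;                                  // we are inside main
}
hipDeviceSynchronize();                        // wait for the kernel before copying its output back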
bc22444a06d2c2bf78ab26dc6daa605b44ce069f.cu
//============================================================================================= // Name : thread1d.cu // Author : Jose Refojo // Version : 26-06-2012 // Creation date : 18-06-2010 // Copyright : Copyright belongs to Trinity Centre for High Performance Computing // Description : This program will initialize a number of arrays, // then it will grab data from each thread (such as thread position inside the block and block), // save it, send it back into the main memory, and print it //============================================================================================= #include "stdio.h" __global__ void scanTheadInformationGPU(int *threadIdsGPU, int *blockIdsGPU,int Ntot) { int idx=blockIdx.x*blockDim.x+threadIdx.x; if ( idx <Ntot ) { threadIdsGPU[idx]=threadIdx.x; blockIdsGPU[idx]=blockIdx.x; } } int main() { // pointers to host memory int *threadIds, *blockIds; // pointers to device memory int *threadIdsGPU, *blockIdsGPU; // N is the total size that we want int N=18; int i; // Allocate arrays threadIds and blockIds on host threadIds = (int*) malloc(N*sizeof(int)); blockIds = (int*) malloc(N*sizeof(int)); // Allocate arrays threadIdsGPU and blockIdsGPU on device cudaMalloc ((void **) &threadIdsGPU, sizeof(int)*N); cudaMalloc ((void **) &blockIdsGPU, sizeof(int)*N); /* // Copy data from host memory to device memory (not needed, but this is how you do it) cudaMemcpy(threadIdsGPU, threadIds, sizeof(int)*N, cudaMemcpyHostToDevice); cudaMemcpy(blockIdsGPU, blockIds, sizeof(int)*N, cudaMemcpyHostToDevice); */ // Compute the execution configuration int block_size=8; dim3 dimBlock(block_size); dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) ); // Scan information from the threads scanTheadInformationGPU<<<dimGrid,dimBlock>>>(threadIdsGPU, blockIdsGPU, N); // Copy data from device memory to host memory cudaMemcpy(threadIds, threadIdsGPU, sizeof(int)*N, cudaMemcpyDeviceToHost); cudaMemcpy(blockIds, blockIdsGPU, sizeof(int)*N, cudaMemcpyDeviceToHost); // Print all the data about the threads printf(" dimGrid=%d\n",dimGrid.x); for (i=0; i<N; i++) { printf(" threadIds[%d]=%d\n",i,threadIds[i]); } for (i=0; i<N; i++) { printf(" blockIds[%d]=%d\n",i,blockIds[i]); } // Free the memory free(threadIds); free(blockIds); cudaFree(threadIdsGPU); cudaFree(blockIdsGPU); }
3c36a23cedd758872c2612bfa57cce81032119e2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hist.cuh" #include "hist_2_one_byte_base.cuh" #include "tuning_policy_enums.cuh" #include "compute_hist_loop_one_stat.cuh" #include <hip/hip_cooperative_groups.h> #include <library/cpp/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> using namespace cooperative_groups; namespace NKernel { template <int Bits, int BlockSize> struct TPointHistOneByte { const int InnerHistBitsCount = Bits - 5; float* Histogram; static constexpr int GetHistSize() { return BlockSize * 32; } static constexpr int AddPointsBatchSize() { return TLoadSize<LoadSize()>::Size(); } static constexpr int Unroll(ECIndexLoadType) { #if __CUDA_ARCH__ < 700 const int NN = 2; #else const int NN = 4; #endif return NN; } static constexpr int GetBlockSize() { return BlockSize; } static constexpr ELoadSize LoadSize() { #if __CUDA_ARCH__ < 500 return ELoadSize::OneElement; #else return ELoadSize::FourElements; // return ELoadSize::TwoElements; #endif } static constexpr int BlockLoadSize(ECIndexLoadType indexLoadType) { return TLoadSize<LoadSize()>::Size() * BlockSize * Unroll(indexLoadType); } __forceinline__ __device__ int SliceOffset() { const int warpOffset = 1024 * (threadIdx.x / 32); const int blocks = 8 >> InnerHistBitsCount; const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2))); return warpOffset + innerHistStart; } __forceinline__ __device__ TPointHistOneByte(float* hist) { static_assert(Bits >= 5, "Error: this hist is for 5-8 bits"); const int histSize = 32 * BlockSize; #pragma unroll 8 for (int i = threadIdx.x; i < histSize; i += BlockSize) { hist[i] = 0; } Histogram = hist + SliceOffset(); __syncthreads(); } __forceinline__ __device__ void AddPoint(ui32 ci, const float t) { auto syncTile = tiled_partition<32>(this_thread_block()); #pragma unroll for (int i = 0; i < 4; i++) { int f = (threadIdx.x + i) & 3; int bin = (ci >> (24 - 8 * f)) & 255; // int bin = bfe(ci, 24 - 8 * f, 8); const float statToAdd = (bin >> Bits) == 0 ? t : 0; const int mask = (1 << InnerHistBitsCount) - 1; const int higherBin = (bin >> 5) & mask; int offset = 4 * higherBin + f + ((bin & 31) << 5); if (InnerHistBitsCount > 0) { #pragma unroll for (int k = 0; k < (1 << InnerHistBitsCount); ++k) { const int pass = ((threadIdx.x >> 2) + k) & mask; syncTile.sync(); if (pass == higherBin) { Histogram[offset] += statToAdd; } } } else { syncTile.sync(); Histogram[offset] += statToAdd; } } } template <int N> __forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* t) { auto syncTile = tiled_partition<32>(this_thread_block()); #pragma unroll for (int i = 0; i < 4; i++) { const int f = (threadIdx.x + i) & 3; int bins[N]; float stats[N]; #pragma unroll for (int k = 0; k < N; ++k) { bins[k] = (ci[k] >> (24 - 8 * f)) & 255; // bins[k] = bfe(ci[k], 24 - 8 * f, 8); stats[k] = (bins[k] >> Bits) == 0 ? 
t[k] : 0.0f; } int offsets[N]; int higherBin[N]; const int mask = (1 << InnerHistBitsCount) - 1; #pragma unroll for (int k = 0; k < N; ++k) { higherBin[k] = (bins[k] >> 5) & mask; offsets[k] = 4 * higherBin[k] + f + ((bins[k] & 31) << 5); } if (InnerHistBitsCount > 0) { #pragma unroll for (int k = 0; k < (1 << InnerHistBitsCount); ++k) { const int pass = ((threadIdx.x >> 2) + k) & mask; syncTile.sync(); #pragma unroll for (int j = 0; j < N; ++j) { if (pass == higherBin[j]) { Histogram[offsets[j]] += stats[j]; } } } } else { syncTile.sync(); #pragma unroll for (int j = 0; j < N; ++j) { Histogram[offsets[j]] += stats[j]; } } } } template <int N> __forceinline__ __device__ void AddPoints(const ui32* ci, const float* t) { const int NN = AddPointsBatchSize(); static_assert(N % NN == 0, "Error: incorrect stripe size"); #pragma unroll for (int k = 0; k < N; k += NN) { AddPointsImpl<NN>(ci + k, t + k); } } __forceinline__ __device__ void Reduce() { Histogram -= SliceOffset(); __syncthreads(); { const int warpHistSize = 1024; for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) { float sum = 0; //12 iterations #pragma unroll 12 for (int i = start; i < 32 * BlockSize; i += warpHistSize) { sum += Histogram[i]; } Histogram[warpHistSize + start] = sum; } } __syncthreads(); //now we have only 1024 entries hist const int warpHistBlockCount = 8 >> InnerHistBitsCount; const int fold = threadIdx.x; const int histSize = 1 << (5 + InnerHistBitsCount); float sum[4]; #pragma unroll for (int i = 0; i < 4; ++i) { sum[i] = 0.0f; } if (fold < histSize) { const int warpHistSize = 1024; const int lowerBitsOffset = (fold & 31) << 5; const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1); const int blockSize = 4 * (1 << InnerHistBitsCount); const volatile float* src = Histogram + warpHistSize + lowerBitsOffset + 4 * higherBin; #pragma unroll for (int block = 0; block < warpHistBlockCount; ++block) { #pragma unroll for (int i = 0; i < 4; ++i) { sum[i] += src[i + block * blockSize]; } } } __syncthreads(); if (fold < histSize) { for (int i = 0; i < 4; ++i) { Histogram[histSize * i + fold] = sum[i]; } } __syncthreads(); } __forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount, const TFeatureInBlock* features, int fCount, int leafId, int leafCount, float* binSums) { const int fold = threadIdx.x; const int histSize = 1 << (5 + InnerHistBitsCount); #pragma unroll 4 for (int fid = 0; fid < fCount; ++fid) { TFeatureInBlock group = features[fid]; const int deviceOffset = group.GroupOffset * statCount * leafCount; const int entriesPerLeaf = statCount * group.GroupSize; float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize + group.FoldOffsetInGroup; if (fold < features[fid].Folds) { const float val = Histogram[fid * histSize + fold]; if (abs(val) > 1e-20f) { if (blockCount > 1) { atomicAdd(dst + fold, val); } else { dst[fold] = val; } } } } } }; void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32* partIds, ui32 partCount, const ui32* bins, ui32 binsLineSize, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = partCount;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 
2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ numBlocks.x = (fCount + 3) / 4;\ numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\ using THist = TPointHistOneByte<Bits, blockSize>;\ hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \ features,\ fCount,\ bins, binsLineSize,\ stats, numStats, \ statLineSize,\ parts,\ partIds,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } if (partCount) { if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bits count " << maxBins); } } #undef PASS #undef HIST2_PASS } void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32* partIds, ui32 partCount, const ui32* cindex, const int* indices, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = partCount;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ const int groupCount = (fCount + 3) / 4;\ numBlocks.x = groupCount;\ numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\ using THist = TPointHistOneByte<Bits, blockSize>;\ hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \ features,\ fCount,\ cindex,\ indices,\ stats, numStats, \ statLineSize,\ parts,\ partIds,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } if (partCount) { if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bins count " << maxBins); } } #undef PASS #undef HIST2_PASS } /* * Single part */ void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32 partId, const ui32* bins, ui32 binsLineSize, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = 1;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 
2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ numBlocks.x = (fCount + 3) / 4;\ numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\ using THist = TPointHistOneByte<Bits, blockSize>;\ hipLaunchKernelGGL(( ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \ features,\ fCount,\ bins, binsLineSize,\ stats, numStats, \ statLineSize,\ parts,\ partId,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bits count " << maxBins); } #undef PASS #undef HIST2_PASS } void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32 partId, const ui32* cindex, const int* indices, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = 1;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ const int groupCount = (fCount + 3) / 4;\ numBlocks.x = groupCount;\ numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\ using THist = TPointHistOneByte<Bits, blockSize>;\ hipLaunchKernelGGL(( ComputeSplitPropertiesGatherImpl<THist, blockSize, 4>), dim3(numBlocks), dim3(blockSize), 0, stream, \ features,\ fCount,\ cindex,\ indices,\ stats, numStats, \ statLineSize,\ parts,\ partId,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bins count " << maxBins); } #undef PASS #undef HIST2_PASS } }
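Illustrative arithmetic only (an addition, not part of the CatBoost sources): how one update from TPointHistOneByte<6, 384> lands in its warp's 1024-float shared-memory slice, mirroring the SliceOffset and AddPoint expressions above. The thread id and bin value are arbitrary, and f is taken from the i = 0 iteration of the inner loop.

#include <cstdio>
int main() {
    const int Bits = 6, InnerHistBitsCount = Bits - 5;        // one bit beyond the 5-bit base
    const int tid = 13, f = tid & 3, bin = 45;                 // example thread and 6-bit bin
    const int blocks = 8 >> InnerHistBitsCount;                // 4 inner histogram copies per warp
    const int sliceOffset = 1024 * (tid / 32)
                          + (tid & ((blocks - 1) << (InnerHistBitsCount + 2)));   // = 8
    const int mask = (1 << InnerHistBitsCount) - 1;
    const int higherBin = (bin >> 5) & mask;                   // = 1
    const int offset = 4 * higherBin + f + ((bin & 31) << 5);  // = 4 + 1 + 416 = 421
    printf("shared index = %d (slice %d + offset %d)\n", sliceOffset + offset, sliceOffset, offset);
    return 0;
}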
3c36a23cedd758872c2612bfa57cce81032119e2.cu
#include "hist.cuh" #include "hist_2_one_byte_base.cuh" #include "tuning_policy_enums.cuh" #include "compute_hist_loop_one_stat.cuh" #include <cooperative_groups.h> #include <library/cpp/cuda/wrappers/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> using namespace cooperative_groups; namespace NKernel { template <int Bits, int BlockSize> struct TPointHistOneByte { const int InnerHistBitsCount = Bits - 5; float* Histogram; static constexpr int GetHistSize() { return BlockSize * 32; } static constexpr int AddPointsBatchSize() { return TLoadSize<LoadSize()>::Size(); } static constexpr int Unroll(ECIndexLoadType) { #if __CUDA_ARCH__ < 700 const int NN = 2; #else const int NN = 4; #endif return NN; } static constexpr int GetBlockSize() { return BlockSize; } static constexpr ELoadSize LoadSize() { #if __CUDA_ARCH__ < 500 return ELoadSize::OneElement; #else return ELoadSize::FourElements; // return ELoadSize::TwoElements; #endif } static constexpr int BlockLoadSize(ECIndexLoadType indexLoadType) { return TLoadSize<LoadSize()>::Size() * BlockSize * Unroll(indexLoadType); } __forceinline__ __device__ int SliceOffset() { const int warpOffset = 1024 * (threadIdx.x / 32); const int blocks = 8 >> InnerHistBitsCount; const int innerHistStart = (threadIdx.x & ((blocks - 1) << (InnerHistBitsCount + 2))); return warpOffset + innerHistStart; } __forceinline__ __device__ TPointHistOneByte(float* hist) { static_assert(Bits >= 5, "Error: this hist is for 5-8 bits"); const int histSize = 32 * BlockSize; #pragma unroll 8 for (int i = threadIdx.x; i < histSize; i += BlockSize) { hist[i] = 0; } Histogram = hist + SliceOffset(); __syncthreads(); } __forceinline__ __device__ void AddPoint(ui32 ci, const float t) { auto syncTile = tiled_partition<32>(this_thread_block()); #pragma unroll for (int i = 0; i < 4; i++) { int f = (threadIdx.x + i) & 3; int bin = (ci >> (24 - 8 * f)) & 255; // int bin = bfe(ci, 24 - 8 * f, 8); const float statToAdd = (bin >> Bits) == 0 ? t : 0; const int mask = (1 << InnerHistBitsCount) - 1; const int higherBin = (bin >> 5) & mask; int offset = 4 * higherBin + f + ((bin & 31) << 5); if (InnerHistBitsCount > 0) { #pragma unroll for (int k = 0; k < (1 << InnerHistBitsCount); ++k) { const int pass = ((threadIdx.x >> 2) + k) & mask; syncTile.sync(); if (pass == higherBin) { Histogram[offset] += statToAdd; } } } else { syncTile.sync(); Histogram[offset] += statToAdd; } } } template <int N> __forceinline__ __device__ void AddPointsImpl(const ui32* ci, const float* t) { auto syncTile = tiled_partition<32>(this_thread_block()); #pragma unroll for (int i = 0; i < 4; i++) { const int f = (threadIdx.x + i) & 3; int bins[N]; float stats[N]; #pragma unroll for (int k = 0; k < N; ++k) { bins[k] = (ci[k] >> (24 - 8 * f)) & 255; // bins[k] = bfe(ci[k], 24 - 8 * f, 8); stats[k] = (bins[k] >> Bits) == 0 ? 
t[k] : 0.0f; } int offsets[N]; int higherBin[N]; const int mask = (1 << InnerHistBitsCount) - 1; #pragma unroll for (int k = 0; k < N; ++k) { higherBin[k] = (bins[k] >> 5) & mask; offsets[k] = 4 * higherBin[k] + f + ((bins[k] & 31) << 5); } if (InnerHistBitsCount > 0) { #pragma unroll for (int k = 0; k < (1 << InnerHistBitsCount); ++k) { const int pass = ((threadIdx.x >> 2) + k) & mask; syncTile.sync(); #pragma unroll for (int j = 0; j < N; ++j) { if (pass == higherBin[j]) { Histogram[offsets[j]] += stats[j]; } } } } else { syncTile.sync(); #pragma unroll for (int j = 0; j < N; ++j) { Histogram[offsets[j]] += stats[j]; } } } } template <int N> __forceinline__ __device__ void AddPoints(const ui32* ci, const float* t) { const int NN = AddPointsBatchSize(); static_assert(N % NN == 0, "Error: incorrect stripe size"); #pragma unroll for (int k = 0; k < N; k += NN) { AddPointsImpl<NN>(ci + k, t + k); } } __forceinline__ __device__ void Reduce() { Histogram -= SliceOffset(); __syncthreads(); { const int warpHistSize = 1024; for (int start = threadIdx.x; start < warpHistSize; start += BlockSize) { float sum = 0; //12 iterations #pragma unroll 12 for (int i = start; i < 32 * BlockSize; i += warpHistSize) { sum += Histogram[i]; } Histogram[warpHistSize + start] = sum; } } __syncthreads(); //now we have only 1024 entries hist const int warpHistBlockCount = 8 >> InnerHistBitsCount; const int fold = threadIdx.x; const int histSize = 1 << (5 + InnerHistBitsCount); float sum[4]; #pragma unroll for (int i = 0; i < 4; ++i) { sum[i] = 0.0f; } if (fold < histSize) { const int warpHistSize = 1024; const int lowerBitsOffset = (fold & 31) << 5; const int higherBin = (fold >> 5) & ((1 << InnerHistBitsCount) - 1); const int blockSize = 4 * (1 << InnerHistBitsCount); const volatile float* src = Histogram + warpHistSize + lowerBitsOffset + 4 * higherBin; #pragma unroll for (int block = 0; block < warpHistBlockCount; ++block) { #pragma unroll for (int i = 0; i < 4; ++i) { sum[i] += src[i + block * blockSize]; } } } __syncthreads(); if (fold < histSize) { for (int i = 0; i < 4; ++i) { Histogram[histSize * i + fold] = sum[i]; } } __syncthreads(); } __forceinline__ __device__ void AddToGlobalMemory(int statId, int statCount, int blockCount, const TFeatureInBlock* features, int fCount, int leafId, int leafCount, float* binSums) { const int fold = threadIdx.x; const int histSize = 1 << (5 + InnerHistBitsCount); #pragma unroll 4 for (int fid = 0; fid < fCount; ++fid) { TFeatureInBlock group = features[fid]; const int deviceOffset = group.GroupOffset * statCount * leafCount; const int entriesPerLeaf = statCount * group.GroupSize; float* dst = binSums + deviceOffset + leafId * entriesPerLeaf + statId * group.GroupSize + group.FoldOffsetInGroup; if (fold < features[fid].Folds) { const float val = Histogram[fid * histSize + fold]; if (abs(val) > 1e-20f) { if (blockCount > 1) { atomicAdd(dst + fold, val); } else { dst[fold] = val; } } } } } }; void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32* partIds, ui32 partCount, const ui32* bins, ui32 binsLineSize, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = partCount;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 
2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ numBlocks.x = (fCount + 3) / 4;\ numBlocks.x *= CeilDivide(maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\ using THist = TPointHistOneByte<Bits, blockSize>;\ ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\ features,\ fCount,\ bins, binsLineSize,\ stats, numStats, \ statLineSize,\ parts,\ partIds,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } if (partCount) { if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bits count " << maxBins); } } #undef PASS #undef HIST2_PASS } void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32* partIds, ui32 partCount, const ui32* cindex, const int* indices, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = partCount;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ const int groupCount = (fCount + 3) / 4;\ numBlocks.x = groupCount;\ numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\ using THist = TPointHistOneByte<Bits, blockSize>;\ ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\ features,\ fCount,\ cindex,\ indices,\ stats, numStats, \ statLineSize,\ parts,\ partIds,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partIds, partCount, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } if (partCount) { if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bins count " << maxBins); } } #undef PASS #undef HIST2_PASS } /* * Single part */ void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32 partId, const ui32* bins, ui32 binsLineSize, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = 1;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 
2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ numBlocks.x = (fCount + 3) / 4;\ numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.x * numBlocks.y * numBlocks.z));\ using THist = TPointHistOneByte<Bits, blockSize>;\ ComputeSplitPropertiesDirectLoadsImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\ features,\ fCount,\ bins, binsLineSize,\ stats, numStats, \ statLineSize,\ parts,\ partId,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, bins, binsLineSize, stats, numStats, statLineSize, histograms, stream);\ } if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bits count " << maxBins); } #undef PASS #undef HIST2_PASS } void ComputeHistOneByte(int maxBins, const TFeatureInBlock* features, const int fCount, const TDataPartition* parts, const ui32 partId, const ui32* cindex, const int* indices, const float* stats, ui32 numStats, ui32 statLineSize, float* histograms, TCudaStream stream) { #define PASS(Bits, NumStats)\ const int blockSize = 384;\ dim3 numBlocks;\ numBlocks.z = NumStats;\ numBlocks.y = 1;\ const int blocksPerSm = TArchProps::GetMajorVersion() > 3 ? 2 : 1;\ const int maxActiveBlocks = blocksPerSm * TArchProps::SMCount();\ const int groupCount = (fCount + 3) / 4;\ numBlocks.x = groupCount;\ numBlocks.x *= CeilDivide(2 * maxActiveBlocks, (int)(numBlocks.y * numBlocks.z * numBlocks.x));\ using THist = TPointHistOneByte<Bits, blockSize>;\ ComputeSplitPropertiesGatherImpl<THist, blockSize, 4><<<numBlocks, blockSize, 0, stream>>>(\ features,\ fCount,\ cindex,\ indices,\ stats, numStats, \ statLineSize,\ parts,\ partId,\ histograms); #define HIST2_PASS(Bits)\ if (numStats % 2 != 0) {\ PASS(Bits, 1)\ ComputeHist2OneByteBits<Bits, true>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } else {\ ComputeHist2OneByteBits<Bits, false>(features, fCount, parts, partId, cindex, indices, stats, numStats, statLineSize, histograms, stream);\ } if (maxBins <= 32) { HIST2_PASS(5) } else if (maxBins <= 64) { HIST2_PASS(6) // PASS(6, numStats) } else if (maxBins <= 128) { HIST2_PASS(7) // PASS(7, numStats) } else if (maxBins <= 255) { PASS(8, numStats) } else { CB_ENSURE(false, "Unsupported bins count " << maxBins); } #undef PASS #undef HIST2_PASS } }
fc16aae4c0385d59c78efcf5cebb8adc50ab15ab.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <THH/THHAtomics.cuh> // For atomicAdd on Half #include <thrust/complex.h> #include <thrust/pair.h> #include <thrust/tuple.h> // For compatibility with Pytorch 1.1 #ifndef TORCH_CHECK #define TORCH_CHECK AT_CHECK #endif // #define thc_cos THCNumerics<scalar_t>::cos // #define thc_sin THCNumerics<scalar_t>::sin #define thc_cos std::cos #define thc_sin std::sin #define FULL_MASK 0xffffffff static constexpr int MAX_BLOCK_SIZE = 1024; static constexpr int WORK_PER_THREAD = 16; static constexpr int ELEMENTARY_SIZE = MAX_BLOCK_SIZE / 2; static constexpr int MAX_N_FACTORS = 10; template <typename T, size_t N> using CudaAcsr32 = at::PackedTensorAccessor32<T, N, at::RestrictPtrTraits>; __host__ __device__ static inline int64_t div_up(int64_t a, int64_t b) { return (a + b - 1) / b; } __host__ __device__ static inline int div_up(int a, int b) { return (a + b - 1) / b; } template <typename scalar_t> static __device__ __forceinline__ void atomicAdd(thrust::complex<scalar_t> *address, thrust::complex<scalar_t> val) { atomicAdd((scalar_t *)address, val.real()); atomicAdd((scalar_t *)address + 1, val.imag()); } template <typename scalar_t> static __device__ __forceinline__ thrust::complex<scalar_t> __shfl_down_sync(unsigned int mask, thrust::complex<scalar_t> value, unsigned int delta, int width = warpSize) { return thrust::complex<scalar_t>(__shfl_down_sync(mask, value.real(), delta, width), __shfl_down_sync(mask, value.imag(), delta, width)); } // 2x2 matrix [a, b; c, d] multiplied by a vector [x, y] template <typename scalar_t> static __device__ __forceinline__ thrust::pair<scalar_t, scalar_t> mult2x2(scalar_t a, scalar_t b, scalar_t c, scalar_t d, scalar_t x, scalar_t y) { return thrust::make_pair(a * x + b * y, c * x + d * y); } template <typename scalar_t> __global__ void butterfly_factor_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, at::PackedTensorAccessor64<scalar_t, 3> output_a) { const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const scalar_t twiddle_val[2][2] = {{twiddle_a[0][0][i], twiddle_a[0][1][i]}, {twiddle_a[1][0][i], twiddle_a[1][1][i]}}; for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t input_val[2] = {input_a[b][0][i], input_a[b][1][i]}; #pragma unroll for (int j = 0; j <= 1; ++j) { output_a[b][j][i] = twiddle_val[j][0] * input_val[0] + twiddle_val[j][1] * input_val[1]; } } } } template <typename scalar_t> __global__ void butterfly_factor_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 4> output_a) { const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const scalar_t twiddle_val[2][2][2] = {{{twiddle_a[0][0][i][0], twiddle_a[0][0][i][1]}, {twiddle_a[0][1][i][0], twiddle_a[0][1][i][1]}}, {{twiddle_a[1][0][i][0], twiddle_a[1][0][i][1]}, {twiddle_a[1][1][i][0], twiddle_a[1][1][i][1]}}}; for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * 
gridDim.y) { const scalar_t input_val[2][2] = {{input_a[b][0][i][0], input_a[b][0][i][1]}, {input_a[b][1][i][0], input_a[b][1][i][1]}}; #pragma unroll for (int j = 0; j <= 1; ++j) { output_a[b][j][i][0] = twiddle_val[j][0][0] * input_val[0][0] - twiddle_val[j][0][1] * input_val[0][1] + twiddle_val[j][1][0] * input_val[1][0] - twiddle_val[j][1][1] * input_val[1][1]; output_a[b][j][i][1] = twiddle_val[j][0][0] * input_val[0][1] + twiddle_val[j][0][1] * input_val[0][0] + twiddle_val[j][1][0] * input_val[1][1] + twiddle_val[j][1][1] * input_val[1][0]; } } } } void butterfly_factor_multiply_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output) { const auto batch_size = input.size(0); const auto n = input.size(2); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_cuda", [&] { switch (input.dim()) { case 3: // real { const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 3>(); const auto input_a = input.packed_accessor64<scalar_t, 3>(); auto output_a = output.packed_accessor64<scalar_t, 3>(); hipLaunchKernelGGL(( butterfly_factor_multiply_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, input_a, output_a); break; } case 4: // complex { const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); const auto input_a = input.packed_accessor64<scalar_t, 4>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); hipLaunchKernelGGL(( butterfly_factor_multiply_complex_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, input_a, output_a); break; } default: AT_ERROR("butterfly_factor_multiply requires input dimension 3 or 4"); } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_factor_multiply_cuda failed with error code ", hipGetLastError()); } template <typename T> __device__ __forceinline__ T sum_strided(T val, T *temp, int stride, int len, int thread_id) { if (stride >= len) { return val; } // Warp reduction for (int offset = warpSize / 2; offset >= stride; offset /= 2) { val += __shfl_down_sync(FULL_MASK, val, offset); } // Block reduction int block_reduction_stride = max(warpSize, stride); int n_block_reductions = div_up(len, block_reduction_stride); __syncthreads(); // Otherwise previous reads might be wrong if (thread_id < len) { temp[(thread_id % block_reduction_stride) * n_block_reductions + (thread_id / block_reduction_stride)] = val; } __syncthreads(); if (thread_id < n_block_reductions * stride) { val = temp[thread_id]; for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) { val += __shfl_down_sync(FULL_MASK, val, offset); } } return val; } template <typename scalar_t> __global__ void butterfly_factor_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a, const at::PackedTensorAccessor64<scalar_t, 3> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, // at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_expanded_a, at::PackedTensorAccessor64<scalar_t, 3> d_twiddle_expanded_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a) { const int batch_size = input_a.size(0); const int n = input_a.size(2); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const scalar_t twiddle_val[2][2] = {{twiddle_a[0][0][i], twiddle_a[0][1][i]}, 
{twiddle_a[1][0][i], twiddle_a[1][1][i]}}; scalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; const int b_start = blockIdx.y * blockDim.y + threadIdx.y; // for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { for (int b = b_start; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t input_val[2] = {input_a[b][0][i], input_a[b][1][i]}; const scalar_t grad_val[2] = {grad_a[b][0][i], grad_a[b][1][i]}; #pragma unroll for (int j = 0; j <= 1; ++j) { // d_twiddle_expanded_a[b][j][0][i] = grad_val[j] * input_val[0]; // d_twiddle_expanded_a[b][j][1][i] = grad_val[j] * input_val[1]; // atomicAdd(&d_twiddle_expanded_a[j][0][i], grad_val[j] * input_val[0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], grad_val[j] * input_val[1]); d_twiddle_val[j][0] += grad_val[j] * input_val[0]; d_twiddle_val[j][1] += grad_val[j] * input_val[1]; d_input_a[b][j][i] = twiddle_val[0][j] * grad_val[0] + twiddle_val[1][j] * grad_val[1]; } } // int tid = threadIdx.x + threadIdx.y * blockDim.x; // int nthreads = blockDim.x * blockDim.y; // __shared__ scalar_t temp_storage[MAX_BLOCK_SIZE]; // if (n < nthreads) { // int lane = tid % warpSize; // int wid = tid / warpSize; // #pragma unroll // for (int j = 0; j <= 1; ++j) { // d_twiddle_val[j][0] = sum_strided(d_twiddle_val[j][0], temp_storage, n, nthreads, tid); // d_twiddle_val[j][1] = sum_strided(d_twiddle_val[j][1], temp_storage, n, nthreads, tid); // } // int reduction_stride = max(warpSize, n); // int n_block_reductions = div_up(nthreads, reduction_stride); // if ((lane % n_block_reductions == 0) && (wid < n)) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][tid / n_block_reductions], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][tid / n_block_reductions], d_twiddle_val[j][1]); // } // } // } else { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); // } // } // Warp reduction for (int offset = warpSize / 2; offset >= n; offset /= 2) { #pragma unroll for (int j = 0; j <= 1; ++j) { d_twiddle_val[j][0] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][0], offset); d_twiddle_val[j][1] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][1], offset); } } __shared__ scalar_t s_d_twiddle[MAX_BLOCK_SIZE * 4]; // // const scalar_t (*temp)[n] = (scalar_t (*)[n])(&s_d_twiddle[0]); int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; int lane = tid % warpSize; int wid = tid / warpSize; if (n < nthreads) { __syncthreads(); s_d_twiddle[tid] = 0; s_d_twiddle[tid + MAX_BLOCK_SIZE] = 0; s_d_twiddle[tid + 2 * MAX_BLOCK_SIZE] = 0; s_d_twiddle[tid + 3 * MAX_BLOCK_SIZE] = 0; __syncthreads(); if (lane < n) { atomicAdd(&s_d_twiddle[i], d_twiddle_val[0][0]); atomicAdd(&s_d_twiddle[i + MAX_BLOCK_SIZE], d_twiddle_val[0][1]); atomicAdd(&s_d_twiddle[i + 2 * MAX_BLOCK_SIZE], d_twiddle_val[1][0]); atomicAdd(&s_d_twiddle[i + 3 * MAX_BLOCK_SIZE], d_twiddle_val[1][1]); } __syncthreads(); if (tid < n) { atomicAdd(&d_twiddle_expanded_a[0][0][i], s_d_twiddle[i]); atomicAdd(&d_twiddle_expanded_a[0][1][i], s_d_twiddle[i + MAX_BLOCK_SIZE]); atomicAdd(&d_twiddle_expanded_a[1][0][i], s_d_twiddle[i + 2 * MAX_BLOCK_SIZE]); atomicAdd(&d_twiddle_expanded_a[1][1][i], s_d_twiddle[i + 3 * MAX_BLOCK_SIZE]); } } else { #pragma unroll for (int j = 0; j <= 1; ++j) { atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); 
atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); } } // // Block reduction // if (n < nthreads) { // // if (n < 0) { // int reduction_stride = max(warpSize, n); // int n_block_reductions = div_up(nthreads, reduction_stride); // if (lane < n) { // // When filling in the shared memory, we assume that n is a power of 2, // // otherwise we might have uninitialized values in the array. // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride)] = d_twiddle_val[0][0]; // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + n * n_block_reductions] = d_twiddle_val[0][1]; // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + 2 * n * n_block_reductions] = d_twiddle_val[1][0]; // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + 3 * n * n_block_reductions] = d_twiddle_val[1][1]; // } // __syncthreads(); // // if (tid == 0) { // // for (int j = 0; j < 4 * n * n_block_reductions; ++j) { // // printf("%i: %f\n", j, s_d_twiddle[j]); // // } // // } // if (wid < n) { // d_twiddle_val[0][0] = s_d_twiddle[tid]; // d_twiddle_val[0][1] = s_d_twiddle[tid + n * n_block_reductions]; // d_twiddle_val[1][0] = s_d_twiddle[tid + 2 * n * n_block_reductions]; // d_twiddle_val[1][1] = s_d_twiddle[tid + 3 * n * n_block_reductions]; // for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // d_twiddle_val[j][0] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][0], offset); // d_twiddle_val[j][1] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][1], offset); // } // } // if (lane % n_block_reductions == 0) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][tid / n_block_reductions], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][tid / n_block_reductions], d_twiddle_val[j][1]); // } // } // } // // } else { // } else if (lane < n) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); // } // } // if (lane < n) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); // } // } // #pragma unroll // for (int j = 0; j <= 1; ++j) { // d_twiddle_expanded_a[b_start][j][0][i] = d_twiddle_val[j][0]; // d_twiddle_expanded_a[b_start][j][1][i] = d_twiddle_val[j][1]; // } } } template <typename scalar_t> __global__ void butterfly_factor_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a, const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_expanded_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a) { const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const scalar_t twiddle_val[2][2][2] = {{{twiddle_a[0][0][i][0], twiddle_a[0][0][i][1]}, {twiddle_a[0][1][i][0], twiddle_a[0][1][i][1]}}, {{twiddle_a[1][0][i][0], twiddle_a[1][0][i][1]}, {twiddle_a[1][1][i][0], twiddle_a[1][1][i][1]}}}; for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t input_val[2][2] = {{input_a[b][0][i][0], input_a[b][0][i][1]}, {input_a[b][1][i][0], input_a[b][1][i][1]}}; 
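// The statements below implement the complex backward pass:
//   d_twiddle[j][k] = grad[j] * conj(input[k])        (real: gr*xr + gi*xi, imag: gi*xr - gr*xi)
//   d_input[j]      = sum_k conj(twiddle[k][j]) * grad[k]
// i.e. the gradient is propagated through the conjugate transpose of each 2x2 twiddle factor,
// with real and imaginary parts stored in the trailing dimension of the accessors.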
const scalar_t grad_val[2][2] = {{grad_a[b][0][i][0], grad_a[b][0][i][1]}, {grad_a[b][1][i][0], grad_a[b][1][i][1]}}; #pragma unroll for (int j = 0; j <= 1; ++j) { d_twiddle_expanded_a[b][j][0][i][0] = grad_val[j][0] * input_val[0][0] + grad_val[j][1] * input_val[0][1]; d_twiddle_expanded_a[b][j][0][i][1] = -grad_val[j][0] * input_val[0][1] + grad_val[j][1] * input_val[0][0]; d_twiddle_expanded_a[b][j][1][i][0] = grad_val[j][0] * input_val[1][0] + grad_val[j][1] * input_val[1][1]; d_twiddle_expanded_a[b][j][1][i][1] = -grad_val[j][0] * input_val[1][1] + grad_val[j][1] * input_val[1][0]; d_input_a[b][j][i][0] = twiddle_val[0][j][0] * grad_val[0][0] + twiddle_val[0][j][1] * grad_val[0][1] + twiddle_val[1][j][0] * grad_val[1][0] + twiddle_val[1][j][1] * grad_val[1][1]; d_input_a[b][j][i][1] = twiddle_val[0][j][0] * grad_val[0][1] - twiddle_val[0][j][1] * grad_val[0][0] + twiddle_val[1][j][0] * grad_val[1][1] - twiddle_val[1][j][1] * grad_val[1][0]; } } } } void butterfly_factor_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& d_twiddle_expanded, at::Tensor& d_input) { const auto batch_size = input.size(0); const auto n = input.size(2); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); // AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_backward_cuda", [&] { AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_backward_cuda", [&] { switch (input.dim()) { case 3: // real { const auto grad_a = grad.packed_accessor64<scalar_t, 3>(); const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 3>(); const auto input_a = input.packed_accessor64<scalar_t, 3>(); // auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 4>(); auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 3>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); hipLaunchKernelGGL(( butterfly_factor_multiply_backward_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_a, twiddle_a, input_a, d_twiddle_expanded_a, d_input_a); break; } case 4: // complex { const auto grad_a = grad.packed_accessor64<scalar_t, 4>(); const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); const auto input_a = input.packed_accessor64<scalar_t, 4>(); auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); hipLaunchKernelGGL(( butterfly_factor_multiply_complex_backward_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_a, twiddle_a, input_a, d_twiddle_expanded_a, d_input_a); break; } default: AT_ERROR("butterfly_factor_multiply requires input dimension 3 or 4"); } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_factor_multiply_backward_cuda failed with error code ", hipGetLastError()); } template <int LENGTH, typename T> __device__ __forceinline__ void sum_strided_atomic(T (&val)[LENGTH], T *storage, int stride, int nthreads, int tid) { // Warp reduction for (int offset = warpSize / 2; offset >= stride; offset /= 2) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] += __shfl_down_sync(FULL_MASK, val[j], offset); } } // Block reduction __syncthreads(); // Need this, otherwise might overwrite before other threads can read twiddle values if (tid < 
stride) { #pragma unroll for (int j = 0; j < LENGTH; j++) { storage[j * stride + tid] = 0; } } __syncthreads(); int lane = tid & (warpSize - 1); // int lane = tid % waprSize; if (lane < stride) { #pragma unroll for (int j = 0; j < LENGTH; j++) { // atomicAdd(&storage[j * stride + tid % stride], val[j]); atomicAdd(&storage[j * stride + (tid & (stride - 1))], val[j]); } } __syncthreads(); } /* Sum elements that are @stride apart by exchanging, using shared memory. After the function, threads with @tid < n_block_reductions * stride and @tid % n_block_reductions == 0 contains the sums. */ template <int LENGTH, typename T> __device__ __forceinline__ void sum_strided_exchange(T (&val)[LENGTH], T *storage, int log_stride, int nthreads, int tid) { int stride = 1 << log_stride; // Warp reduction for (int offset = warpSize / 2; offset >= stride; offset /= 2) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] += __shfl_down_sync(FULL_MASK, val[j], offset); } } int block_reduction_stride = max(warpSize, stride); // int n_block_reductions = div_up(nthreads, block_reduction_stride); int n_block_reductions = (nthreads + block_reduction_stride - 1) >> max(5, log_stride); int lane = tid % warpSize; __syncthreads(); // Otherwise previous reads might be wrong if ((tid < nthreads) && (lane < stride)) { #pragma unroll for (int j = 0; j < LENGTH; j++) { // storage[j * nthreads + (tid % block_reduction_stride) * n_block_reductions + (tid / block_reduction_stride)] = val[j]; storage[j * nthreads + (tid & (block_reduction_stride - 1)) * n_block_reductions + (tid / block_reduction_stride)] = val[j]; } } __syncthreads(); if (tid < n_block_reductions * stride) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] = storage[j * nthreads + tid]; } for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] += __shfl_down_sync(FULL_MASK, val[j], offset); } } } } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_intermediate_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i] = output_a[first_idx][b][s][input_base_idx + i]; } int i = threadIdx.x; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = twiddle_a[s][twiddle_start_idx + i][0][0]; s_twiddle[i][0][1] = twiddle_a[s][twiddle_start_idx + i][0][1]; s_twiddle[i][1][0] = twiddle_a[s][twiddle_start_idx + i][1][0]; s_twiddle[i][1][1] = twiddle_a[s][twiddle_start_idx + i][1][1]; } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; thrust::tie(s_input[pos], s_input[pos + stride]) = mult2x2(twiddle_val[0][0], twiddle_val[0][1], twiddle_val[1][0], twiddle_val[1][1], input_val[0], input_val[1]); if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos] = s_input[pos]; output_a[idx+1][b][s][input_base_idx + pos + stride] = s_input[pos + stride]; } } } } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_intermediate_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; // __shared__ complex_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_input_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_input = (complex_t *)&s_input_storage[0]; // To avoid warning about race-condition when initializing complex_t // __shared__ complex_t s_twiddle[ELEMENTARY_SIZE][2][2]; __shared__ scalar_t s_twiddle_storage[ELEMENTARY_SIZE][2][2][2]; complex_t (* s_twiddle)[2][2] = (complex_t (*)[2][2])&s_twiddle_storage[0]; // To avoid warning about race-condition when initializing complex_t int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i] = complex_t(output_a[first_idx][b][s][input_base_idx + i][0], output_a[first_idx][b][s][input_base_idx + i][1]); } int i = threadIdx.x; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][0][0], twiddle_a[s][twiddle_start_idx + i][0][0][1]); s_twiddle[i][0][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][1][0], twiddle_a[s][twiddle_start_idx + i][0][1][1]); s_twiddle[i][1][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][0][0], twiddle_a[s][twiddle_start_idx + i][1][0][1]); s_twiddle[i][1][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][1][0], twiddle_a[s][twiddle_start_idx + i][1][1][1]); } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const complex_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read const complex_t input_val[2] = {s_input[pos], s_input[pos + stride]}; thrust::tie(s_input[pos], s_input[pos + stride]) = mult2x2(twiddle_val[0][0], twiddle_val[0][1], twiddle_val[1][0], twiddle_val[1][1], input_val[0], input_val[1]); if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos][0] = s_input[pos].real(); output_a[idx+1][b][s][input_base_idx + pos][1] = s_input[pos].imag(); output_a[idx+1][b][s][input_base_idx + pos + stride][0] = s_input[pos + stride].real(); output_a[idx+1][b][s][input_base_idx + pos + stride][1] = s_input[pos + stride].imag(); } } } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][twiddle_idx][0][0], twiddle_a[s][twiddle_idx][0][1]}, {twiddle_a[s][twiddle_idx][1][0], twiddle_a[s][twiddle_idx][1][1]}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; output_a[idx+1][b][s][pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; output_a[idx+1][b][s][pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][twiddle_idx][0][0][0], twiddle_a[s][twiddle_idx][0][0][1]), complex_t(twiddle_a[s][twiddle_idx][0][1][0], twiddle_a[s][twiddle_idx][0][1][1])}, {complex_t(twiddle_a[s][twiddle_idx][1][0][0], twiddle_a[s][twiddle_idx][1][0][1]), complex_t(twiddle_a[s][twiddle_idx][1][1][0], twiddle_a[s][twiddle_idx][1][1][1])}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; const complex_t output_val[2] = {twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1], twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]}; output_a[idx+1][b][s][pos][0] = output_val[0].real(); output_a[idx+1][b][s][pos][1] = output_val[0].imag(); output_a[idx+1][b][s][pos + stride][0] = output_val[1].real(); output_a[idx+1][b][s][pos + stride][1] = output_val[1].imag(); } } void butterfly_multiply_intermediate_cuda(const at::Tensor& twiddle, at::Tensor& output, bool increasing_stride, bool return_intermediates) { const int batch_size = output.size(1); const int nstack = twiddle.size(0); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_intermediate_cuda", [&] { if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_intermediate_cuda_kernel<scalar_t, true, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_intermediate_cuda_kernel<scalar_t, true, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_onestep_cuda_kernel<scalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_onestep_cuda_kernel<scalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, 
stride), batch_size, nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_intermediate_cuda_kernel<scalar_t, false, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_intermediate_cuda_kernel<scalar_t, false, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); auto output_a = output.packed_accessor64<scalar_t, 5>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, true, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, true, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_onestep_complex_cuda_kernel<scalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_onestep_complex_cuda_kernel<scalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, false, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, false, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_multiply_intermediate_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage 
for s_d_twiddle // __shared__ scalar_t s_d_twiddle[ELEMENTARY_SIZE * 4]; // accscalar_t (* s_d_twiddle)[2][2] = (accscalar_t (*)[2][2])&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i] = d_input_a[b][s][input_base_idx + i]; } int i = threadIdx.x; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = twiddle_a[s][twiddle_start_idx + i][0][0]; s_twiddle[i][0][1] = twiddle_a[s][twiddle_start_idx + i][0][1]; s_twiddle[i][1][0] = twiddle_a[s][twiddle_start_idx + i][1][0]; s_twiddle[i][1][1] = twiddle_a[s][twiddle_start_idx + i][1][1]; } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos], output_a[idx][b][s][input_base_idx + pos + stride]}; accscalar_t d_twiddle_val[2][2] = {{grad_val[0] * input_val[0], grad_val[0] * input_val[1]}, {grad_val[1] * input_val[0], grad_val[1] * input_val[1]}}; int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid); if (tid < stride) { atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0], s_d_twiddle[twiddle_idx]); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1], s_d_twiddle[twiddle_idx + stride]); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0], s_d_twiddle[twiddle_idx + 2 * stride]); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1], s_d_twiddle[twiddle_idx + 3 * stride]); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read // sum_strided_exchange(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, log_stride, nthreads, tid); // int block_reduction_stride = max(warpSize, stride); // // int n_block_reductions = div_up(nthreads, block_reduction_stride); // int n_block_reductions = (nthreads + block_reduction_stride - 1) >> max(5, log_stride); // // if ((tid < n_block_reductions * stride) && (tid % n_block_reductions == 0)) { // if ((tid < n_block_reductions * stride) && ((tid & (n_block_reductions - 1)) == 0)) { // // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + tid / 
n_block_reductions][0][0], d_twiddle_val[0][0]); // // Trying to avoid integer division // int log_n_block_reductions = log_max_stride - max(5, log_stride); // Use the fact that nthreads == max_stride and warpSize == 32 // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][0][0], d_twiddle_val[0][0]); // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][0][1], d_twiddle_val[0][1]); // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][1][0], d_twiddle_val[1][0]); // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][1][1], d_twiddle_val[1][1]); // } } for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i] = s_grad[i]; } } } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; // __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2][2]; __shared__ scalar_t s_grad_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_grad = (complex_t *)&s_grad_storage[0]; // To avoid warning about race-condition when initializing complex_t // __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle __shared__ accscalar_t s_twiddle_storage[ELEMENTARY_SIZE][2][2][2]; acccomplex_t (* s_twiddle)[2][2] = (acccomplex_t (*)[2][2])&s_twiddle_storage[0]; // To avoid warning about race-condition when initializing complex_t // __shared__ scalar_t s_d_twiddle[ELEMENTARY_SIZE * 4]; // acccomplex_t (* s_d_twiddle)[2][2] = (acccomplex_t (*)[2][2])&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. acccomplex_t* s_d_twiddle = (acccomplex_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i] = complex_t(d_input_a[b][s][input_base_idx + i][0], d_input_a[b][s][input_base_idx + i][1]); } int i = threadIdx.x; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][0][0], twiddle_a[s][twiddle_start_idx + i][0][0][1]); s_twiddle[i][0][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][1][0], twiddle_a[s][twiddle_start_idx + i][0][1][1]); s_twiddle[i][1][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][0][0], twiddle_a[s][twiddle_start_idx + i][1][0][1]); s_twiddle[i][1][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][1][0], twiddle_a[s][twiddle_start_idx + i][1][1][1]); } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const complex_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then const complex_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1]; s_grad[pos + stride] = thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]; const complex_t input_val[2] = {complex_t(output_a[idx][b][s][input_base_idx + pos][0], output_a[idx][b][s][input_base_idx + pos][1]), complex_t(output_a[idx][b][s][input_base_idx + pos + stride][0], output_a[idx][b][s][input_base_idx + pos + stride][1])}; acccomplex_t d_twiddle_val[2][2] = {{grad_val[0] * thrust::conj(input_val[0]), grad_val[0] * thrust::conj(input_val[1])}, {grad_val[1] * thrust::conj(input_val[0]), grad_val[1] * thrust::conj(input_val[1])}}; int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<acccomplex_t (&)[4]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid); if (tid < stride) { atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0][0], s_d_twiddle[twiddle_idx].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0][1], s_d_twiddle[twiddle_idx].imag()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1][0], s_d_twiddle[twiddle_idx + stride].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1][1], s_d_twiddle[twiddle_idx + stride].imag()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0][0], s_d_twiddle[twiddle_idx + 2 * stride].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0][1], s_d_twiddle[twiddle_idx + 2 * stride].imag()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1][0], s_d_twiddle[twiddle_idx + 3 * stride].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1][1], s_d_twiddle[twiddle_idx + 3 * stride].imag()); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read } for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i][0] = s_grad[i].real(); d_input_a[b][s][input_base_idx + i][1] = s_grad[i].imag(); } } } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_a, 
at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][twiddle_idx][0][0], twiddle_a[s][twiddle_idx][0][1]}, {twiddle_a[s][twiddle_idx][1][0], twiddle_a[s][twiddle_idx][1][1]}}; accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t grad_val[2] = {d_input_a[b][s][pos], d_input_a[b][s][pos + stride]}; d_input_a[b][s][pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; d_input_a[b][s][pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; d_twiddle_val[0][0] += grad_val[0] * input_val[0]; d_twiddle_val[0][1] += grad_val[0] * input_val[1]; d_twiddle_val[1][0] += grad_val[1] * input_val[0]; d_twiddle_val[1][1] += grad_val[1] * input_val[1]; } atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1], d_twiddle_val[1][1]); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][twiddle_idx][0][0][0], twiddle_a[s][twiddle_idx][0][0][1]), complex_t(twiddle_a[s][twiddle_idx][0][1][0], twiddle_a[s][twiddle_idx][0][1][1])}, {complex_t(twiddle_a[s][twiddle_idx][1][0][0], twiddle_a[s][twiddle_idx][1][0][1]), complex_t(twiddle_a[s][twiddle_idx][1][1][0], twiddle_a[s][twiddle_idx][1][1][1])}}; acccomplex_t d_twiddle_val[2][2] = {{{0, 0}, {0, 0}}, {{0, 0}, {0, 0}}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const complex_t grad_val[2] = {complex_t(d_input_a[b][s][pos][0], d_input_a[b][s][pos][1]), complex_t(d_input_a[b][s][pos + stride][0], d_input_a[b][s][pos + stride][1])}; const complex_t d_input_val[2] = {thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1], thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]}; d_input_a[b][s][pos][0] = d_input_val[0].real(); d_input_a[b][s][pos][1] = d_input_val[0].imag(); d_input_a[b][s][pos + stride][0] = d_input_val[1].real(); d_input_a[b][s][pos + stride][1] = d_input_val[1].imag(); const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; d_twiddle_val[0][0] += grad_val[0] * thrust::conj(input_val[0]); d_twiddle_val[0][1] += grad_val[0] * thrust::conj(input_val[1]); d_twiddle_val[1][0] += grad_val[1] * thrust::conj(input_val[0]); d_twiddle_val[1][1] += grad_val[1] * thrust::conj(input_val[1]); } atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0][0], d_twiddle_val[0][0].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0][1], d_twiddle_val[0][0].imag()); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1][0], d_twiddle_val[0][1].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1][1], d_twiddle_val[0][1].imag()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0][0], d_twiddle_val[1][0].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0][1], d_twiddle_val[1][0].imag()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1][0], d_twiddle_val[1][1].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1][1], d_twiddle_val[1][1].imag()); } void butterfly_multiply_intermediate_backward_cuda(const at::Tensor& twiddle, const at::Tensor& output, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { const int batch_size = output.size(1); const int nstack = output.size(2); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_intermediate_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); const auto output_a = output.packed_accessor64<scalar_t, 4>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 4>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 
block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_onestep_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_onestep_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); const auto output_a = output.packed_accessor64<scalar_t, 5>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_complex_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_complex_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); hipLaunchKernelGGL(( butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_multiply_intermediate_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_untied_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; int b = blockIdx.x * blockDim.y + threadIdx.y; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * max_stride * 2] = output_a[first_idx][b][s][input_base_idx + i]; } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos_x] = s_input[pos]; output_a[idx+1][b][s][input_base_idx + pos_x + stride] = s_input[pos + stride]; } } } } // Trying out an implementation where consecutive threads process same input index, but different batch indices. // template <typename scalar_t, bool increasing_stride, bool return_intermediates> // __global__ void butterfly_multiply_untied_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, // at::PackedTensorAccessor64<scalar_t, 4> output_a, // int log_max_stride, // int log_n) { // const int batch_size = output_a.size(1); // const int s = blockIdx.z; // const int max_stride = 1 << log_max_stride; // const int input_base_idx = blockIdx.y * blockDim.y * 2; // __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; // __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // int b = blockIdx.x * blockDim.x + threadIdx.x; // int tid_x = threadIdx.x; // batch index // int tid_y = threadIdx.y; // int first_idx = increasing_stride ? 
0 : log_n - 1 - log_max_stride; // if (b < batch_size) { // for (int i = tid_y; i < max_stride * 2; i += blockDim.y) { // s_input[tid_x + i * blockDim.x] = output_a[first_idx][b][s][input_base_idx + i]; // } // } // // for (int i = tid_x + tid_y * blockDim.x; i < blockDim.x * max_stride * 2; i += blockDim.x * blockDim.y) { // // int input_idx = i & (max_stride * 2 - 1); // int input_idx = i % (max_stride * 2); // // int batch_idx = i >> (log_max_stride + 1); // int batch_idx = (i - input_idx) / (max_stride * 2); // // if (blockIdx.x * blockDim.x + batch_idx < batch_size) { // // s_input[batch_idx + input_idx * blockDim.x] = output_a[blockIdx.x * blockDim.x + first_idx][batch_idx][s][input_base_idx + input_idx]; // // } // // } // for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { // int log_stride = increasing_stride ? idx : log_n - 1 - idx; // int stride = 1 << log_stride; // if (tid_x == 0) { // s_twiddle[tid_y][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][0][0]; // s_twiddle[tid_y][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][0][1]; // s_twiddle[tid_y][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][1][0]; // s_twiddle[tid_y][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][1][1]; // } // int low_order_bits = tid_y & (stride - 1); // int low_order_bits = tid_y % stride; // int pos_y = 2 * (tid_y - low_order_bits) + low_order_bits; // int pos_x = tid_x; // int pos = pos_x + pos_y * blockDim.x; // __syncthreads(); // const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_y][0][0], s_twiddle[tid_y][0][1]}, // {s_twiddle[tid_y][1][0], s_twiddle[tid_y][1][1]}}; // __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read // if (b < batch_size) { // const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride * blockDim.x]}; // s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; // s_input[pos + stride * blockDim.x] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; // if (return_intermediates || idx == first_idx + log_max_stride) { // output_a[idx+1][b][s][input_base_idx + pos_y] = s_input[pos]; // output_a[idx+1][b][s][input_base_idx + pos_y + stride] = s_input[pos + stride * blockDim.x]; // } // } // } // } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_untied_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; // __shared__ complex_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_input_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_input = (complex_t *)&s_input_storage[0]; // To avoid warning about race-condition when initializing complex_t int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) int first_idx = increasing_stride ? 
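// Pair indexing used throughout these butterfly kernels: at a level with the given
// stride, thread i owns the pair (pos, pos + stride), where
//   low_order_bits = i & (stride - 1);  pos = 2 * (i - low_order_bits) + low_order_bits;
// For example, stride = 4 and i = 5 give low_order_bits = 1 and pos = 9, so that thread
// mixes elements 9 and 13 with its 2x2 twiddle block (complex-valued in this kernel,
// stored as (real, imag) pairs in the last dimension of twiddle_a).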
0 : log_n - 1 - log_max_stride; for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i] = complex_t(output_a[first_idx][b][s][input_base_idx + i][0], output_a[first_idx][b][s][input_base_idx + i][1]); } int i = threadIdx.x; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1])}}; __syncthreads(); const complex_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos][0] = s_input[pos].real(); output_a[idx+1][b][s][input_base_idx + pos][1] = s_input[pos].imag(); output_a[idx+1][b][s][input_base_idx + pos + stride][0] = s_input[pos + stride].real(); output_a[idx+1][b][s][input_base_idx + pos + stride][1] = s_input[pos + stride].imag(); } } } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][i][0][0], twiddle_a[s][log_stride][i][0][1]}, {twiddle_a[s][log_stride][i][1][0], twiddle_a[s][log_stride][i][1][1]}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; output_a[idx+1][b][s][pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; output_a[idx+1][b][s][pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
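// The *_onestep_* kernels apply a single butterfly level per launch; they are used when a
// level's stride exceeds ELEMENTARY_SIZE, i.e. when that level no longer fits in one thread
// block's shared memory. Each thread reads its 2x2 twiddle once and then walks over batch
// elements with a grid-stride loop (step blockDim.y * gridDim.x).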
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][i][0][0][0], twiddle_a[s][log_stride][i][0][0][1]), complex_t(twiddle_a[s][log_stride][i][0][1][0], twiddle_a[s][log_stride][i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][i][1][0][0], twiddle_a[s][log_stride][i][1][0][1]), complex_t(twiddle_a[s][log_stride][i][1][1][0], twiddle_a[s][log_stride][i][1][1][1])}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; const complex_t output_val[2] = {twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1], twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]}; output_a[idx+1][b][s][pos][0] = output_val[0].real(); output_a[idx+1][b][s][pos][1] = output_val[0].imag(); output_a[idx+1][b][s][pos + stride][0] = output_val[1].real(); output_a[idx+1][b][s][pos + stride][1] = output_val[1].imag(); } } void butterfly_multiply_untied_cuda(const at::Tensor& twiddle, at::Tensor& output, bool increasing_stride, bool return_intermediates) { const int batch_size = output.size(1); const int nstack = twiddle.size(0); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_untied_cuda", [&] { if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(div_up(MAX_BLOCK_SIZE, stride * 2), stride); // dim3 grid(div_up(batch_size, block.x), div_up(n / 2, stride), nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_untied_cuda_kernel<scalar_t, true, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_untied_cuda_kernel<scalar_t, true, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_onestep_cuda_kernel<scalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_onestep_cuda_kernel<scalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } 
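// Decreasing-stride forward: the levels whose stride exceeds ELEMENTARY_SIZE were peeled
// off above, one kernel launch per level; the remaining low-stride levels are fused into
// the single shared-memory kernel launched below.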
int stride = 1 << log_stride; dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(div_up(MAX_BLOCK_SIZE, stride * 2), stride); // dim3 grid(div_up(batch_size, block.x), div_up(n / 2, stride), nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_untied_cuda_kernel<scalar_t, false, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_untied_cuda_kernel<scalar_t, false, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 6>(); auto output_a = output.packed_accessor64<scalar_t, 5>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_untied_complex_cuda_kernel<scalar_t, true, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_untied_complex_cuda_kernel<scalar_t, true, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_onestep_complex_cuda_kernel<scalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_onestep_complex_cuda_kernel<scalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); return_intermediates ?hipLaunchKernelGGL(( butterfly_multiply_untied_complex_cuda_kernel<scalar_t, false, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n) :hipLaunchKernelGGL(( butterfly_multiply_untied_complex_cuda_kernel<scalar_t, false, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, log_stride, log_n); } } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_multiply_untied_cuda failed with error code ", hipGetLastError()); } // Original implementation, with 1 batch per thread block // template <typename scalar_t, typename accscalar_t, bool increasing_stride> // __global__ void butterfly_multiply_untied_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, // const at::PackedTensorAccessor64<scalar_t, 4> output_a, // at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, // at::PackedTensorAccessor64<scalar_t, 3> d_input_a, // int log_max_stride, // int log_n) { // const int batch_size = output_a.size(1); // const int s = 
blockIdx.z; // const int max_stride = 1 << log_max_stride; // const int input_base_idx = blockIdx.y * blockDim.x * 2; // __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; // int b = blockIdx.x * blockDim.y + threadIdx.y; // if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) // for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { // s_grad[i] = d_input_a[b][s][input_base_idx + i]; // } // int i = threadIdx.x; // int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; // for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { // int log_stride = increasing_stride ? idx : log_n - 1 - idx; // int stride = 1 << log_stride; // int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; // int pos = 2 * (i - low_order_bits) + low_order_bits; // const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1]}, // {twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1]}}; // __syncthreads(); // const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; // s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; // s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; // const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos], output_a[idx][b][s][input_base_idx + pos + stride]}; // accscalar_t d_twiddle_val[2][2] = {{grad_val[0] * input_val[0], grad_val[0] * input_val[1]}, // {grad_val[1] * input_val[0], grad_val[1] * input_val[1]}}; // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0], d_twiddle_val[0][0]); // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1], d_twiddle_val[0][1]); // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0], d_twiddle_val[1][0]); // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1], d_twiddle_val[1][1]); // } // __syncthreads(); // for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { // d_input_a[b][s][input_base_idx + i] = s_grad[i]; // } // } // } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * max_stride * 2] = d_input_a[b][s][input_base_idx + i]; } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; int first_idx = increasing_stride ? 
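// Backward through one block of levels: the input gradient flows through the transpose of
// each 2x2 twiddle (d_in = T^T * d_out), and d_twiddle accumulates the outer product
// d_out * in^T. The per-block reduction of d_twiddle over the blockDim.y batch rows reuses
// the s_twiddle storage (as accscalar_t, via s_d_twiddle and sum_strided_atomic) before a
// single atomicAdd per entry into global memory.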
0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos_x], output_a[idx][b][s][input_base_idx + pos_x + stride]}; d_twiddle_val[0][0] = grad_val[0] * input_val[0]; d_twiddle_val[0][1] = grad_val[0] * input_val[1]; d_twiddle_val[1][0] = grad_val[1] * input_val[0]; d_twiddle_val[1][1] = grad_val[1] * input_val[1]; } int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, max_stride, nthreads, tid); if (tid_y == 0) { atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], s_d_twiddle[tid_x]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], s_d_twiddle[tid_x + max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], s_d_twiddle[tid_x + 2 * max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], s_d_twiddle[tid_x + 3 * max_stride]); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read } if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i] = s_grad[i + threadIdx.y * max_stride * 2]; } } } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_backward_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 6> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; // __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2][2]; __shared__ scalar_t s_grad_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_grad = (complex_t *)&s_grad_storage[0]; // To avoid warning about race-condition when initializing complex_t int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < 
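// Complex backward: the input gradient is propagated with the conjugate transpose of the
// twiddle block (d_in = T^H * d_out) and d_twiddle accumulates d_out * conj(in)^T; real
// and imaginary parts are accumulated with separate atomicAdds.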
batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i] = complex_t(d_input_a[b][s][input_base_idx + i][0], d_input_a[b][s][input_base_idx + i][1]); } int i = threadIdx.x; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1])}}; __syncthreads(); const complex_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1]; s_grad[pos + stride] = thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]; const complex_t input_val[2] = {complex_t(output_a[idx][b][s][input_base_idx + pos][0], output_a[idx][b][s][input_base_idx + pos][1]), complex_t(output_a[idx][b][s][input_base_idx + pos + stride][0], output_a[idx][b][s][input_base_idx + pos + stride][1])}; acccomplex_t d_twiddle_val[2][2] = {{grad_val[0] * thrust::conj(input_val[0]), grad_val[0] * thrust::conj(input_val[1])}, {grad_val[1] * thrust::conj(input_val[0]), grad_val[1] * thrust::conj(input_val[1])}}; atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], d_twiddle_val[0][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1], d_twiddle_val[0][0].imag()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], d_twiddle_val[0][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1], d_twiddle_val[0][1].imag()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], d_twiddle_val[1][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1], d_twiddle_val[1][0].imag()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], d_twiddle_val[1][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1], d_twiddle_val[1][1].imag()); } __syncthreads(); for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i][0] = s_grad[i].real(); d_input_a[b][s][input_base_idx + i][1] = s_grad[i].imag(); } } } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_backward_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
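// One-step backward: each thread accumulates its 2x2 d_twiddle contribution in registers
// over the whole grid-stride batch loop and issues only four atomicAdds at the end, rather
// than one atomicAdd per batch element.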
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][i][0][0], twiddle_a[s][log_stride][i][0][1]}, {twiddle_a[s][log_stride][i][1][0], twiddle_a[s][log_stride][i][1][1]}}; accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const scalar_t grad_val[2] = {d_input_a[b][s][pos], d_input_a[b][s][pos + stride]}; d_input_a[b][s][pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; d_input_a[b][s][pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; d_twiddle_val[0][0] += grad_val[0] * input_val[0]; d_twiddle_val[0][1] += grad_val[0] * input_val[1]; d_twiddle_val[1][0] += grad_val[1] * input_val[0]; d_twiddle_val[1][1] += grad_val[1] * input_val[1]; } atomicAdd(&d_twiddle_a[s][log_stride][i][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][log_stride][i][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][log_stride][i][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][log_stride][i][1][1], d_twiddle_val[1][1]); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_backward_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 6> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][i][0][0][0], twiddle_a[s][log_stride][i][0][0][1]), complex_t(twiddle_a[s][log_stride][i][0][1][0], twiddle_a[s][log_stride][i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][i][1][0][0], twiddle_a[s][log_stride][i][1][0][1]), complex_t(twiddle_a[s][log_stride][i][1][1][0], twiddle_a[s][log_stride][i][1][1][1])}}; acccomplex_t d_twiddle_val[2][2] = {{{0, 0}, {0, 0}}, {{0, 0}, {0, 0}}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const complex_t grad_val[2] = {complex_t(d_input_a[b][s][pos][0], d_input_a[b][s][pos][1]), complex_t(d_input_a[b][s][pos + stride][0], d_input_a[b][s][pos + stride][1])}; const complex_t d_input_val[2] = {thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1], thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]}; d_input_a[b][s][pos][0] = d_input_val[0].real(); d_input_a[b][s][pos][1] = d_input_val[0].imag(); d_input_a[b][s][pos + stride][0] = d_input_val[1].real(); d_input_a[b][s][pos + stride][1] = d_input_val[1].imag(); const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; d_twiddle_val[0][0] += grad_val[0] * thrust::conj(input_val[0]); d_twiddle_val[0][1] += grad_val[0] * thrust::conj(input_val[1]); d_twiddle_val[1][0] += grad_val[1] * thrust::conj(input_val[0]); d_twiddle_val[1][1] += grad_val[1] * thrust::conj(input_val[1]); } atomicAdd(&d_twiddle_a[s][log_stride][i][0][0][0], d_twiddle_val[0][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][0][0][1], d_twiddle_val[0][0].imag()); atomicAdd(&d_twiddle_a[s][log_stride][i][0][1][0], d_twiddle_val[0][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][0][1][1], d_twiddle_val[0][1].imag()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][0][0], d_twiddle_val[1][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][0][1], d_twiddle_val[1][0].imag()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][1][0], d_twiddle_val[1][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][1][1], d_twiddle_val[1][1].imag()); } void butterfly_multiply_untied_backward_cuda(const at::Tensor& twiddle, const at::Tensor& output, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { const int batch_size = output.size(1); const int nstack = output.size(2); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_untied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); const auto output_a = output.packed_accessor64<scalar_t, 4>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, 
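// Backward launch order mirrors the forward pass in reverse: with increasing_stride the
// forward ended on the largest strides, so those levels are peeled off first here, one
// one-step launch per level, and the remaining strides (<= ELEMENTARY_SIZE) are then
// handled by the fused shared-memory backward kernel.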
WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_backward_onestep_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(stride); // dim3 grid(batch_size, div_up(n / 2, stride), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(stride); // dim3 grid(batch_size, div_up(n / 2, stride), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_backward_onestep_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 6>(); const auto output_a = output.packed_accessor64<scalar_t, 5>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 6>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_backward_complex_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); hipLaunchKernelGGL(( butterfly_multiply_untied_backward_complex_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); hipLaunchKernelGGL(( 
butterfly_multiply_untied_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_multiply_untied_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride, int log_max_stride, typename Function0, typename Function1, typename Function2> __global__ void butterfly_multiply_untied_forward_backward_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a, Function0 load_input, Function1 load_grad, CudaAcsr32<scalar_t, 5> d_twiddle_a, Function2 save_d_input, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Forward pass to compute the intermediate values scalar_t input_val_storage[MAX_N_FACTORS][2]; // Storing inputs for backward pass load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; #pragma unroll for (int idx = 0; idx <= log_max_stride; ++idx) { // Let's not skip steps for now int log_stride = increasing_stride ? idx : log_max_stride - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; input_val_storage[idx][0] = input_val[0]; input_val_storage[idx][1] = input_val[1]; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read // or s_s_input will be overwritten with s_grad before some thread can read } // Backward pass scalar_t* s_grad = &s_input[0]; // Reusing the same storage as s_input __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE][2][2]; load_grad(s_grad); #pragma unroll for (int idx = log_max_stride; idx >= 0; --idx) { int log_stride = increasing_stride ? 
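// Fused forward + backward: the forward loop above stashed each level's input pair in
// registers (input_val_storage), so no intermediate activations are written to global
// memory; this backward loop replays the levels in reverse order, reusing the s_input
// buffer as s_grad.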
idx : log_max_stride - idx; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {input_val_storage[idx][0], input_val_storage[idx][1]}; s_d_twiddle[tid_x + tid_y * max_stride][0][0] = grad_val[0] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][0][1] = grad_val[0] * input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride][1][0] = grad_val[1] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][1][1] = grad_val[1] * input_val[1]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val[0][0] += s_d_twiddle[tid_x + i * max_stride][0][0]; d_twiddle_val[0][1] += s_d_twiddle[tid_x + i * max_stride][0][1]; d_twiddle_val[1][0] += s_d_twiddle[tid_x + i * max_stride][1][0]; d_twiddle_val[1][1] += s_d_twiddle[tid_x + i * max_stride][1][1]; } } atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], d_twiddle_val[1][1]); } } save_d_input(s_grad); } void butterfly_multiply_untied_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, 
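// The fused kernel takes load_input / load_grad / save_d_input functors so the same code
// can be driven with different global-memory layouts (e.g. the conv2d butterfly variants
// referred to in the kernel comments); the device lambdas defined here simply stage a
// (blockDim.y x 2*stride) tile of the batch in shared memory. The switch on log_stride
// further below exists because log_max_stride is a template parameter, which lets the
// per-level loops be fully unrolled so that input_val_storage indexing is known at
// compile time and can stay in registers.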
stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; switch (log_stride) { case 0: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 0>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 0>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 1: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 1>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 1>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 2: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 2>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 2>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 3: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 3>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 3>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 4: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 4>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 
false, 4>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 5: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 5>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 5>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 6: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 6>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 6>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 7: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 7>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 7>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 8: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 8>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 8>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 9: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 9>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 9>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_multiply_untied_forward_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, bool increasing_stride, typename Function0, typename Function1> __global__ void butterfly_ortho_multiply_tied_cuda_kernel(const CudaAcsr32<scalar_t, 2> twiddle_cos_a, const CudaAcsr32<scalar_t, 2> twiddle_sin_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int 
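// Orthogonal (Givens-rotation) butterfly, "tied" parameterization: each rotation is a
// (cos, sin) pair, and a level with the given stride reads its `stride` angles from the
// flat range [stride - 1, 2*stride - 2] of twiddle_cos_a / twiddle_sin_a, so every pair
// sharing the same low-order bits shares one rotation (consistent with a flat array of
// n - 1 angles per stack).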
max_stride = 1 << log_max_stride; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = 0; idx < (log_max_stride + 1); ++idx) { int log_stride = increasing_stride ? idx : log_max_stride - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if ((tid_y == 0) && (tid_x < stride)) { s_twiddle[tid_x][0] = twiddle_cos_a[s][twiddle_start_idx + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][twiddle_start_idx + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int twiddle_idx = low_order_bits; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[twiddle_idx][0], s_twiddle[twiddle_idx][1]}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1]; s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } save_output(s_input); } void butterfly_ortho_multiply_tied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_ortho_multiply_tied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; increasing_stride ?hipLaunchKernelGGL(( butterfly_ortho_multiply_tied_cuda_kernel<scalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size) :hipLaunchKernelGGL(( butterfly_ortho_multiply_tied_cuda_kernel<scalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size); }); TORCH_CHECK(hipGetLastError() == hipSuccess, 
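// Each tied-ortho butterfly step applies the plane rotation
//   [y0]   [cos  -sin] [x0]
//   [y1] = [sin   cos] [x1]
// to the pair (pos, pos + stride). Because the map is orthogonal, the backward kernel
// below reconstructs the inputs from the outputs with the transposed rotation instead of
// saving forward intermediates, and it accumulates the gradient with respect to the angle
// itself: with dR/dtheta = [[-sin, -cos], [cos, -sin]],
//   dL/dtheta = g . (dR/dtheta x) = -sin*(g0*x0 + g1*x1) + cos*(g1*x0 - g0*x1),
// which is the d_twiddle_val expression in that kernel.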
"butterfly_ortho_multiply_tied_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride, typename Function0, typename Function1, typename Function2> __global__ void butterfly_ortho_multiply_tied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 2> twiddle_cos_a, const CudaAcsr32<scalar_t, 2> twiddle_sin_a, Function0 load_output, Function1 load_grad, CudaAcsr32<scalar_t, 2> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int max_stride = 1 << log_max_stride; __shared__ scalar_t s_output[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE]; int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; load_output(s_output); load_grad(s_grad); for (int idx = log_max_stride; idx >= 0; --idx) { int log_stride = increasing_stride ? idx : log_max_stride - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if ((tid_y == blockDim.y - 1) && (tid_x < stride)) { s_twiddle[tid_x][0] = twiddle_cos_a[s][twiddle_start_idx + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][twiddle_start_idx + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int twiddle_idx = low_order_bits; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[twiddle_idx][0], s_twiddle[twiddle_idx][1]}; scalar_t d_twiddle_val[1] = {0}; // Idk, to be consistent with sum_strided's interface if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1]; s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1]; const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]}; const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1], -twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]}; s_output[pos] = input_val[0]; s_output[pos + stride] = input_val[1]; d_twiddle_val[0] = (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1]) + (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0]; } int tid = tid_x + tid_y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[1]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid); if ((tid_y == 0) && (tid_x < stride)) { atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx], s_d_twiddle[twiddle_idx]); } } save_d_input(s_grad); } void butterfly_ortho_multiply_tied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_ortho_multiply_tied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = 
twiddle_cos.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; increasing_stride ?hipLaunchKernelGGL(( butterfly_ortho_multiply_tied_backward_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size) :hipLaunchKernelGGL(( butterfly_ortho_multiply_tied_backward_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_ortho_multiply_tied_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, bool increasing_stride, typename Function0, typename Function1> __global__ void butterfly_ortho_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = 0; idx < (log_max_stride + 1); ++idx) { int log_stride = increasing_stride ? 
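// "Untied" orthogonal butterfly: unlike the tied kernel above, every (level, pair) position
// has its own rotation angle; twiddle_cos_a / twiddle_sin_a are indexed by
// (stack, log_stride, pair index) rather than by position within the stride.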
idx : log_max_stride - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0] = twiddle_cos_a[s][log_stride][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][log_stride][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1]; s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } save_output(s_input); } void butterfly_ortho_multiply_untied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_ortho_multiply_untied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; increasing_stride ?hipLaunchKernelGGL(( butterfly_ortho_multiply_untied_cuda_kernel<scalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size) :hipLaunchKernelGGL(( butterfly_ortho_multiply_untied_cuda_kernel<scalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size); }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_ortho_multiply_untied_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride, typename Function0, typename Function1, typename Function2> __global__ void butterfly_ortho_multiply_untied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_output, Function1 load_grad, 
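// Single-threaded reference of what the orthogonal forward kernel above computes for one
// stack; sketch only, assuming increasing_stride == true, n a power of 2, and a hypothetical
// helper with cos_t/sin_t laid out as [log_n][n/2] to mirror twiddle_cos_a[s][log_stride][i]:
//   void butterfly_ortho_ref(const float* cos_t, const float* sin_t, float* x, int n) {
//     int log_n = 0; while ((1 << log_n) < n) ++log_n;
//     for (int log_stride = 0; log_stride < log_n; ++log_stride) {
//       int stride = 1 << log_stride;
//       for (int t = 0; t < n / 2; ++t) {
//         int low = t & (stride - 1);
//         int pos = 2 * (t - low) + low;              // same index map as the kernel
//         float c = cos_t[log_stride * (n / 2) + t];
//         float s = sin_t[log_stride * (n / 2) + t];
//         float x0 = x[pos], x1 = x[pos + stride];
//         x[pos]          = c * x0 - s * x1;
//         x[pos + stride] = s * x0 + c * x1;
//       }
//     }
//   }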
CudaAcsr32<scalar_t, 3> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_output[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE]; int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; load_output(s_output); load_grad(s_grad); for (int idx = log_max_stride; idx >= 0; --idx) { int log_stride = increasing_stride ? idx : log_max_stride - idx; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0] = twiddle_cos_a[s][log_stride][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][log_stride][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1]; s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1]; const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]}; const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1], -twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]}; s_output[pos] = input_val[0]; s_output[pos + stride] = input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride] = (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1]) + (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val = 0; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val += s_d_twiddle[tid_x + i * max_stride]; } } atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x], d_twiddle_val); } } save_d_input(s_grad); } void butterfly_ortho_multiply_untied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_ortho_multiply_untied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = 
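// Note on the d_twiddle reduction in the untied ortho backward kernel above (reasoning sketch,
// no extra code): in the tied backward kernel several tid_x values share one angle
// (twiddle_idx = tid_x & (stride - 1)), so partial gradients are reduced with sum_strided_atomic
// across both the strided tid_x groups and the tid_y batch rows. In the untied kernel every
// tid_x owns its own angle at each log_stride, so only the batch rows need summing: each row
// writes s_d_twiddle[tid_x + tid_y * max_stride], and tid_y == 0 folds the rows and issues one
// atomicAdd per angle,
//   acc = sum over valid rows i of s_d_twiddle[tid_x + i * max_stride];
//   atomicAdd(&d_twiddle_a[s][log_stride][tid_x], acc);
// so global atomic traffic is one add per angle per block rather than one per batch element.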
int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; increasing_stride ?hipLaunchKernelGGL(( butterfly_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size) :hipLaunchKernelGGL(( butterfly_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_ortho_multiply_untied_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename Function0, typename Function1> __global__ void bbt_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size, int nblocks) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int block = 0; block < nblocks; ++block) { for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) { int log_stride = idx <= log_max_stride ? 
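// Stride schedule of one B B^T block in the bbt_* kernels (the ternary here), for reference:
// for idx in [0, 2 * (log_max_stride + 1)),
//   log_stride = idx <= log_max_stride ? log_max_stride - idx          // strides n/2, ..., 2, 1
//                                      : idx - log_max_stride - 1;     // strides 1, 2, ..., n/2
// e.g. log_max_stride = 2 gives log_stride = 2, 1, 0, 0, 1, 2: a decreasing-stride butterfly
// followed by an increasing-stride one, repeated nblocks times.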
log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } } save_output(s_input); } void bbt_multiply_untied_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_multiply_untied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; hipLaunchKernelGGL(( bbt_multiply_untied_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, save_output, log_stride, batch_size, nblocks); }); TORCH_CHECK(hipGetLastError() == hipSuccess, "bbt_multiply_untied_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename accscalar_t, int nblocks, typename Function0, typename Function1, typename Function2> __global__ void bbt_multiply_untied_forward_backward_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a, Function0 load_input, Function1 load_grad, CudaAcsr32<scalar_t, 5> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size) { const int s = blockIdx.y + 
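// The forward-backward kernel starting here keeps every intermediate (x0, x1) pair of the
// forward pass in per-thread registers, input_val_storage[nblocks * 2 * MAX_N_FACTORS][2],
// instead of spilling them to global memory, and the backward loop replays them in reverse.
// Since that array must be sized at compile time, nblocks is a template parameter, which is
// why the host wrapper below dispatches through a switch over nblocks = 1 ... 14 (and why
// MAX_N_FACTORS has to bound log_max_stride + 1).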
gridDim.y * blockIdx.z; // For conv2d bbt as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Forward pass to compute the intermediate values scalar_t input_val_storage[nblocks * 2 * MAX_N_FACTORS][2]; // Storing inputs for backward pass load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int block = 0; block < nblocks; ++block) { for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) { // Let's not skip steps for now int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; input_val_storage[idx + block * 2 * (log_max_stride + 1)][0] = input_val[0]; input_val_storage[idx + block * 2 * (log_max_stride + 1)][1] = input_val[1]; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read // or s_s_input will be overwritten with s_grad before some thread can read } } // Backward pass scalar_t* s_grad = &s_input[0]; // Reusing the same storage as s_input __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE][2][2]; load_grad(s_grad); for (int block = nblocks - 1; block >= 0; --block) { for (int idx = 2 * (log_max_stride + 1) - 1; idx >= 0; --idx) { int log_stride = idx <= log_max_stride ? 
log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {input_val_storage[idx + block * 2 * (log_max_stride + 1)][0], input_val_storage[idx + block * 2 * (log_max_stride + 1)][1]}; s_d_twiddle[tid_x + tid_y * max_stride][0][0] = grad_val[0] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][0][1] = grad_val[0] * input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride][1][0] = grad_val[1] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][1][1] = grad_val[1] * input_val[1]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val[0][0] += s_d_twiddle[tid_x + i * max_stride][0][0]; d_twiddle_val[0][1] += s_d_twiddle[tid_x + i * max_stride][0][1]; d_twiddle_val[1][0] += s_d_twiddle[tid_x + i * max_stride][1][0]; d_twiddle_val[1][1] += s_d_twiddle[tid_x + i * max_stride][1][1]; } } atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1], d_twiddle_val[1][1]); } } } save_d_input(s_grad); } void bbt_multiply_untied_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_multiply_untied_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, 
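// Per-pair backward used in the loop above (and in butterfly_conv2d_backward further down),
// written out for reference with W = [[w00, w01], [w10, w11]]:
//   forward:    y0 = w00*x0 + w01*x1;    y1 = w10*x0 + w11*x1
//   grad in:    gx0 = w00*g0 + w10*g1;   gx1 = w01*g0 + w11*g1      // W^T g
//   grad W:     dW[i][j] += g[i] * x[j]                             // outer product, summed over the batch
// where (x0, x1) are the stored forward inputs and (g0, g1) the incoming gradients.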
at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; switch (nblocks) { case 1: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 1>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 2: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 2>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 3: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 3>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 4: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 4>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 5: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 5>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 6: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 6>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 7: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 7>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 8: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 8>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 9: hipLaunchKernelGGL(( 
bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 9>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 10: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 10>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 11: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 11>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 12: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 12>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 13: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 13>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 14: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 14>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "bbt_multiply_untied_forward_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename Function0, typename Function1> __global__ void bbt_ortho_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size, int nblocks) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt_ortho as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int block = 0; block < nblocks; ++block) { for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) { int log_stride = idx <= log_max_stride ? 
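// The bbt_ortho_* kernels combine the B B^T stride schedule above with the cos/sin (rotation)
// parameterization, so every factor, and hence the whole product, is orthogonal. Their host
// wrappers recover the block count from the twiddle shape, nblocks = twiddle_cos.size(1) / (2 * log_n),
// since each B B^T block contributes 2 * log_n butterfly factors; e.g. n = 8 (log_n = 3) with
// twiddle_cos of shape (nstack, 6 * k, n / 2) gives nblocks = k.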
log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0] = twiddle_cos_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1]; s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } } save_output(s_input); } void bbt_ortho_multiply_untied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_ortho_multiply_untied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; hipLaunchKernelGGL(( bbt_ortho_multiply_untied_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size, nblocks); }); TORCH_CHECK(hipGetLastError() == hipSuccess, "bbt_ortho_multiply_untied_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename accscalar_t, typename Function0, typename Function1, typename Function2> __global__ void bbt_ortho_multiply_untied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_output, Function1 load_grad, CudaAcsr32<scalar_t, 3> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size, int nblocks) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt_ortho as well const int max_stride = 
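// Unlike the general bbt forward-backward kernel above, the orthogonal backward kernel here
// needs no stored intermediates: every factor is a rotation, so the inputs of each step are
// reconstructed in place from s_output by applying the transpose,
//   x0 =  c*y0 + s*y1;   x1 = -s*y0 + c*y1;      // x = R(theta)^T y
// while s_grad is rolled back through the same factors in reverse order.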
1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_output[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE]; int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; load_output(s_output); load_grad(s_grad); for (int block = nblocks - 1; block >= 0; --block) { for (int idx = 2 * (log_max_stride + 1) - 1; idx >= 0; --idx) { int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0] = twiddle_cos_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1]; s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1]; const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]}; const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1], -twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]}; s_output[pos] = input_val[0]; s_output[pos + stride] = input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride] = (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1]) + (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val = 0; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val += s_d_twiddle[tid_x + i * max_stride]; } } atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x], d_twiddle_val); } } } save_d_input(s_grad); } void bbt_ortho_multiply_untied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "bbt_ortho_multiply_untied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) 
stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; hipLaunchKernelGGL(( bbt_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size, nblocks); }); TORCH_CHECK(hipGetLastError() == hipSuccess, "bbt_ortho_multiply_untied_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_conv2d_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_max_stride, int log_n, int kernel_size, int padding, int h_out, int w_out) { const int batch_size = output_a.size(1); const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; const int max_stride = 1 << log_max_stride; // base index always 0 const int input_base_idx = 0; const int h_in = input_a.size(2); const int w_in = input_a.size(3); __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; int b = blockIdx.x * blockDim.y + threadIdx.y; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; if (b < batch_size) { for (int t = threadIdx.x; t < max_stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + threadIdx.y * max_stride * 2] = 0; else{ s_input[t + threadIdx.y * max_stride * 2] = input_a[batch_idx][input_base_idx + t][i][j]; // load input into first idx of output for backward pass // we allocated this memory already so shouldn't affect too much output_a[0][b][s][input_base_idx + t] = s_input[t + threadIdx.y * max_stride * 2]; } } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos_x] = s_input[pos]; output_a[idx+1][b][s][input_base_idx + pos_x + stride] = s_input[pos + stride]; } } } } void butterfly_conv2d_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output, const int kernel_size, const int padding, const int h_out, const int w_out, bool increasing_stride, bool return_intermediates) { const int b_in = input.size(0); const int n = input.size(1); /*c*/ const int nstack = twiddle.size(0); const int stack = kernel_size*kernel_size; const int log_n = int(log2((double) n)); const int batch_size = output.size(1); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_conv2d_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); // batch_size, c, h, w const auto input_a = input.packed_accessor64<scalar_t, 4>(); // log c_in, h*w*batch_size, nstack, c_in auto output_a = output.packed_accessor64<scalar_t, 4>(); // assume in_channels <= 1024 int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); // to support out_channels > in_channels int c_out_ratio = nstack / stack; // dim3 block(stride); // dim3 grid(batch_size, c_out_ratio, stack); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); if (increasing_stride) { return_intermediates ?hipLaunchKernelGGL(( butterfly_conv2d_cuda_kernel<scalar_t, true, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, h_out, w_out) :hipLaunchKernelGGL(( butterfly_conv2d_cuda_kernel<scalar_t, true, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, h_out, w_out); } else { return_intermediates ?hipLaunchKernelGGL(( butterfly_conv2d_cuda_kernel<scalar_t, false, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, h_out, w_out) :hipLaunchKernelGGL(( butterfly_conv2d_cuda_kernel<scalar_t, false, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, 
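// Patch/pixel index math used by the conv2d kernels and the load/save lambdas in this file,
// spelled out for reference. For a flat work item b:
//   patch_idx = b % (h_out * w_out);    batch_idx = b / (h_out * w_out);
//   k_i = stack / kernel_size;  k_j = stack % kernel_size;    // position inside the k x k window
//   p_i = patch_idx / w_out;    p_j = patch_idx % w_out;      // output pixel
//   i = k_i + p_i - padding;    j = k_j + p_j - padding;      // input pixel; out-of-range reads are 0
// e.g. kernel_size = 3, padding = 1, stack = 4 (the center tap) and patch (p_i, p_j) = (0, 0)
// map to input pixel (i, j) = (0, 0).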
h_out, w_out); } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_conv2d_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_conv2d_backward_cuda_kernel( const at::PackedTensorAccessor64<scalar_t, 3> grad_a, const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_max_stride, int log_n, int kernel_size, int padding, int h_out, int w_out) { const int batch_size = output_a.size(1); const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; // base index always 0 const int input_base_idx = 0; const int h_in = d_input_a.size(2); const int w_in = d_input_a.size(3); const int max_stride = 1 << log_max_stride; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * max_stride * 2] = grad_a[b][s][input_base_idx + i]; } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos_x], output_a[idx][b][s][input_base_idx + pos_x + stride]}; d_twiddle_val[0][0] = grad_val[0] * input_val[0]; d_twiddle_val[0][1] = grad_val[0] * input_val[1]; d_twiddle_val[1][0] = grad_val[1] * input_val[0]; d_twiddle_val[1][1] = grad_val[1] * input_val[1]; } int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, max_stride, nthreads, tid); if (tid_y == 0) { atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], s_d_twiddle[tid_x]); 
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], s_d_twiddle[tid_x + max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], s_d_twiddle[tid_x + 2 * max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], s_d_twiddle[tid_x + 3 * max_stride]); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read } if (b < batch_size) { const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); for (int t = threadIdx.x; t < max_stride * 2; t += blockDim.x) { // map back to b, c, h, w // get index into patch int k_i = stack / kernel_size; // stack / kernel_size int k_j = stack % kernel_size; // stack % kernel_size // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; // this needs to be atomic because input is reused in forward pass // with out_channels > in_channels and for each entry of the patch if (i < w_in && j < h_in && i >= 0 && j >= 0) { atomicAdd(&d_input_a[batch_idx][input_base_idx + t][i][j], s_grad[t + threadIdx.y * max_stride * 2]); } } } } void butterfly_conv2d_backward_cuda(const at::Tensor&grad, const at::Tensor& twiddle, const at::Tensor& output, at::Tensor& d_twiddle, at::Tensor& d_input, const int kernel_size, const int padding, const int h_out, const int w_out, bool increasing_stride) { const int batch_size = output.size(1); const int nstack = twiddle.size(0); const int stack = kernel_size*kernel_size; const int n = d_input.size(1); // c_in const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_conv2d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto grad_a = grad.packed_accessor64<scalar_t, 3>(); const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); const auto output_a = output.packed_accessor64<scalar_t, 4>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); // to support out_channels > in_channels int c_out_ratio = nstack / stack; // dim3 block(stride); // dim3 grid(batch_size, c_out_ratio, stack); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); increasing_stride ? 
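// d_input is accumulated with atomicAdd (in the kernel above and in the save_d_input lambdas
// further down) because the same input pixel is read by up to kernel_size * kernel_size
// overlapping patches and, when out_channels > in_channels, by c_out_ratio stacks as well;
// all of those contributions have to be summed, conceptually
//   d_input[batch][c][i][j] = sum over every (stack, patch) pair that reads pixel (i, j).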
hipLaunchKernelGGL(( butterfly_conv2d_backward_cuda_kernel<scalar_t, accscalar_t, true>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_a, twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n, kernel_size, padding, h_out, w_out) : hipLaunchKernelGGL(( butterfly_conv2d_backward_cuda_kernel<scalar_t, accscalar_t, false>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_a, twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n, kernel_size, padding, h_out, w_out); }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_conv2d_backward_cuda failed with error code ", hipGetLastError()); } void butterfly_conv2d_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor&grad, at::Tensor& d_twiddle, at::Tensor& d_input, const int kernel_size, const int padding, const int h_out, const int w_out, bool increasing_stride) { const int batch_size = grad.size(0); // b_out = b_in * h_out * w_out const int nstack = twiddle.size(0); const int stack = kernel_size * kernel_size; const int n = d_input.size(1); // c_in const int log_n = int(log2((double) n)); const int c_out_ratio = nstack / stack; const int h_in = input.size(2); const int w_in = input.size(3); AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "butterfly_conv2d_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + threadIdx.y * stride * 2] = 0; else{ s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j]; } } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / 
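// butterfly_conv2d_forward_backward fuses the im2col-style patch gather (load_input) and
// scatter (save_d_input) with the generic butterfly_multiply_untied_forward_backward_cuda_kernel,
// so the patch matrix is never materialized in global memory. The switch below dispatches on
// log_stride = 0 ... 9, i.e. it covers in-channel counts up to 2 * 2^9 = 1024, consistent with
// the "assume in_channels <= 1024" comments elsewhere in this file.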
(h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // map back to b, c, h, w // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i < w_in && j < h_in && i >= 0 && j >= 0) { atomicAdd(&d_input_a[batch_idx][t][i][j], s_grad[t + threadIdx.y * stride * 2]); } } } }; switch (log_stride) { case 0: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 0>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 0>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 1: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 1>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 1>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 2: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 2>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 2>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 3: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 3>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 3>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 4: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 4>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 4>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 5: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 5>) , dim3(grid), dim3(block), 0, 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 5>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 6: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 6>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 6>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 7: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 7>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 7>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 8: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 8>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 8>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 9: increasing_stride ?hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 9>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) :hipLaunchKernelGGL(( butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 9>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "butterfly_conv2d_forward_backward_cuda failed with error code ", hipGetLastError()); } void bbt_conv2d_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output, const int kernel_size, const int padding, const int h_out, const int w_out) { const int b_in = input.size(0); const int n = input.size(1); /*c*/ const int nstack = twiddle.size(0); const int stack = kernel_size*kernel_size; const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); int batch_size = output.size(0); const int h_in = input.size(2); const int w_in = input.size(3); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "bbt_conv2d_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); // batch_size, c, h, w const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); // 
h*w*batch_size, nstack, c_in auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); // assume in_channels <= 1024 int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); // to support out_channels > in_channels int c_out_ratio = nstack / stack; // dim3 block(stride); // dim3 grid(batch_size, c_out_ratio, stack); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + threadIdx.y * stride * 2] = 0; else{ s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j]; } } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; hipLaunchKernelGGL(( bbt_multiply_untied_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, save_output, log_stride, batch_size, nblocks) ; }); TORCH_CHECK(hipGetLastError() == hipSuccess, "bbt_conv2d_cuda failed with error code ", hipGetLastError()); } void bbt_conv2d_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor&grad, at::Tensor& d_twiddle, at::Tensor& d_input, const int kernel_size, const int padding, const int h_out, const int w_out) { int batch_size = grad.size(0); // b_out = b_in * h_out * w_out const int nstack = twiddle.size(0); const int stack = kernel_size * kernel_size; const int n = d_input.size(1); // c_in const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); const int c_out_ratio = nstack / stack; const int h_in = input.size(2); const int w_in = input.size(3); AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "bbt_conv2d_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) { const int 
b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + threadIdx.y * stride * 2] = 0; else{ s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j]; } } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // map back to b, c, h, w // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i < w_in && j < h_in && i >= 0 && j >= 0) { atomicAdd(&d_input_a[batch_idx][t][i][j], s_grad[t + threadIdx.y * stride * 2]); } } } }; switch (nblocks) { case 1: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 1>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 2: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 2>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 3: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 3>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 4: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 4>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 5: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 5>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 6: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 6>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, 
log_stride, batch_size); break; case 7: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 7>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 8: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 8>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 9: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 9>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 10: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 10>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 11: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 11>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 12: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 12>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 13: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 13>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 14: hipLaunchKernelGGL(( bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 14>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "bbt_conv2d_forward_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, const at::PackedTensorAccessor64<scalar_t, 3> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 3> output_a) { const auto p = p_a[0]; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); // already divided by 2 for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { output_a[b][j][i] = (1 - p) * input_a[b][j][i] + p * permuted_input_a[b][j][i]; } } } } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, const at::PackedTensorAccessor64<scalar_t, 4> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 4> output_a) { const auto p = p_a[0]; const auto batch_size = input_a.size(0); 
const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { #pragma unroll for (int k = 0; k <= 1; ++k) { output_a[b][j][i][k] = (1 - p) * input_a[b][j][i][k] + p * permuted_input_a[b][j][i][k]; } } } } } void permutation_factor_even_odd_multiply_cuda(const at::Tensor& p, const at::Tensor& input, at::Tensor& output) { const auto batch_size = input.size(0); const auto n = input.size(1); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n / 2, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_even_odd_multiply", [&] { const auto p_a = p.packed_accessor64<scalar_t, 1>(); switch (input.dim()) { case 2: // real { const auto permuted_input = input.reshape({batch_size, n / 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2}); output = output.view({batch_size, 2, n / 2}); const auto input_a = input_folded.packed_accessor64<scalar_t, 3>(); const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 3>(); auto output_a = output.packed_accessor64<scalar_t, 3>(); hipLaunchKernelGGL(( permutation_factor_even_odd_multiply_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), p_a, input_a, permuted_input_a, output_a); output = output.view({batch_size, n}); break; } case 3: // complex { const auto permuted_input = input.reshape({batch_size, n / 2, 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2, 2}); output = output.view({batch_size, 2, n / 2, 2}); const auto input_a = input_folded.packed_accessor64<scalar_t, 4>(); const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 4>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); hipLaunchKernelGGL(( permutation_factor_even_odd_multiply_complex_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), p_a, input_a, permuted_input_a, output_a); output = output.view({batch_size, n, 2}); break; } default: AT_ERROR("permutation_factor_even_odd_multiply requires input dimension 2 or 3"); } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "permutation_factor_even_odd_multiply_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a, const at::PackedTensorAccessor64<scalar_t, 3> grad_reshaped_a, const at::PackedTensorAccessor64<scalar_t, 3> permuted_grad_a, const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, const at::PackedTensorAccessor64<scalar_t, 3> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 2> d_p_expanded_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a) { const scalar_t p = p_a[0]; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { d_p_expanded_a[b][i] = (permuted_input_a[b][0][i] - input_a[b][0][i]) * grad_reshaped_a[b][0][i] + (permuted_input_a[b][1][i] - 
input_a[b][1][i]) * grad_reshaped_a[b][1][i]; d_input_a[b][i][0] = (1 - p) * grad_a[b][i][0] + p * permuted_grad_a[b][i][0]; d_input_a[b][i][1] = (1 - p) * grad_a[b][i][1] + p * permuted_grad_a[b][i][1]; } } } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a, const at::PackedTensorAccessor64<scalar_t, 4> grad_reshaped_a, const at::PackedTensorAccessor64<scalar_t, 4> permuted_grad_a, const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, const at::PackedTensorAccessor64<scalar_t, 4> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 2> d_p_expanded_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a) { const scalar_t p = p_a[0]; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { d_p_expanded_a[b][i] = (permuted_input_a[b][0][i][0] - input_a[b][0][i][0]) * grad_reshaped_a[b][0][i][0] + (permuted_input_a[b][0][i][1] - input_a[b][0][i][1]) * grad_reshaped_a[b][0][i][1] + (permuted_input_a[b][1][i][0] - input_a[b][1][i][0]) * grad_reshaped_a[b][1][i][0] + (permuted_input_a[b][1][i][1] - input_a[b][1][i][1]) * grad_reshaped_a[b][1][i][1]; d_input_a[b][i][0][0] = (1 - p) * grad_a[b][i][0][0] + p * permuted_grad_a[b][i][0][0]; d_input_a[b][i][0][1] = (1 - p) * grad_a[b][i][0][1] + p * permuted_grad_a[b][i][0][1]; d_input_a[b][i][1][0] = (1 - p) * grad_a[b][i][1][0] + p * permuted_grad_a[b][i][1][0]; d_input_a[b][i][1][1] = (1 - p) * grad_a[b][i][1][1] + p * permuted_grad_a[b][i][1][1]; } } } void permutation_factor_even_odd_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& p, const at::Tensor& input, at::Tensor& d_p_expanded, at::Tensor& d_input) { const auto batch_size = input.size(0); const auto n = input.size(1); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n / 2, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_even_odd_multiply_backward", [&] { const auto p_a = p.packed_accessor64<scalar_t, 1>(); auto d_p_expanded_a = d_p_expanded.packed_accessor64<scalar_t, 2>(); switch (input.dim()) { case 2: // real { const auto permuted_input = input.reshape({batch_size, n / 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2}); const auto grad_reshaped = grad.reshape({batch_size, 2, n / 2}); const auto permuted_grad = grad.reshape({batch_size, 2, n / 2}).transpose(1, 2); const auto grad_folded = grad.reshape({batch_size, n / 2, 2}); d_input = d_input.view({batch_size, n/ 2, 2}); // Accessors const auto input_a = input_folded.packed_accessor64<scalar_t, 3>(); const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 3>(); const auto grad_reshaped_a = grad_reshaped.packed_accessor64<scalar_t, 3>(); const auto grad_a = grad_folded.packed_accessor64<scalar_t, 3>(); const auto permuted_grad_a = permuted_grad.packed_accessor64<scalar_t, 3>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); hipLaunchKernelGGL(( permutation_factor_even_odd_multiply_backward_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_a, grad_reshaped_a, permuted_grad_a, 
p_a, input_a, permuted_input_a, d_p_expanded_a, d_input_a); d_input = d_input.view({batch_size, n}); break; } case 3: // complex { const auto permuted_input = input.reshape({batch_size, n / 2, 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2, 2}); const auto grad_reshaped = grad.reshape({batch_size, 2, n / 2, 2}); const auto permuted_grad = grad.reshape({batch_size, 2, n / 2, 2}).transpose(1, 2); const auto grad_folded = grad.reshape({batch_size, n / 2, 2, 2}); d_input = d_input.view({batch_size, n/ 2, 2, 2}); // Accessors const auto input_a = input_folded.packed_accessor64<scalar_t, 4>(); const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 4>(); const auto grad_reshaped_a = grad_reshaped.packed_accessor64<scalar_t, 4>(); const auto grad_a = grad_folded.packed_accessor64<scalar_t, 4>(); const auto permuted_grad_a = permuted_grad.packed_accessor64<scalar_t, 4>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); hipLaunchKernelGGL(( permutation_factor_even_odd_multiply_complex_backward_cuda_kernel<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_a, grad_reshaped_a, permuted_grad_a, p_a, input_a, permuted_input_a, d_p_expanded_a, d_input_a); d_input = d_input.view({batch_size, n, 2}); break; } default: AT_ERROR("permutation_factor_even_odd_multiply_backward requires input dimension 2 or 3"); } }); TORCH_CHECK(hipGetLastError() == hipSuccess, "permutation_factor_even_odd_multiply_backward_cuda failed with error code ", hipGetLastError()); } template <typename scalar_t> __global__ void permutation_factor_reverse_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, at::PackedTensorAccessor64<scalar_t, 3> output_a) { const scalar_t p[2] = {p_a[0], p_a[1]}; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); // already divided by 2 for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { const scalar_t in[2] = {input_a[b][j][i], input_a[b][j][n - 1 - i]}; output_a[b][j][i] = (1 - p[j]) * in[0] + p[j] * in[1]; output_a[b][j][n - 1 - i] = p[j] * in[0] + (1 - p[j]) * in[1]; } } } } template <typename scalar_t> __global__ void permutation_factor_reverse_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 4> output_a) { const scalar_t p[2] = {p_a[0], p_a[1]}; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { #pragma unroll for (int k = 0; k <= 1; ++k) { const scalar_t in[2] = {input_a[b][j][i][k], input_a[b][j][n - 1 - i][k]}; output_a[b][j][i][k] = (1 - p[j]) * in[0] + p[j] * in[1]; output_a[b][j][n - 1 - i][k] = p[j] * in[0] + (1 - p[j]) * in[1]; } } } } } void permutation_factor_reverse_multiply_cuda(const at::Tensor& p, const at::Tensor& input, at::Tensor& output) { const auto batch_size = input.size(0); const auto n = input.size(1); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2); block.y = 
div_up(MAX_BLOCK_SIZE, block.x);
  dim3 grid(div_up(n / 4, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_reverse_multiply", [&] {
    const auto p_a = p.packed_accessor64<scalar_t, 1>();
    switch (input.dim()) {
      case 2:  // real
        {
          const auto input_folded = input.reshape({batch_size, 2, n / 2});
          output = output.view({batch_size, 2, n / 2});
          const auto input_a = input_folded.packed_accessor64<scalar_t, 3>();
          auto output_a = output.packed_accessor64<scalar_t, 3>();
          hipLaunchKernelGGL((permutation_factor_reverse_multiply_cuda_kernel<scalar_t>),
                             dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                             p_a, input_a, output_a);
          output = output.view({batch_size, n});
          break;
        }
      case 3:  // complex
        {
          const auto input_folded = input.reshape({batch_size, 2, n / 2, 2});
          output = output.view({batch_size, 2, n / 2, 2});
          const auto input_a = input_folded.packed_accessor64<scalar_t, 4>();
          auto output_a = output.packed_accessor64<scalar_t, 4>();
          hipLaunchKernelGGL((permutation_factor_reverse_multiply_complex_cuda_kernel<scalar_t>),
                             dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                             p_a, input_a, output_a);
          output = output.view({batch_size, n, 2});
          break;
        }
      default:
        AT_ERROR("permutation_factor_reverse_multiply requires input dimension 2 or 3");
    }
  });
  TORCH_CHECK(hipGetLastError() == hipSuccess, "permutation_factor_reverse_multiply_cuda failed with error code ", hipGetLastError());
}

template <typename scalar_t>
__global__ void permutation_factor_reverse_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a,
                                                                          const at::PackedTensorAccessor64<scalar_t, 1> p_a,
                                                                          const at::PackedTensorAccessor64<scalar_t, 3> input_a,
                                                                          at::PackedTensorAccessor64<scalar_t, 3> d_p_expanded_a,
                                                                          at::PackedTensorAccessor64<scalar_t, 3> d_input_a) {
  const scalar_t p[2] = {p_a[0], p_a[1]};
  const auto batch_size = input_a.size(0);
  const auto n = input_a.size(2);
  for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) {
    for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
      #pragma unroll
      for (int j = 0; j <= 1; ++j) {
        const scalar_t in[2] = {input_a[b][j][i], input_a[b][j][n - 1 - i]};
        const scalar_t g[2] = {grad_a[b][j][i], grad_a[b][j][n - 1 - i]};
        d_p_expanded_a[j][b][i] = (in[1] - in[0]) * (g[0] - g[1]);
        d_input_a[b][j][i] = (1 - p[j]) * g[0] + p[j] * g[1];
        d_input_a[b][j][n - 1 - i] = p[j] * g[0] + (1 - p[j]) * g[1];
      }
    }
  }
}

template <typename scalar_t>
__global__ void permutation_factor_reverse_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a,
                                                                                  const at::PackedTensorAccessor64<scalar_t, 1> p_a,
                                                                                  const at::PackedTensorAccessor64<scalar_t, 4> input_a,
                                                                                  at::PackedTensorAccessor64<scalar_t, 3> d_p_expanded_a,
                                                                                  at::PackedTensorAccessor64<scalar_t, 4> d_input_a) {
  const scalar_t p[2] = {p_a[0], p_a[1]};
  const auto batch_size = input_a.size(0);
  const auto n = input_a.size(2);
  for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) {
    for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
      #pragma unroll
      for (int j = 0; j <= 1; ++j) {
        scalar_t d_p_expanded_temp = 0;
        #pragma unroll
        for (int k = 0; k <= 1; ++k) {
          const scalar_t in[2] = {input_a[b][j][i][k], input_a[b][j][n - 1 - i][k]};
          const scalar_t g[2] = {grad_a[b][j][i][k], grad_a[b][j][n - 1 - i][k]};
          d_p_expanded_temp += (in[1] - in[0]) * (g[0] - g[1]);
          d_input_a[b][j][i][k] = (1 - p[j]) * g[0] + p[j] * g[1];
          d_input_a[b][j][n - 1 - i][k] = p[j] * g[0] + (1 - p[j]) * g[1];
        }
        d_p_expanded_a[j][b][i] = d_p_expanded_temp;
      }
    }
  }
}

void permutation_factor_reverse_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& p, const at::Tensor& input,
                                                       at::Tensor& d_p_expanded, at::Tensor& d_input) {
  const auto batch_size = input.size(0);
  const auto n = input.size(1);
  dim3 block;
  block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2);
  block.y = div_up(MAX_BLOCK_SIZE, block.x);
  dim3 grid(div_up(n / 4, block.x), div_up(batch_size, block.y * WORK_PER_THREAD));
  AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_reverse_multiply_backward", [&] {
    const auto p_a = p.packed_accessor64<scalar_t, 1>();
    auto d_p_expanded_a = d_p_expanded.packed_accessor64<scalar_t, 3>();
    switch (input.dim()) {
      case 2:  // real
        {
          const auto input_folded = input.reshape({batch_size, 2, n / 2});
          const auto grad_folded = grad.reshape({batch_size, 2, n / 2});
          d_input = d_input.view({batch_size, 2, n / 2});
          // Accessors
          const auto input_a = input_folded.packed_accessor64<scalar_t, 3>();
          const auto grad_a = grad_folded.packed_accessor64<scalar_t, 3>();
          auto d_input_a = d_input.packed_accessor64<scalar_t, 3>();
          hipLaunchKernelGGL((permutation_factor_reverse_multiply_backward_cuda_kernel<scalar_t>),
                             dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                             grad_a, p_a, input_a, d_p_expanded_a, d_input_a);
          d_input = d_input.view({batch_size, n});
          break;
        }
      case 3:  // complex
        {
          const auto input_folded = input.reshape({batch_size, 2, n / 2, 2});
          const auto grad_folded = grad.reshape({batch_size, 2, n / 2, 2});
          d_input = d_input.view({batch_size, 2, n / 2, 2});
          // Accessors
          const auto input_a = input_folded.packed_accessor64<scalar_t, 4>();
          const auto grad_a = grad_folded.packed_accessor64<scalar_t, 4>();
          auto d_input_a = d_input.packed_accessor64<scalar_t, 4>();
          hipLaunchKernelGGL((permutation_factor_reverse_multiply_complex_backward_cuda_kernel<scalar_t>),
                             dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
                             grad_a, p_a, input_a, d_p_expanded_a, d_input_a);
          d_input = d_input.view({batch_size, n, 2});
          break;
        }
      default:
        AT_ERROR("permutation_factor_reverse_multiply_backward requires input dimension 2 or 3");
    }
  });
  TORCH_CHECK(hipGetLastError() == hipSuccess, "permutation_factor_reverse_multiply_backward_cuda failed with error code ", hipGetLastError());
}
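// --- Illustrative reference sketch (not part of the original source files) ---
// The two permutation-factor operations above blend the input with a permuted copy of
// itself: the even/odd factor mixes the input split into halves with the same input
// split into even/odd-indexed entries, weighted by a scalar p, while the reverse factor
// mixes each half of the input with its own reversal, weighted by p[0] and p[1].
// Below is a minimal, self-contained host-side sketch of the real-valued reverse case,
// intended only as a sanity-check aid; the function and variable names are illustrative
// assumptions and are not defined anywhere in these files.
#include <cassert>
#include <cstdio>
#include <vector>

// CPU reference: split x into two halves and blend half j with its reversal using p[j].
std::vector<float> permutation_factor_reverse_reference(const float p[2], const std::vector<float>& x) {
  const size_t n = x.size();
  assert(n % 2 == 0);
  const size_t m = n / 2;
  std::vector<float> out(n);
  for (int j = 0; j < 2; ++j) {
    for (size_t i = 0; i < m; ++i) {
      const float fwd = x[j * m + i];            // entry i within half j
      const float rev = x[j * m + (m - 1 - i)];  // reversed entry within half j
      out[j * m + i] = (1 - p[j]) * fwd + p[j] * rev;
    }
  }
  return out;
}

int main() {
  const float p[2] = {0.25f, 0.75f};
  const std::vector<float> x = {1, 2, 3, 4, 5, 6, 7, 8};
  for (float v : permutation_factor_reverse_reference(p, x)) std::printf("%g ", v);
  std::printf("\n");  // expected: 1.75 2.25 2.75 3.25 7.25 6.75 6.25 5.75
  return 0;
}
// Comparing the HIP/CUDA kernels against a reference like this is a quick way to check
// the reshape/transpose bookkeeping in the dispatch functions above.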
fc16aae4c0385d59c78efcf5cebb8adc50ab15ab.cu
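// --- Orientation sketch (illustrative only; not part of the .cu listing that follows) ---
// The complex butterfly kernels below expand each complex 2x2 twiddle product into
// explicit real/imaginary arithmetic. For reference, the same product written with
// std::complex; the helper name here is an assumption, not something defined in the file.
#include <complex>
#include <cstdio>

// One complex butterfly factor: [y0; y1] = [[t00, t01], [t10, t11]] * [x0; x1].
// The kernels compute exactly this, but with each complex multiply expanded into
// (a*c - b*d, a*d + b*c) over separate real/imaginary components.
static void butterfly_factor_2x2(const std::complex<float> t[2][2],
                                 const std::complex<float> x[2],
                                 std::complex<float> y[2]) {
  y[0] = t[0][0] * x[0] + t[0][1] * x[1];
  y[1] = t[1][0] * x[0] + t[1][1] * x[1];
}

int main() {
  const std::complex<float> t[2][2] = {{{1.f, 0.f}, {0.f, 1.f}}, {{0.f, -1.f}, {1.f, 0.f}}};
  const std::complex<float> x[2] = {{1.f, 2.f}, {3.f, 4.f}};
  std::complex<float> y[2];
  butterfly_factor_2x2(t, x, y);
  // prints (-3, 5) (5, 3) for the values above
  std::printf("(%g, %g) (%g, %g)\n", y[0].real(), y[0].imag(), y[1].real(), y[1].imag());
  return 0;
}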
#include <stdio.h>
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCAtomics.cuh>  // For atomicAdd on Half
#include <thrust/complex.h>
#include <thrust/pair.h>
#include <thrust/tuple.h>

// For compatibility with Pytorch 1.1
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif

// #define thc_cos THCNumerics<scalar_t>::cos
// #define thc_sin THCNumerics<scalar_t>::sin
#define thc_cos std::cos
#define thc_sin std::sin

#define FULL_MASK 0xffffffff

static constexpr int MAX_BLOCK_SIZE = 1024;
static constexpr int WORK_PER_THREAD = 16;
static constexpr int ELEMENTARY_SIZE = MAX_BLOCK_SIZE / 2;
static constexpr int MAX_N_FACTORS = 10;

template <typename T, size_t N>
using CudaAcsr32 = at::PackedTensorAccessor32<T, N, at::RestrictPtrTraits>;

__host__ __device__ static inline int64_t div_up(int64_t a, int64_t b) { return (a + b - 1) / b; }
__host__ __device__ static inline int div_up(int a, int b) { return (a + b - 1) / b; }

template <typename scalar_t>
static __device__ __forceinline__ void atomicAdd(thrust::complex<scalar_t> *address, thrust::complex<scalar_t> val) {
  atomicAdd((scalar_t *)address, val.real());
  atomicAdd((scalar_t *)address + 1, val.imag());
}

template <typename scalar_t>
static __device__ __forceinline__ thrust::complex<scalar_t> __shfl_down_sync(unsigned int mask, thrust::complex<scalar_t> value,
                                                                             unsigned int delta, int width = warpSize) {
  return thrust::complex<scalar_t>(__shfl_down_sync(mask, value.real(), delta, width),
                                   __shfl_down_sync(mask, value.imag(), delta, width));
}

// 2x2 matrix [a, b; c, d] multiplied by a vector [x, y]
template <typename scalar_t>
static __device__ __forceinline__ thrust::pair<scalar_t, scalar_t> mult2x2(scalar_t a, scalar_t b, scalar_t c, scalar_t d,
                                                                           scalar_t x, scalar_t y) {
  return thrust::make_pair(a * x + b * y, c * x + d * y);
}

template <typename scalar_t>
__global__ void butterfly_factor_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> twiddle_a,
                                                      const at::PackedTensorAccessor64<scalar_t, 3> input_a,
                                                      at::PackedTensorAccessor64<scalar_t, 3> output_a) {
  const auto batch_size = input_a.size(0);
  const auto n = input_a.size(2);
  for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    const scalar_t twiddle_val[2][2] = {{twiddle_a[0][0][i], twiddle_a[0][1][i]},
                                        {twiddle_a[1][0][i], twiddle_a[1][1][i]}};
    for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
      const scalar_t input_val[2] = {input_a[b][0][i], input_a[b][1][i]};
      #pragma unroll
      for (int j = 0; j <= 1; ++j) {
        output_a[b][j][i] = twiddle_val[j][0] * input_val[0] + twiddle_val[j][1] * input_val[1];
      }
    }
  }
}

template <typename scalar_t>
__global__ void butterfly_factor_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a,
                                                              const at::PackedTensorAccessor64<scalar_t, 4> input_a,
                                                              at::PackedTensorAccessor64<scalar_t, 4> output_a) {
  const auto batch_size = input_a.size(0);
  const auto n = input_a.size(2);
  for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    const scalar_t twiddle_val[2][2][2] = {{{twiddle_a[0][0][i][0], twiddle_a[0][0][i][1]}, {twiddle_a[0][1][i][0], twiddle_a[0][1][i][1]}},
                                           {{twiddle_a[1][0][i][0], twiddle_a[1][0][i][1]}, {twiddle_a[1][1][i][0], twiddle_a[1][1][i][1]}}};
    for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) {
      const scalar_t input_val[2][2] = {{input_a[b][0][i][0], input_a[b][0][i][1]},
{input_a[b][1][i][0], input_a[b][1][i][1]}}; #pragma unroll for (int j = 0; j <= 1; ++j) { output_a[b][j][i][0] = twiddle_val[j][0][0] * input_val[0][0] - twiddle_val[j][0][1] * input_val[0][1] + twiddle_val[j][1][0] * input_val[1][0] - twiddle_val[j][1][1] * input_val[1][1]; output_a[b][j][i][1] = twiddle_val[j][0][0] * input_val[0][1] + twiddle_val[j][0][1] * input_val[0][0] + twiddle_val[j][1][0] * input_val[1][1] + twiddle_val[j][1][1] * input_val[1][0]; } } } } void butterfly_factor_multiply_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output) { const auto batch_size = input.size(0); const auto n = input.size(2); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_cuda", [&] { switch (input.dim()) { case 3: // real { const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 3>(); const auto input_a = input.packed_accessor64<scalar_t, 3>(); auto output_a = output.packed_accessor64<scalar_t, 3>(); butterfly_factor_multiply_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a); break; } case 4: // complex { const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); const auto input_a = input.packed_accessor64<scalar_t, 4>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); butterfly_factor_multiply_complex_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a); break; } default: AT_ERROR("butterfly_factor_multiply requires input dimension 3 or 4"); } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_factor_multiply_cuda failed with error code ", cudaGetLastError()); } template <typename T> __device__ __forceinline__ T sum_strided(T val, T *temp, int stride, int len, int thread_id) { if (stride >= len) { return val; } // Warp reduction for (int offset = warpSize / 2; offset >= stride; offset /= 2) { val += __shfl_down_sync(FULL_MASK, val, offset); } // Block reduction int block_reduction_stride = max(warpSize, stride); int n_block_reductions = div_up(len, block_reduction_stride); __syncthreads(); // Otherwise previous reads might be wrong if (thread_id < len) { temp[(thread_id % block_reduction_stride) * n_block_reductions + (thread_id / block_reduction_stride)] = val; } __syncthreads(); if (thread_id < n_block_reductions * stride) { val = temp[thread_id]; for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) { val += __shfl_down_sync(FULL_MASK, val, offset); } } return val; } template <typename scalar_t> __global__ void butterfly_factor_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a, const at::PackedTensorAccessor64<scalar_t, 3> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, // at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_expanded_a, at::PackedTensorAccessor64<scalar_t, 3> d_twiddle_expanded_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a) { const int batch_size = input_a.size(0); const int n = input_a.size(2); for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const scalar_t twiddle_val[2][2] = {{twiddle_a[0][0][i], twiddle_a[0][1][i]}, {twiddle_a[1][0][i], twiddle_a[1][1][i]}}; scalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; const int b_start = blockIdx.y * blockDim.y + threadIdx.y; // for (int64_t b = blockIdx.y * 
blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { for (int b = b_start; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t input_val[2] = {input_a[b][0][i], input_a[b][1][i]}; const scalar_t grad_val[2] = {grad_a[b][0][i], grad_a[b][1][i]}; #pragma unroll for (int j = 0; j <= 1; ++j) { // d_twiddle_expanded_a[b][j][0][i] = grad_val[j] * input_val[0]; // d_twiddle_expanded_a[b][j][1][i] = grad_val[j] * input_val[1]; // atomicAdd(&d_twiddle_expanded_a[j][0][i], grad_val[j] * input_val[0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], grad_val[j] * input_val[1]); d_twiddle_val[j][0] += grad_val[j] * input_val[0]; d_twiddle_val[j][1] += grad_val[j] * input_val[1]; d_input_a[b][j][i] = twiddle_val[0][j] * grad_val[0] + twiddle_val[1][j] * grad_val[1]; } } // int tid = threadIdx.x + threadIdx.y * blockDim.x; // int nthreads = blockDim.x * blockDim.y; // __shared__ scalar_t temp_storage[MAX_BLOCK_SIZE]; // if (n < nthreads) { // int lane = tid % warpSize; // int wid = tid / warpSize; // #pragma unroll // for (int j = 0; j <= 1; ++j) { // d_twiddle_val[j][0] = sum_strided(d_twiddle_val[j][0], temp_storage, n, nthreads, tid); // d_twiddle_val[j][1] = sum_strided(d_twiddle_val[j][1], temp_storage, n, nthreads, tid); // } // int reduction_stride = max(warpSize, n); // int n_block_reductions = div_up(nthreads, reduction_stride); // if ((lane % n_block_reductions == 0) && (wid < n)) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][tid / n_block_reductions], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][tid / n_block_reductions], d_twiddle_val[j][1]); // } // } // } else { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); // } // } // Warp reduction for (int offset = warpSize / 2; offset >= n; offset /= 2) { #pragma unroll for (int j = 0; j <= 1; ++j) { d_twiddle_val[j][0] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][0], offset); d_twiddle_val[j][1] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][1], offset); } } __shared__ scalar_t s_d_twiddle[MAX_BLOCK_SIZE * 4]; // // const scalar_t (*temp)[n] = (scalar_t (*)[n])(&s_d_twiddle[0]); int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; int lane = tid % warpSize; int wid = tid / warpSize; if (n < nthreads) { __syncthreads(); s_d_twiddle[tid] = 0; s_d_twiddle[tid + MAX_BLOCK_SIZE] = 0; s_d_twiddle[tid + 2 * MAX_BLOCK_SIZE] = 0; s_d_twiddle[tid + 3 * MAX_BLOCK_SIZE] = 0; __syncthreads(); if (lane < n) { atomicAdd(&s_d_twiddle[i], d_twiddle_val[0][0]); atomicAdd(&s_d_twiddle[i + MAX_BLOCK_SIZE], d_twiddle_val[0][1]); atomicAdd(&s_d_twiddle[i + 2 * MAX_BLOCK_SIZE], d_twiddle_val[1][0]); atomicAdd(&s_d_twiddle[i + 3 * MAX_BLOCK_SIZE], d_twiddle_val[1][1]); } __syncthreads(); if (tid < n) { atomicAdd(&d_twiddle_expanded_a[0][0][i], s_d_twiddle[i]); atomicAdd(&d_twiddle_expanded_a[0][1][i], s_d_twiddle[i + MAX_BLOCK_SIZE]); atomicAdd(&d_twiddle_expanded_a[1][0][i], s_d_twiddle[i + 2 * MAX_BLOCK_SIZE]); atomicAdd(&d_twiddle_expanded_a[1][1][i], s_d_twiddle[i + 3 * MAX_BLOCK_SIZE]); } } else { #pragma unroll for (int j = 0; j <= 1; ++j) { atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); } } // // Block reduction // if (n < nthreads) { // // if (n < 0) { // int reduction_stride = max(warpSize, n); // int 
n_block_reductions = div_up(nthreads, reduction_stride); // if (lane < n) { // // When filling in the shared memory, we assume that n is a power of 2, // // otherwise we might have uninitialized values in the array. // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride)] = d_twiddle_val[0][0]; // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + n * n_block_reductions] = d_twiddle_val[0][1]; // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + 2 * n * n_block_reductions] = d_twiddle_val[1][0]; // s_d_twiddle[(tid % n) * n_block_reductions + (tid / reduction_stride) + 3 * n * n_block_reductions] = d_twiddle_val[1][1]; // } // __syncthreads(); // // if (tid == 0) { // // for (int j = 0; j < 4 * n * n_block_reductions; ++j) { // // printf("%i: %f\n", j, s_d_twiddle[j]); // // } // // } // if (wid < n) { // d_twiddle_val[0][0] = s_d_twiddle[tid]; // d_twiddle_val[0][1] = s_d_twiddle[tid + n * n_block_reductions]; // d_twiddle_val[1][0] = s_d_twiddle[tid + 2 * n * n_block_reductions]; // d_twiddle_val[1][1] = s_d_twiddle[tid + 3 * n * n_block_reductions]; // for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // d_twiddle_val[j][0] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][0], offset); // d_twiddle_val[j][1] += __shfl_down_sync(FULL_MASK, d_twiddle_val[j][1], offset); // } // } // if (lane % n_block_reductions == 0) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][tid / n_block_reductions], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][tid / n_block_reductions], d_twiddle_val[j][1]); // } // } // } // // } else { // } else if (lane < n) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); // } // } // if (lane < n) { // #pragma unroll // for (int j = 0; j <= 1; ++j) { // atomicAdd(&d_twiddle_expanded_a[j][0][i], d_twiddle_val[j][0]); // atomicAdd(&d_twiddle_expanded_a[j][1][i], d_twiddle_val[j][1]); // } // } // #pragma unroll // for (int j = 0; j <= 1; ++j) { // d_twiddle_expanded_a[b_start][j][0][i] = d_twiddle_val[j][0]; // d_twiddle_expanded_a[b_start][j][1][i] = d_twiddle_val[j][1]; // } } } template <typename scalar_t> __global__ void butterfly_factor_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a, const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_expanded_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a) { const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { const scalar_t twiddle_val[2][2][2] = {{{twiddle_a[0][0][i][0], twiddle_a[0][0][i][1]}, {twiddle_a[0][1][i][0], twiddle_a[0][1][i][1]}}, {{twiddle_a[1][0][i][0], twiddle_a[1][0][i][1]}, {twiddle_a[1][1][i][0], twiddle_a[1][1][i][1]}}}; for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t input_val[2][2] = {{input_a[b][0][i][0], input_a[b][0][i][1]}, {input_a[b][1][i][0], input_a[b][1][i][1]}}; const scalar_t grad_val[2][2] = {{grad_a[b][0][i][0], grad_a[b][0][i][1]}, {grad_a[b][1][i][0], grad_a[b][1][i][1]}}; #pragma unroll for (int j = 0; j <= 1; ++j) { 
d_twiddle_expanded_a[b][j][0][i][0] = grad_val[j][0] * input_val[0][0] + grad_val[j][1] * input_val[0][1]; d_twiddle_expanded_a[b][j][0][i][1] = -grad_val[j][0] * input_val[0][1] + grad_val[j][1] * input_val[0][0]; d_twiddle_expanded_a[b][j][1][i][0] = grad_val[j][0] * input_val[1][0] + grad_val[j][1] * input_val[1][1]; d_twiddle_expanded_a[b][j][1][i][1] = -grad_val[j][0] * input_val[1][1] + grad_val[j][1] * input_val[1][0]; d_input_a[b][j][i][0] = twiddle_val[0][j][0] * grad_val[0][0] + twiddle_val[0][j][1] * grad_val[0][1] + twiddle_val[1][j][0] * grad_val[1][0] + twiddle_val[1][j][1] * grad_val[1][1]; d_input_a[b][j][i][1] = twiddle_val[0][j][0] * grad_val[0][1] - twiddle_val[0][j][1] * grad_val[0][0] + twiddle_val[1][j][0] * grad_val[1][1] - twiddle_val[1][j][1] * grad_val[1][0]; } } } } void butterfly_factor_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& d_twiddle_expanded, at::Tensor& d_input) { const auto batch_size = input.size(0); const auto n = input.size(2); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); // AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_backward_cuda", [&] { AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_factor_multiply_backward_cuda", [&] { switch (input.dim()) { case 3: // real { const auto grad_a = grad.packed_accessor64<scalar_t, 3>(); const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 3>(); const auto input_a = input.packed_accessor64<scalar_t, 3>(); // auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 4>(); auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 3>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); butterfly_factor_multiply_backward_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, twiddle_a, input_a, d_twiddle_expanded_a, d_input_a); break; } case 4: // complex { const auto grad_a = grad.packed_accessor64<scalar_t, 4>(); const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); const auto input_a = input.packed_accessor64<scalar_t, 4>(); auto d_twiddle_expanded_a = d_twiddle_expanded.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); butterfly_factor_multiply_complex_backward_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, twiddle_a, input_a, d_twiddle_expanded_a, d_input_a); break; } default: AT_ERROR("butterfly_factor_multiply requires input dimension 3 or 4"); } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_factor_multiply_backward_cuda failed with error code ", cudaGetLastError()); } template <int LENGTH, typename T> __device__ __forceinline__ void sum_strided_atomic(T (&val)[LENGTH], T *storage, int stride, int nthreads, int tid) { // Warp reduction for (int offset = warpSize / 2; offset >= stride; offset /= 2) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] += __shfl_down_sync(FULL_MASK, val[j], offset); } } // Block reduction __syncthreads(); // Need this, otherwise might overwrite before other threads can read twiddle values if (tid < stride) { #pragma unroll for (int j = 0; j < LENGTH; j++) { storage[j * stride + tid] = 0; } } __syncthreads(); int lane = tid & (warpSize - 1); // int lane = tid % waprSize; if (lane < stride) { #pragma unroll for (int j = 0; j < LENGTH; j++) { // 
atomicAdd(&storage[j * stride + tid % stride], val[j]); atomicAdd(&storage[j * stride + (tid & (stride - 1))], val[j]); } } __syncthreads(); } /* Sum elements that are @stride apart by exchanging, using shared memory. After the function, threads with @tid < n_block_reductions * stride and @tid % n_block_reductions == 0 contains the sums. */ template <int LENGTH, typename T> __device__ __forceinline__ void sum_strided_exchange(T (&val)[LENGTH], T *storage, int log_stride, int nthreads, int tid) { int stride = 1 << log_stride; // Warp reduction for (int offset = warpSize / 2; offset >= stride; offset /= 2) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] += __shfl_down_sync(FULL_MASK, val[j], offset); } } int block_reduction_stride = max(warpSize, stride); // int n_block_reductions = div_up(nthreads, block_reduction_stride); int n_block_reductions = (nthreads + block_reduction_stride - 1) >> max(5, log_stride); int lane = tid % warpSize; __syncthreads(); // Otherwise previous reads might be wrong if ((tid < nthreads) && (lane < stride)) { #pragma unroll for (int j = 0; j < LENGTH; j++) { // storage[j * nthreads + (tid % block_reduction_stride) * n_block_reductions + (tid / block_reduction_stride)] = val[j]; storage[j * nthreads + (tid & (block_reduction_stride - 1)) * n_block_reductions + (tid / block_reduction_stride)] = val[j]; } } __syncthreads(); if (tid < n_block_reductions * stride) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] = storage[j * nthreads + tid]; } for (int offset = n_block_reductions / 2; offset > 0; offset /= 2) { #pragma unroll for (int j = 0; j < LENGTH; j++) { val[j] += __shfl_down_sync(FULL_MASK, val[j], offset); } } } } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_intermediate_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i] = output_a[first_idx][b][s][input_base_idx + i]; } int i = threadIdx.x; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = twiddle_a[s][twiddle_start_idx + i][0][0]; s_twiddle[i][0][1] = twiddle_a[s][twiddle_start_idx + i][0][1]; s_twiddle[i][1][0] = twiddle_a[s][twiddle_start_idx + i][1][0]; s_twiddle[i][1][1] = twiddle_a[s][twiddle_start_idx + i][1][1]; } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; thrust::tie(s_input[pos], s_input[pos + stride]) = mult2x2(twiddle_val[0][0], twiddle_val[0][1], twiddle_val[1][0], twiddle_val[1][1], input_val[0], input_val[1]); if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos] = s_input[pos]; output_a[idx+1][b][s][input_base_idx + pos + stride] = s_input[pos + stride]; } } } } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_intermediate_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; // __shared__ complex_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_input_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_input = (complex_t *)&s_input_storage[0]; // To avoid warning about race-condition when initializing complex_t // __shared__ complex_t s_twiddle[ELEMENTARY_SIZE][2][2]; __shared__ scalar_t s_twiddle_storage[ELEMENTARY_SIZE][2][2][2]; complex_t (* s_twiddle)[2][2] = (complex_t (*)[2][2])&s_twiddle_storage[0]; // To avoid warning about race-condition when initializing complex_t int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i] = complex_t(output_a[first_idx][b][s][input_base_idx + i][0], output_a[first_idx][b][s][input_base_idx + i][1]); } int i = threadIdx.x; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][0][0], twiddle_a[s][twiddle_start_idx + i][0][0][1]); s_twiddle[i][0][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][1][0], twiddle_a[s][twiddle_start_idx + i][0][1][1]); s_twiddle[i][1][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][0][0], twiddle_a[s][twiddle_start_idx + i][1][0][1]); s_twiddle[i][1][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][1][0], twiddle_a[s][twiddle_start_idx + i][1][1][1]); } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const complex_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read const complex_t input_val[2] = {s_input[pos], s_input[pos + stride]}; thrust::tie(s_input[pos], s_input[pos + stride]) = mult2x2(twiddle_val[0][0], twiddle_val[0][1], twiddle_val[1][0], twiddle_val[1][1], input_val[0], input_val[1]); if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos][0] = s_input[pos].real(); output_a[idx+1][b][s][input_base_idx + pos][1] = s_input[pos].imag(); output_a[idx+1][b][s][input_base_idx + pos + stride][0] = s_input[pos + stride].real(); output_a[idx+1][b][s][input_base_idx + pos + stride][1] = s_input[pos + stride].imag(); } } } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][twiddle_idx][0][0], twiddle_a[s][twiddle_idx][0][1]}, {twiddle_a[s][twiddle_idx][1][0], twiddle_a[s][twiddle_idx][1][1]}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; output_a[idx+1][b][s][pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; output_a[idx+1][b][s][pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][twiddle_idx][0][0][0], twiddle_a[s][twiddle_idx][0][0][1]), complex_t(twiddle_a[s][twiddle_idx][0][1][0], twiddle_a[s][twiddle_idx][0][1][1])}, {complex_t(twiddle_a[s][twiddle_idx][1][0][0], twiddle_a[s][twiddle_idx][1][0][1]), complex_t(twiddle_a[s][twiddle_idx][1][1][0], twiddle_a[s][twiddle_idx][1][1][1])}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; const complex_t output_val[2] = {twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1], twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]}; output_a[idx+1][b][s][pos][0] = output_val[0].real(); output_a[idx+1][b][s][pos][1] = output_val[0].imag(); output_a[idx+1][b][s][pos + stride][0] = output_val[1].real(); output_a[idx+1][b][s][pos + stride][1] = output_val[1].imag(); } } void butterfly_multiply_intermediate_cuda(const at::Tensor& twiddle, at::Tensor& output, bool increasing_stride, bool return_intermediates) { const int batch_size = output.size(1); const int nstack = twiddle.size(0); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_intermediate_cuda", [&] { if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); return_intermediates ? butterfly_multiply_intermediate_cuda_kernel<scalar_t, true, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_intermediate_cuda_kernel<scalar_t, true, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_onestep_cuda_kernel<scalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_onestep_cuda_kernel<scalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); return_intermediates ? 
butterfly_multiply_intermediate_cuda_kernel<scalar_t, false, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_intermediate_cuda_kernel<scalar_t, false, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); auto output_a = output.packed_accessor64<scalar_t, 5>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); return_intermediates ? butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, true, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, true, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_onestep_complex_cuda_kernel<scalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_onestep_complex_cuda_kernel<scalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); return_intermediates ? butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, false, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_intermediate_complex_cuda_kernel<scalar_t, false, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_intermediate_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle // __shared__ scalar_t s_d_twiddle[ELEMENTARY_SIZE * 4]; // accscalar_t (* s_d_twiddle)[2][2] = (accscalar_t (*)[2][2])&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. 
int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i] = d_input_a[b][s][input_base_idx + i]; } int i = threadIdx.x; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = twiddle_a[s][twiddle_start_idx + i][0][0]; s_twiddle[i][0][1] = twiddle_a[s][twiddle_start_idx + i][0][1]; s_twiddle[i][1][0] = twiddle_a[s][twiddle_start_idx + i][1][0]; s_twiddle[i][1][1] = twiddle_a[s][twiddle_start_idx + i][1][1]; } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos], output_a[idx][b][s][input_base_idx + pos + stride]}; accscalar_t d_twiddle_val[2][2] = {{grad_val[0] * input_val[0], grad_val[0] * input_val[1]}, {grad_val[1] * input_val[0], grad_val[1] * input_val[1]}}; int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid); if (tid < stride) { atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0], s_d_twiddle[twiddle_idx]); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1], s_d_twiddle[twiddle_idx + stride]); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0], s_d_twiddle[twiddle_idx + 2 * stride]); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1], s_d_twiddle[twiddle_idx + 3 * stride]); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read // sum_strided_exchange(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, log_stride, nthreads, tid); // int block_reduction_stride = max(warpSize, stride); // // int n_block_reductions = div_up(nthreads, block_reduction_stride); // int n_block_reductions = (nthreads + block_reduction_stride - 1) >> max(5, log_stride); // // if ((tid < n_block_reductions * stride) && (tid % n_block_reductions == 0)) { // if ((tid < n_block_reductions * stride) && ((tid & (n_block_reductions - 1)) == 0)) { // // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + tid / n_block_reductions][0][0], d_twiddle_val[0][0]); // // Trying to avoid integer division // int log_n_block_reductions = log_max_stride - max(5, log_stride); // Use the fact that nthreads == max_stride and warpSize == 32 // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][0][0], d_twiddle_val[0][0]); // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][0][1], 
d_twiddle_val[0][1]); // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][1][0], d_twiddle_val[1][0]); // atomicAdd(&d_twiddle_a[s][twiddle_start_idx + (tid >> log_n_block_reductions)][1][1], d_twiddle_val[1][1]); // } } for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i] = s_grad[i]; } } } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.x * blockDim.x * 2; // __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2][2]; __shared__ scalar_t s_grad_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_grad = (complex_t *)&s_grad_storage[0]; // To avoid warning about race-condition when initializing complex_t // __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle __shared__ accscalar_t s_twiddle_storage[ELEMENTARY_SIZE][2][2][2]; acccomplex_t (* s_twiddle)[2][2] = (acccomplex_t (*)[2][2])&s_twiddle_storage[0]; // To avoid warning about race-condition when initializing complex_t // __shared__ scalar_t s_d_twiddle[ELEMENTARY_SIZE * 4]; // acccomplex_t (* s_d_twiddle)[2][2] = (acccomplex_t (*)[2][2])&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. acccomplex_t* s_d_twiddle = (acccomplex_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. int b = blockIdx.y * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i] = complex_t(d_input_a[b][s][input_base_idx + i][0], d_input_a[b][s][input_base_idx + i][1]); } int i = threadIdx.x; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if (i < stride) { s_twiddle[i][0][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][0][0], twiddle_a[s][twiddle_start_idx + i][0][0][1]); s_twiddle[i][0][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][0][1][0], twiddle_a[s][twiddle_start_idx + i][0][1][1]); s_twiddle[i][1][0] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][0][0], twiddle_a[s][twiddle_start_idx + i][1][0][1]); s_twiddle[i][1][1] = complex_t(twiddle_a[s][twiddle_start_idx + i][1][1][0], twiddle_a[s][twiddle_start_idx + i][1][1][1]); } int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; __syncthreads(); const complex_t twiddle_val[2][2] = {{s_twiddle[twiddle_idx][0][0], s_twiddle[twiddle_idx][0][1]}, {s_twiddle[twiddle_idx][1][0], s_twiddle[twiddle_idx][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then const complex_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1]; s_grad[pos + stride] = thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]; const complex_t input_val[2] = {complex_t(output_a[idx][b][s][input_base_idx + pos][0], output_a[idx][b][s][input_base_idx + pos][1]), complex_t(output_a[idx][b][s][input_base_idx + pos + stride][0], output_a[idx][b][s][input_base_idx + pos + stride][1])}; acccomplex_t d_twiddle_val[2][2] = {{grad_val[0] * thrust::conj(input_val[0]), grad_val[0] * thrust::conj(input_val[1])}, {grad_val[1] * thrust::conj(input_val[0]), grad_val[1] * thrust::conj(input_val[1])}}; int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<acccomplex_t (&)[4]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid); if (tid < stride) { atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0][0], s_d_twiddle[twiddle_idx].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][0][1], s_d_twiddle[twiddle_idx].imag()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1][0], s_d_twiddle[twiddle_idx + stride].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][0][1][1], s_d_twiddle[twiddle_idx + stride].imag()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0][0], s_d_twiddle[twiddle_idx + 2 * stride].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][0][1], s_d_twiddle[twiddle_idx + 2 * stride].imag()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1][0], s_d_twiddle[twiddle_idx + 3 * stride].real()); atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx][1][1][1], s_d_twiddle[twiddle_idx + 3 * stride].imag()); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read } for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i][0] = s_grad[i].real(); d_input_a[b][s][input_base_idx + i][1] = s_grad[i].imag(); } } } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 4> d_twiddle_a, 
at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][twiddle_idx][0][0], twiddle_a[s][twiddle_idx][0][1]}, {twiddle_a[s][twiddle_idx][1][0], twiddle_a[s][twiddle_idx][1][1]}}; accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const scalar_t grad_val[2] = {d_input_a[b][s][pos], d_input_a[b][s][pos + stride]}; d_input_a[b][s][pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; d_input_a[b][s][pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; d_twiddle_val[0][0] += grad_val[0] * input_val[0]; d_twiddle_val[0][1] += grad_val[0] * input_val[1]; d_twiddle_val[1][0] += grad_val[1] * input_val[0]; d_twiddle_val[1][1] += grad_val[1] * input_val[1]; } atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1], d_twiddle_val[1][1]); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int twiddle_idx = twiddle_start_idx + low_order_bits; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][twiddle_idx][0][0][0], twiddle_a[s][twiddle_idx][0][0][1]), complex_t(twiddle_a[s][twiddle_idx][0][1][0], twiddle_a[s][twiddle_idx][0][1][1])}, {complex_t(twiddle_a[s][twiddle_idx][1][0][0], twiddle_a[s][twiddle_idx][1][0][1]), complex_t(twiddle_a[s][twiddle_idx][1][1][0], twiddle_a[s][twiddle_idx][1][1][1])}}; acccomplex_t d_twiddle_val[2][2] = {{{0, 0}, {0, 0}}, {{0, 0}, {0, 0}}}; for (int b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { const complex_t grad_val[2] = {complex_t(d_input_a[b][s][pos][0], d_input_a[b][s][pos][1]), complex_t(d_input_a[b][s][pos + stride][0], d_input_a[b][s][pos + stride][1])}; const complex_t d_input_val[2] = {thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1], thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]}; d_input_a[b][s][pos][0] = d_input_val[0].real(); d_input_a[b][s][pos][1] = d_input_val[0].imag(); d_input_a[b][s][pos + stride][0] = d_input_val[1].real(); d_input_a[b][s][pos + stride][1] = d_input_val[1].imag(); const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; d_twiddle_val[0][0] += grad_val[0] * thrust::conj(input_val[0]); d_twiddle_val[0][1] += grad_val[0] * thrust::conj(input_val[1]); d_twiddle_val[1][0] += grad_val[1] * thrust::conj(input_val[0]); d_twiddle_val[1][1] += grad_val[1] * thrust::conj(input_val[1]); } atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0][0], d_twiddle_val[0][0].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][0][1], d_twiddle_val[0][0].imag()); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1][0], d_twiddle_val[0][1].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][0][1][1], d_twiddle_val[0][1].imag()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0][0], d_twiddle_val[1][0].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][0][1], d_twiddle_val[1][0].imag()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1][0], d_twiddle_val[1][1].real()); atomicAdd(&d_twiddle_a[s][twiddle_idx][1][1][1], d_twiddle_val[1][1].imag()); } void butterfly_multiply_intermediate_backward_cuda(const at::Tensor& twiddle, const at::Tensor& output, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { const int batch_size = output.size(1); const int nstack = output.size(2); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_intermediate_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 4>(); const auto output_a = output.packed_accessor64<scalar_t, 4>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 4>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 
block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_backward_onestep_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); butterfly_multiply_intermediate_backward_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); butterfly_multiply_intermediate_backward_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_backward_onestep_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); const auto output_a = output.packed_accessor64<scalar_t, 5>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); butterfly_multiply_intermediate_backward_complex_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(div_up(n / 2, stride), batch_size, nstack); butterfly_multiply_intermediate_backward_complex_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(n / 2, MAX_BLOCK_SIZE / 2), div_up(batch_size, WORK_PER_THREAD), nstack); butterfly_multiply_intermediate_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_intermediate_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_untied_cuda_kernel(const 
at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; int b = blockIdx.x * blockDim.y + threadIdx.y; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * max_stride * 2] = output_a[first_idx][b][s][input_base_idx + i]; } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos_x] = s_input[pos]; output_a[idx+1][b][s][input_base_idx + pos_x + stride] = s_input[pos + stride]; } } } } // Trying out an implementation where consecutive threads process same input index, but different batch indices. // template <typename scalar_t, bool increasing_stride, bool return_intermediates> // __global__ void butterfly_multiply_untied_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, // at::PackedTensorAccessor64<scalar_t, 4> output_a, // int log_max_stride, // int log_n) { // const int batch_size = output_a.size(1); // const int s = blockIdx.z; // const int max_stride = 1 << log_max_stride; // const int input_base_idx = blockIdx.y * blockDim.y * 2; // __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; // __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // int b = blockIdx.x * blockDim.x + threadIdx.x; // int tid_x = threadIdx.x; // batch index // int tid_y = threadIdx.y; // int first_idx = increasing_stride ? 
0 : log_n - 1 - log_max_stride; // if (b < batch_size) { // for (int i = tid_y; i < max_stride * 2; i += blockDim.y) { // s_input[tid_x + i * blockDim.x] = output_a[first_idx][b][s][input_base_idx + i]; // } // } // // for (int i = tid_x + tid_y * blockDim.x; i < blockDim.x * max_stride * 2; i += blockDim.x * blockDim.y) { // // int input_idx = i & (max_stride * 2 - 1); // int input_idx = i % (max_stride * 2); // // int batch_idx = i >> (log_max_stride + 1); // int batch_idx = (i - input_idx) / (max_stride * 2); // // if (blockIdx.x * blockDim.x + batch_idx < batch_size) { // // s_input[batch_idx + input_idx * blockDim.x] = output_a[blockIdx.x * blockDim.x + first_idx][batch_idx][s][input_base_idx + input_idx]; // // } // // } // for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { // int log_stride = increasing_stride ? idx : log_n - 1 - idx; // int stride = 1 << log_stride; // if (tid_x == 0) { // s_twiddle[tid_y][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][0][0]; // s_twiddle[tid_y][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][0][1]; // s_twiddle[tid_y][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][1][0]; // s_twiddle[tid_y][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_y][1][1]; // } // int low_order_bits = tid_y & (stride - 1); // int low_order_bits = tid_y % stride; // int pos_y = 2 * (tid_y - low_order_bits) + low_order_bits; // int pos_x = tid_x; // int pos = pos_x + pos_y * blockDim.x; // __syncthreads(); // const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_y][0][0], s_twiddle[tid_y][0][1]}, // {s_twiddle[tid_y][1][0], s_twiddle[tid_y][1][1]}}; // __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read // if (b < batch_size) { // const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride * blockDim.x]}; // s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; // s_input[pos + stride * blockDim.x] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; // if (return_intermediates || idx == first_idx + log_max_stride) { // output_a[idx+1][b][s][input_base_idx + pos_y] = s_input[pos]; // output_a[idx+1][b][s][input_base_idx + pos_y + stride] = s_input[pos + stride * blockDim.x]; // } // } // } // } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_multiply_untied_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; // __shared__ complex_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_input_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_input = (complex_t *)&s_input_storage[0]; // To avoid warning about race-condition when initializing complex_t int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) int first_idx = increasing_stride ? 
0 : log_n - 1 - log_max_stride; for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_input[i] = complex_t(output_a[first_idx][b][s][input_base_idx + i][0], output_a[first_idx][b][s][input_base_idx + i][1]); } int i = threadIdx.x; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1])}}; __syncthreads(); const complex_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos][0] = s_input[pos].real(); output_a[idx+1][b][s][input_base_idx + pos][1] = s_input[pos].imag(); output_a[idx+1][b][s][input_base_idx + pos + stride][0] = s_input[pos + stride].real(); output_a[idx+1][b][s][input_base_idx + pos + stride][1] = s_input[pos + stride].imag(); } } } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][i][0][0], twiddle_a[s][log_stride][i][0][1]}, {twiddle_a[s][log_stride][i][1][0], twiddle_a[s][log_stride][i][1][1]}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; output_a[idx+1][b][s][pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; output_a[idx+1][b][s][pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } } template <typename scalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, at::PackedTensorAccessor64<scalar_t, 5> output_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][i][0][0][0], twiddle_a[s][log_stride][i][0][0][1]), complex_t(twiddle_a[s][log_stride][i][0][1][0], twiddle_a[s][log_stride][i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][i][1][0][0], twiddle_a[s][log_stride][i][1][0][1]), complex_t(twiddle_a[s][log_stride][i][1][1][0], twiddle_a[s][log_stride][i][1][1][1])}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; const complex_t output_val[2] = {twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1], twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]}; output_a[idx+1][b][s][pos][0] = output_val[0].real(); output_a[idx+1][b][s][pos][1] = output_val[0].imag(); output_a[idx+1][b][s][pos + stride][0] = output_val[1].real(); output_a[idx+1][b][s][pos + stride][1] = output_val[1].imag(); } } void butterfly_multiply_untied_cuda(const at::Tensor& twiddle, at::Tensor& output, bool increasing_stride, bool return_intermediates) { const int batch_size = output.size(1); const int nstack = twiddle.size(0); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_untied_cuda", [&] { if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(div_up(MAX_BLOCK_SIZE, stride * 2), stride); // dim3 grid(div_up(batch_size, block.x), div_up(n / 2, stride), nstack); return_intermediates ? 
butterfly_multiply_untied_cuda_kernel<scalar_t, true, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_untied_cuda_kernel<scalar_t, true, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_onestep_cuda_kernel<scalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_onestep_cuda_kernel<scalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(div_up(MAX_BLOCK_SIZE, stride * 2), stride); // dim3 grid(div_up(batch_size, block.x), div_up(n / 2, stride), nstack); return_intermediates ? butterfly_multiply_untied_cuda_kernel<scalar_t, false, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_untied_cuda_kernel<scalar_t, false, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 6>(); auto output_a = output.packed_accessor64<scalar_t, 5>(); if (increasing_stride) { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); return_intermediates ? butterfly_multiply_untied_complex_cuda_kernel<scalar_t, true, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_untied_complex_cuda_kernel<scalar_t, true, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_onestep_complex_cuda_kernel<scalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } else { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_onestep_complex_cuda_kernel<scalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); return_intermediates ? 
butterfly_multiply_untied_complex_cuda_kernel<scalar_t, false, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n) : butterfly_multiply_untied_complex_cuda_kernel<scalar_t, false, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, log_stride, log_n); } } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_cuda failed with error code ", cudaGetLastError()); } // Original implementation, with 1 batch per thread block // template <typename scalar_t, typename accscalar_t, bool increasing_stride> // __global__ void butterfly_multiply_untied_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, // const at::PackedTensorAccessor64<scalar_t, 4> output_a, // at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, // at::PackedTensorAccessor64<scalar_t, 3> d_input_a, // int log_max_stride, // int log_n) { // const int batch_size = output_a.size(1); // const int s = blockIdx.z; // const int max_stride = 1 << log_max_stride; // const int input_base_idx = blockIdx.y * blockDim.x * 2; // __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; // int b = blockIdx.x * blockDim.y + threadIdx.y; // if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) // for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { // s_grad[i] = d_input_a[b][s][input_base_idx + i]; // } // int i = threadIdx.x; // int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; // for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { // int log_stride = increasing_stride ? idx : log_n - 1 - idx; // int stride = 1 << log_stride; // int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; // int pos = 2 * (i - low_order_bits) + low_order_bits; // const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1]}, // {twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1]}}; // __syncthreads(); // const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; // s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; // s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; // const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos], output_a[idx][b][s][input_base_idx + pos + stride]}; // accscalar_t d_twiddle_val[2][2] = {{grad_val[0] * input_val[0], grad_val[0] * input_val[1]}, // {grad_val[1] * input_val[0], grad_val[1] * input_val[1]}}; // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0], d_twiddle_val[0][0]); // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1], d_twiddle_val[0][1]); // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0], d_twiddle_val[1][0]); // atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1], d_twiddle_val[1][1]); // } // __syncthreads(); // for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { // d_input_a[b][s][input_base_idx + i] = s_grad[i]; // } // } // } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, 
at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_max_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * max_stride * 2] = d_input_a[b][s][input_base_idx + i]; } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos_x], output_a[idx][b][s][input_base_idx + pos_x + stride]}; d_twiddle_val[0][0] = grad_val[0] * input_val[0]; d_twiddle_val[0][1] = grad_val[0] * input_val[1]; d_twiddle_val[1][0] = grad_val[1] * input_val[0]; d_twiddle_val[1][1] = grad_val[1] * input_val[1]; } int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, max_stride, nthreads, tid); if (tid_y == 0) { atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], s_d_twiddle[tid_x]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], s_d_twiddle[tid_x + max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], s_d_twiddle[tid_x + 2 * max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], s_d_twiddle[tid_x + 3 * max_stride]); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read } if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i] = s_grad[i + threadIdx.y * max_stride * 2]; } } } template <typename scalar_t, typename accscalar_t, 
bool increasing_stride> __global__ void butterfly_multiply_untied_backward_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 6> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_max_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int max_stride = 1 << log_max_stride; const int input_base_idx = blockIdx.y * blockDim.x * 2; // __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2][2]; __shared__ scalar_t s_grad_storage[ELEMENTARY_SIZE * 2][2]; complex_t* s_grad = (complex_t *)&s_grad_storage[0]; // To avoid warning about race-condition when initializing complex_t int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < batch_size) { // Currently we assume 1 batch per thread block, so all threads in the block should enter (otherwise deadlock) for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i] = complex_t(d_input_a[b][s][input_base_idx + i][0], d_input_a[b][s][input_base_idx + i][1]); } int i = threadIdx.x; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? idx : log_n - 1 - idx; int stride = 1 << log_stride; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1]), complex_t(twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1])}}; __syncthreads(); const complex_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1]; s_grad[pos + stride] = thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]; const complex_t input_val[2] = {complex_t(output_a[idx][b][s][input_base_idx + pos][0], output_a[idx][b][s][input_base_idx + pos][1]), complex_t(output_a[idx][b][s][input_base_idx + pos + stride][0], output_a[idx][b][s][input_base_idx + pos + stride][1])}; acccomplex_t d_twiddle_val[2][2] = {{grad_val[0] * thrust::conj(input_val[0]), grad_val[0] * thrust::conj(input_val[1])}, {grad_val[1] * thrust::conj(input_val[0]), grad_val[1] * thrust::conj(input_val[1])}}; atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][0], d_twiddle_val[0][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][0][1], d_twiddle_val[0][0].imag()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][0], d_twiddle_val[0][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][0][1][1], d_twiddle_val[0][1].imag()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][0], d_twiddle_val[1][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][0][1], d_twiddle_val[1][0].imag()); 
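      // d_twiddle_a stores complex entries as a trailing real/imag dimension of size 2,
      // so each of the four 2x2 gradient entries is written with two scalar atomicAdds
      // (six above, two below). Sketch of the complex backward rule for y = W x being
      // accumulated here, illustrative only:
      //   d_input   = conj(W)^T * grad        (hence the thrust::conj on twiddle_val)
      //   d_twiddle += grad * conj(input)^T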
atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][0], d_twiddle_val[1][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + i][1][1][1], d_twiddle_val[1][1].imag()); } __syncthreads(); for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { d_input_a[b][s][input_base_idx + i][0] = s_grad[i].real(); d_input_a[b][s][input_base_idx + i][1] = s_grad[i].imag(); } } } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_backward_onestep_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a, int log_stride, int log_n) { const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const scalar_t twiddle_val[2][2] = {{twiddle_a[s][log_stride][i][0][0], twiddle_a[s][log_stride][i][0][1]}, {twiddle_a[s][log_stride][i][1][0], twiddle_a[s][log_stride][i][1][1]}}; accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const scalar_t grad_val[2] = {d_input_a[b][s][pos], d_input_a[b][s][pos + stride]}; d_input_a[b][s][pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; d_input_a[b][s][pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][pos], output_a[idx][b][s][pos + stride]}; d_twiddle_val[0][0] += grad_val[0] * input_val[0]; d_twiddle_val[0][1] += grad_val[0] * input_val[1]; d_twiddle_val[1][0] += grad_val[1] * input_val[0]; d_twiddle_val[1][1] += grad_val[1] * input_val[1]; } atomicAdd(&d_twiddle_a[s][log_stride][i][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][log_stride][i][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][log_stride][i][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][log_stride][i][1][1], d_twiddle_val[1][1]); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_multiply_untied_backward_onestep_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 6> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 5> output_a, at::PackedTensorAccessor64<scalar_t, 6> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_stride, int log_n) { using complex_t = thrust::complex<scalar_t>; using acccomplex_t = thrust::complex<accscalar_t>; const int batch_size = output_a.size(1); const int s = blockIdx.z; const int idx = increasing_stride ? 
log_stride : (log_n - 1 - log_stride); // Index to access output_a const int n = output_a.size(3); int stride = 1 << log_stride; int i = blockIdx.y * blockDim.x + threadIdx.x; if (i > n) return; int low_order_bits = i & (stride - 1); // int low_order_bits = i % stride; int pos = 2 * (i - low_order_bits) + low_order_bits; const complex_t twiddle_val[2][2] = {{complex_t(twiddle_a[s][log_stride][i][0][0][0], twiddle_a[s][log_stride][i][0][0][1]), complex_t(twiddle_a[s][log_stride][i][0][1][0], twiddle_a[s][log_stride][i][0][1][1])}, {complex_t(twiddle_a[s][log_stride][i][1][0][0], twiddle_a[s][log_stride][i][1][0][1]), complex_t(twiddle_a[s][log_stride][i][1][1][0], twiddle_a[s][log_stride][i][1][1][1])}}; acccomplex_t d_twiddle_val[2][2] = {{{0, 0}, {0, 0}}, {{0, 0}, {0, 0}}}; for (int b = blockIdx.x * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.x) { const complex_t grad_val[2] = {complex_t(d_input_a[b][s][pos][0], d_input_a[b][s][pos][1]), complex_t(d_input_a[b][s][pos + stride][0], d_input_a[b][s][pos + stride][1])}; const complex_t d_input_val[2] = {thrust::conj(twiddle_val[0][0]) * grad_val[0] + thrust::conj(twiddle_val[1][0]) * grad_val[1], thrust::conj(twiddle_val[0][1]) * grad_val[0] + thrust::conj(twiddle_val[1][1]) * grad_val[1]}; d_input_a[b][s][pos][0] = d_input_val[0].real(); d_input_a[b][s][pos][1] = d_input_val[0].imag(); d_input_a[b][s][pos + stride][0] = d_input_val[1].real(); d_input_a[b][s][pos + stride][1] = d_input_val[1].imag(); const complex_t input_val[2] = {complex_t(output_a[idx][b][s][pos][0], output_a[idx][b][s][pos][1]), complex_t(output_a[idx][b][s][pos + stride][0], output_a[idx][b][s][pos + stride][1])}; d_twiddle_val[0][0] += grad_val[0] * thrust::conj(input_val[0]); d_twiddle_val[0][1] += grad_val[0] * thrust::conj(input_val[1]); d_twiddle_val[1][0] += grad_val[1] * thrust::conj(input_val[0]); d_twiddle_val[1][1] += grad_val[1] * thrust::conj(input_val[1]); } atomicAdd(&d_twiddle_a[s][log_stride][i][0][0][0], d_twiddle_val[0][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][0][0][1], d_twiddle_val[0][0].imag()); atomicAdd(&d_twiddle_a[s][log_stride][i][0][1][0], d_twiddle_val[0][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][0][1][1], d_twiddle_val[0][1].imag()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][0][0], d_twiddle_val[1][0].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][0][1], d_twiddle_val[1][0].imag()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][1][0], d_twiddle_val[1][1].real()); atomicAdd(&d_twiddle_a[s][log_stride][i][1][1][1], d_twiddle_val[1][1].imag()); } void butterfly_multiply_untied_backward_cuda(const at::Tensor& twiddle, const at::Tensor& output, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { const int batch_size = output.size(1); const int nstack = output.size(2); const int n = output.size(3); const int log_n = int(log2((double) n)); const bool complex = output.dim() == 5; AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_multiply_untied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; if (!complex) { // real const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); const auto output_a = output.packed_accessor64<scalar_t, 4>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, 
WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_backward_onestep_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(stride); // dim3 grid(batch_size, div_up(n / 2, stride), nstack); butterfly_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), div_up(n / 2, stride), nstack); // dim3 block(stride); // dim3 grid(batch_size, div_up(n / 2, stride), nstack); butterfly_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_backward_onestep_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } else { // complex const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 6>(); const auto output_a = output.packed_accessor64<scalar_t, 5>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 6>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); if (increasing_stride) { int log_stride = log_n - 1; for (; (1 << log_stride) > ELEMENTARY_SIZE; --log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } int stride = 1 << log_stride; dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); butterfly_multiply_untied_backward_complex_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } else { int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride); dim3 grid(batch_size, div_up(n / 2, stride), nstack); butterfly_multiply_untied_backward_complex_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); for (log_stride++; log_stride <= log_n - 1; ++log_stride) { dim3 block(MAX_BLOCK_SIZE / 2); dim3 grid(div_up(batch_size, WORK_PER_THREAD), div_up(n / 2, MAX_BLOCK_SIZE / 2), nstack); butterfly_multiply_untied_backward_onestep_complex_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n); } } } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_backward_cuda failed with error code ", 
cudaGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride, int log_max_stride, typename Function0, typename Function1, typename Function2> __global__ void butterfly_multiply_untied_forward_backward_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a, Function0 load_input, Function1 load_grad, CudaAcsr32<scalar_t, 5> d_twiddle_a, Function2 save_d_input, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Forward pass to compute the intermediate values scalar_t input_val_storage[MAX_N_FACTORS][2]; // Storing inputs for backward pass load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; #pragma unroll for (int idx = 0; idx <= log_max_stride; ++idx) { // Let's not skip steps for now int log_stride = increasing_stride ? idx : log_max_stride - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; input_val_storage[idx][0] = input_val[0]; input_val_storage[idx][1] = input_val[1]; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read // or s_s_input will be overwritten with s_grad before some thread can read } // Backward pass scalar_t* s_grad = &s_input[0]; // Reusing the same storage as s_input __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE][2][2]; load_grad(s_grad); #pragma unroll for (int idx = log_max_stride; idx >= 0; --idx) { int log_stride = increasing_stride ? 
idx : log_max_stride - idx; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {input_val_storage[idx][0], input_val_storage[idx][1]}; s_d_twiddle[tid_x + tid_y * max_stride][0][0] = grad_val[0] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][0][1] = grad_val[0] * input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride][1][0] = grad_val[1] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][1][1] = grad_val[1] * input_val[1]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val[0][0] += s_d_twiddle[tid_x + i * max_stride][0][0]; d_twiddle_val[0][1] += s_d_twiddle[tid_x + i * max_stride][0][1]; d_twiddle_val[1][0] += s_d_twiddle[tid_x + i * max_stride][1][0]; d_twiddle_val[1][1] += s_d_twiddle[tid_x + i * max_stride][1][1]; } } atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], d_twiddle_val[1][1]); } } save_d_input(s_grad); } void butterfly_multiply_untied_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_multiply_untied_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, 
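// The load/save functors below are __device__ lambdas captured by value and
// passed to the kernel through its Function0/Function1/Function2 template
// parameters.  Each one moves the 2 * stride contiguous entries owned by one
// batch row between global memory, laid out as (batch, nstack, n), and that
// row's slice of shared memory at offset threadIdx.y * stride * 2, with the
// blockDim.x threads striding over i.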
stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; switch (log_stride) { case 0: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 0> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 0> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 1: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 1> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 1> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 2: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 2> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 2> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 3: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 3> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 3> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 4: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 4> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 4> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 5: increasing_stride ? 
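// log_max_stride is a compile-time template argument (cases 0 through 9, i.e.
// strides up to 512 on this fused path) so the #pragma unroll factor loops can
// be fully unrolled and input_val_storage[MAX_N_FACTORS][2] stays in
// registers; each case only picks the increasing_stride instantiation.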
butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 5> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 5> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 6: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 6> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 6> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 7: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 7> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 7> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 8: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 8> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 8> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 9: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 9> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 9> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_multiply_untied_forward_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, bool increasing_stride, typename Function0, typename Function1> __global__ void butterfly_ortho_multiply_tied_cuda_kernel(const CudaAcsr32<scalar_t, 2> twiddle_cos_a, const CudaAcsr32<scalar_t, 2> twiddle_sin_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int max_stride = 1 << log_max_stride; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = 0; idx < (log_max_stride + 1); ++idx) { int log_stride = increasing_stride ? 
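// Tied orthogonal butterfly step: every factor is a plane (Givens) rotation
// stored as a (cos, sin) pair.  "Tied" means positions share rotations: the
// factor with stride s owns s angles packed at offsets s - 1 .. 2s - 2 of
// twiddle_cos_a[stack] (twiddle_start_idx = stride - 1, so all log2(n)
// factors fit in n - 1 slots), and twiddle_idx = tid_x % stride picks the one
// a pair uses.  The update applied below is
//   y[pos]          = cos * x[pos] - sin * x[pos + stride]
//   y[pos + stride] = sin * x[pos] + cos * x[pos + stride]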
idx : log_max_stride - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; if ((tid_y == 0) && (tid_x < stride)) { s_twiddle[tid_x][0] = twiddle_cos_a[s][twiddle_start_idx + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][twiddle_start_idx + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int twiddle_idx = low_order_bits; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[twiddle_idx][0], s_twiddle[twiddle_idx][1]}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1]; s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } save_output(s_input); } void butterfly_ortho_multiply_tied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_ortho_multiply_tied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; increasing_stride ? 
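// Single fused launch: unlike the non-orthogonal path there is no onestep
// fallback here, so this routine appears to assume n / 2 <= ELEMENTARY_SIZE
// (stride == n / 2), i.e. a whole length-n row fits in s_input.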
butterfly_ortho_multiply_tied_cuda_kernel<scalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size) : butterfly_ortho_multiply_tied_cuda_kernel<scalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_ortho_multiply_tied_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride, typename Function0, typename Function1, typename Function2> __global__ void butterfly_ortho_multiply_tied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 2> twiddle_cos_a, const CudaAcsr32<scalar_t, 2> twiddle_sin_a, Function0 load_output, Function1 load_grad, CudaAcsr32<scalar_t, 2> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int max_stride = 1 << log_max_stride; __shared__ scalar_t s_output[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE]; int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; load_output(s_output); load_grad(s_grad); for (int idx = log_max_stride; idx >= 0; --idx) { int log_stride = increasing_stride ? idx : log_max_stride - idx; int stride = 1 << log_stride; int twiddle_start_idx = stride - 1; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if ((tid_y == blockDim.y - 1) && (tid_x < stride)) { s_twiddle[tid_x][0] = twiddle_cos_a[s][twiddle_start_idx + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][twiddle_start_idx + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int twiddle_idx = low_order_bits; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[twiddle_idx][0], s_twiddle[twiddle_idx][1]}; scalar_t d_twiddle_val[1] = {0}; // Idk, to be consistent with sum_strided's interface if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1]; s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1]; const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]}; const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1], -twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]}; s_output[pos] = input_val[0]; s_output[pos + stride] = input_val[1]; d_twiddle_val[0] = (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1]) + (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0]; } int tid = tid_x + tid_y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[1]>(d_twiddle_val), s_d_twiddle, stride, nthreads, tid); if ((tid_y == 0) && (tid_x < stride)) { atomicAdd(&d_twiddle_a[s][twiddle_start_idx + twiddle_idx], s_d_twiddle[twiddle_idx]); } } save_d_input(s_grad); } void butterfly_ortho_multiply_tied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& 
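// The tied-orthogonal backward kernel above stores no forward intermediates:
// since each step is a rotation, the factor's input is recovered from its
// output with the transposed rotation,
//   x[pos]          =  cos * y[pos] + sin * y[pos + stride]
//   x[pos + stride] = -sin * y[pos] + cos * y[pos + stride]
// and the same transpose propagates the gradient.  The scalar angle gradient
//   d_theta = -sin * (g0 * x0 + g1 * x1) + cos * (g1 * x0 - g0 * x1)
// is reduced over all threads sharing a twiddle index by sum_strided_atomic
// into s_d_twiddle before a single atomicAdd per angle into d_twiddle_a.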
output, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_ortho_multiply_tied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 2, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; increasing_stride ? butterfly_ortho_multiply_tied_backward_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size) : butterfly_ortho_multiply_tied_backward_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_ortho_multiply_tied_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, bool increasing_stride, typename Function0, typename Function1> __global__ void butterfly_ortho_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = 0; idx < (log_max_stride + 1); ++idx) { int log_stride = increasing_stride ? 
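// Untied orthogonal variant: every (factor, position) pair has its own angle,
// read as twiddle_cos_a[s][log_stride][position] instead of the stride-packed
// tied layout, so the n / 2 rotations of a factor are independent; the
// per-pair arithmetic is the same rotation as in the tied kernel.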
idx : log_max_stride - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0] = twiddle_cos_a[s][log_stride][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][log_stride][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1]; s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } save_output(s_input); } void butterfly_ortho_multiply_untied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output, bool increasing_stride) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "butterfly_ortho_multiply_untied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; increasing_stride ? 
butterfly_ortho_multiply_untied_cuda_kernel<scalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size) : butterfly_ortho_multiply_untied_cuda_kernel<scalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_ortho_multiply_untied_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride, typename Function0, typename Function1, typename Function2> __global__ void butterfly_ortho_multiply_untied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_output, Function1 load_grad, CudaAcsr32<scalar_t, 3> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d butterfly_ortho as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_output[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE]; int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; load_output(s_output); load_grad(s_grad); for (int idx = log_max_stride; idx >= 0; --idx) { int log_stride = increasing_stride ? idx : log_max_stride - idx; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0] = twiddle_cos_a[s][log_stride][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][log_stride][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1]; s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1]; const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]}; const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1], -twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]}; s_output[pos] = input_val[0]; s_output[pos + stride] = input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride] = (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1]) + (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val = 0; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val += s_d_twiddle[tid_x + i * max_stride]; } } atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x], d_twiddle_val); } } save_d_input(s_grad); } void butterfly_ortho_multiply_untied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input, bool increasing_stride) { int batch_size = 
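// In the untied backward above each position owns its angle, so the angle
// gradient is first staged per batch row in s_d_twiddle[tid_x + tid_y *
// max_stride]; the tid_y == 0 row then sums over the blockDim.y rows and
// issues one atomicAdd per (factor, position) into d_twiddle_a.  The twiddles
// are loaded by the tid_y == blockDim.y - 1 row so those reads do not have to
// wait behind the row doing the atomicAdd.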
output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_ortho_multiply_untied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; increasing_stride ? butterfly_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size) : butterfly_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_ortho_multiply_untied_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename Function0, typename Function1> __global__ void bbt_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size, int nblocks) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int block = 0; block < nblocks; ++block) { for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) { int log_stride = idx <= log_max_stride ? 
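// BBT factor schedule: twiddle dim 1 holds nblocks * 2 * (log_max_stride + 1)
// factors, with nblocks = twiddle.size(1) / (2 * log_n) in the launcher below.
// Within one block, idx = 0 .. log_max_stride walks the strides from large to
// small (a B^T-style sweep) and the remaining idx values walk them back up
// (a B-style sweep).  Illustrative schedule for log_max_stride = 2:
//   idx    : 0 1 2 3 4 5
//   stride : 4 2 1 1 2 4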
log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } } save_output(s_input); } void bbt_multiply_untied_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_multiply_untied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; bbt_multiply_untied_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, save_output, log_stride, batch_size, nblocks); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "bbt_multiply_untied_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename accscalar_t, int nblocks, typename Function0, typename Function1, typename Function2> __global__ void bbt_multiply_untied_forward_backward_cuda_kernel(const CudaAcsr32<scalar_t, 5> twiddle_a, Function0 load_input, Function1 load_grad, CudaAcsr32<scalar_t, 5> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt as 
well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Forward pass to compute the intermediate values scalar_t input_val_storage[nblocks * 2 * MAX_N_FACTORS][2]; // Storing inputs for backward pass load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int block = 0; block < nblocks; ++block) { for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) { // Let's not skip steps for now int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; input_val_storage[idx + block * 2 * (log_max_stride + 1)][0] = input_val[0]; input_val_storage[idx + block * 2 * (log_max_stride + 1)][1] = input_val[1]; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read // or s_s_input will be overwritten with s_grad before some thread can read } } // Backward pass scalar_t* s_grad = &s_input[0]; // Reusing the same storage as s_input __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE][2][2]; load_grad(s_grad); for (int block = nblocks - 1; block >= 0; --block) { for (int idx = 2 * (log_max_stride + 1) - 1; idx >= 0; --idx) { int log_stride = idx <= log_max_stride ? 
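// The fused BBT forward+backward keeps every intermediate pair in registers,
// input_val_storage[nblocks * 2 * MAX_N_FACTORS][2] per thread, which is why
// nblocks is a template parameter (the launcher below switches over
// nblocks = 1 .. 14) and why large nblocks raises register pressure.  The
// backward sweep here revisits blocks and factors in reverse order and reuses
// the same T^T / outer-product update as the plain untied butterfly backward.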
log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {input_val_storage[idx + block * 2 * (log_max_stride + 1)][0], input_val_storage[idx + block * 2 * (log_max_stride + 1)][1]}; s_d_twiddle[tid_x + tid_y * max_stride][0][0] = grad_val[0] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][0][1] = grad_val[0] * input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride][1][0] = grad_val[1] * input_val[0]; s_d_twiddle[tid_x + tid_y * max_stride][1][1] = grad_val[1] * input_val[1]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val[0][0] += s_d_twiddle[tid_x + i * max_stride][0][0]; d_twiddle_val[0][1] += s_d_twiddle[tid_x + i * max_stride][0][1]; d_twiddle_val[1][0] += s_d_twiddle[tid_x + i * max_stride][1][0]; d_twiddle_val[1][1] += s_d_twiddle[tid_x + i * max_stride][1][1]; } } atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][0], d_twiddle_val[0][0]); atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][0][1], d_twiddle_val[0][1]); atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][0], d_twiddle_val[1][0]); atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x][1][1], d_twiddle_val[1][1]); } } } save_d_input(s_grad); } void bbt_multiply_untied_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_multiply_untied_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, 
at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; switch (nblocks) { case 1: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 1> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 2: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 2> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 3: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 3> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 4: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 4> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 5: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 5> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 6: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 6> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 7: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 7> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 8: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 8> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 9: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 9> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 10: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 10> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, 
batch_size); break; case 11: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 11> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 12: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 12> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 13: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 13> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 14: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 14> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "bbt_multiply_untied_forward_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename Function0, typename Function1> __global__ void bbt_ortho_multiply_untied_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_input, Function1 save_output, int log_max_stride, int batch_size, int nblocks) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt_ortho as well const int max_stride = 1 << log_max_stride; const int input_base_idx = 0; __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; load_input(s_input); int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int block = 0; block < nblocks; ++block) { for (int idx = 0; idx < 2 * (log_max_stride + 1); ++idx) { int log_stride = idx <= log_max_stride ? 
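// Orthogonal BBT: the same down-then-up stride schedule as the plain BBT
// kernels, but each factor is a per-position Givens rotation read from
// twiddle_cos_a / twiddle_sin_a at [s][idx + block * 2 * (log_max_stride + 1)]
// [position], applied exactly like the untied orthogonal butterfly step.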
log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0] = twiddle_cos_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0] * input_val[0] - twiddle_val[1] * input_val[1]; s_input[pos + stride] = twiddle_val[1] * input_val[0] + twiddle_val[0] * input_val[1]; } __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read } } save_output(s_input); } void bbt_ortho_multiply_untied_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& input, at::Tensor& output) { int batch_size = input.size(0); const int nstack = input.size(1); const int n = input.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "bbt_ortho_multiply_untied_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_input = [batch_size, stride, input_a] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_input[i + threadIdx.y * stride * 2] = input_a[b][s][i]; } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; bbt_ortho_multiply_untied_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_input, save_output, log_stride, batch_size, nblocks); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "bbt_ortho_multiply_untied_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename accscalar_t, typename Function0, typename Function1, typename Function2> __global__ void bbt_ortho_multiply_untied_backward_cuda_kernel(const CudaAcsr32<scalar_t, 3> twiddle_cos_a, const CudaAcsr32<scalar_t, 3> twiddle_sin_a, Function0 load_output, Function1 load_grad, CudaAcsr32<scalar_t, 3> d_twiddle_a, Function2 save_d_input, int log_max_stride, int batch_size, int nblocks) { const int s = blockIdx.y + gridDim.y * blockIdx.z; // For conv2d bbt_ortho as well const int max_stride = 1 << log_max_stride; const int 
input_base_idx = 0; __shared__ scalar_t s_output[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2]; __shared__ accscalar_t s_d_twiddle[ELEMENTARY_SIZE]; int b = blockIdx.x * blockDim.y + threadIdx.y; int tid_x = threadIdx.x; int tid_y = threadIdx.y; load_output(s_output); load_grad(s_grad); for (int block = nblocks - 1; block >= 0; --block) { for (int idx = 2 * (log_max_stride + 1) - 1; idx >= 0; --idx) { int log_stride = idx <= log_max_stride ? log_max_stride - idx : idx - log_max_stride - 1; int stride = 1 << log_stride; // tid_y == 0 is writing (atomicAdd) so tid_y == -1 can do the reading, instead of having to wait for tid_y == 0 if (tid_y == blockDim.y - 1) { s_twiddle[tid_x][0] = twiddle_cos_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; s_twiddle[tid_x][1] = twiddle_sin_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2] = {s_twiddle[tid_x][0], s_twiddle[tid_x][1]}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0] * grad_val[0] + twiddle_val[1] * grad_val[1]; s_grad[pos + stride] = -twiddle_val[1] * grad_val[0] + twiddle_val[0] * grad_val[1]; const scalar_t output_val[2] = {s_output[pos], s_output[pos + stride]}; const scalar_t input_val[2] = {twiddle_val[0] * output_val[0] + twiddle_val[1] * output_val[1], -twiddle_val[1] * output_val[0] + twiddle_val[0] * output_val[1]}; s_output[pos] = input_val[0]; s_output[pos + stride] = input_val[1]; s_d_twiddle[tid_x + tid_y * max_stride] = (grad_val[0] * input_val[0] + grad_val[1] * input_val[1]) * (-twiddle_val[1]) + (-grad_val[0] * input_val[1] + grad_val[1] * input_val[0]) * twiddle_val[0]; } __syncthreads(); if (tid_y == 0) { accscalar_t d_twiddle_val = 0; for (int i = 0; i < blockDim.y; ++i) { if (blockIdx.x * blockDim.y + i < batch_size) { d_twiddle_val += s_d_twiddle[tid_x + i * max_stride]; } } atomicAdd(&d_twiddle_a[s][idx + block * 2 * (log_max_stride + 1)][input_base_idx / 2 + tid_x], d_twiddle_val); } } } save_d_input(s_grad); } void bbt_ortho_multiply_untied_backward_cuda(const at::Tensor& twiddle_cos, const at::Tensor& twiddle_sin, const at::Tensor& output, const at::Tensor& grad, at::Tensor& d_twiddle, at::Tensor& d_input) { int batch_size = output.size(0); const int nstack = output.size(1); const int n = output.size(2); const int log_n = int(log2((double) n)); int nblocks = twiddle_cos.size(1) / (2 * log_n); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "bbt_ortho_multiply_untied_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_cos_a = twiddle_cos.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto twiddle_sin_a = twiddle_sin.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, 
div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), 1, nstack); auto load_output = [batch_size, stride, output_a] __device__ (scalar_t* s_output) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_output[i + threadIdx.y * stride * 2] = output_a[b][s][i]; } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int s = blockIdx.z; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { d_input_a[b][s][i] = s_grad[i + threadIdx.y * stride * 2]; } } }; bbt_ortho_multiply_untied_backward_cuda_kernel<scalar_t, accscalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_cos_a, twiddle_sin_a, load_output, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size, nblocks); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "bbt_ortho_multiply_untied_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, bool increasing_stride, bool return_intermediates> __global__ void butterfly_conv2d_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 4> output_a, int log_max_stride, int log_n, int kernel_size, int padding, int h_out, int w_out) { const int batch_size = output_a.size(1); const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; const int max_stride = 1 << log_max_stride; // base index always 0 const int input_base_idx = 0; const int h_in = input_a.size(2); const int w_in = input_a.size(3); __shared__ scalar_t s_input[ELEMENTARY_SIZE * 2]; __shared__ scalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; int b = blockIdx.x * blockDim.y + threadIdx.y; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; if (b < batch_size) { for (int t = threadIdx.x; t < max_stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + threadIdx.y * max_stride * 2] = 0; else{ s_input[t + threadIdx.y * max_stride * 2] = input_a[batch_idx][input_base_idx + t][i][j]; // load input into first idx of output for backward pass // we allocated this memory already so shouldn't affect too much output_a[0][b][s][input_base_idx + t] = s_input[t + threadIdx.y * max_stride * 2]; } } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; for (int idx = first_idx; idx <= first_idx + log_max_stride; ++idx) { int log_stride = increasing_stride ? 
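// Patch extraction above (im2col style): b enumerates batch * h_out * w_out
// output locations, blockIdx.z (= stack) encodes the kernel offset
// (k_i, k_j) = (stack / kernel_size, stack % kernel_size), and the pixel read
// for channel t is (i, j) = (k_i + p_i - padding, k_j + p_j - padding),
// zero-filled when it falls outside the image.  Note the bounds test compares
// i against w_in and j against h_in, which looks correct only for square
// inputs (h_in == w_in).  The gathered patch is also written to
// output_a[0][b][s][:] so the backward kernel can reread it instead of
// redoing the gather.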
idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; __syncthreads(); // otherwise some thread might go back to writing to s_twiddle before other thread can read if (b < batch_size) { const scalar_t input_val[2] = {s_input[pos], s_input[pos + stride]}; s_input[pos] = twiddle_val[0][0] * input_val[0] + twiddle_val[0][1] * input_val[1]; s_input[pos + stride] = twiddle_val[1][0] * input_val[0] + twiddle_val[1][1] * input_val[1]; if (return_intermediates || idx == first_idx + log_max_stride) { output_a[idx+1][b][s][input_base_idx + pos_x] = s_input[pos]; output_a[idx+1][b][s][input_base_idx + pos_x + stride] = s_input[pos + stride]; } } } } void butterfly_conv2d_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output, const int kernel_size, const int padding, const int h_out, const int w_out, bool increasing_stride, bool return_intermediates) { const int b_in = input.size(0); const int n = input.size(1); /*c*/ const int nstack = twiddle.size(0); const int stack = kernel_size*kernel_size; const int log_n = int(log2((double) n)); const int batch_size = output.size(1); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_conv2d_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); // batch_size, c, h, w const auto input_a = input.packed_accessor64<scalar_t, 4>(); // log c_in, h*w*batch_size, nstack, c_in auto output_a = output.packed_accessor64<scalar_t, 4>(); // assume in_channels <= 1024 int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); // to support out_channels > in_channels int c_out_ratio = nstack / stack; // dim3 block(stride); // dim3 grid(batch_size, c_out_ratio, stack); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); if (increasing_stride) { return_intermediates ? butterfly_conv2d_cuda_kernel<scalar_t, true, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, h_out, w_out) : butterfly_conv2d_cuda_kernel<scalar_t, true, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, h_out, w_out); } else { return_intermediates ? 
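// Launch geometry for the conv2d butterfly: grid = (ceil(batch_size /
// block.y), c_out_ratio, kernel_size^2) with c_out_ratio = nstack / stack,
// and inside the kernel s = blockIdx.y + gridDim.y * blockIdx.z selects one
// of the nstack butterflies, so out_channels > in_channels is supported by
// extra blockIdx.y slices rather than by a larger n.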
butterfly_conv2d_cuda_kernel<scalar_t, false, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, h_out, w_out) : butterfly_conv2d_cuda_kernel<scalar_t, false, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, input_a, output_a, log_stride, log_n, kernel_size, padding, h_out, w_out); } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_conv2d_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t, typename accscalar_t, bool increasing_stride> __global__ void butterfly_conv2d_backward_cuda_kernel( const at::PackedTensorAccessor64<scalar_t, 3> grad_a, const at::PackedTensorAccessor64<scalar_t, 5> twiddle_a, const at::PackedTensorAccessor64<scalar_t, 4> output_a, at::PackedTensorAccessor64<scalar_t, 5> d_twiddle_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a, int log_max_stride, int log_n, int kernel_size, int padding, int h_out, int w_out) { const int batch_size = output_a.size(1); const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; // base index always 0 const int input_base_idx = 0; const int h_in = d_input_a.size(2); const int w_in = d_input_a.size(3); const int max_stride = 1 << log_max_stride; __shared__ scalar_t s_grad[ELEMENTARY_SIZE * 2]; __shared__ accscalar_t s_twiddle[ELEMENTARY_SIZE][2][2]; // Use accscalar_t instead of scalar_t since we'll reuse the storage for s_d_twiddle accscalar_t* s_d_twiddle = (accscalar_t *)&s_twiddle[0][0][0]; // Reusing the same storage as s_twiddle, have to be careful if we change the implemetnation. int b = blockIdx.x * blockDim.y + threadIdx.y; if (b < batch_size) { for (int i = threadIdx.x; i < max_stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * max_stride * 2] = grad_a[b][s][input_base_idx + i]; } } int tid_x = threadIdx.x; int tid_y = threadIdx.y; int first_idx = increasing_stride ? 0 : log_n - 1 - log_max_stride; for (int idx = first_idx + log_max_stride; idx >= first_idx; --idx) { int log_stride = increasing_stride ? 
idx : log_n - 1 - idx; int stride = 1 << log_stride; if (tid_y == 0) { s_twiddle[tid_x][0][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0]; s_twiddle[tid_x][0][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1]; s_twiddle[tid_x][1][0] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0]; s_twiddle[tid_x][1][1] = twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1]; } int low_order_bits = tid_x & (stride - 1); // int low_order_bits = tid_x % stride; int pos_x = 2 * (tid_x - low_order_bits) + low_order_bits; int pos_y = tid_y * max_stride * 2; int pos = pos_x + pos_y; __syncthreads(); const scalar_t twiddle_val[2][2] = {{s_twiddle[tid_x][0][0], s_twiddle[tid_x][0][1]}, {s_twiddle[tid_x][1][0], s_twiddle[tid_x][1][1]}}; // Don't need to sync here since we sync later at sum_strided_atomic, so no writing to s_twiddle can occur until then accscalar_t d_twiddle_val[2][2] = {{0, 0}, {0, 0}}; if (b < batch_size) { const scalar_t grad_val[2] = {s_grad[pos], s_grad[pos + stride]}; s_grad[pos] = twiddle_val[0][0] * grad_val[0] + twiddle_val[1][0] * grad_val[1]; s_grad[pos + stride] = twiddle_val[0][1] * grad_val[0] + twiddle_val[1][1] * grad_val[1]; const scalar_t input_val[2] = {output_a[idx][b][s][input_base_idx + pos_x], output_a[idx][b][s][input_base_idx + pos_x + stride]}; d_twiddle_val[0][0] = grad_val[0] * input_val[0]; d_twiddle_val[0][1] = grad_val[0] * input_val[1]; d_twiddle_val[1][0] = grad_val[1] * input_val[0]; d_twiddle_val[1][1] = grad_val[1] * input_val[1]; } int tid = threadIdx.x + threadIdx.y * blockDim.x; int nthreads = blockDim.x * blockDim.y; sum_strided_atomic(reinterpret_cast<accscalar_t (&)[4]>(d_twiddle_val), s_d_twiddle, max_stride, nthreads, tid); if (tid_y == 0) { atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][0], s_d_twiddle[tid_x]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][0][1], s_d_twiddle[tid_x + max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][0], s_d_twiddle[tid_x + 2 * max_stride]); atomicAdd(&d_twiddle_a[s][log_stride][input_base_idx / 2 + tid_x][1][1], s_d_twiddle[tid_x + 3 * max_stride]); } __syncthreads(); // Otherwise s_d_twiddle will be overwritten with s_twiddle before some thread can read } if (b < batch_size) { const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); for (int t = threadIdx.x; t < max_stride * 2; t += blockDim.x) { // map back to b, c, h, w // get index into patch int k_i = stack / kernel_size; // stack / kernel_size int k_j = stack % kernel_size; // stack % kernel_size // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; // this needs to be atomic because input is reused in forward pass // with out_channels > in_channels and for each entry of the patch if (i < w_in && j < h_in && i >= 0 && j >= 0) { atomicAdd(&d_input_a[batch_idx][input_base_idx + t][i][j], s_grad[t + threadIdx.y * max_stride * 2]); } } } } void butterfly_conv2d_backward_cuda(const at::Tensor&grad, const at::Tensor& twiddle, const at::Tensor& output, at::Tensor& d_twiddle, at::Tensor& d_input, const int kernel_size, const int padding, const int h_out, const int w_out, bool increasing_stride) { const int batch_size = output.size(1); const int nstack = twiddle.size(0); const int stack = kernel_size*kernel_size; const int n = d_input.size(1); // c_in const int log_n = 
int(log2((double) n)); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "butterfly_conv2d_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto grad_a = grad.packed_accessor64<scalar_t, 3>(); const auto twiddle_a = twiddle.packed_accessor64<scalar_t, 5>(); const auto output_a = output.packed_accessor64<scalar_t, 4>(); auto d_twiddle_a = d_twiddle.packed_accessor64<scalar_t, 5>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); // to support out_channels > in_channels int c_out_ratio = nstack / stack; // dim3 block(stride); // dim3 grid(batch_size, c_out_ratio, stack); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); increasing_stride ? butterfly_conv2d_backward_cuda_kernel<scalar_t, accscalar_t, true> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( grad_a, twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n, kernel_size, padding, h_out, w_out) : butterfly_conv2d_backward_cuda_kernel<scalar_t, accscalar_t, false> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( grad_a, twiddle_a, output_a, d_twiddle_a, d_input_a, log_stride, log_n, kernel_size, padding, h_out, w_out); }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_conv2d_backward_cuda failed with error code ", cudaGetLastError()); } void butterfly_conv2d_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor&grad, at::Tensor& d_twiddle, at::Tensor& d_input, const int kernel_size, const int padding, const int h_out, const int w_out, bool increasing_stride) { const int batch_size = grad.size(0); // b_out = b_in * h_out * w_out const int nstack = twiddle.size(0); const int stack = kernel_size * kernel_size; const int n = d_input.size(1); // c_in const int log_n = int(log2((double) n)); const int c_out_ratio = nstack / stack; const int h_in = input.size(2); const int w_in = input.size(3); AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "butterfly_conv2d_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + 
threadIdx.y * stride * 2] = 0; else{ s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j]; } } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // map back to b, c, h, w // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i < w_in && j < h_in && i >= 0 && j >= 0) { atomicAdd(&d_input_a[batch_idx][t][i][j], s_grad[t + threadIdx.y * stride * 2]); } } } }; switch (log_stride) { case 0: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 0> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 0> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 1: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 1> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 1> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 2: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 2> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 2> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 3: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 3> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 3> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 4: increasing_stride ? 
butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 4> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 4> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 5: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 5> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 5> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 6: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 6> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 6> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 7: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 7> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 7> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 8: increasing_stride ? butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 8> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 8> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; case 9: increasing_stride ? 
butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, true, 9> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size) : butterfly_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, false, 9> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, batch_size); break; } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "butterfly_conv2d_forward_backward_cuda failed with error code ", cudaGetLastError()); } void bbt_conv2d_cuda(const at::Tensor& twiddle, const at::Tensor& input, at::Tensor& output, const int kernel_size, const int padding, const int h_out, const int w_out) { const int b_in = input.size(0); const int n = input.size(1); /*c*/ const int nstack = twiddle.size(0); const int stack = kernel_size*kernel_size; const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); int batch_size = output.size(0); const int h_in = input.size(2); const int w_in = input.size(3); AT_DISPATCH_FLOATING_TYPES(output.scalar_type(), "bbt_conv2d_cuda", [&] { const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); // batch_size, c, h, w const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); // h*w*batch_size, nstack, c_in auto output_a = output.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); // assume in_channels <= 1024 int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); // to support out_channels > in_channels int c_out_ratio = nstack / stack; // dim3 block(stride); // dim3 grid(batch_size, c_out_ratio, stack); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + threadIdx.y * stride * 2] = 0; else{ s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j]; } } } }; auto save_output = [batch_size, stride, output_a] __device__ (scalar_t* s_input) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { output_a[b][s][i] = s_input[i + threadIdx.y * stride * 2]; } } }; bbt_multiply_untied_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, save_output, log_stride, batch_size, nblocks) ; }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "bbt_conv2d_cuda failed with error code ", cudaGetLastError()); } void bbt_conv2d_forward_backward_cuda(const at::Tensor& twiddle, const at::Tensor& input, const at::Tensor&grad, at::Tensor& d_twiddle, at::Tensor& d_input, const int kernel_size, const int padding, const int 
h_out, const int w_out) { int batch_size = grad.size(0); // b_out = b_in * h_out * w_out const int nstack = twiddle.size(0); const int stack = kernel_size * kernel_size; const int n = d_input.size(1); // c_in const int log_n = int(log2((double) n)); int nblocks = twiddle.size(1) / (2 * log_n); const int c_out_ratio = nstack / stack; const int h_in = input.size(2); const int w_in = input.size(3); AT_DISPATCH_FLOATING_TYPES(grad.scalar_type(), "bbt_conv2d_forward_backward_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; const auto twiddle_a = twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); const auto input_a = input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); const auto grad_a = grad.packed_accessor32<scalar_t, 3, at::RestrictPtrTraits>(); auto d_twiddle_a = d_twiddle.packed_accessor32<scalar_t, 5, at::RestrictPtrTraits>(); auto d_input_a = d_input.packed_accessor32<scalar_t, 4, at::RestrictPtrTraits>(); int stride = std::min<int>(ELEMENTARY_SIZE, n / 2); int log_stride = int(log2((double) stride)); dim3 block(stride, div_up(MAX_BLOCK_SIZE, stride * 2)); dim3 grid(div_up(batch_size, block.y), c_out_ratio, stack); auto load_input = [batch_size, stride, input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_input) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i >= w_in or j >= h_in or i < 0 or j < 0) s_input[t + threadIdx.y * stride * 2] = 0; else{ s_input[t + threadIdx.y * stride * 2] = input_a[batch_idx][t][i][j]; } } } }; auto load_grad = [batch_size, stride, grad_a] __device__ (scalar_t* s_grad) { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int s = blockIdx.y + gridDim.y * stack; if (b < batch_size) { for (int i = threadIdx.x; i < stride * 2; i += blockDim.x) { s_grad[i + threadIdx.y * stride * 2] = grad_a[b][s][i]; } } }; auto save_d_input = [batch_size, stride, d_input_a, kernel_size, padding, h_out, w_out, h_in, w_in] __device__ (scalar_t* s_grad) mutable { const int b = blockIdx.x * blockDim.y + threadIdx.y; const int stack = blockIdx.z; const int patch_idx = b % (h_out * w_out); const int batch_idx = b / (h_out * w_out); if (b < batch_size) { for (int t = threadIdx.x; t < stride * 2; t += blockDim.x) { // map back to b, c, h, w // get index into patch int k_i = stack / kernel_size; int k_j = stack % kernel_size; // get patch index into full matrix int p_i = (patch_idx) / w_out; int p_j = (patch_idx) % (w_out); // combine indices and adjust for padding int i = k_i + p_i - padding; int j = k_j + p_j - padding; if (i < w_in && j < h_in && i >= 0 && j >= 0) { atomicAdd(&d_input_a[batch_idx][t][i][j], s_grad[t + threadIdx.y * stride * 2]); } } } }; switch (nblocks) { case 1: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 1> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 2: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 2> <<<grid, 
block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 3: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 3> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 4: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 4> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 5: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 5> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 6: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 6> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 7: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 7> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 8: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 8> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 9: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 9> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 10: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 10> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 11: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 11> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 12: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 12> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 13: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 13> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; case 14: bbt_multiply_untied_forward_backward_cuda_kernel<scalar_t, accscalar_t, 14> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(twiddle_a, load_input, load_grad, d_twiddle_a, save_d_input, log_stride, batch_size); break; } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "bbt_conv2d_forward_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, const at::PackedTensorAccessor64<scalar_t, 3> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 3> output_a) { const auto p = p_a[0]; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); // already divided by 2 for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < 
n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { output_a[b][j][i] = (1 - p) * input_a[b][j][i] + p * permuted_input_a[b][j][i]; } } } } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, const at::PackedTensorAccessor64<scalar_t, 4> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 4> output_a) { const auto p = p_a[0]; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { #pragma unroll for (int k = 0; k <= 1; ++k) { output_a[b][j][i][k] = (1 - p) * input_a[b][j][i][k] + p * permuted_input_a[b][j][i][k]; } } } } } void permutation_factor_even_odd_multiply_cuda(const at::Tensor& p, const at::Tensor& input, at::Tensor& output) { const auto batch_size = input.size(0); const auto n = input.size(1); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n / 2, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_even_odd_multiply", [&] { const auto p_a = p.packed_accessor64<scalar_t, 1>(); switch (input.dim()) { case 2: // real { const auto permuted_input = input.reshape({batch_size, n / 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2}); output = output.view({batch_size, 2, n / 2}); const auto input_a = input_folded.packed_accessor64<scalar_t, 3>(); const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 3>(); auto output_a = output.packed_accessor64<scalar_t, 3>(); permutation_factor_even_odd_multiply_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, permuted_input_a, output_a); output = output.view({batch_size, n}); break; } case 3: // complex { const auto permuted_input = input.reshape({batch_size, n / 2, 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2, 2}); output = output.view({batch_size, 2, n / 2, 2}); const auto input_a = input_folded.packed_accessor64<scalar_t, 4>(); const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 4>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); permutation_factor_even_odd_multiply_complex_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, permuted_input_a, output_a); output = output.view({batch_size, n, 2}); break; } default: AT_ERROR("permutation_factor_even_odd_multiply requires input dimension 2 or 3"); } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "permutation_factor_even_odd_multiply_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a, const at::PackedTensorAccessor64<scalar_t, 3> grad_reshaped_a, const at::PackedTensorAccessor64<scalar_t, 3> permuted_grad_a, const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, const at::PackedTensorAccessor64<scalar_t, 
3> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 2> d_p_expanded_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a) { const scalar_t p = p_a[0]; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { d_p_expanded_a[b][i] = (permuted_input_a[b][0][i] - input_a[b][0][i]) * grad_reshaped_a[b][0][i] + (permuted_input_a[b][1][i] - input_a[b][1][i]) * grad_reshaped_a[b][1][i]; d_input_a[b][i][0] = (1 - p) * grad_a[b][i][0] + p * permuted_grad_a[b][i][0]; d_input_a[b][i][1] = (1 - p) * grad_a[b][i][1] + p * permuted_grad_a[b][i][1]; } } } template <typename scalar_t> __global__ void permutation_factor_even_odd_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a, const at::PackedTensorAccessor64<scalar_t, 4> grad_reshaped_a, const at::PackedTensorAccessor64<scalar_t, 4> permuted_grad_a, const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, const at::PackedTensorAccessor64<scalar_t, 4> permuted_input_a, at::PackedTensorAccessor64<scalar_t, 2> d_p_expanded_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a) { const scalar_t p = p_a[0]; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { d_p_expanded_a[b][i] = (permuted_input_a[b][0][i][0] - input_a[b][0][i][0]) * grad_reshaped_a[b][0][i][0] + (permuted_input_a[b][0][i][1] - input_a[b][0][i][1]) * grad_reshaped_a[b][0][i][1] + (permuted_input_a[b][1][i][0] - input_a[b][1][i][0]) * grad_reshaped_a[b][1][i][0] + (permuted_input_a[b][1][i][1] - input_a[b][1][i][1]) * grad_reshaped_a[b][1][i][1]; d_input_a[b][i][0][0] = (1 - p) * grad_a[b][i][0][0] + p * permuted_grad_a[b][i][0][0]; d_input_a[b][i][0][1] = (1 - p) * grad_a[b][i][0][1] + p * permuted_grad_a[b][i][0][1]; d_input_a[b][i][1][0] = (1 - p) * grad_a[b][i][1][0] + p * permuted_grad_a[b][i][1][0]; d_input_a[b][i][1][1] = (1 - p) * grad_a[b][i][1][1] + p * permuted_grad_a[b][i][1][1]; } } } void permutation_factor_even_odd_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& p, const at::Tensor& input, at::Tensor& d_p_expanded, at::Tensor& d_input) { const auto batch_size = input.size(0); const auto n = input.size(1); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n / 2, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_even_odd_multiply_backward", [&] { const auto p_a = p.packed_accessor64<scalar_t, 1>(); auto d_p_expanded_a = d_p_expanded.packed_accessor64<scalar_t, 2>(); switch (input.dim()) { case 2: // real { const auto permuted_input = input.reshape({batch_size, n / 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2}); const auto grad_reshaped = grad.reshape({batch_size, 2, n / 2}); const auto permuted_grad = grad.reshape({batch_size, 2, n / 2}).transpose(1, 2); const auto grad_folded = grad.reshape({batch_size, n / 2, 2}); d_input = d_input.view({batch_size, n/ 2, 2}); // Accessors const auto input_a = input_folded.packed_accessor64<scalar_t, 3>(); const auto 
permuted_input_a = permuted_input.packed_accessor64<scalar_t, 3>(); const auto grad_reshaped_a = grad_reshaped.packed_accessor64<scalar_t, 3>(); const auto grad_a = grad_folded.packed_accessor64<scalar_t, 3>(); const auto permuted_grad_a = permuted_grad.packed_accessor64<scalar_t, 3>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); permutation_factor_even_odd_multiply_backward_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, grad_reshaped_a, permuted_grad_a, p_a, input_a, permuted_input_a, d_p_expanded_a, d_input_a); d_input = d_input.view({batch_size, n}); break; } case 3: // complex { const auto permuted_input = input.reshape({batch_size, n / 2, 2, 2}).transpose(1, 2); const auto input_folded = input.reshape({batch_size, 2, n / 2, 2}); const auto grad_reshaped = grad.reshape({batch_size, 2, n / 2, 2}); const auto permuted_grad = grad.reshape({batch_size, 2, n / 2, 2}).transpose(1, 2); const auto grad_folded = grad.reshape({batch_size, n / 2, 2, 2}); d_input = d_input.view({batch_size, n/ 2, 2, 2}); // Accessors const auto input_a = input_folded.packed_accessor64<scalar_t, 4>(); const auto permuted_input_a = permuted_input.packed_accessor64<scalar_t, 4>(); const auto grad_reshaped_a = grad_reshaped.packed_accessor64<scalar_t, 4>(); const auto grad_a = grad_folded.packed_accessor64<scalar_t, 4>(); const auto permuted_grad_a = permuted_grad.packed_accessor64<scalar_t, 4>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); permutation_factor_even_odd_multiply_complex_backward_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, grad_reshaped_a, permuted_grad_a, p_a, input_a, permuted_input_a, d_p_expanded_a, d_input_a); d_input = d_input.view({batch_size, n, 2}); break; } default: AT_ERROR("permutation_factor_even_odd_multiply_backward requires input dimension 2 or 3"); } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "permutation_factor_even_odd_multiply_backward_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t> __global__ void permutation_factor_reverse_multiply_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, at::PackedTensorAccessor64<scalar_t, 3> output_a) { const scalar_t p[2] = {p_a[0], p_a[1]}; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); // already divided by 2 for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { const scalar_t in[2] = {input_a[b][j][i], input_a[b][j][n - 1 - i]}; output_a[b][j][i] = (1 - p[j]) * in[0] + p[j] * in[1]; output_a[b][j][n - 1 - i] = p[j] * in[0] + (1 - p[j]) * in[1]; } } } } template <typename scalar_t> __global__ void permutation_factor_reverse_multiply_complex_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 4> output_a) { const scalar_t p[2] = {p_a[0], p_a[1]}; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { #pragma unroll for (int k = 0; k <= 1; ++k) { const scalar_t 
in[2] = {input_a[b][j][i][k], input_a[b][j][n - 1 - i][k]}; output_a[b][j][i][k] = (1 - p[j]) * in[0] + p[j] * in[1]; output_a[b][j][n - 1 - i][k] = p[j] * in[0] + (1 - p[j]) * in[1]; } } } } } void permutation_factor_reverse_multiply_cuda(const at::Tensor& p, const at::Tensor& input, at::Tensor& output) { const auto batch_size = input.size(0); const auto n = input.size(1); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n / 4, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_reverse_multiply", [&] { const auto p_a = p.packed_accessor64<scalar_t, 1>(); switch (input.dim()) { case 2: // real { const auto input_folded = input.reshape({batch_size, 2, n / 2}); output = output.view({batch_size, 2, n / 2}); const auto input_a = input_folded.packed_accessor64<scalar_t, 3>(); auto output_a = output.packed_accessor64<scalar_t, 3>(); permutation_factor_reverse_multiply_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, output_a); output = output.view({batch_size, n}); break; } case 3: // complex { const auto input_folded = input.reshape({batch_size, 2, n / 2, 2}); output = output.view({batch_size, 2, n / 2, 2}); const auto input_a = input_folded.packed_accessor64<scalar_t, 4>(); auto output_a = output.packed_accessor64<scalar_t, 4>(); permutation_factor_reverse_multiply_complex_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(p_a, input_a, output_a); output = output.view({batch_size, n, 2}); break; } default: AT_ERROR("permutation_factor_reverse_multiply requires input dimension 2 or 3"); } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "permutation_factor_reverse_multiply_cuda failed with error code ", cudaGetLastError()); } template <typename scalar_t> __global__ void permutation_factor_reverse_multiply_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 3> grad_a, const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 3> input_a, at::PackedTensorAccessor64<scalar_t, 3> d_p_expanded_a, at::PackedTensorAccessor64<scalar_t, 3> d_input_a) { const scalar_t p[2] = {p_a[0], p_a[1]}; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { const scalar_t in[2] = {input_a[b][j][i], input_a[b][j][n - 1 - i]}; const scalar_t g[2] = {grad_a[b][j][i], grad_a[b][j][n - 1 - i]}; d_p_expanded_a[j][b][i] = (in[1] - in[0]) * (g[0] - g[1]); d_input_a[b][j][i] = (1 - p[j]) * g[0] + p[j] * g[1]; d_input_a[b][j][n - 1 - i] = p[j] * g[0] + (1 - p[j]) * g[1]; } } } } template <typename scalar_t> __global__ void permutation_factor_reverse_multiply_complex_backward_cuda_kernel(const at::PackedTensorAccessor64<scalar_t, 4> grad_a, const at::PackedTensorAccessor64<scalar_t, 1> p_a, const at::PackedTensorAccessor64<scalar_t, 4> input_a, at::PackedTensorAccessor64<scalar_t, 3> d_p_expanded_a, at::PackedTensorAccessor64<scalar_t, 4> d_input_a) { const scalar_t p[2] = {p_a[0], p_a[1]}; const auto batch_size = input_a.size(0); const auto n = input_a.size(2); for (int64_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n / 2; i += blockDim.x * gridDim.x) { for (int64_t b = blockIdx.y * blockDim.y + 
threadIdx.y; b < batch_size; b += blockDim.y * gridDim.y) { #pragma unroll for (int j = 0; j <= 1; ++j) { scalar_t d_p_expanded_temp = 0; #pragma unroll for (int k = 0; k <= 1; ++k) { const scalar_t in[2] = {input_a[b][j][i][k], input_a[b][j][n - 1 - i][k]}; const scalar_t g[2] = {grad_a[b][j][i][k], grad_a[b][j][n - 1 - i][k]}; d_p_expanded_temp += (in[1] - in[0]) * (g[0] - g[1]); d_input_a[b][j][i][k] = (1 - p[j]) * g[0] + p[j] * g[1]; d_input_a[b][j][n - 1 - i][k] = p[j] * g[0] + (1 - p[j]) * g[1]; } d_p_expanded_a[j][b][i] = d_p_expanded_temp; } } } } void permutation_factor_reverse_multiply_backward_cuda(const at::Tensor& grad, const at::Tensor& p, const at::Tensor& input, at::Tensor& d_p_expanded, at::Tensor& d_input) { const auto batch_size = input.size(0); const auto n = input.size(1); dim3 block; block.x = std::min<int64_t>(MAX_BLOCK_SIZE, n / 2); block.y = div_up(MAX_BLOCK_SIZE, block.x); dim3 grid(div_up(n / 4, block.x), div_up(batch_size, block.y * WORK_PER_THREAD)); AT_DISPATCH_FLOATING_TYPES(input.scalar_type(), "permutation_factor_reverse_multiply_backward", [&] { const auto p_a = p.packed_accessor64<scalar_t, 1>(); auto d_p_expanded_a = d_p_expanded.packed_accessor64<scalar_t, 3>(); switch (input.dim()) { case 2: // real { const auto input_folded = input.reshape({batch_size, 2, n / 2}); const auto grad_folded = grad.reshape({batch_size, 2, n / 2}); d_input = d_input.view({batch_size, 2, n/ 2}); // Accessors const auto input_a = input_folded.packed_accessor64<scalar_t, 3>(); const auto grad_a = grad_folded.packed_accessor64<scalar_t, 3>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 3>(); permutation_factor_reverse_multiply_backward_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, p_a, input_a, d_p_expanded_a, d_input_a); d_input = d_input.view({batch_size, n}); break; } case 3: // complex { const auto input_folded = input.reshape({batch_size, 2, n / 2, 2}); const auto grad_folded = grad.reshape({batch_size, 2, n / 2, 2}); d_input = d_input.view({batch_size, 2, n/ 2, 2}); // Accessors const auto input_a = input_folded.packed_accessor64<scalar_t, 4>(); const auto grad_a = grad_folded.packed_accessor64<scalar_t, 4>(); auto d_input_a = d_input.packed_accessor64<scalar_t, 4>(); permutation_factor_reverse_multiply_complex_backward_cuda_kernel<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>(grad_a, p_a, input_a, d_p_expanded_a, d_input_a); d_input = d_input.view({batch_size, n, 2}); break; } default: AT_ERROR("permutation_factor_reverse_multiply_backward requires input dimension 2 or 3"); } }); TORCH_CHECK(cudaGetLastError() == cudaSuccess, "permutation_factor_reverse_multiply_backward_cuda failed with error code ", cudaGetLastError()); }
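// --- Added note (not part of the original file) ---
// Derivation behind the reverse-permutation backward kernels above. The forward pass mixes
//   out[i]       = (1 - p) * in[i]     + p * in[n-1-i]
//   out[n-1-i]   = p * in[i]           + (1 - p) * in[n-1-i],
// so d(out[i])/dp = in[n-1-i] - in[i] and d(out[n-1-i])/dp = in[i] - in[n-1-i].
// Chaining with the incoming gradients g gives
//   dL/dp = (in[n-1-i] - in[i]) * (g[i] - g[n-1-i]),
// which is exactly the (in[1] - in[0]) * (g[0] - g[1]) term accumulated into
// d_p_expanded, while d_input applies the (symmetric) 2x2 mixing matrix to g.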
4dd0863fe873dabd2cc70e2d04782370c5952e62.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <vector>

void printArray(const float* x, int n) {
    printf("(");
    for (int i = 0; i < n; i++) {
        printf("%f, ", x[i]);
    }
    printf(")\n");
}

__global__ void f_h(const int n, const float h, const float *x, float *y, bool *run) {
    *run = true;
    // int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float coef = 1 / (n * h) * .3989422804;
    for (int j = 0; j < n; j++) {
        float sum = 0;
        float x_val = x[j];
        for (int i = 0; i < n; i++) {
            float val = (x_val-x[i]) / h;
            float k = exp(-(val * val) / 2);
            sum = sum + k;
        }
        y[j] = coef * sum;
    }
}

__host__ void gpuCall(int n, float h, const float *x_v, float *y_v) {
    printf("START GPU CALL\n");
    float *x, *y;
    bool *run;
    hipMallocManaged(&x, n*sizeof(float));
    hipMallocManaged(&y, n*sizeof(float));
    hipMallocManaged(&run, sizeof(bool));
    *run = false;
    for (int i = 0; i < n; i++) {
        x[i] = x_v[i];
    }
    //==============================================================
    printf("X before\n");
    printArray(x, n);
    printf("\n");
    printf("Y before\n");
    printArray(y, n);
    //==============================================================
    hipLaunchKernelGGL(( f_h), dim3(1), dim3(1), 0, 0, n, h, x, y, run);
    hipDeviceSynchronize();
    printf("Did it run? %d\n", *run);
    //==============================================================
    printf("\n");
    printf("Y\n");
    printArray(y, n);
    //==============================================================
    hipFree((float*)x);
    hipFree(y);
    hipFree(run);
}
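// --- Added note and hypothetical usage sketch (not part of the original file) ---
// f_h computes a Gaussian kernel density estimate: for each sample x[j] it sums
// exp(-((x[j]-x[i])/h)^2 / 2) over all i and scales by 1/(n*h*sqrt(2*pi))
// (0.3989422804 ~= 1/sqrt(2*pi)). Note that gpuCall prints y but never copies the
// results back into the y_v argument. The guard macro KDE_EXAMPLE_MAIN below is an
// arbitrary name chosen here, not something the original project defines.
#ifdef KDE_EXAMPLE_MAIN
int main() {
    const int n = 8;
    const float h = 0.5f;
    float x[8] = {0.f, 0.5f, 1.f, 1.5f, 2.f, 2.5f, 3.f, 3.5f};
    float y[8] = {0.f};
    gpuCall(n, h, x, y);  // prints the density estimates; y is not written back
    return 0;
}
#endif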
4dd0863fe873dabd2cc70e2d04782370c5952e62.cu
#include <cuda.h>
#include <stdio.h>
#include <vector>

void printArray(const float* x, int n) {
    printf("(");
    for (int i = 0; i < n; i++) {
        printf("%f, ", x[i]);
    }
    printf(")\n");
}

__global__ void f_h(const int n, const float h, const float *x, float *y, bool *run) {
    *run = true;
    // int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float coef = 1 / (n * h) * .3989422804;
    for (int j = 0; j < n; j++) {
        float sum = 0;
        float x_val = x[j];
        for (int i = 0; i < n; i++) {
            float val = (x_val-x[i]) / h;
            float k = exp(-(val * val) / 2);
            sum = sum + k;
        }
        y[j] = coef * sum;
    }
}

__host__ void gpuCall(int n, float h, const float *x_v, float *y_v) {
    printf("START GPU CALL\n");
    float *x, *y;
    bool *run;
    cudaMallocManaged(&x, n*sizeof(float));
    cudaMallocManaged(&y, n*sizeof(float));
    cudaMallocManaged(&run, sizeof(bool));
    *run = false;
    for (int i = 0; i < n; i++) {
        x[i] = x_v[i];
    }
    //==============================================================
    printf("X before\n");
    printArray(x, n);
    printf("\n");
    printf("Y before\n");
    printArray(y, n);
    //==============================================================
    f_h<<<1, 1>>>(n, h, x, y, run);
    cudaDeviceSynchronize();
    printf("Did it run? %d\n", *run);
    //==============================================================
    printf("\n");
    printf("Y\n");
    printArray(y, n);
    //==============================================================
    cudaFree((float*)x);
    cudaFree(y);
    cudaFree(run);
}
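// --- Added note (not part of the original file) ---
// This is the CUDA original paired with the .hip translation above: hipify maps
// cudaMallocManaged/cudaDeviceSynchronize/cudaFree to their hip* counterparts,
// rewrites the f_h<<<1, 1>>>(...) launch as hipLaunchKernelGGL, and swaps
// #include <cuda.h> for #include <hip/hip_runtime.h>.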
46a5d3e2d8eedc4e4464d1c3c83f87709c991b1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** @file imagenet_vgg.cu * @brief AlexNet network for ImageNet. * @author Ang Li (PNNL) * */ #include <stdio.h> #include <assert.h> #include <sys/time.h> #include <iostream> #include <string> #include <hip/hip_cooperative_groups.h> #include <iostream> #include <fstream> #include <vector> #include "utility.h" #include "sbnn32_param.h" #include "sbnn64_param.h" #include "sbnn32.cuh" #include "sbnn64.cuh" #include "data.h" using namespace cooperative_groups; using namespace std; int main32(); int main64(); int main() { main32(); //main64(); } __global__ void alexnet32( In32Conv32LayerParam* bconv1, Conv32LayerParam* bconv2, Conv32LayerParam* bconv3, Conv32LayerParam* bconv4, Conv32LayerParam* bconv5, Fc32LayerParam* bfc1, Fc32LayerParam* bfc2, Out32LayerParam* bout) { grid_group grid = this_grid(); //========= Conv1 ============ In32ConvPool32Layer(bconv1); grid.sync(); //========= Conv2 ============ ConvPool32Layer(bconv2); grid.sync(); //========= Conv3 ============ Conv32Layer(bconv3); grid.sync(); //========= Conv4 ============ Conv32Layer(bconv4); grid.sync(); //========= Conv5 ============ ConvPool32Layer(bconv5); grid.sync(); //========= Fc1 ============ Fc32Layer(bfc1); //Fc32LayerBatched(bfc1); grid.sync(); //========= Fc2 ============ Fc32Layer(bfc2); //Fc32LayerBatched(bfc2); grid.sync(); ////========== Output =========== Out32Layer(bout); //Out32LayerBatched(bout); } __global__ void alexnet64( In32Conv64LayerParam* bconv1, Conv64LayerParam* bconv2, Conv64LayerParam* bconv3, Conv64LayerParam* bconv4, Conv64LayerParam* bconv5, Fc64LayerParam* bfc1, Fc64LayerParam* bfc2, Out64LayerParam* bout) { grid_group grid = this_grid(); SET_KERNEL_TIMER; //========= Conv1 ============ In32ConvPool64Layer(bconv1); grid.sync(); TICK_KERNEL_TIMER(bconv1); //========= Conv2 ============ ConvPool64Layer(bconv2); grid.sync(); TICK_KERNEL_TIMER(bconv2); //========= Conv3 ============ Conv64Layer(bconv3); grid.sync(); TICK_KERNEL_TIMER(bconv3); //========= Conv4 ============ Conv64Layer(bconv4); grid.sync(); TICK_KERNEL_TIMER(bconv4); //========= Conv5 ============ ConvPool64Layer(bconv5); grid.sync(); TICK_KERNEL_TIMER(bconv5); //========= Fc1 ============ //Fc64Layer(bfc1); Fc64LayerBatched(bfc1); grid.sync(); TICK_KERNEL_TIMER(bfc1); //========= Fc2 ============ //Fc64Layer(bfc2); Fc64LayerBatched(bfc2); grid.sync(); TICK_KERNEL_TIMER(bfc2); ////========== Output =========== //Out64Layer(bout); Out64LayerBatched(bout); TICK_KERNEL_TIMER(bout); } int main32() { int dev = 6; hipSetDevice(dev); const unsigned batch = 32; const unsigned output_size = 1000; const unsigned image_height = 224; const unsigned image_width = 224; const unsigned image_channel = 3; const unsigned n_hidden = 4096; //=============== Get Input and Label ================= float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float)); unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned)); read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch); //================ Get Weight ================= FILE* config_file = fopen("./pytorch_training/alexnet_imagenet.csv","r"); //================ Set Network ================= //Bconv1 Layer In32Conv32LayerParam* bconv1 = new In32Conv32LayerParam("Conv1", image_height, image_width, 11, 11, 3, 64, batch, 4, 4, true, 2, 2, false); In32Conv32LayerParam* bconv1_gpu = bconv1->initialize(images, config_file); //Bconv2 Layer Conv32LayerParam* bconv2 = 
new Conv32LayerParam("Conv2", bconv1->output_height, bconv1->output_width, 5, 5, 64, 192, batch, 1, 1, true, 2, 2, false); Conv32LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu()); //Bconv3 Layer Conv32LayerParam* bconv3 = new Conv32LayerParam("Conv3", bconv2->output_height, bconv2->output_width, 3, 3, 192, 384, batch); Conv32LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu()); //Bconv4 Layer Conv32LayerParam* bconv4 = new Conv32LayerParam("Conv4", bconv3->output_height, bconv3->output_width, 3, 3, 384, 256, batch); Conv32LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu()); //Bconv5 Layer Conv32LayerParam* bconv5 = new Conv32LayerParam("Conv5", bconv4->output_height, bconv4->output_width, 3, 3, 256, 256, batch, 1, 1, true, 2, 2, true); Conv32LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu()); //Fc1 Layer Fc32LayerParam* bfc1 = new Fc32LayerParam("Fc1", batch, (bconv5->output_height) *(bconv5->output_width)*256, n_hidden); Fc32LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv5->get_output_gpu()); //Fc2 Layer Fc32LayerParam* bfc2 = new Fc32LayerParam("Fc2", batch, n_hidden, n_hidden); Fc32LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu()); //Out Layer Out32LayerParam* bout = new Out32LayerParam("Fout", batch, n_hidden, output_size, true); Out32LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu()); //================ Setup Kernel ================= int numThreads = 1024; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); int numBlocksPerSm; int shared_memory = 512*sizeof(int)*32; hipFuncSetAttribute(alexnet32, hipFuncAttributeMaxDynamicSharedMemorySize,98304); hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, alexnet32, numThreads, shared_memory); //hipFuncSetAttribute(alexnet32, hipFuncAttributePreferredSharedMemoryCarveout,0); void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bfc1_gpu, &bfc2_gpu, &bout_gpu}; START_TIMER; hipLaunchCooperativeKernel((void*)alexnet32, numBlocksPerSm*deviceProp.multiProcessorCount, numThreads, args, shared_memory); STOP_TIMER; //================ Output ================= float* output = bout->download_output(); validate_prediction(output, image_labels, output_size, batch); delete bconv1; delete bconv2; delete bconv3; delete bconv4; delete bconv5; delete bfc1; delete bfc2; delete bout; return 0; } int main64() { int dev = 6; hipSetDevice(dev); const unsigned batch = 32; const unsigned output_size = 1000; const unsigned image_height = 224; const unsigned image_width = 224; const unsigned image_channel = 3; const unsigned n_hidden = 4096; //=============== Get Input and Label ================= float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float)); unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned)); read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch); //================ Get Weight ================= FILE* config_file = fopen("./pytorch_training/alexnet_imagenet.csv","r"); //FILE* config_file = fopen("../pytorch/BinaryNet/alexnet_imagenet.csv","r"); //================ Set Network ================= //Bconv1 Layer In32Conv64LayerParam* bconv1 = new In32Conv64LayerParam("Conv1", image_height, image_width, 11, 11, 3, 64, batch, 4, 4, true, 2, 2, false); In32Conv64LayerParam* bconv1_gpu = bconv1->initialize(images, config_file); //Bconv2 Layer 
Conv64LayerParam* bconv2 = new Conv64LayerParam("Conv2", bconv1->output_height, bconv1->output_width, 5, 5, 64, 192, batch, 1, 1, true, 2, 2, false); Conv64LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu()); //Bconv3 Layer Conv64LayerParam* bconv3 = new Conv64LayerParam("Conv3", bconv2->output_height, bconv2->output_width, 3, 3, 192, 384, batch); Conv64LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu()); //Bconv4 Layer Conv64LayerParam* bconv4 = new Conv64LayerParam("Conv4", bconv3->output_height, bconv3->output_width, 3, 3, 384, 256, batch); Conv64LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu()); //Bconv5 Layer Conv64LayerParam* bconv5 = new Conv64LayerParam("Conv5", bconv4->output_height, bconv4->output_width, 3, 3, 256, 256, batch, 1, 1, true, 2, 2, true); Conv64LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu()); //Fc1 Layer Fc64LayerParam* bfc1 = new Fc64LayerParam("Fc1", batch, (bconv5->output_height) *(bconv5->output_width)*256, n_hidden); Fc64LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv5->get_output_gpu()); //Fc2 Layer Fc64LayerParam* bfc2 = new Fc64LayerParam("Fc2", batch, n_hidden, n_hidden); Fc64LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu()); //Out Layer Out64LayerParam* bout = new Out64LayerParam("Fout", batch, n_hidden, output_size, true); Out64LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu()); //================ Setup Kernel ================= int numThreads = 1024; hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); int numBlocksPerSm; int shared_memory = 512*sizeof(int)*32; hipFuncSetAttribute(alexnet64, hipFuncAttributeMaxDynamicSharedMemorySize,98304); hipOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, alexnet64, numThreads, shared_memory); //hipFuncSetAttribute(alexnet64, hipFuncAttributePreferredSharedMemoryCarveout,0); void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bfc1_gpu, &bfc2_gpu, &bout_gpu}; START_TIMER; hipLaunchCooperativeKernel((void*)alexnet64, numBlocksPerSm*deviceProp.multiProcessorCount, numThreads, args, shared_memory); STOP_TIMER; //float* ss = bfc2->download_full_output(); //int a = 82420; //int b = a+100; //int max_width = 4; //for (int i=a; i<b; i++) //{ //printf("%*.0f ",max_width, ss[i]); //if ( (i-a+1)%18 == 0) //printf("\n"); //} //printf("\n"); // //float* ss = bfc1->download_full_output(); //int a = 20980; //int b = 21080; //int max_width = 4; //for (int i=a; i<b; i++) //{ //printf("%*.0f ",max_width, ss[i]); //if ( (i-a+1)%18 == 0) //printf("\n"); //} //printf("\n"); //================ Output ================= float* output = bout->download_output(); validate_prediction(output, image_labels, output_size, batch); delete bconv1; delete bconv2; delete bconv3; delete bconv4; delete bconv5; delete bfc1; delete bfc2; delete bout; return 0; }
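// --- Added note (not part of the original file) ---
// shared_memory = 512 * sizeof(int) * 32 = 64 KB of dynamic shared memory per block,
// which is more than the 48 KB a CUDA kernel may use without opting in, so the code
// first raises the limit via hipFuncSetAttribute(..., hipFuncAttributeMaxDynamicSharedMemorySize,
// 98304) (96 KB) before querying occupancy and launching the cooperative kernel.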
46a5d3e2d8eedc4e4464d1c3c83f87709c991b1c.cu
/** @file imagenet_vgg.cu * @brief AlexNet network for ImageNet. * @author Ang Li (PNNL) * */ #include <stdio.h> #include <assert.h> #include <sys/time.h> #include <iostream> #include <string> #include <cooperative_groups.h> #include <iostream> #include <fstream> #include <vector> #include "utility.h" #include "sbnn32_param.h" #include "sbnn64_param.h" #include "sbnn32.cuh" #include "sbnn64.cuh" #include "data.h" using namespace cooperative_groups; using namespace std; int main32(); int main64(); int main() { main32(); //main64(); } __global__ void alexnet32( In32Conv32LayerParam* bconv1, Conv32LayerParam* bconv2, Conv32LayerParam* bconv3, Conv32LayerParam* bconv4, Conv32LayerParam* bconv5, Fc32LayerParam* bfc1, Fc32LayerParam* bfc2, Out32LayerParam* bout) { grid_group grid = this_grid(); //========= Conv1 ============ In32ConvPool32Layer(bconv1); grid.sync(); //========= Conv2 ============ ConvPool32Layer(bconv2); grid.sync(); //========= Conv3 ============ Conv32Layer(bconv3); grid.sync(); //========= Conv4 ============ Conv32Layer(bconv4); grid.sync(); //========= Conv5 ============ ConvPool32Layer(bconv5); grid.sync(); //========= Fc1 ============ Fc32Layer(bfc1); //Fc32LayerBatched(bfc1); grid.sync(); //========= Fc2 ============ Fc32Layer(bfc2); //Fc32LayerBatched(bfc2); grid.sync(); ////========== Output =========== Out32Layer(bout); //Out32LayerBatched(bout); } __global__ void alexnet64( In32Conv64LayerParam* bconv1, Conv64LayerParam* bconv2, Conv64LayerParam* bconv3, Conv64LayerParam* bconv4, Conv64LayerParam* bconv5, Fc64LayerParam* bfc1, Fc64LayerParam* bfc2, Out64LayerParam* bout) { grid_group grid = this_grid(); SET_KERNEL_TIMER; //========= Conv1 ============ In32ConvPool64Layer(bconv1); grid.sync(); TICK_KERNEL_TIMER(bconv1); //========= Conv2 ============ ConvPool64Layer(bconv2); grid.sync(); TICK_KERNEL_TIMER(bconv2); //========= Conv3 ============ Conv64Layer(bconv3); grid.sync(); TICK_KERNEL_TIMER(bconv3); //========= Conv4 ============ Conv64Layer(bconv4); grid.sync(); TICK_KERNEL_TIMER(bconv4); //========= Conv5 ============ ConvPool64Layer(bconv5); grid.sync(); TICK_KERNEL_TIMER(bconv5); //========= Fc1 ============ //Fc64Layer(bfc1); Fc64LayerBatched(bfc1); grid.sync(); TICK_KERNEL_TIMER(bfc1); //========= Fc2 ============ //Fc64Layer(bfc2); Fc64LayerBatched(bfc2); grid.sync(); TICK_KERNEL_TIMER(bfc2); ////========== Output =========== //Out64Layer(bout); Out64LayerBatched(bout); TICK_KERNEL_TIMER(bout); } int main32() { int dev = 6; cudaSetDevice(dev); const unsigned batch = 32; const unsigned output_size = 1000; const unsigned image_height = 224; const unsigned image_width = 224; const unsigned image_channel = 3; const unsigned n_hidden = 4096; //=============== Get Input and Label ================= float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float)); unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned)); read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch); //================ Get Weight ================= FILE* config_file = fopen("./pytorch_training/alexnet_imagenet.csv","r"); //================ Set Network ================= //Bconv1 Layer In32Conv32LayerParam* bconv1 = new In32Conv32LayerParam("Conv1", image_height, image_width, 11, 11, 3, 64, batch, 4, 4, true, 2, 2, false); In32Conv32LayerParam* bconv1_gpu = bconv1->initialize(images, config_file); //Bconv2 Layer Conv32LayerParam* bconv2 = new Conv32LayerParam("Conv2", bconv1->output_height, bconv1->output_width, 5, 5, 64, 192, batch, 
1, 1, true, 2, 2, false); Conv32LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu()); //Bconv3 Layer Conv32LayerParam* bconv3 = new Conv32LayerParam("Conv3", bconv2->output_height, bconv2->output_width, 3, 3, 192, 384, batch); Conv32LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu()); //Bconv4 Layer Conv32LayerParam* bconv4 = new Conv32LayerParam("Conv4", bconv3->output_height, bconv3->output_width, 3, 3, 384, 256, batch); Conv32LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu()); //Bconv5 Layer Conv32LayerParam* bconv5 = new Conv32LayerParam("Conv5", bconv4->output_height, bconv4->output_width, 3, 3, 256, 256, batch, 1, 1, true, 2, 2, true); Conv32LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu()); //Fc1 Layer Fc32LayerParam* bfc1 = new Fc32LayerParam("Fc1", batch, (bconv5->output_height) *(bconv5->output_width)*256, n_hidden); Fc32LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv5->get_output_gpu()); //Fc2 Layer Fc32LayerParam* bfc2 = new Fc32LayerParam("Fc2", batch, n_hidden, n_hidden); Fc32LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu()); //Out Layer Out32LayerParam* bout = new Out32LayerParam("Fout", batch, n_hidden, output_size, true); Out32LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu()); //================ Setup Kernel ================= int numThreads = 1024; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); int numBlocksPerSm; int shared_memory = 512*sizeof(int)*32; cudaFuncSetAttribute(alexnet32, cudaFuncAttributeMaxDynamicSharedMemorySize,98304); cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, alexnet32, numThreads, shared_memory); //cudaFuncSetAttribute(alexnet32, cudaFuncAttributePreferredSharedMemoryCarveout,0); void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bfc1_gpu, &bfc2_gpu, &bout_gpu}; START_TIMER; cudaLaunchCooperativeKernel((void*)alexnet32, numBlocksPerSm*deviceProp.multiProcessorCount, numThreads, args, shared_memory); STOP_TIMER; //================ Output ================= float* output = bout->download_output(); validate_prediction(output, image_labels, output_size, batch); delete bconv1; delete bconv2; delete bconv3; delete bconv4; delete bconv5; delete bfc1; delete bfc2; delete bout; return 0; } int main64() { int dev = 6; cudaSetDevice(dev); const unsigned batch = 32; const unsigned output_size = 1000; const unsigned image_height = 224; const unsigned image_width = 224; const unsigned image_channel = 3; const unsigned n_hidden = 4096; //=============== Get Input and Label ================= float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float)); unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned)); read_ImageNet_normalized("./imagenet_files.txt", images, image_labels, batch); //================ Get Weight ================= FILE* config_file = fopen("./pytorch_training/alexnet_imagenet.csv","r"); //FILE* config_file = fopen("../pytorch/BinaryNet/alexnet_imagenet.csv","r"); //================ Set Network ================= //Bconv1 Layer In32Conv64LayerParam* bconv1 = new In32Conv64LayerParam("Conv1", image_height, image_width, 11, 11, 3, 64, batch, 4, 4, true, 2, 2, false); In32Conv64LayerParam* bconv1_gpu = bconv1->initialize(images, config_file); //Bconv2 Layer Conv64LayerParam* bconv2 = new Conv64LayerParam("Conv2", bconv1->output_height, 
bconv1->output_width, 5, 5, 64, 192, batch, 1, 1, true, 2, 2, false); Conv64LayerParam* bconv2_gpu = bconv2->initialize(config_file, bconv1->get_output_gpu()); //Bconv3 Layer Conv64LayerParam* bconv3 = new Conv64LayerParam("Conv3", bconv2->output_height, bconv2->output_width, 3, 3, 192, 384, batch); Conv64LayerParam* bconv3_gpu = bconv3->initialize(config_file, bconv2->get_output_gpu()); //Bconv4 Layer Conv64LayerParam* bconv4 = new Conv64LayerParam("Conv4", bconv3->output_height, bconv3->output_width, 3, 3, 384, 256, batch); Conv64LayerParam* bconv4_gpu = bconv4->initialize(config_file, bconv3->get_output_gpu()); //Bconv5 Layer Conv64LayerParam* bconv5 = new Conv64LayerParam("Conv5", bconv4->output_height, bconv4->output_width, 3, 3, 256, 256, batch, 1, 1, true, 2, 2, true); Conv64LayerParam* bconv5_gpu = bconv5->initialize(config_file, bconv4->get_output_gpu()); //Fc1 Layer Fc64LayerParam* bfc1 = new Fc64LayerParam("Fc1", batch, (bconv5->output_height) *(bconv5->output_width)*256, n_hidden); Fc64LayerParam* bfc1_gpu = bfc1->initialize(config_file, bconv5->get_output_gpu()); //Fc2 Layer Fc64LayerParam* bfc2 = new Fc64LayerParam("Fc2", batch, n_hidden, n_hidden); Fc64LayerParam* bfc2_gpu = bfc2->initialize(config_file, bfc1->get_output_gpu()); //Out Layer Out64LayerParam* bout = new Out64LayerParam("Fout", batch, n_hidden, output_size, true); Out64LayerParam* bout_gpu = bout->initialize(config_file, bfc2->get_output_gpu()); //================ Setup Kernel ================= int numThreads = 1024; cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); int numBlocksPerSm; int shared_memory = 512*sizeof(int)*32; cudaFuncSetAttribute(alexnet64, cudaFuncAttributeMaxDynamicSharedMemorySize,98304); cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocksPerSm, alexnet64, numThreads, shared_memory); //cudaFuncSetAttribute(alexnet64, cudaFuncAttributePreferredSharedMemoryCarveout,0); void* args[] = {&bconv1_gpu, &bconv2_gpu, &bconv3_gpu, &bconv4_gpu, &bconv5_gpu, &bfc1_gpu, &bfc2_gpu, &bout_gpu}; START_TIMER; cudaLaunchCooperativeKernel((void*)alexnet64, numBlocksPerSm*deviceProp.multiProcessorCount, numThreads, args, shared_memory); STOP_TIMER; //float* ss = bfc2->download_full_output(); //int a = 82420; //int b = a+100; //int max_width = 4; //for (int i=a; i<b; i++) //{ //printf("%*.0f ",max_width, ss[i]); //if ( (i-a+1)%18 == 0) //printf("\n"); //} //printf("\n"); // //float* ss = bfc1->download_full_output(); //int a = 20980; //int b = 21080; //int max_width = 4; //for (int i=a; i<b; i++) //{ //printf("%*.0f ",max_width, ss[i]); //if ( (i-a+1)%18 == 0) //printf("\n"); //} //printf("\n"); //================ Output ================= float* output = bout->download_output(); validate_prediction(output, image_labels, output_size, batch); delete bconv1; delete bconv2; delete bconv3; delete bconv4; delete bconv5; delete bfc1; delete bfc2; delete bout; return 0; }
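Note on the cooperative launch used by main32()/main64() above: cudaLaunchCooperativeKernel only succeeds when the device supports cooperative launches and the whole grid can be resident at once, which is why the grid size is derived from the occupancy query. A minimal sketch of that precondition check; the helper name launch_cooperative and its error handling are illustrative, not part of the file above.

#include <cstdio>
#include <cuda_runtime.h>

// Launches `func` as a cooperative kernel only if the device supports it and
// the requested grid fits entirely on the GPU (a requirement of grid.sync()).
static cudaError_t launch_cooperative(const void *func, int dev, int threads,
                                      size_t shared_bytes, void **args)
{
    int coop = 0;
    cudaDeviceGetAttribute(&coop, cudaDevAttrCooperativeLaunch, dev);
    if (!coop) {
        fprintf(stderr, "device %d does not support cooperative launches\n", dev);
        return cudaErrorNotSupported;
    }

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, dev);

    int blocks_per_sm = 0;
    cudaOccupancyMaxActiveBlocksPerMultiprocessor(&blocks_per_sm, func,
                                                  threads, shared_bytes);

    // Every block must be resident simultaneously, so the grid size is capped
    // at occupancy per SM times the number of SMs.
    int grid = blocks_per_sm * prop.multiProcessorCount;
    return cudaLaunchCooperativeKernel(func, grid, threads, args, shared_bytes, 0);
}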
f76900c9a03480600cf970e02f52ba42465baeba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // This example introduces CUDA's abstraction of data parallel computational // "kernels", or __global__ functions. A __global__ function acts like the // main() function of a GPU program, and is allowed to manipulate device // memory directly. #include <stdlib.h> #include <stdio.h> // "kernels" or __global__ functions are the entry points to code that executes on the GPU // The keyword __global__ indicates to the compiler that this function is a GPU entry point. // __global__ functions must return void, and may only be called or "launched" from code that // executes on the CPU. __global__ void kernel(int *array) { // compute the index of this particular thread // in the grid: // multiply the index of this thread's block (blockIdx.x) // by the number of threads per block (blockDim.x) // and add the index of this thread inside its block (threadIdx.x) int index = blockIdx.x * blockDim.x + threadIdx.x; // write out 7 to a single element of the array using standard // array indexing notation: array[index] = 7; } int main(void) { // create arrays of 256 elements int num_elements = 256; // compute the size of the arrays in bytes int num_bytes = num_elements * sizeof(int); // pointers to host & device arrays int *device_array = 0; int *host_array = 0; // malloc a host array host_array = (int*)malloc(num_bytes); // hipMalloc a device array hipMalloc((void**)&device_array, num_bytes); // if either memory allocation failed, report an error message if(host_array == 0 || device_array == 0) { printf("couldn't allocate memory\n"); return 1; } // launch the global function by choosing the number of CUDA threads // to instantiate: // choose a number of threads per block // 128 threads (4 warps) tends to be a good number int block_size = 128; // divide the number of elements to process by the block size // to determine the number of blocks to launch int grid_size = num_elements / block_size; // To invoke the global function, use the triple chevron notation. // The first argument is the number of blocks (grid_size). // The second argument is the number of threads per block (block_size). // This is called "configuring" the launch. // After the triple chevrons, pass function arguments as normal. hipLaunchKernelGGL(( kernel), dim3(grid_size),dim3(block_size), 0, 0, device_array); // download and inspect the result on the host: hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost); // print out the result element by element for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n"); // deallocate memory free(host_array); hipFree(device_array); }
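The hipLaunchKernelGGL call in the hipified file above is a drop-in replacement for CUDA's triple-chevron launch. A small sketch of the correspondence, reusing the kernel declared above; the wrapper name launch_same_kernel is illustrative.

__global__ void kernel(int *array);  // same kernel as defined in the file above

static void launch_same_kernel(int *device_array, int grid_size, int block_size)
{
    // The hipify output above expresses this launch as:
    //   hipLaunchKernelGGL(( kernel), dim3(grid_size), dim3(block_size), 0, 0, device_array);
    // i.e. (function, grid, block, dynamic shared memory bytes, stream, kernel arguments...).
    // In CUDA's triple-chevron syntax the same launch is:
    kernel<<<grid_size, block_size, 0, 0>>>(device_array);
}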
f76900c9a03480600cf970e02f52ba42465baeba.cu
// This example introduces CUDA's abstraction of data parallel computational // "kernels", or __global__ functions. A __global__ function acts like the // main() function of a GPU program, and is allowed to manipulate device // memory directly. #include <stdlib.h> #include <stdio.h> // "kernels" or __global__ functions are the entry points to code that executes on the GPU // The keyword __global__ indicates to the compiler that this function is a GPU entry point. // __global__ functions must return void, and may only be called or "launched" from code that // executes on the CPU. __global__ void kernel(int *array) { // compute the index of this particular thread // in the grid: // multiply the index of this thread's block (blockIdx.x) // by the number of threads per block (blockDim.x) // and add the index of this thread inside its block (threadIdx.x) int index = blockIdx.x * blockDim.x + threadIdx.x; // write out 7 to a single element of the array using standard // array indexing notation: array[index] = 7; } int main(void) { // create arrays of 256 elements int num_elements = 256; // compute the size of the arrays in bytes int num_bytes = num_elements * sizeof(int); // pointers to host & device arrays int *device_array = 0; int *host_array = 0; // malloc a host array host_array = (int*)malloc(num_bytes); // cudaMalloc a device array cudaMalloc((void**)&device_array, num_bytes); // if either memory allocation failed, report an error message if(host_array == 0 || device_array == 0) { printf("couldn't allocate memory\n"); return 1; } // launch the global function by choosing the number of CUDA threads // to instantiate: // choose a number of threads per block // 128 threads (4 warps) tends to be a good number int block_size = 128; // divide the number of elements to process by the block size // to determine the number of blocks to launch int grid_size = num_elements / block_size; // To invoke the global function, use the triple chevron notation. // The first argument is the number of blocks (grid_size). // The second argument is the number of threads per block (block_size). // This is called "configuring" the launch. // After the triple chevrons, pass function arguments as normal. kernel<<<grid_size,block_size>>>(device_array); // download and inspect the result on the host: cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost); // print out the result element by element for(int i=0; i < num_elements; ++i) { printf("%d ", host_array[i]); } printf("\n"); // deallocate memory free(host_array); cudaFree(device_array); }
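The launch above assumes num_elements is an exact multiple of block_size and does not check for launch errors. A minimal sketch of the more general pattern — round the grid size up, bound-check inside the kernel, and verify the launch — using illustrative names (fill_kernel, launch_fill) rather than the ones in the file above.

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void fill_kernel(int *array, int num_elements)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < num_elements)      // guard: the last block may be only partially full
        array[index] = 7;
}

static void launch_fill(int *device_array, int num_elements)
{
    int block_size = 128;
    // Round up so every element is covered even when num_elements % block_size != 0.
    int grid_size = (num_elements + block_size - 1) / block_size;

    fill_kernel<<<grid_size, block_size>>>(device_array, num_elements);

    // Launch errors are reported asynchronously; check both launch and completion.
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
}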
cb24a5ad910b2f040a3411523d761914b0efe706.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void sign_double(int n,int idx,double *dy,int incy,double *result) {
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        if(i >= idx && i % incy == 0) {
            double x = dy[i];
            result[i] = (x > 0) - (x < 0);
        }
    }
}
cb24a5ad910b2f040a3411523d761914b0efe706.cu
extern "C" __global__ void sign_double(int n,int idx,double *dy,int incy,double *result) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { if(i >= idx && i % incy == 0) { double x = dy[i]; result[i] = (x > 0) - (x < 0); } } }
aa8a9220973a933692733c3476c3c364c1d7b153.hip
// !!! This is a file automatically generated by hipify!!! /* Density_hist.cu * This algorithm will populate a 3-dimensional histogram with density information * [Histogram ] = density_hist(x,y,z,bin_size,radius) */ #include <mex.h> #include <stdint.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <math.h> #define SHARE_PIX 4096 #define CUDA_CHECK_ERROR() __cuda_check_errors(__FILE__, __LINE__) #define CUDA_SAFE_CALL(err) __cuda_safe_call(err, __FILE__, __LINE__) inline void __cuda_check_errors (const char *filename, const int64_t line_number) { hipError_t err = hipDeviceSynchronize (); if (err != hipSuccess) { printf ("CUDA error %i at %s:%i: %s\n", err, filename, line_number, hipGetErrorString (err)); hipDeviceReset(); mexErrMsgTxt("Get Thee To A Nunnery!\n"); } } inline void __cuda_safe_call (hipError_t err, const char *filename, const int64_t line_number) { if (err != hipSuccess) { printf ("CUDA error %i at %s:%i: %s\n", err, filename, line_number, hipGetErrorString (err)); hipDeviceReset(); mexErrMsgTxt("Get Thee To A Nunnery!\n"); } } __global__ void populate(float *d_x, float *d_y, float *d_z, float *d_i1, int64_t *imsize, int64_t b, float radius){ // declare variables // gpu position variables int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bx = blockIdx.x*blockDim.x + tx; int by = blockIdx.y*blockDim.y + ty; int bz = blockIdx.z*blockDim.z + tz; __shared__ float xf[SHARE_PIX], yf[SHARE_PIX], zf[SHARE_PIX]; // shared logic, we get 4.1k spots of 'shared' memory // we will use those spots to cycle over all localizations // we can speed this up by using the parallel threads to grab data int crunch = blockDim.x*blockDim.y*blockDim.z; //if(crunch > SHARE_PIX){crunch = SHARE_PIX;} int dthr = tx + blockDim.x*ty + blockDim.x*blockDim.y*tz; // local thread for moving data into local int chunks = ((int)b-1)/crunch +1; // number of data chunks to // here ind is column major so in terms of i,j,k and m,n,o being index and size respectively // ind = i + j*m + k*m*n int m = imsize[0]; int n = imsize[1]; int o = imsize[2]; int ind = bx + m*by + m*n*bz; //int i = ind % m; //int j = (ind / m) % n; //int k = ind/(m*n); float cnt, dist; int ld, lk; int toplp = crunch; if(bx < m && by < n && bz < o){ cnt = 0; //for(lk = 0; lk < chunks; lk++){ //ld = dthr + crunch*lk; // load variable (linearized thread ID + loopvar * max_share_size) /* __syncthreads(); // ensure all data is loaded if(ld < (int)b){ // if load variable has something to load, load the localization data xf[ld % SHARE_PIX] = d_x[ld] - (float)by; yf[ld % SHARE_PIX] = d_y[ld] - (float)bx; zf[ld % SHARE_PIX] = d_z[ld] - (float)bz; } __syncthreads(); // ensure all data is loaded*/ //if(crunch*(lk+1) > (int)b-crunch*(lk)){toplp = (int)b - crunch*lk;} // if we do not utitlize full data space, ensure we only count 'new' data //if(toplp >0){ for(int ii = 0; ii < b; ii++){ // Histogram Algorithm //dist = (d_x[ii] - (float)by)*(d_x[ii] - (float)by) + (d_y[ii] - (float)bx)*(d_y[ii] - (float)bx) + (d_z[ii] - (float)bz)*(d_z[ii] - (float)bz); // This commented secion is 'slow' but gives correct math /*dist += (d_x[ii] - (float)by)*(d_x[ii] - (float)by); dist += (d_y[ii] - (float)bx)*(d_y[ii] - (float)bx); dist += (d_z[ii] - (float)bz)*(d_z[ii] - (float)bz);*/ //if( powf(dist,0.5) <= radius){ // check if localization is within 'radius' if( powf((d_x[ii] - (float)by)*(d_x[ii] - (float)by) + (d_y[ii] - (float)bx)*(d_y[ii] - (float)bx) + (d_z[ii] - (float)bz)*(d_z[ii] - 
(float)bz),0.5) <= radius){ //if( powf(xf[ii]*xf[ii] + yf[ii]*yf[ii] + zf[ii]*zf[ii],0.5) <= radius){ // check if localization is within 'radius' cnt += 1; // Record found molecules } } // } // } d_i1[ind] = cnt; // global write to output variable } } void mexFunction(int64_t nlhs, mxArray *plhs[], int64_t nrhs, mxArray const *prhs[]) { // Declare Variables for use hipEvent_t start, stop; float time; float *xf = (float *)mxGetPr(prhs[0]); float *yf = (float *)mxGetPr(prhs[1]); float *zf = (float *)mxGetPr(prhs[2]); float radius = (float)mxGetScalar(prhs[4]); float *d_i1, *d_x, *d_y, *d_z; int64_t *imsize, *d_size; int64_t *bigly = (int64_t *)mxGetDimensions(prhs[0]); // imsize is an int64_t array containing the size of the histogram to be constructed imsize = (int64_t *)mxGetPr(prhs[3]); if(imsize[2] < 0 || imsize[2] > 10000){imsize[2] = 1;} // fix glitch that occurs for 2D grids printf("m = %d, n = %d, o= %d\n",imsize[0],imsize[1],imsize[2]); int64_t pixels = imsize[0]*imsize[1]*imsize[2]; printf("pixels = %d\n",pixels); // Allocate Space CUDA_SAFE_CALL(hipMalloc((void**)&d_x,sizeof(float)*bigly[0])); CUDA_SAFE_CALL(hipMalloc((void**)&d_y,sizeof(float)*bigly[0])); CUDA_SAFE_CALL(hipMalloc((void**)&d_z,sizeof(float)*bigly[0])); CUDA_SAFE_CALL(hipMalloc((void**)&d_i1,sizeof(float)*pixels)); CUDA_SAFE_CALL(hipMalloc((void**)&d_size,sizeof(int64_t)*3)); // Copy Data onto GPU CUDA_SAFE_CALL(hipMemcpy(d_x,xf,sizeof(float)*bigly[0],hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_y,yf,sizeof(float)*bigly[0],hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_z,zf,sizeof(float)*bigly[0],hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_size,imsize,sizeof(int64_t)*3,hipMemcpyHostToDevice)); // GPU Setup and Launch int tpx = 16; int tpy = 8; // This gives largest multiple of 32 (warp size) for maximum occupancy int tpz = 8; dim3 dimGrid(((int)imsize[0]-1)/tpx + 1,((int)imsize[1]-1)/tpy + 1,((int)imsize[2]-1)/tpz + 1) ; dim3 dimBlock(tpx, tpy, tpz); printf("Dimensions of the grid %d x %d x %d\n", ((int)imsize[0]-1)/tpx + 1,((int)imsize[1]-1)/tpy + 1,((int)imsize[2]-1)/tpz + 1); // Launch GPU with timing hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); hipLaunchKernelGGL(( populate) , dim3(dimGrid),dim3(dimBlock), 0, 0, d_x, d_y, d_z, d_i1, d_size, bigly[0], radius); CUDA_CHECK_ERROR(); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&time,start ,stop); hipEventDestroy(start); hipEventDestroy(stop); printf("Kernel Execution in %f s\n",time/1000); // Gather Data Back size_t *dims; dims[0] = (size_t)imsize[0]; dims[1] = imsize[1]; dims[2] = imsize[2]; plhs[0] = mxCreateNumericArray(3, dims, mxSINGLE_CLASS, mxREAL); float *pout = (float *)mxGetPr(plhs[0]); CUDA_SAFE_CALL(hipMemcpy(pout,d_i1,sizeof(float)*imsize[0]*imsize[1]*imsize[2],hipMemcpyDeviceToHost)); // Clean up hipFree(d_size); hipFree(d_x); hipFree(d_y); hipFree(d_i1); hipFree(d_z); }
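The per-point test in populate() above computes powf(d2, 0.5) and compares it with radius; for a non-negative radius the same test can be made on squared distances, avoiding the square root entirely. A sketch of that equivalent formulation; the helper name within_radius is illustrative.

// Equivalent test without the square root: for radius >= 0,
// sqrt(d2) <= radius  <=>  d2 <= radius * radius.
__device__ __forceinline__ bool within_radius(float dx, float dy, float dz, float radius)
{
    float d2 = dx * dx + dy * dy + dz * dz;
    return d2 <= radius * radius;
}

// Usage inside the histogram loop of populate():
//   if (within_radius(d_x[ii] - (float)by, d_y[ii] - (float)bx, d_z[ii] - (float)bz, radius))
//       cnt += 1;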
aa8a9220973a933692733c3476c3c364c1d7b153.cu
/* Density_hist.cu * This algorithm will populate a 3-dimensional histogram with density information * [Histogram ] = density_hist(x,y,z,bin_size,radius) */ #include <mex.h> #include <stdint.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <math.h> #define SHARE_PIX 4096 #define CUDA_CHECK_ERROR() __cuda_check_errors(__FILE__, __LINE__) #define CUDA_SAFE_CALL(err) __cuda_safe_call(err, __FILE__, __LINE__) inline void __cuda_check_errors (const char *filename, const int64_t line_number) { cudaError err = cudaDeviceSynchronize (); if (err != cudaSuccess) { printf ("CUDA error %i at %s:%i: %s\n", err, filename, line_number, cudaGetErrorString (err)); cudaDeviceReset(); mexErrMsgTxt("Get Thee To A Nunnery!\n"); } } inline void __cuda_safe_call (cudaError err, const char *filename, const int64_t line_number) { if (err != cudaSuccess) { printf ("CUDA error %i at %s:%i: %s\n", err, filename, line_number, cudaGetErrorString (err)); cudaDeviceReset(); mexErrMsgTxt("Get Thee To A Nunnery!\n"); } } __global__ void populate(float *d_x, float *d_y, float *d_z, float *d_i1, int64_t *imsize, int64_t b, float radius){ // declare variables // gpu position variables int tx = threadIdx.x; int ty = threadIdx.y; int tz = threadIdx.z; int bx = blockIdx.x*blockDim.x + tx; int by = blockIdx.y*blockDim.y + ty; int bz = blockIdx.z*blockDim.z + tz; __shared__ float xf[SHARE_PIX], yf[SHARE_PIX], zf[SHARE_PIX]; // shared logic, we get 4.1k spots of 'shared' memory // we will use those spots to cycle over all localizations // we can speed this up by using the parallel threads to grab data int crunch = blockDim.x*blockDim.y*blockDim.z; //if(crunch > SHARE_PIX){crunch = SHARE_PIX;} int dthr = tx + blockDim.x*ty + blockDim.x*blockDim.y*tz; // local thread for moving data into local int chunks = ((int)b-1)/crunch +1; // number of data chunks to // here ind is column major so in terms of i,j,k and m,n,o being index and size respectively // ind = i + j*m + k*m*n int m = imsize[0]; int n = imsize[1]; int o = imsize[2]; int ind = bx + m*by + m*n*bz; //int i = ind % m; //int j = (ind / m) % n; //int k = ind/(m*n); float cnt, dist; int ld, lk; int toplp = crunch; if(bx < m && by < n && bz < o){ cnt = 0; //for(lk = 0; lk < chunks; lk++){ //ld = dthr + crunch*lk; // load variable (linearized thread ID + loopvar * max_share_size) /* __syncthreads(); // ensure all data is loaded if(ld < (int)b){ // if load variable has something to load, load the localization data xf[ld % SHARE_PIX] = d_x[ld] - (float)by; yf[ld % SHARE_PIX] = d_y[ld] - (float)bx; zf[ld % SHARE_PIX] = d_z[ld] - (float)bz; } __syncthreads(); // ensure all data is loaded*/ //if(crunch*(lk+1) > (int)b-crunch*(lk)){toplp = (int)b - crunch*lk;} // if we do not utitlize full data space, ensure we only count 'new' data //if(toplp >0){ for(int ii = 0; ii < b; ii++){ // Histogram Algorithm //dist = (d_x[ii] - (float)by)*(d_x[ii] - (float)by) + (d_y[ii] - (float)bx)*(d_y[ii] - (float)bx) + (d_z[ii] - (float)bz)*(d_z[ii] - (float)bz); // This commented secion is 'slow' but gives correct math /*dist += (d_x[ii] - (float)by)*(d_x[ii] - (float)by); dist += (d_y[ii] - (float)bx)*(d_y[ii] - (float)bx); dist += (d_z[ii] - (float)bz)*(d_z[ii] - (float)bz);*/ //if( powf(dist,0.5) <= radius){ // check if localization is within 'radius' if( powf((d_x[ii] - (float)by)*(d_x[ii] - (float)by) + (d_y[ii] - (float)bx)*(d_y[ii] - (float)bx) + (d_z[ii] - (float)bz)*(d_z[ii] - (float)bz),0.5) <= radius){ //if( powf(xf[ii]*xf[ii] + yf[ii]*yf[ii] 
+ zf[ii]*zf[ii],0.5) <= radius){ // check if localization is within 'radius' cnt += 1; // Record found molecules } } // } // } d_i1[ind] = cnt; // global write to output variable } } void mexFunction(int64_t nlhs, mxArray *plhs[], int64_t nrhs, mxArray const *prhs[]) { // Declare Variables for use cudaEvent_t start, stop; float time; float *xf = (float *)mxGetPr(prhs[0]); float *yf = (float *)mxGetPr(prhs[1]); float *zf = (float *)mxGetPr(prhs[2]); float radius = (float)mxGetScalar(prhs[4]); float *d_i1, *d_x, *d_y, *d_z; int64_t *imsize, *d_size; int64_t *bigly = (int64_t *)mxGetDimensions(prhs[0]); // imsize is an int64_t array containing the size of the histogram to be constructed imsize = (int64_t *)mxGetPr(prhs[3]); if(imsize[2] < 0 || imsize[2] > 10000){imsize[2] = 1;} // fix glitch that occurs for 2D grids printf("m = %d, n = %d, o= %d\n",imsize[0],imsize[1],imsize[2]); int64_t pixels = imsize[0]*imsize[1]*imsize[2]; printf("pixels = %d\n",pixels); // Allocate Space CUDA_SAFE_CALL(cudaMalloc((void**)&d_x,sizeof(float)*bigly[0])); CUDA_SAFE_CALL(cudaMalloc((void**)&d_y,sizeof(float)*bigly[0])); CUDA_SAFE_CALL(cudaMalloc((void**)&d_z,sizeof(float)*bigly[0])); CUDA_SAFE_CALL(cudaMalloc((void**)&d_i1,sizeof(float)*pixels)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_size,sizeof(int64_t)*3)); // Copy Data onto GPU CUDA_SAFE_CALL(cudaMemcpy(d_x,xf,sizeof(float)*bigly[0],cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_y,yf,sizeof(float)*bigly[0],cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_z,zf,sizeof(float)*bigly[0],cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_size,imsize,sizeof(int64_t)*3,cudaMemcpyHostToDevice)); // GPU Setup and Launch int tpx = 16; int tpy = 8; // This gives largest multiple of 32 (warp size) for maximum occupancy int tpz = 8; dim3 dimGrid(((int)imsize[0]-1)/tpx + 1,((int)imsize[1]-1)/tpy + 1,((int)imsize[2]-1)/tpz + 1) ; dim3 dimBlock(tpx, tpy, tpz); printf("Dimensions of the grid %d x %d x %d\n", ((int)imsize[0]-1)/tpx + 1,((int)imsize[1]-1)/tpy + 1,((int)imsize[2]-1)/tpz + 1); // Launch GPU with timing cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); populate <<<dimGrid,dimBlock>>> (d_x, d_y, d_z, d_i1, d_size, bigly[0], radius); CUDA_CHECK_ERROR(); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time,start ,stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("Kernel Execution in %f s\n",time/1000); // Gather Data Back size_t *dims; dims[0] = (size_t)imsize[0]; dims[1] = imsize[1]; dims[2] = imsize[2]; plhs[0] = mxCreateNumericArray(3, dims, mxSINGLE_CLASS, mxREAL); float *pout = (float *)mxGetPr(plhs[0]); CUDA_SAFE_CALL(cudaMemcpy(pout,d_i1,sizeof(float)*imsize[0]*imsize[1]*imsize[2],cudaMemcpyDeviceToHost)); // Clean up cudaFree(d_size); cudaFree(d_x); cudaFree(d_y); cudaFree(d_i1); cudaFree(d_z); }
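In mexFunction above (both the .cu and the hipified variant), dims is an uninitialized size_t pointer that is written through before being handed to mxCreateNumericArray, which is undefined behavior; the MEX API also expects mwSize dimensions. A minimal corrected sketch of that output-allocation step, assuming the same imsize layout as above.

// Allocate the 3-D single-precision output using a stack-allocated dims array.
mwSize dims[3];
dims[0] = (mwSize)imsize[0];
dims[1] = (mwSize)imsize[1];
dims[2] = (mwSize)imsize[2];
plhs[0] = mxCreateNumericArray(3, dims, mxSINGLE_CLASS, mxREAL);
float *pout = (float *)mxGetPr(plhs[0]);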
9596ecd14a18b1f632e8e525086d875d33d03b75.hip
// !!! This is a file automatically generated by hipify!!! /* ============================================================================ Name : gpu_euclidean_clustering.cu Author : AnhNV91 Version : 1.0 Description : Clustering analysis using Euclidean distance and single linkage ============================================================================ */ #include "gpu_euclidean_clustering.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/scan.h> #include <thrust/fill.h> #include <time.h> #include <sys/time.h> #define MAX_SHARED_SIZE 2048 #define BLOCK_SIZE_X 1024 //#define SERIAL 1 static FILE* execution_time_fp; static FILE* response_time_fp; static FILE* remain_time_fp; inline void gassert(hipError_t err_code, const char *file, int line) { if (err_code != hipSuccess) { fprintf(stderr, "Error: %s %s %d\n", hipGetErrorString(err_code), file, line); hipDeviceReset(); exit(EXIT_FAILURE); } } #define checkCudaErrors(val) gassert(val, __FILE__, __LINE__) GpuEuclideanCluster::GpuEuclideanCluster() { x_ = NULL; y_ = NULL; z_ = NULL; size_ = 0; threshold_ = 0; cluster_indices_ = NULL; cluster_indices_host_ = NULL; min_cluster_pts_ = 0; max_cluster_pts_ = 1000000000; cluster_num_ = 0; if(GPU_PROFILING == 1){ hipEventCreate(&e_event_start); hipEventCreate(&e_event_stop); hipEventCreate(&r_event_start); hipEventCreate(&r_event_stop); } } void GpuEuclideanCluster::setInputPoints(float *x, float *y, float *z, int size) { size_ = size; checkCudaErrors(hipMalloc(&x_, size_ * sizeof(float))); checkCudaErrors(hipMalloc(&y_, size_ * sizeof(float))); checkCudaErrors(hipMalloc(&z_, size_ * sizeof(float))); checkCudaErrors(hipMemcpy(x_, x, size_ * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(y_, y, size_ * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(z_, z, size_ * sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc(&cluster_indices_, size_ * sizeof(int))); cluster_indices_host_ = (int *) malloc(size_ * sizeof(int)); } void GpuEuclideanCluster::setThreshold(double threshold) { threshold_ = threshold; } void GpuEuclideanCluster::setMinClusterPts(int min_cluster_pts) { min_cluster_pts_ = min_cluster_pts; } void GpuEuclideanCluster::setMaxClusterPts(int max_cluster_pts) { max_cluster_pts_ = max_cluster_pts; } /* Initially, each point is assigned to an individual cluster. * */ extern "C" __global__ void pclEuclideanInitialize(int *cluster_indices, int size) { for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < size; index += blockDim.x * gridDim.x){ cluster_indices[index] = index; } } /* Connected component labeling points at GPU block thread level. * Input list of points is divided into multiple smaller groups. * Each group of point is assigned to a block of GPU thread. * Each thread in a block handles one point in the group. It iterates over * points in the group and compare the distance between the current point A * and the point B it has to handle. * * If the distance between A and B is less than the threshold, then those * two points belong to a same connected component and the cluster_changed * is marked by 1. * * A synchronization is called to make sure all thread in the block finish A * before moving to the update phase. * After finishing checking cluster_changed, threads update the cluster * index of all points. 
If a thread has cluster_changed is 1, then the corresponding * cluster of the point it is handling is changed to the cluster of B. Otherwise * the original cluster of A remains unchanged. * * Another synchronization is called before all threads in the block move to * other points after done checking A. * * After this kernel finishes, all points in each block are labeled. */ extern "C" __global__ void blockLabelling(float *x, float *y, float *z, int *cluster_indices, int size, float threshold) { int block_start = blockIdx.x * blockDim.x; int block_end = (block_start + blockDim.x <= size) ? (block_start + blockDim.x) : size; int row = threadIdx.x + block_start; __shared__ int local_offset[BLOCK_SIZE_X]; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; __shared__ int local_cluster_changed[BLOCK_SIZE_X]; if (row < block_end) { local_offset[threadIdx.x] = threadIdx.x; local_x[threadIdx.x] = x[row]; local_y[threadIdx.x] = y[row]; local_z[threadIdx.x] = z[row]; __syncthreads(); for (int column = block_start; column < block_end; column++) { float tmp_x = local_x[threadIdx.x] - local_x[column - block_start]; float tmp_y = local_y[threadIdx.x] - local_y[column - block_start]; float tmp_z = local_z[threadIdx.x] - local_z[column - block_start]; int column_offset = local_offset[column - block_start]; int row_offset = local_offset[threadIdx.x]; local_cluster_changed[threadIdx.x] = 0; __syncthreads(); if (row > column && column_offset != row_offset && norm3df(tmp_x, tmp_y, tmp_z) < threshold) local_cluster_changed[row_offset] = 1; __syncthreads(); local_offset[threadIdx.x] = (local_cluster_changed[row_offset] == 1) ? column_offset : row_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_indices[block_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_indices[row] = new_cluster; } } extern "C" __global__ void blockLabelling_with_slicing(float *x, float *y, float *z, int *cluster_indices, int size, float threshold, int slice_id, int slice_cnt) { int total_block_start = blockIdx.x * blockDim.x; int total_block_end = (total_block_start + blockDim.x <= size) ? (total_block_start + blockDim.x) : size; int quotient = (total_block_start-total_block_end) / slice_cnt; int remainder = (total_block_start-total_block_end) % slice_cnt; int block_start = quotient*(slice_id+1); int block_end = block_start + quotient; if( (slice_id+1) == slice_cnt) block_end += remainder; int row = threadIdx.x + block_start; __shared__ int local_offset[BLOCK_SIZE_X]; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; __shared__ int local_cluster_changed[BLOCK_SIZE_X]; if (row < block_end) { local_offset[threadIdx.x] = threadIdx.x; local_x[threadIdx.x] = x[row]; local_y[threadIdx.x] = y[row]; local_z[threadIdx.x] = z[row]; __syncthreads(); for (int column = block_start; column < block_end; column++) { float tmp_x = local_x[threadIdx.x] - local_x[column - block_start]; float tmp_y = local_y[threadIdx.x] - local_y[column - block_start]; float tmp_z = local_z[threadIdx.x] - local_z[column - block_start]; int column_offset = local_offset[column - block_start]; int row_offset = local_offset[threadIdx.x]; local_cluster_changed[threadIdx.x] = 0; __syncthreads(); if (row > column && column_offset != row_offset && norm3df(tmp_x, tmp_y, tmp_z) < threshold) local_cluster_changed[row_offset] = 1; __syncthreads(); local_offset[threadIdx.x] = (local_cluster_changed[row_offset] == 1) ? 
column_offset : row_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_indices[block_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_indices[row] = new_cluster; } } /* These kernels are used to collect remained clusters after each labeling phase. * * Basically, in each labeling phases, several clusters are merged together. * * The first kernel scans over the cluster_indices array and marks the cluster_mark * element corresponding with the cluster of the current point by 1. If a cluster * does not exists in the current phase (which means it was merged to some other * clusters), then its cluster_mark is 0. * * The second kernel scans over the original cluster_indices again and copy those * indices to new location on the target_clusters. */ extern "C" __global__ void clusterMark(int *cluster_list, int *cluster_mark, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) cluster_mark[cluster_list[i]] = 1; } extern "C" __global__ void clusterCollector(int *old_cluster_list, int *new_cluster_list, int *cluster_location, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x){ new_cluster_list[cluster_location[old_cluster_list[i]]] = old_cluster_list[i]; } } /* Create a cluster matrix. * * A cluster matrix is to record the relationship between each pair * of clusters. If a pair of cluster x and y are connected, then * the matrix element [x][y] are 1. Otherwise it is 0. Notice that * only the lower half of the matrix is used. * * To build this matrix, each GPU thread handles one point A, iterates * over all points B, and compare distance between A and B. Assuming * that A belongs to a cluster x, and B belongs to cluster y. If their * distance is less than the threshold, then the matrix element [x][y] * is set to 1. 
*/ extern "C" __global__ void buildClusterMatrix(float *x, float *y, float *z, int *cluster_indices, int *cluster_matrix, int *cluster_offset, int size, int cluster_num, float threshold) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; if (index>size) return; for (int column = index; column < size; column += stride) { local_x[threadIdx.x] = x[column]; local_y[threadIdx.x] = y[column]; local_z[threadIdx.x] = z[column]; int column_cluster = cluster_indices[column]; int cc_offset = cluster_offset[column_cluster]; __syncthreads(); for (int row = 0; row < column; row++) { float tmp_x = x[row] - local_x[threadIdx.x]; float tmp_y = y[row] - local_y[threadIdx.x]; float tmp_z = z[row] - local_z[threadIdx.x]; int row_cluster = cluster_indices[row]; int rc_offset = cluster_offset[row_cluster]; if (row_cluster != column_cluster && norm3df(tmp_x, tmp_y, tmp_z) < threshold) cluster_matrix[rc_offset * cluster_num + cc_offset] = 1; } __syncthreads(); } } extern "C" __global__ void buildClusterMatrix_with_slicing(float *x, float *y, float *z, int *cluster_indices, int *cluster_matrix, int *cluster_offset, int size, int cluster_num, float threshold, int slice_id, int slice_cnt) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; if (index>size) return; int quotient = size/slice_cnt; int remainder = size%slice_cnt; int column_start = index + slice_id * quotient; int column_end = column_start + remainder; if((slice_id + 1) == slice_cnt) column_end + remainder; if(column_end > size) column_end = size; // for (int column = index; column < size; column += stride) for (int column = column_start; column < column_end; column += stride) { local_x[threadIdx.x] = x[column]; local_y[threadIdx.x] = y[column]; local_z[threadIdx.x] = z[column]; int column_cluster = cluster_indices[column]; int cc_offset = cluster_offset[column_cluster]; __syncthreads(); // int row_start = slice_id * column/slice_cnt; // int row_end = row_start + column/slice_cnt; // if((slice_id + 1) == slice_cnt) row_end + column%slice_cnt; // if(row_end > column) row_end = column; // for (int row = row_start; row < row_end; row++) for (int row = 0; row < column; row++) { float tmp_x = x[row] - local_x[threadIdx.x]; float tmp_y = y[row] - local_y[threadIdx.x]; float tmp_z = z[row] - local_z[threadIdx.x]; int row_cluster = cluster_indices[row]; int rc_offset = cluster_offset[row_cluster]; if (row_cluster != column_cluster && norm3df(tmp_x, tmp_y, tmp_z) < threshold) cluster_matrix[rc_offset * cluster_num + cc_offset] = 1; } __syncthreads(); } } /* Merge clusters based on the cluster_matrix. * * This merge process is done per block. The input list of clusters * are divided into smaller chunks to be handled by GPU blocks. * * Each thread in a block handles one row of the matrix and iterates * over all columns of the matrix. A synchronization per each iteration * is needed to make sure all threads done merging clusters in the * current column before moving to the next column. * * In each iteration, each thread check if the cluster corresponding * with the current row is connected to the cluster corresponding to the * current column. If so, then the cluster of the row is changed (merged) * to the cluster of the column. 
*/ extern "C" __global__ void mergeClusters(int *cluster_matrix, int *cluster_list, int cluster_num) { int row_start = blockIdx.x * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = row_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; /* The cluster matrix is symmetric, so the * number of rows and columns are the same */ if (col < row_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row < col && row_offset != col_offset && (cluster_matrix[row * cluster_num + col] == 1)) local_changed[col_offset] = 1; __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[row_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; } } /* Reflex the change in the cluster merging step * to cluster indices of all input points. * * Clusters of input points are changed to the target clusters * corresponding with their source clusters. */ extern "C" __global__ void reflexClusterChanges(int *cluster_indices, int *cluster_offset, int *cluster_list, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) cluster_indices[i] = cluster_list[cluster_offset[cluster_indices[i]]]; } /* Rebuild cluster matrix after merging clusters. * * After several cluster are merged together, the number of clusters * reduces and the cluster matrix needs to be rebuilt. * * Each thread iterate over rows of one column of the source matrix. * If a element [x][y] of the source matrix is 1, then the element * [m][n] of the target matrix, in which m and n are the * new clusters of x and y, is set to 1. */ extern "C" __global__ void rebuildClusterMatrix(int *old_cluster_matrix, int *new_clusters, int *new_cluster_matrix, int *new_cluster_offset, int old_size, int new_size) { for (int column = threadIdx.x + blockIdx.x * blockDim.x; column < old_size; column += blockDim.x * gridDim.x) { for (int row = 0; row < column; row++) { int new_row = new_cluster_offset[new_clusters[row]]; int new_column = new_cluster_offset[new_clusters[column]]; if (old_cluster_matrix[row * old_size + column] == 1) new_cluster_matrix[new_row * new_size + new_column] = 1; } } } /* Perform exclusive scan on the input array using * thurst's scan. * * The variable 'sum' records the last element of * the array after being scanned. */ void GpuEuclideanCluster::exclusiveScan(int *input, int ele_num, int *sum) { thrust::device_ptr<int> dev_ptr(input); thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr); checkCudaErrors(hipDeviceSynchronize()); *sum = *(dev_ptr + ele_num - 1); } /* Reset the cluster indexes in the point cloud from 0. * * After merging, the cluster indexes of points are still large. Collecting * those large indexes is either time-consuming (without using hash) or * wasting memory space (using hash). By reset the cluster indexes from 0, * we can use hashing to collect those indexes with the space complexity equal * to the number of clusters. 
*/ extern "C" __global__ void resetClusterIndexes(int *cluster_indices, int *cluster_offset, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) { int old_cluster = cluster_indices[i]; cluster_indices[i] = cluster_offset[old_cluster]; } } /* Calculate the cluster indices of input points. * * Initially, the cluster index of the point at index ith * is set to i. This method merges cluster indices * of points that belong to same clusters. * * Result of this method is stored at cluster_indices_host_. */ void GpuEuclideanCluster::extractClustersOld() { int block_x, grid_x; block_x = (size_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : size_; grid_x = (size_ - 1) / block_x + 1; int *cluster_offset; int cluster_num, old_cluster_num; // pclEuclideanInitialize << < grid_x, block_x >> > (cluster_indices_, size_); checkCudaErrors(hipDeviceSynchronize()); old_cluster_num = cluster_num = size_; checkCudaErrors(hipMalloc(&cluster_offset, (size_ + 1) * sizeof(int))); checkCudaErrors(hipMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); blockLabelling << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, size_, threshold_); clusterMark << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); exclusiveScan(cluster_offset, size_ + 1, &cluster_num); int *cluster_list, *new_cluster_list, *tmp; checkCudaErrors(hipMalloc(&cluster_list, cluster_num * sizeof(int))); clusterCollector << < grid_x, block_x >> > (cluster_indices_, cluster_list, cluster_offset, size_); checkCudaErrors(hipDeviceSynchronize()); int *cluster_matrix; int *new_cluster_matrix; checkCudaErrors(hipMalloc(&cluster_matrix, cluster_num * cluster_num * sizeof(int))); checkCudaErrors(hipMemset(cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMalloc(&new_cluster_list, cluster_num * sizeof(int))); buildClusterMatrix << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, cluster_matrix, cluster_offset, size_, cluster_num, threshold_); checkCudaErrors(hipDeviceSynchronize()); int block_x2 = 0, grid_x2 = 0; /* Loop until there is no change in the number of clusters */ do { old_cluster_num = cluster_num; block_x2 = (cluster_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : cluster_num; grid_x2 = (cluster_num - 1) / block_x2 + 1; mergeClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num); reflexClusterChanges << < grid_x, block_x >> > (cluster_indices_, cluster_offset, cluster_list, size_); checkCudaErrors(hipMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); clusterMark << < grid_x2, block_x2 >> > (cluster_list, cluster_offset, cluster_num); exclusiveScan(cluster_offset, size_ + 1, &cluster_num); if (grid_x2 == 1 && cluster_num == old_cluster_num) break; clusterCollector << < grid_x2, block_x2 >> > (cluster_list, new_cluster_list, cluster_offset, old_cluster_num); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMalloc(&new_cluster_matrix, cluster_num * cluster_num * sizeof(int))); checkCudaErrors(hipMemset(new_cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); rebuildClusterMatrix << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, new_cluster_matrix, cluster_offset, old_cluster_num, cluster_num); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipFree(cluster_matrix)); cluster_matrix = new_cluster_matrix; tmp = cluster_list; cluster_list = new_cluster_list; new_cluster_list = tmp; } while (1); cluster_num_ = cluster_num; resetClusterIndexes << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMemcpy(cluster_indices_host_, cluster_indices_, size_ * sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(cluster_matrix)); checkCudaErrors(hipFree(cluster_list)); checkCudaErrors(hipFree(new_cluster_list)); checkCudaErrors(hipFree(cluster_offset)); } extern "C" __global__ void mergeSelfClusters(int *cluster_matrix, int *cluster_list, int cluster_num, bool *changed) { int row_start = blockIdx.x * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = row_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; bool block_changed = false; if (col < row_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row < col && row_offset != col_offset && (cluster_matrix[row * cluster_num + col] == 1)) { local_changed[col_offset] = 1; block_changed = true; } __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[row_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; __syncthreads(); if (block_changed) *changed = true; } } extern "C" __global__ void mergeSelfClustersWithSlicing(int *cluster_matrix, int *cluster_list, int cluster_num, bool *changed, int row_start, int row_end) { // int row_start = blockIdx.x * blockDim.x; // int row_end = (row_start + blockDim.x <= cluster_num) ? 
row_start + blockDim.x : cluster_num; int col = row_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; bool block_changed = false; if (col < row_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row < col && row_offset != col_offset && (cluster_matrix[row * cluster_num + col] == 1)) { local_changed[col_offset] = 1; block_changed = true; } __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[row_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; __syncthreads(); if (block_changed) *changed = true; } } /* Merge clusters from different blocks of points. * * The relationship of those clusters are expressed by a cluster matrix. * The merge is done by assigning each thread in a block of GPU threads * to move from top to bottom of the matrix and check if there are any * 1 element in the matrix. * * This kernel only merge matrices that staying in a same diagonal of a * group of matrix. The index of the diagonal is indicated by shift_level. */ extern "C" __global__ void mergeInterClusters(int *cluster_matrix, int *cluster_list, int shift_level, int base_row, int base_column, int sub_matrix_row, int sub_matrix_col, int sub_matrix_offset_row, int sub_matrix_offset_col, int cluster_num, bool *changed) { int col_start = (base_column + (blockIdx.x / sub_matrix_col) * sub_matrix_offset_col + (blockIdx.x + shift_level - sub_matrix_col * ((blockIdx.x + shift_level) / sub_matrix_col))) * blockDim.x; int col_end = (col_start + blockDim.x <= cluster_num) ? col_start + blockDim.x : cluster_num; int row_start = (base_row + (blockIdx.x / sub_matrix_row) * sub_matrix_offset_row + (blockIdx.x - sub_matrix_row * (blockIdx.x / sub_matrix_row))) * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = col_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; bool block_changed = false; if (col < col_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row_offset != col_offset && cluster_matrix[row * cluster_num + col] == 1) { local_changed[col_offset] = 1; block_changed = true; } __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[col_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; if (block_changed) *changed = true; } } /* Checking if two individual blocks have any clusters that intersect. * * If there are, then the diagonal index that the block belongs to is * recorded in changed_diag. All blocks in the same diagonal are merged * in the next step. 
*/ extern "C" __global__ void clustersIntersecCheck(int *cluster_matrix, int *changed_diag, int base_row, int base_column, int sub_matrix_row, int sub_matrix_col, int sub_matrix_offset_row, int sub_matrix_offset_col, int cluster_num) { //Thinking about using % or not int col_idx = (blockIdx.x / sub_matrix_col) * sub_matrix_offset_col + (blockIdx.x % sub_matrix_col); int row_idx = (blockIdx.x / sub_matrix_row) * sub_matrix_offset_row + (blockIdx.y % sub_matrix_col); int col_start = (base_column + col_idx) * blockDim.x; int col_end = (col_start + blockDim.x <= cluster_num) ? col_start + blockDim.x : cluster_num; int row_start = (base_row + row_idx) * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = col_start + threadIdx.x; int diag_offset = (col_idx > row_idx) ? col_idx - row_idx : col_idx + row_idx; if (col < col_end && col_start <= col_end && row_start <= row_end) { for (int row = row_start; row < row_end; row++) { if (cluster_matrix[row * cluster_num + col] == 1) { *changed_diag = diag_offset; break; } } } } /* Extract clusters of points. * * This method can handle the case with sparse points (distance between points * are larger than threshold), which may lead to infinite loop in the first method. */ void GpuEuclideanCluster::extractClusters() { int block_x, grid_x; block_x = (size_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : size_; grid_x = (size_ - 1) / block_x + 1; int *cluster_offset; int cluster_num, old_cluster_num; stop_cpu_profiling(); request_scheduling(17); pclEuclideanInitialize << < grid_x, block_x >> > (cluster_indices_, size_); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(17, LAUNCH); start_profiling_cpu_time(); // #ifdef SLICING // int* init_data = (int *)malloc((size_) * sizeof(int)); // for(int i = 0; i < size_; i++){ // init_data[i] = i; // } // stop_cpu_profiling(); // request_scheduling(17); // checkCudaErrors(hipMemcpy(cluster_indices_, init_data, (size_) * sizeof(int), hipMemcpyHostToDevice)); // checkCudaErrors(hipDeviceSynchronize()); // stop_profiling(17, HTOD); // start_profiling_cpu_time(); // #endif old_cluster_num = cluster_num = size_; stop_cpu_profiling(); request_scheduling(18); checkCudaErrors(hipMalloc(&cluster_offset, (size_ + 1) * sizeof(int))); checkCudaErrors(hipMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); stop_profiling(18,HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(19); blockLabelling << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, size_, threshold_); stop_profiling(19, LAUNCH); start_profiling_cpu_time(); // #ifdef SLICING // int slice_cnt = 2; // for(int slice_id = 0; slice_id < slice_cnt; slice_id++){ // stop_cpu_profiling(); // request_scheduling(19); // blockLabelling_with_slicing << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, size_, threshold_, slice_id, slice_cnt); // stop_profiling(19, LAUNCH); // start_profiling_cpu_time(); // } // #endif stop_cpu_profiling(); request_scheduling(20); clusterMark << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); stop_profiling(20, LAUNCH); start_profiling_cpu_time(); exclusiveScan(cluster_offset, size_ + 1, &cluster_num); int *cluster_list, *new_cluster_list, *tmp; stop_cpu_profiling(); request_scheduling(21); checkCudaErrors(hipMalloc(&cluster_list, cluster_num * sizeof(int))); stop_profiling(21, HTOD); start_profiling_cpu_time(); #ifndef SLICING stop_cpu_profiling(); request_scheduling(22); clusterCollector << < grid_x, block_x >> > (cluster_indices_, 
cluster_list, cluster_offset, size_); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(22, LAUNCH); start_profiling_cpu_time(); #endif #ifdef SLICING // [22] ////////////////////////////////////////////////////////////// int idx = 0; int* h_cluster_indices = (int *)malloc(size_ * sizeof(int)); stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(h_cluster_indices, cluster_indices_, size_/3*sizeof(int), hipMemcpyDeviceToHost)); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += size_/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(h_cluster_indices[idx]), &(cluster_indices_[idx]), size_/3*sizeof(int), hipMemcpyDeviceToHost)); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += size_/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(h_cluster_indices[idx]), &(cluster_indices_[idx]), (size_/3 + size_%3)*sizeof(int), hipMemcpyDeviceToHost)); stop_profiling(22, DTOH); start_profiling_cpu_time(); int* h_cluster_offset = (int *)malloc((size_ + 1) * sizeof(int)); idx = 0; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(h_cluster_offset, cluster_offset, (size_+1)/3*sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += (size_+1)/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(h_cluster_offset[idx]), &(cluster_offset[idx]), (size_+1)/3*sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += (size_+1)/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(h_cluster_offset[idx]), &(cluster_offset[idx]), ((size_+1)%3 + (size_+1)/3)*sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); int* h_cluster_list = (int *)malloc(cluster_num * sizeof(int)); idx = 0; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(h_cluster_list, cluster_list, cluster_num/3 * sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(h_cluster_list[idx]), &(cluster_list[idx]), (cluster_num/3) * sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(h_cluster_list[idx]), &(cluster_list[idx]), (cluster_num/3 + cluster_num%3) * sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); for(int i = 0; i < size_; i++){ h_cluster_list[h_cluster_offset[h_cluster_indices[i]]] = h_cluster_indices[i]; } idx = 0; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(cluster_list, h_cluster_list, cluster_num/3 * sizeof(int), hipMemcpyHostToDevice)); stop_profiling(22, HTOD); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(cluster_list[idx]), &(h_cluster_list[idx]), (cluster_num/3) * sizeof(int), hipMemcpyHostToDevice)); stop_profiling(22, HTOD); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(hipMemcpy(&(cluster_list[idx]), 
&(h_cluster_list[idx]), (cluster_num%3 + cluster_num/3 ) * sizeof(int), hipMemcpyHostToDevice)); stop_profiling(22, HTOD); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); #endif int *cluster_matrix; int *new_cluster_matrix; stop_cpu_profiling(); request_scheduling(23); checkCudaErrors(hipMalloc(&cluster_matrix, cluster_num * cluster_num * sizeof(int))); stop_profiling(23, HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(24); checkCudaErrors(hipMemset(cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); stop_profiling(24, HTOD); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); stop_cpu_profiling(); request_scheduling(25); checkCudaErrors(hipMalloc(&new_cluster_list, cluster_num * sizeof(int))); stop_profiling(25, HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(26); buildClusterMatrix << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, cluster_matrix, cluster_offset, size_, cluster_num, threshold_); stop_profiling(26, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); // [25] ////////////////////////////////////////////////////////////// // slice_cnt = 5; // for(int slice_id = 0; slice_id<slice_cnt; slice_id++){ // stop_cpu_profiling(); // request_scheduling(26); // buildClusterMatrix_with_slicing << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, cluster_matrix, cluster_offset, size_, cluster_num, threshold_, slice_id, slice_cnt); // stop_profiling(26, LAUNCH); // start_profiling_cpu_time(); // checkCudaErrors(hipDeviceSynchronize()); // } /////////////////////////////////////////////////////////////////////// int block_x2 = 0, grid_x2 = 0; bool *changed; stop_cpu_profiling(); request_scheduling(27); checkCudaErrors(hipHostMalloc(&changed, sizeof(bool))); stop_profiling(27, LAUNCH); start_profiling_cpu_time(); #ifndef SERIAL int *changed_diag; stop_cpu_profiling(); request_scheduling(28); checkCudaErrors(hipHostMalloc(&changed_diag, sizeof(int))); stop_profiling(28, LAUNCH); start_profiling_cpu_time(); #endif int max_base_row = 0; do { *changed = false; block_x2 = (cluster_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : cluster_num; grid_x2 = (cluster_num - 1) / block_x2 + 1; stop_cpu_profiling(); request_scheduling(29); mergeSelfClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num, changed); checkCudaErrors(hipDeviceSynchronize()); stop_profiling(29, LAUNCH); start_profiling_cpu_time(); // #ifdef SLICING // slice_cnt = 5; // int row_start_origin = block_x2; // int row_end_origin = (row_start_origin + block_x2 <= cluster_num) ? 
row_start_origin + block_x2 : cluster_num; // int quotient = row_start_origin / row_end_origin; // int remainder = row_start_origin % row_end_origin; // if(block_x2 >= slice_cnt){ // for(int slice_id = 0; slice_id < slice_cnt; slice_id++){ // int row_start = slice_id*quotient; // int row_end = row_start + quotient; // if((slice_id+1) == slice_cnt) row_end += remainder; // if(row_end > row_end_origin) row_end = row_end_origin; // stop_cpu_profiling(); // request_scheduling(29); // mergeSelfClustersWithSlicing << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num, changed, row_start, row_end); // checkCudaErrors(hipDeviceSynchronize()); // stop_profiling(29, LAUNCH); // start_profiling_cpu_time(); // } // } // else{ // stop_cpu_profiling(); // request_scheduling(29); // mergeSelfClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num, changed); // checkCudaErrors(hipDeviceSynchronize()); // stop_profiling(29, LAUNCH); // start_profiling_cpu_time(); // } // #endif int base_row = 1, base_column = 0; int sub_matrix_offset_row = 2, sub_matrix_offset_col = 2; int sub_matrix_row = 1, sub_matrix_col = 1; int sub_matrix_num; int max_rows = grid_x2; max_base_row = base_row; while (!(*changed) && cluster_num > BLOCK_SIZE_X && base_row * BLOCK_SIZE_X < cluster_num && base_column < cluster_num) { sub_matrix_num = (cluster_num - base_row - 1) / sub_matrix_offset_row + 1; block_x2 = BLOCK_SIZE_X; grid_x2 = sub_matrix_num * sub_matrix_col; #ifdef SERIAL //Merge clusters in each sub-matrix by moving from top to bottom of the similarity sub-matrix for (int shift_level = 0; !(*changed) && shift_level < sub_matrix_col; shift_level++) { stop_cpu_profiling(); request_scheduling(30); hipLaunchKernelGGL(( mergeInterClusters), dim3(grid_x2), dim3(block_x2), 0, 0, cluster_matrix, cluster_list, shift_level, base_row, base_column, sub_matrix_row, sub_matrix_col, sub_matrix_offset_row, sub_matrix_offset_col, cluster_num, changed); stop_profiling(30, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); } #else int grid_y2 = sub_matrix_row; dim3 block_size(block_x2, 1, 1); dim3 grid_size(grid_x2, grid_y2, 1); *changed_diag = -1; stop_cpu_profiling(); request_scheduling(31); clustersIntersecCheck << < grid_size, block_size >> > (cluster_matrix, changed_diag, base_row, base_column, sub_matrix_row, sub_matrix_col, sub_matrix_offset_row, sub_matrix_offset_col, cluster_num); stop_profiling(31, LAUNCH); checkCudaErrors(hipDeviceSynchronize()); start_profiling_cpu_time(); if (*changed_diag > 0) { //Merge clusters in sub-matrix that stay in the changed_diag diagonal by moving from top to bottom of the matrix. stop_cpu_profiling(); request_scheduling(32); mergeInterClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, *changed_diag, base_row, base_column, sub_matrix_row, sub_matrix_col, sub_matrix_offset_row, sub_matrix_offset_col, cluster_num, changed); stop_profiling(32, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); } #endif base_row += sub_matrix_row; sub_matrix_row = (sub_matrix_row * 2 + base_row < max_rows) ? 
sub_matrix_row * 2 : max_rows - base_row; sub_matrix_col *= 2; sub_matrix_offset_row *= 2; sub_matrix_offset_col *= 2; } max_base_row = base_row; if (*changed) { stop_cpu_profiling(); request_scheduling(33); reflexClusterChanges << < grid_x, block_x >> > (cluster_indices_, cluster_offset, cluster_list, size_); stop_profiling(33, LAUNCH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(34); checkCudaErrors(hipMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); stop_profiling(34, HTOD); start_profiling_cpu_time(); block_x2 = (cluster_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : cluster_num; grid_x2 = (cluster_num - 1) / block_x2 + 1; stop_cpu_profiling(); request_scheduling(35); clusterMark << < grid_x2, block_x2 >> > (cluster_list, cluster_offset, cluster_num); stop_profiling(35, LAUNCH); start_profiling_cpu_time(); old_cluster_num = cluster_num; exclusiveScan(cluster_offset, size_ + 1, &cluster_num); stop_cpu_profiling(); request_scheduling(36); clusterCollector << < grid_x2, block_x2 >> > (cluster_list, new_cluster_list, cluster_offset, old_cluster_num); stop_profiling(36, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hipMalloc(&new_cluster_matrix, cluster_num * cluster_num * sizeof(int))); stop_cpu_profiling(); request_scheduling(37); checkCudaErrors(hipMemset(new_cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); stop_profiling(37, HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(38); rebuildClusterMatrix << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, new_cluster_matrix, cluster_offset, old_cluster_num, cluster_num); stop_profiling(38, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); stop_cpu_profiling(); request_scheduling(39); checkCudaErrors(hipFree(cluster_matrix)); stop_profiling(39, HTOD); start_profiling_cpu_time(); cluster_matrix = new_cluster_matrix; tmp = cluster_list; cluster_list = new_cluster_list; new_cluster_list = tmp; } } while (*changed && max_base_row < cluster_num); cluster_num_ = cluster_num; //Reset all cluster indexes to make them start from 0 stop_cpu_profiling(); request_scheduling(40); resetClusterIndexes << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); stop_profiling(40, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(hipDeviceSynchronize()); stop_cpu_profiling(); request_scheduling(41); checkCudaErrors(hipMemcpy(cluster_indices_host_, cluster_indices_, size_ * sizeof(int), hipMemcpyDeviceToHost)); stop_profiling(41, DTOH); start_profiling_cpu_time(); // slice_cnt = 3; // int quotient = size_/slice_cnt; // int remainder = size_%slice_cnt; // for(int slice_id = 0; slice_id < slice_cnt; slice_id++){ // int start_idx = slice_id * quotient; // int size = // stop_cpu_profiling(); // request_scheduling(36); // stop_profiling(36, DTOH); // start_profiling_cpu_time(); // } /////////////////////////////////////////////////////////////////// stop_cpu_profiling(); request_scheduling(42); checkCudaErrors(hipFree(cluster_matrix)); stop_profiling(42, DTOH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(43); checkCudaErrors(hipFree(cluster_list)); stop_profiling(43, DTOH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(44); checkCudaErrors(hipFree(new_cluster_list)); stop_profiling(44, DTOH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(45); checkCudaErrors(hipFree(cluster_offset)); stop_profiling(45, DTOH); start_profiling_cpu_time(); 
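// Note: 'changed' (and, when SERIAL is not defined, 'changed_diag') were allocated with
// hipHostMalloc above, i.e. as page-locked host memory, so they are released below with
// hipHostFree rather than free() or hipFree().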
stop_cpu_profiling(); request_scheduling(46); checkCudaErrors(hipHostFree(changed)); stop_profiling(46, DTOH); start_profiling_cpu_time(); #ifndef SERIAL stop_cpu_profiling(); request_scheduling(47); checkCudaErrors(hipHostFree(changed_diag)); stop_profiling(47, DTOH); start_profiling_cpu_time(); #endif } /* Collect points that belong to same clusters and put them together. * * The output is a vector whose each element contains indexes of points * that belong to a same clusters. */ std::vector <GpuEuclideanCluster::GClusterIndex> GpuEuclideanCluster::getOutput() { std::vector <GClusterIndex> cluster_indices(cluster_num_); for (unsigned int i = 0; i < cluster_indices.size(); i++) cluster_indices[i].index_value = -1; for (int i = 0; i < size_; i++) { cluster_indices[cluster_indices_host_[i]].points_in_cluster.push_back(i); cluster_indices[cluster_indices_host_[i]].index_value = cluster_indices_host_[i]; } for (unsigned int i = 0; i < cluster_indices.size();) { int number_of_pts = cluster_indices[i].points_in_cluster.size(); if (number_of_pts < min_cluster_pts_ || number_of_pts > max_cluster_pts_) cluster_indices.erase(cluster_indices.begin() + i); else i++; } return cluster_indices; } /* Generate sparse points. * The number of points is fixed at 10000. * Cannot afford more (e.g. 100 000 points) since * GPU memory is not enough for a matrix with 10 billions cells. */ GpuEuclideanCluster::SamplePointListXYZ GpuEuclideanCluster::generateSample() { GpuEuclideanCluster::SamplePointListXYZ output; output.size = 10000; output.x = (float *) malloc(sizeof(float) * output.size); output.y = (float *) malloc(sizeof(float) * output.size); output.z = (float *) malloc(sizeof(float) * output.size); output.x[0] = 0; output.y[0] = 0; output.z[0] = 0; for (int i = 1; i < output.size; i++) { output.x[i] = (i % 3 == 0) ? output.x[i - 1] + threshold_ + 1 : output.x[i - 1]; output.y[i] = (i % 3 == 1) ? output.y[i - 1] + threshold_ + 1 : output.y[i - 1]; output.z[i] = (i % 3 == 2) ? 
output.z[i - 1] + threshold_ + 1 : output.z[i - 1]; } return output; } GpuEuclideanCluster::~GpuEuclideanCluster() { checkCudaErrors(hipFree(x_)); checkCudaErrors(hipFree(y_)); checkCudaErrors(hipFree(z_)); checkCudaErrors(hipFree(cluster_indices_)); free(cluster_indices_host_); } /* GPU Profiling */ void start_profiling_execution_time(){ if(GPU_PROFILING == 1) hipEventRecord(e_event_start, 0); } void start_profiling_response_time(){ if(GPU_PROFILING == 1) hipEventRecord(r_event_start, 0); } void start_profiling_cpu_time(){ if(GPU_PROFILING == 1){ cpu_id++; gettimeofday(&startTime, NULL); } } void stop_profiling(int id, int type){ if(GPU_PROFILING == 1){ float e_time, r_time; char gpu_id_buf[BUFFER_SIZE]; if(gpu_scheduling_flag_==1){ sched_info_->scheduling_flag = 0; sched_info_->state = NONE; } hipEventRecord(e_event_stop, 0); hipEventRecord(r_event_stop, 0); hipEventSynchronize(e_event_stop); hipEventSynchronize(r_event_stop); hipEventElapsedTime(&e_time, e_event_start, e_event_stop); hipEventElapsedTime(&r_time, r_event_start, r_event_stop); e_time = MS2US(e_time); r_time = MS2US(r_time); // write_data(gid, time, type); sprintf(gpu_id_buf,"g%d",id); //write_profiling_data(id, e_time, r_time, type); write_profiling_data(gpu_id_buf, e_time, r_time, type); // gid++; } } void stop_cpu_profiling(){ if(GPU_PROFILING == 1){ long long int elapsedTime; char cpu_id_buf[BUFFER_SIZE]; gettimeofday(&endTime, NULL); elapsedTime = ((long long int)(endTime.tv_sec - startTime.tv_sec)) * 1000000ll + (endTime.tv_usec - startTime.tv_usec); sprintf(cpu_id_buf,"e%d",cpu_id); write_cpu_profiling_data(cpu_id_buf,elapsedTime); } } void write_profiling_data(const char* id, float e_time, float r_time, int type){ if(GPU_PROFILING == 1){ fprintf(execution_time_fp, "%s, %f, %d\n", id, e_time, type); fprintf(response_time_fp, "%s, %f, %d\n", id, r_time, type); fprintf(remain_time_fp, "%s, %llu\n", id, absolute_deadline_ - get_current_time_us()); } } void write_cpu_profiling_data(const char *id, long long int c_time){ if(GPU_PROFILING == 1){ fprintf(execution_time_fp, "%s, %02d\n", id, c_time); fprintf(response_time_fp, "%s, %02d\n", id, c_time); } } void write_dummy_line(){ if(GPU_PROFILING == 1){ fprintf(execution_time_fp, "-1, -1, -1\n"); fflush(execution_time_fp); fprintf(response_time_fp, "-1, -1, -1\n"); fflush(response_time_fp); fprintf(remain_time_fp, "-1, -1\n"); fflush(remain_time_fp); cpu_id = 0; } } void initialize_file(const char execution_time_filename[], const char response_time_filename[], const char remain_time_filename[]){ if(GPU_PROFILING == 1){ execution_time_fp = fopen(execution_time_filename, "w+"); fprintf(execution_time_fp, "ID, TIME, TYPE\n"); response_time_fp = fopen(response_time_filename, "w+"); fprintf(response_time_fp, "ID, TIME, TYPE\n"); remain_time_fp = fopen(remain_time_filename, "w+"); fprintf(remain_time_fp, "ID, TIME\n"); } } void close_file(){ if(GPU_PROFILING == 1){ fclose(execution_time_fp); fclose(response_time_fp); fclose(remain_time_fp); } } void sig_handler(int signum){ if(signum == SIGUSR1 || signum == SIGUSR2){ is_scheduled_ = 1; return; } else termination(); } void termination(){ if(gpu_scheduling_flag_==1){ sched_info_->state = STOP; shmdt(sched_info_); } if(remove(task_filename_)){ printf("Cannot remove file %s\n", task_filename_); exit(1); } exit(0); } unsigned long long get_current_time_us(){ struct timespec ts; unsigned long long current_time; clock_gettime(CLOCK_REALTIME, &ts); current_time = ts.tv_sec%10000 * 1000000 + ts.tv_nsec/1000; return current_time; } void 
us_sleep(unsigned long long us){ struct timespec ts; ts.tv_sec = us/1000000; ts.tv_nsec = us%1000000*1000; nanosleep(&ts, NULL); return; } void initialize_signal_handler(){ signal(SIGINT, sig_handler); signal(SIGTSTP, sig_handler); signal(SIGQUIT, sig_handler); signal(SIGUSR1, sig_handler); signal(SIGUSR2, sig_handler); } void create_task_file(){ FILE* task_fp; task_fp = fopen(task_filename_, "w"); if(task_fp == NULL){ printf("Cannot create task file at %s\n", task_filename_); exit(1); } fprintf(task_fp, "%d\n", getpid()); fprintf(task_fp, "%d", key_id_); fclose(task_fp); } void get_scheduler_pid(){ FILE* scheduler_fp; printf("Wait the scheduler...\n"); while(1){ scheduler_fp = fopen("/tmp/np_edf_scheduler", "r"); if(scheduler_fp) break; } while(1){ fscanf(scheduler_fp, "%d", &scheduler_pid_); if(scheduler_pid_ != 0) break; } printf("Scheduler pid: %d\n", scheduler_pid_); fclose(scheduler_fp); } void initialize_sched_info(){ if(gpu_scheduling_flag_!=1) return; FILE* sm_key_fp; sm_key_fp = fopen("/tmp/sm_key", "r"); if(sm_key_fp == NULL){ printf("Cannot open /tmp/sm_key\n"); termination(); } key_ = ftok("/tmp/sm_key", key_id_); shmid_ = shmget(key_, sizeof(SchedInfo), 0666|IPC_CREAT); sched_info_ = (SchedInfo*)shmat(shmid_, 0, 0); sched_info_->pid = getpid(); sched_info_->state = NONE; sched_info_->scheduling_flag = 0; } void init_scheduling(char* task_filename, const char* deadline_filename, int key_id){ if(gpu_scheduling_flag_!=1) return; // Get deadline list get_deadline_list(deadline_filename); // Initialize key id for shared memory key_id_ = key_id; // Initialize signal handler initialize_signal_handler(); // Create task file sprintf(task_filename_, "%s", task_filename); create_task_file(); // Get scheduler pid get_scheduler_pid(); // Initialize scheduling information (shared memory data) initialize_sched_info(); sigemptyset(&sigset_); sigaddset(&sigset_, SIGUSR1); sigaddset(&sigset_, SIGUSR2); sigprocmask(SIG_BLOCK, &sigset_, NULL); // sigwait(&sigset_, &sig_); // kill(scheduler_pid_, SIGUSR2); // sigprocmask(SIG_UNBLOCK, &sigset_, NULL); printf("Task [%d] is ready to work\n", getpid()); // sigaddset(&sigset_, SIGUSR1); // sigprocmask(SIG_BLOCK, &sigset_, NULL); } void request_scheduling(int id){ if(gpu_scheduling_flag_ == 1){ unsigned long long relative_deadline = deadline_list_[id]; if(identical_deadline_ != 0) sched_info_->deadline = absolute_deadline_; else sched_info_->deadline = get_current_time_us() + relative_deadline; sched_info_->state = WAIT; // printf("Request schedule - deadline: %llu\n", sched_info_->deadline); } start_profiling_response_time(); if(gpu_scheduling_flag_ == 1){ while(1){ kill(scheduler_pid_, SIGUSR1); // if(!sigwait(&sigset_, &sig_)) break; // if(is_scheduled_ == 1) break; if(sched_info_->scheduling_flag == 1) break; } } start_profiling_execution_time(); if(gpu_scheduling_flag_==1){ sched_info_->state = RUN; sched_info_->deadline = 0; } } void get_deadline_list(const char* filename){ FILE* fp; fp = fopen(filename, "r"); if(fp==NULL){ fprintf(stderr, "Cannot find file %s\n", filename); exit(1); } char buf[1024]; long long int deadline; for(int i = 0; i < sizeof(deadline_list_)/sizeof(long long int); i++){ fgets(buf, 1024, fp); strtok(buf, "\n"); sscanf(buf, "%*s, %llu", &deadline); deadline_list_[i] = deadline; } } void set_identical_deadline(unsigned long long identical_deadline){ identical_deadline_ = identical_deadline; } void set_absolute_deadline(){ absolute_deadline_ = get_current_time_us() + identical_deadline_; } void set_slicing_flag(int flag){ 
slicing_flag_ = flag; } void set_gpu_scheduling_flag(int gpu_scheduling_flag){ gpu_scheduling_flag_ = gpu_scheduling_flag; }
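/* Usage sketch (illustrative only; host_x, host_y, host_z, num_points and the parameter
 * values below are assumed for the example and are not part of this file):
 *
 *   GpuEuclideanCluster gecl;
 *   gecl.setInputPoints(host_x, host_y, host_z, num_points); // copies the cloud to the GPU
 *   gecl.setThreshold(0.5);                                  // single-linkage distance threshold
 *   gecl.setMinClusterPts(20);
 *   gecl.setMaxClusterPts(100000);
 *   gecl.extractClusters();                                  // labels every point on the GPU
 *   std::vector<GpuEuclideanCluster::GClusterIndex> clusters = gecl.getOutput();
 */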
9596ecd14a18b1f632e8e525086d875d33d03b75.cu
/* ============================================================================ Name : gpu_euclidean_clustering.cu Author : AnhNV91 Version : 1.0 Description : Clustering analysis using Euclidean distance and single linkage ============================================================================ */ #include "gpu_euclidean_clustering.h" #include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/scan.h> #include <thrust/fill.h> #include <time.h> #include <sys/time.h> #define MAX_SHARED_SIZE 2048 #define BLOCK_SIZE_X 1024 //#define SERIAL 1 static FILE* execution_time_fp; static FILE* response_time_fp; static FILE* remain_time_fp; inline void gassert(cudaError_t err_code, const char *file, int line) { if (err_code != cudaSuccess) { fprintf(stderr, "Error: %s %s %d\n", cudaGetErrorString(err_code), file, line); cudaDeviceReset(); exit(EXIT_FAILURE); } } #define checkCudaErrors(val) gassert(val, __FILE__, __LINE__) GpuEuclideanCluster::GpuEuclideanCluster() { x_ = NULL; y_ = NULL; z_ = NULL; size_ = 0; threshold_ = 0; cluster_indices_ = NULL; cluster_indices_host_ = NULL; min_cluster_pts_ = 0; max_cluster_pts_ = 1000000000; cluster_num_ = 0; if(GPU_PROFILING == 1){ cudaEventCreate(&e_event_start); cudaEventCreate(&e_event_stop); cudaEventCreate(&r_event_start); cudaEventCreate(&r_event_stop); } } void GpuEuclideanCluster::setInputPoints(float *x, float *y, float *z, int size) { size_ = size; checkCudaErrors(cudaMalloc(&x_, size_ * sizeof(float))); checkCudaErrors(cudaMalloc(&y_, size_ * sizeof(float))); checkCudaErrors(cudaMalloc(&z_, size_ * sizeof(float))); checkCudaErrors(cudaMemcpy(x_, x, size_ * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(y_, y, size_ * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(z_, z, size_ * sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc(&cluster_indices_, size_ * sizeof(int))); cluster_indices_host_ = (int *) malloc(size_ * sizeof(int)); } void GpuEuclideanCluster::setThreshold(double threshold) { threshold_ = threshold; } void GpuEuclideanCluster::setMinClusterPts(int min_cluster_pts) { min_cluster_pts_ = min_cluster_pts; } void GpuEuclideanCluster::setMaxClusterPts(int max_cluster_pts) { max_cluster_pts_ = max_cluster_pts; } /* Initially, each point is assigned to an individual cluster. * */ extern "C" __global__ void pclEuclideanInitialize(int *cluster_indices, int size) { for (int index = threadIdx.x + blockIdx.x * blockDim.x; index < size; index += blockDim.x * gridDim.x){ cluster_indices[index] = index; } } /* Connected component labeling points at GPU block thread level. * Input list of points is divided into multiple smaller groups. * Each group of point is assigned to a block of GPU thread. * Each thread in a block handles one point in the group. It iterates over * points in the group and compare the distance between the current point A * and the point B it has to handle. * * If the distance between A and B is less than the threshold, then those * two points belong to a same connected component and the cluster_changed * is marked by 1. * * A synchronization is called to make sure all thread in the block finish A * before moving to the update phase. * After finishing checking cluster_changed, threads update the cluster * index of all points. If a thread has cluster_changed is 1, then the corresponding * cluster of the point it is handling is changed to the cluster of B. 
Otherwise * the original cluster of A remains unchanged. * * Another synchronization is called before all threads in the block move to * other points after done checking A. * * After this kernel finishes, all points in each block are labeled. */ extern "C" __global__ void blockLabelling(float *x, float *y, float *z, int *cluster_indices, int size, float threshold) { int block_start = blockIdx.x * blockDim.x; int block_end = (block_start + blockDim.x <= size) ? (block_start + blockDim.x) : size; int row = threadIdx.x + block_start; __shared__ int local_offset[BLOCK_SIZE_X]; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; __shared__ int local_cluster_changed[BLOCK_SIZE_X]; if (row < block_end) { local_offset[threadIdx.x] = threadIdx.x; local_x[threadIdx.x] = x[row]; local_y[threadIdx.x] = y[row]; local_z[threadIdx.x] = z[row]; __syncthreads(); for (int column = block_start; column < block_end; column++) { float tmp_x = local_x[threadIdx.x] - local_x[column - block_start]; float tmp_y = local_y[threadIdx.x] - local_y[column - block_start]; float tmp_z = local_z[threadIdx.x] - local_z[column - block_start]; int column_offset = local_offset[column - block_start]; int row_offset = local_offset[threadIdx.x]; local_cluster_changed[threadIdx.x] = 0; __syncthreads(); if (row > column && column_offset != row_offset && norm3df(tmp_x, tmp_y, tmp_z) < threshold) local_cluster_changed[row_offset] = 1; __syncthreads(); local_offset[threadIdx.x] = (local_cluster_changed[row_offset] == 1) ? column_offset : row_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_indices[block_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_indices[row] = new_cluster; } } extern "C" __global__ void blockLabelling_with_slicing(float *x, float *y, float *z, int *cluster_indices, int size, float threshold, int slice_id, int slice_cnt) { int total_block_start = blockIdx.x * blockDim.x; int total_block_end = (total_block_start + blockDim.x <= size) ? (total_block_start + blockDim.x) : size; int quotient = (total_block_start-total_block_end) / slice_cnt; int remainder = (total_block_start-total_block_end) % slice_cnt; int block_start = quotient*(slice_id+1); int block_end = block_start + quotient; if( (slice_id+1) == slice_cnt) block_end += remainder; int row = threadIdx.x + block_start; __shared__ int local_offset[BLOCK_SIZE_X]; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; __shared__ int local_cluster_changed[BLOCK_SIZE_X]; if (row < block_end) { local_offset[threadIdx.x] = threadIdx.x; local_x[threadIdx.x] = x[row]; local_y[threadIdx.x] = y[row]; local_z[threadIdx.x] = z[row]; __syncthreads(); for (int column = block_start; column < block_end; column++) { float tmp_x = local_x[threadIdx.x] - local_x[column - block_start]; float tmp_y = local_y[threadIdx.x] - local_y[column - block_start]; float tmp_z = local_z[threadIdx.x] - local_z[column - block_start]; int column_offset = local_offset[column - block_start]; int row_offset = local_offset[threadIdx.x]; local_cluster_changed[threadIdx.x] = 0; __syncthreads(); if (row > column && column_offset != row_offset && norm3df(tmp_x, tmp_y, tmp_z) < threshold) local_cluster_changed[row_offset] = 1; __syncthreads(); local_offset[threadIdx.x] = (local_cluster_changed[row_offset] == 1) ? 
column_offset : row_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_indices[block_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_indices[row] = new_cluster; } } /* These kernels are used to collect remained clusters after each labeling phase. * * Basically, in each labeling phases, several clusters are merged together. * * The first kernel scans over the cluster_indices array and marks the cluster_mark * element corresponding with the cluster of the current point by 1. If a cluster * does not exists in the current phase (which means it was merged to some other * clusters), then its cluster_mark is 0. * * The second kernel scans over the original cluster_indices again and copy those * indices to new location on the target_clusters. */ extern "C" __global__ void clusterMark(int *cluster_list, int *cluster_mark, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) cluster_mark[cluster_list[i]] = 1; } extern "C" __global__ void clusterCollector(int *old_cluster_list, int *new_cluster_list, int *cluster_location, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x){ new_cluster_list[cluster_location[old_cluster_list[i]]] = old_cluster_list[i]; } } /* Create a cluster matrix. * * A cluster matrix is to record the relationship between each pair * of clusters. If a pair of cluster x and y are connected, then * the matrix element [x][y] are 1. Otherwise it is 0. Notice that * only the lower half of the matrix is used. * * To build this matrix, each GPU thread handles one point A, iterates * over all points B, and compare distance between A and B. Assuming * that A belongs to a cluster x, and B belongs to cluster y. If their * distance is less than the threshold, then the matrix element [x][y] * is set to 1. 
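 *
 * Illustration (indices assumed): if the point handled by a thread (index 'column') lies in
 * a cluster whose offset is cc_offset = 1, an earlier point (index 'row' < 'column') lies in
 * a cluster whose offset is rc_offset = 3, and the two points are closer than the threshold,
 * the kernel sets cluster_matrix[rc_offset * cluster_num + cc_offset], i.e.
 * cluster_matrix[3 * cluster_num + 1] = 1.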
*/ extern "C" __global__ void buildClusterMatrix(float *x, float *y, float *z, int *cluster_indices, int *cluster_matrix, int *cluster_offset, int size, int cluster_num, float threshold) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; if (index>size) return; for (int column = index; column < size; column += stride) { local_x[threadIdx.x] = x[column]; local_y[threadIdx.x] = y[column]; local_z[threadIdx.x] = z[column]; int column_cluster = cluster_indices[column]; int cc_offset = cluster_offset[column_cluster]; __syncthreads(); for (int row = 0; row < column; row++) { float tmp_x = x[row] - local_x[threadIdx.x]; float tmp_y = y[row] - local_y[threadIdx.x]; float tmp_z = z[row] - local_z[threadIdx.x]; int row_cluster = cluster_indices[row]; int rc_offset = cluster_offset[row_cluster]; if (row_cluster != column_cluster && norm3df(tmp_x, tmp_y, tmp_z) < threshold) cluster_matrix[rc_offset * cluster_num + cc_offset] = 1; } __syncthreads(); } } extern "C" __global__ void buildClusterMatrix_with_slicing(float *x, float *y, float *z, int *cluster_indices, int *cluster_matrix, int *cluster_offset, int size, int cluster_num, float threshold, int slice_id, int slice_cnt) { int index = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; __shared__ float local_x[BLOCK_SIZE_X]; __shared__ float local_y[BLOCK_SIZE_X]; __shared__ float local_z[BLOCK_SIZE_X]; if (index>size) return; int quotient = size/slice_cnt; int remainder = size%slice_cnt; int column_start = index + slice_id * quotient; int column_end = column_start + remainder; if((slice_id + 1) == slice_cnt) column_end + remainder; if(column_end > size) column_end = size; // for (int column = index; column < size; column += stride) for (int column = column_start; column < column_end; column += stride) { local_x[threadIdx.x] = x[column]; local_y[threadIdx.x] = y[column]; local_z[threadIdx.x] = z[column]; int column_cluster = cluster_indices[column]; int cc_offset = cluster_offset[column_cluster]; __syncthreads(); // int row_start = slice_id * column/slice_cnt; // int row_end = row_start + column/slice_cnt; // if((slice_id + 1) == slice_cnt) row_end + column%slice_cnt; // if(row_end > column) row_end = column; // for (int row = row_start; row < row_end; row++) for (int row = 0; row < column; row++) { float tmp_x = x[row] - local_x[threadIdx.x]; float tmp_y = y[row] - local_y[threadIdx.x]; float tmp_z = z[row] - local_z[threadIdx.x]; int row_cluster = cluster_indices[row]; int rc_offset = cluster_offset[row_cluster]; if (row_cluster != column_cluster && norm3df(tmp_x, tmp_y, tmp_z) < threshold) cluster_matrix[rc_offset * cluster_num + cc_offset] = 1; } __syncthreads(); } } /* Merge clusters based on the cluster_matrix. * * This merge process is done per block. The input list of clusters * are divided into smaller chunks to be handled by GPU blocks. * * Each thread in a block handles one row of the matrix and iterates * over all columns of the matrix. A synchronization per each iteration * is needed to make sure all threads done merging clusters in the * current column before moving to the next column. * * In each iteration, each thread check if the cluster corresponding * with the current row is connected to the cluster corresponding to the * current column. If so, then the cluster of the row is changed (merged) * to the cluster of the column. 
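 *
 * Illustration (indices assumed): within one block, if cluster_matrix[row * cluster_num + col]
 * is 1 for some row < col handled by this block, the thread owning column col adopts the
 * row's local offset, so after the loop both entries resolve to the same cluster_list value
 * and the two clusters are merged.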
*/ extern "C" __global__ void mergeClusters(int *cluster_matrix, int *cluster_list, int cluster_num) { int row_start = blockIdx.x * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = row_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; /* The cluster matrix is symmetric, so the * number of rows and columns are the same */ if (col < row_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row < col && row_offset != col_offset && (cluster_matrix[row * cluster_num + col] == 1)) local_changed[col_offset] = 1; __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[row_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; } } /* Reflex the change in the cluster merging step * to cluster indices of all input points. * * Clusters of input points are changed to the target clusters * corresponding with their source clusters. */ extern "C" __global__ void reflexClusterChanges(int *cluster_indices, int *cluster_offset, int *cluster_list, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) cluster_indices[i] = cluster_list[cluster_offset[cluster_indices[i]]]; } /* Rebuild cluster matrix after merging clusters. * * After several cluster are merged together, the number of clusters * reduces and the cluster matrix needs to be rebuilt. * * Each thread iterate over rows of one column of the source matrix. * If a element [x][y] of the source matrix is 1, then the element * [m][n] of the target matrix, in which m and n are the * new clusters of x and y, is set to 1. */ extern "C" __global__ void rebuildClusterMatrix(int *old_cluster_matrix, int *new_clusters, int *new_cluster_matrix, int *new_cluster_offset, int old_size, int new_size) { for (int column = threadIdx.x + blockIdx.x * blockDim.x; column < old_size; column += blockDim.x * gridDim.x) { for (int row = 0; row < column; row++) { int new_row = new_cluster_offset[new_clusters[row]]; int new_column = new_cluster_offset[new_clusters[column]]; if (old_cluster_matrix[row * old_size + column] == 1) new_cluster_matrix[new_row * new_size + new_column] = 1; } } } /* Perform exclusive scan on the input array using * thurst's scan. * * The variable 'sum' records the last element of * the array after being scanned. */ void GpuEuclideanCluster::exclusiveScan(int *input, int ele_num, int *sum) { thrust::device_ptr<int> dev_ptr(input); thrust::exclusive_scan(dev_ptr, dev_ptr + ele_num, dev_ptr); checkCudaErrors(cudaDeviceSynchronize()); *sum = *(dev_ptr + ele_num - 1); } /* Reset the cluster indexes in the point cloud from 0. * * After merging, the cluster indexes of points are still large. Collecting * those large indexes is either time-consuming (without using hash) or * wasting memory space (using hash). By reset the cluster indexes from 0, * we can use hashing to collect those indexes with the space complexity equal * to the number of clusters. 
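 *
 * Illustration (values assumed): if only clusters 0, 7 and 42 survive the merging, the
 * exclusive scan over cluster_mark yields cluster_offset[0] = 0, cluster_offset[7] = 1 and
 * cluster_offset[42] = 2, so every point previously labelled 42 is relabelled 2.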
*/ extern "C" __global__ void resetClusterIndexes(int *cluster_indices, int *cluster_offset, int size) { for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x) { int old_cluster = cluster_indices[i]; cluster_indices[i] = cluster_offset[old_cluster]; } } /* Calculate the cluster indices of input points. * * Initially, the cluster index of the point at index ith * is set to i. This method merges cluster indices * of points that belong to same clusters. * * Result of this method is stored at cluster_indices_host_. */ void GpuEuclideanCluster::extractClustersOld() { int block_x, grid_x; block_x = (size_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : size_; grid_x = (size_ - 1) / block_x + 1; int *cluster_offset; int cluster_num, old_cluster_num; // pclEuclideanInitialize << < grid_x, block_x >> > (cluster_indices_, size_); checkCudaErrors(cudaDeviceSynchronize()); old_cluster_num = cluster_num = size_; checkCudaErrors(cudaMalloc(&cluster_offset, (size_ + 1) * sizeof(int))); checkCudaErrors(cudaMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); blockLabelling << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, size_, threshold_); clusterMark << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); exclusiveScan(cluster_offset, size_ + 1, &cluster_num); int *cluster_list, *new_cluster_list, *tmp; checkCudaErrors(cudaMalloc(&cluster_list, cluster_num * sizeof(int))); clusterCollector << < grid_x, block_x >> > (cluster_indices_, cluster_list, cluster_offset, size_); checkCudaErrors(cudaDeviceSynchronize()); int *cluster_matrix; int *new_cluster_matrix; checkCudaErrors(cudaMalloc(&cluster_matrix, cluster_num * cluster_num * sizeof(int))); checkCudaErrors(cudaMemset(cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMalloc(&new_cluster_list, cluster_num * sizeof(int))); buildClusterMatrix << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, cluster_matrix, cluster_offset, size_, cluster_num, threshold_); checkCudaErrors(cudaDeviceSynchronize()); int block_x2 = 0, grid_x2 = 0; /* Loop until there is no change in the number of clusters */ do { old_cluster_num = cluster_num; block_x2 = (cluster_num > BLOCK_SIZE_X) ? 
BLOCK_SIZE_X : cluster_num; grid_x2 = (cluster_num - 1) / block_x2 + 1; mergeClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num); reflexClusterChanges << < grid_x, block_x >> > (cluster_indices_, cluster_offset, cluster_list, size_); checkCudaErrors(cudaMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); clusterMark << < grid_x2, block_x2 >> > (cluster_list, cluster_offset, cluster_num); exclusiveScan(cluster_offset, size_ + 1, &cluster_num); if (grid_x2 == 1 && cluster_num == old_cluster_num) break; clusterCollector << < grid_x2, block_x2 >> > (cluster_list, new_cluster_list, cluster_offset, old_cluster_num); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMalloc(&new_cluster_matrix, cluster_num * cluster_num * sizeof(int))); checkCudaErrors(cudaMemset(new_cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); rebuildClusterMatrix << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, new_cluster_matrix, cluster_offset, old_cluster_num, cluster_num); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaFree(cluster_matrix)); cluster_matrix = new_cluster_matrix; tmp = cluster_list; cluster_list = new_cluster_list; new_cluster_list = tmp; } while (1); cluster_num_ = cluster_num; resetClusterIndexes << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(cluster_indices_host_, cluster_indices_, size_ * sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(cluster_matrix)); checkCudaErrors(cudaFree(cluster_list)); checkCudaErrors(cudaFree(new_cluster_list)); checkCudaErrors(cudaFree(cluster_offset)); } extern "C" __global__ void mergeSelfClusters(int *cluster_matrix, int *cluster_list, int cluster_num, bool *changed) { int row_start = blockIdx.x * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = row_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; bool block_changed = false; if (col < row_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row < col && row_offset != col_offset && (cluster_matrix[row * cluster_num + col] == 1)) { local_changed[col_offset] = 1; block_changed = true; } __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[row_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; __syncthreads(); if (block_changed) *changed = true; } } extern "C" __global__ void mergeSelfClustersWithSlicing(int *cluster_matrix, int *cluster_list, int cluster_num, bool *changed, int row_start, int row_end) { // int row_start = blockIdx.x * blockDim.x; // int row_end = (row_start + blockDim.x <= cluster_num) ? 
row_start + blockDim.x : cluster_num; int col = row_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; bool block_changed = false; if (col < row_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row < col && row_offset != col_offset && (cluster_matrix[row * cluster_num + col] == 1)) { local_changed[col_offset] = 1; block_changed = true; } __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[row_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; __syncthreads(); if (block_changed) *changed = true; } } /* Merge clusters from different blocks of points. * * The relationship of those clusters are expressed by a cluster matrix. * The merge is done by assigning each thread in a block of GPU threads * to move from top to bottom of the matrix and check if there are any * 1 element in the matrix. * * This kernel only merge matrices that staying in a same diagonal of a * group of matrix. The index of the diagonal is indicated by shift_level. */ extern "C" __global__ void mergeInterClusters(int *cluster_matrix, int *cluster_list, int shift_level, int base_row, int base_column, int sub_matrix_row, int sub_matrix_col, int sub_matrix_offset_row, int sub_matrix_offset_col, int cluster_num, bool *changed) { int col_start = (base_column + (blockIdx.x / sub_matrix_col) * sub_matrix_offset_col + (blockIdx.x + shift_level - sub_matrix_col * ((blockIdx.x + shift_level) / sub_matrix_col))) * blockDim.x; int col_end = (col_start + blockDim.x <= cluster_num) ? col_start + blockDim.x : cluster_num; int row_start = (base_row + (blockIdx.x / sub_matrix_row) * sub_matrix_offset_row + (blockIdx.x - sub_matrix_row * (blockIdx.x / sub_matrix_row))) * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = col_start + threadIdx.x; __shared__ int local_changed[BLOCK_SIZE_X]; __shared__ int local_offset[BLOCK_SIZE_X]; bool block_changed = false; if (col < col_end) { local_offset[threadIdx.x] = threadIdx.x; __syncthreads(); for (int row = row_start; row < row_end; row++) { int col_offset = local_offset[threadIdx.x]; int row_offset = local_offset[row - row_start]; local_changed[threadIdx.x] = 0; __syncthreads(); if (row_offset != col_offset && cluster_matrix[row * cluster_num + col] == 1) { local_changed[col_offset] = 1; block_changed = true; } __syncthreads(); local_offset[threadIdx.x] = (local_changed[col_offset] == 1) ? row_offset : col_offset; __syncthreads(); } __syncthreads(); int new_cluster = cluster_list[col_start + local_offset[threadIdx.x]]; __syncthreads(); cluster_list[col] = new_cluster; if (block_changed) *changed = true; } } /* Checking if two individual blocks have any clusters that intersect. * * If there are, then the diagonal index that the block belongs to is * recorded in changed_diag. All blocks in the same diagonal are merged * in the next step. 
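 *
 * Illustration (indices assumed): each thread block inspects one tile of the cluster matrix;
 * if any element of that tile is 1, the block stores the tile's diagonal index (derived from
 * its row and column tile indices) into changed_diag, and mergeInterClusters is then launched
 * with that value as its shift_level so only tiles on that diagonal are merged.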
*/ extern "C" __global__ void clustersIntersecCheck(int *cluster_matrix, int *changed_diag, int base_row, int base_column, int sub_matrix_row, int sub_matrix_col, int sub_matrix_offset_row, int sub_matrix_offset_col, int cluster_num) { //Thinking about using % or not int col_idx = (blockIdx.x / sub_matrix_col) * sub_matrix_offset_col + (blockIdx.x % sub_matrix_col); int row_idx = (blockIdx.x / sub_matrix_row) * sub_matrix_offset_row + (blockIdx.y % sub_matrix_col); int col_start = (base_column + col_idx) * blockDim.x; int col_end = (col_start + blockDim.x <= cluster_num) ? col_start + blockDim.x : cluster_num; int row_start = (base_row + row_idx) * blockDim.x; int row_end = (row_start + blockDim.x <= cluster_num) ? row_start + blockDim.x : cluster_num; int col = col_start + threadIdx.x; int diag_offset = (col_idx > row_idx) ? col_idx - row_idx : col_idx + row_idx; if (col < col_end && col_start <= col_end && row_start <= row_end) { for (int row = row_start; row < row_end; row++) { if (cluster_matrix[row * cluster_num + col] == 1) { *changed_diag = diag_offset; break; } } } } /* Extract clusters of points. * * This method can handle the case with sparse points (distance between points * are larger than threshold), which may lead to infinite loop in the first method. */ void GpuEuclideanCluster::extractClusters() { int block_x, grid_x; block_x = (size_ > BLOCK_SIZE_X) ? BLOCK_SIZE_X : size_; grid_x = (size_ - 1) / block_x + 1; int *cluster_offset; int cluster_num, old_cluster_num; stop_cpu_profiling(); request_scheduling(17); pclEuclideanInitialize << < grid_x, block_x >> > (cluster_indices_, size_); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(17, LAUNCH); start_profiling_cpu_time(); // #ifdef SLICING // int* init_data = (int *)malloc((size_) * sizeof(int)); // for(int i = 0; i < size_; i++){ // init_data[i] = i; // } // stop_cpu_profiling(); // request_scheduling(17); // checkCudaErrors(cudaMemcpy(cluster_indices_, init_data, (size_) * sizeof(int), cudaMemcpyHostToDevice)); // checkCudaErrors(cudaDeviceSynchronize()); // stop_profiling(17, HTOD); // start_profiling_cpu_time(); // #endif old_cluster_num = cluster_num = size_; stop_cpu_profiling(); request_scheduling(18); checkCudaErrors(cudaMalloc(&cluster_offset, (size_ + 1) * sizeof(int))); checkCudaErrors(cudaMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); stop_profiling(18,HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(19); blockLabelling << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, size_, threshold_); stop_profiling(19, LAUNCH); start_profiling_cpu_time(); // #ifdef SLICING // int slice_cnt = 2; // for(int slice_id = 0; slice_id < slice_cnt; slice_id++){ // stop_cpu_profiling(); // request_scheduling(19); // blockLabelling_with_slicing << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, size_, threshold_, slice_id, slice_cnt); // stop_profiling(19, LAUNCH); // start_profiling_cpu_time(); // } // #endif stop_cpu_profiling(); request_scheduling(20); clusterMark << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); stop_profiling(20, LAUNCH); start_profiling_cpu_time(); exclusiveScan(cluster_offset, size_ + 1, &cluster_num); int *cluster_list, *new_cluster_list, *tmp; stop_cpu_profiling(); request_scheduling(21); checkCudaErrors(cudaMalloc(&cluster_list, cluster_num * sizeof(int))); stop_profiling(21, HTOD); start_profiling_cpu_time(); #ifndef SLICING stop_cpu_profiling(); request_scheduling(22); clusterCollector << < grid_x, block_x >> > (cluster_indices_, 
cluster_list, cluster_offset, size_); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(22, LAUNCH); start_profiling_cpu_time(); #endif #ifdef SLICING // [22] ////////////////////////////////////////////////////////////// int idx = 0; int* h_cluster_indices = (int *)malloc(size_ * sizeof(int)); stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(h_cluster_indices, cluster_indices_, size_/3*sizeof(int), cudaMemcpyDeviceToHost)); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += size_/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(&(h_cluster_indices[idx]), &(cluster_indices_[idx]), size_/3*sizeof(int), cudaMemcpyDeviceToHost)); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += size_/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(&(h_cluster_indices[idx]), &(cluster_indices_[idx]), (size_/3 + size_%3)*sizeof(int), cudaMemcpyDeviceToHost)); stop_profiling(22, DTOH); start_profiling_cpu_time(); int* h_cluster_offset = (int *)malloc((size_ + 1) * sizeof(int)); idx = 0; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(h_cluster_offset, cluster_offset, (size_+1)/3*sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += (size_+1)/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(&(h_cluster_offset[idx]), &(cluster_offset[idx]), (size_+1)/3*sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += (size_+1)/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(&(h_cluster_offset[idx]), &(cluster_offset[idx]), ((size_+1)%3 + (size_+1)/3)*sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); int* h_cluster_list = (int *)malloc(cluster_num * sizeof(int)); idx = 0; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(h_cluster_list, cluster_list, cluster_num/3 * sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(&(h_cluster_list[idx]), &(cluster_list[idx]), (cluster_num/3) * sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(&(h_cluster_list[idx]), &(cluster_list[idx]), (cluster_num/3 + cluster_num%3) * sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(22, DTOH); start_profiling_cpu_time(); for(int i = 0; i < size_; i++){ h_cluster_list[h_cluster_offset[h_cluster_indices[i]]] = h_cluster_indices[i]; } idx = 0; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(cluster_list, h_cluster_list, cluster_num/3 * sizeof(int), cudaMemcpyHostToDevice)); stop_profiling(22, HTOD); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); checkCudaErrors(cudaMemcpy(&(cluster_list[idx]), &(h_cluster_list[idx]), (cluster_num/3) * sizeof(int), cudaMemcpyHostToDevice)); stop_profiling(22, HTOD); start_profiling_cpu_time(); idx += cluster_num/3; stop_cpu_profiling(); request_scheduling(22); 
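// Final slice of the SLICING path: the remaining cluster_num/3 + cluster_num%3 entries of
// h_cluster_list are copied back to the device, completing the host-side replacement of
// the clusterCollector kernel.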
checkCudaErrors(cudaMemcpy(&(cluster_list[idx]), &(h_cluster_list[idx]), (cluster_num%3 + cluster_num/3 ) * sizeof(int), cudaMemcpyHostToDevice)); stop_profiling(22, HTOD); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); #endif int *cluster_matrix; int *new_cluster_matrix; stop_cpu_profiling(); request_scheduling(23); checkCudaErrors(cudaMalloc(&cluster_matrix, cluster_num * cluster_num * sizeof(int))); stop_profiling(23, HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(24); checkCudaErrors(cudaMemset(cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); stop_profiling(24, HTOD); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); stop_cpu_profiling(); request_scheduling(25); checkCudaErrors(cudaMalloc(&new_cluster_list, cluster_num * sizeof(int))); stop_profiling(25, HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(26); buildClusterMatrix << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, cluster_matrix, cluster_offset, size_, cluster_num, threshold_); stop_profiling(26, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); // [25] ////////////////////////////////////////////////////////////// // slice_cnt = 5; // for(int slice_id = 0; slice_id<slice_cnt; slice_id++){ // stop_cpu_profiling(); // request_scheduling(26); // buildClusterMatrix_with_slicing << < grid_x, block_x >> > (x_, y_, z_, cluster_indices_, cluster_matrix, cluster_offset, size_, cluster_num, threshold_, slice_id, slice_cnt); // stop_profiling(26, LAUNCH); // start_profiling_cpu_time(); // checkCudaErrors(cudaDeviceSynchronize()); // } /////////////////////////////////////////////////////////////////////// int block_x2 = 0, grid_x2 = 0; bool *changed; stop_cpu_profiling(); request_scheduling(27); checkCudaErrors(cudaMallocHost(&changed, sizeof(bool))); stop_profiling(27, LAUNCH); start_profiling_cpu_time(); #ifndef SERIAL int *changed_diag; stop_cpu_profiling(); request_scheduling(28); checkCudaErrors(cudaMallocHost(&changed_diag, sizeof(int))); stop_profiling(28, LAUNCH); start_profiling_cpu_time(); #endif int max_base_row = 0; do { *changed = false; block_x2 = (cluster_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : cluster_num; grid_x2 = (cluster_num - 1) / block_x2 + 1; stop_cpu_profiling(); request_scheduling(29); mergeSelfClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num, changed); checkCudaErrors(cudaDeviceSynchronize()); stop_profiling(29, LAUNCH); start_profiling_cpu_time(); // #ifdef SLICING // slice_cnt = 5; // int row_start_origin = block_x2; // int row_end_origin = (row_start_origin + block_x2 <= cluster_num) ? 
row_start_origin + block_x2 : cluster_num; // int quotient = row_start_origin / row_end_origin; // int remainder = row_start_origin % row_end_origin; // if(block_x2 >= slice_cnt){ // for(int slice_id = 0; slice_id < slice_cnt; slice_id++){ // int row_start = slice_id*quotient; // int row_end = row_start + quotient; // if((slice_id+1) == slice_cnt) row_end += remainder; // if(row_end > row_end_origin) row_end = row_end_origin; // stop_cpu_profiling(); // request_scheduling(29); // mergeSelfClustersWithSlicing << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num, changed, row_start, row_end); // checkCudaErrors(cudaDeviceSynchronize()); // stop_profiling(29, LAUNCH); // start_profiling_cpu_time(); // } // } // else{ // stop_cpu_profiling(); // request_scheduling(29); // mergeSelfClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, cluster_num, changed); // checkCudaErrors(cudaDeviceSynchronize()); // stop_profiling(29, LAUNCH); // start_profiling_cpu_time(); // } // #endif int base_row = 1, base_column = 0; int sub_matrix_offset_row = 2, sub_matrix_offset_col = 2; int sub_matrix_row = 1, sub_matrix_col = 1; int sub_matrix_num; int max_rows = grid_x2; max_base_row = base_row; while (!(*changed) && cluster_num > BLOCK_SIZE_X && base_row * BLOCK_SIZE_X < cluster_num && base_column < cluster_num) { sub_matrix_num = (cluster_num - base_row - 1) / sub_matrix_offset_row + 1; block_x2 = BLOCK_SIZE_X; grid_x2 = sub_matrix_num * sub_matrix_col; #ifdef SERIAL //Merge clusters in each sub-matrix by moving from top to bottom of the similarity sub-matrix for (int shift_level = 0; !(*changed) && shift_level < sub_matrix_col; shift_level++) { stop_cpu_profiling(); request_scheduling(30); mergeInterClusters<<<grid_x2, block_x2>>>(cluster_matrix, cluster_list, shift_level, base_row, base_column, sub_matrix_row, sub_matrix_col, sub_matrix_offset_row, sub_matrix_offset_col, cluster_num, changed); stop_profiling(30, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); } #else int grid_y2 = sub_matrix_row; dim3 block_size(block_x2, 1, 1); dim3 grid_size(grid_x2, grid_y2, 1); *changed_diag = -1; stop_cpu_profiling(); request_scheduling(31); clustersIntersecCheck << < grid_size, block_size >> > (cluster_matrix, changed_diag, base_row, base_column, sub_matrix_row, sub_matrix_col, sub_matrix_offset_row, sub_matrix_offset_col, cluster_num); stop_profiling(31, LAUNCH); checkCudaErrors(cudaDeviceSynchronize()); start_profiling_cpu_time(); if (*changed_diag > 0) { //Merge clusters in sub-matrix that stay in the changed_diag diagonal by moving from top to bottom of the matrix. stop_cpu_profiling(); request_scheduling(32); mergeInterClusters << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, *changed_diag, base_row, base_column, sub_matrix_row, sub_matrix_col, sub_matrix_offset_row, sub_matrix_offset_col, cluster_num, changed); stop_profiling(32, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); } #endif base_row += sub_matrix_row; sub_matrix_row = (sub_matrix_row * 2 + base_row < max_rows) ? 
sub_matrix_row * 2 : max_rows - base_row; sub_matrix_col *= 2; sub_matrix_offset_row *= 2; sub_matrix_offset_col *= 2; } max_base_row = base_row; if (*changed) { stop_cpu_profiling(); request_scheduling(33); reflexClusterChanges << < grid_x, block_x >> > (cluster_indices_, cluster_offset, cluster_list, size_); stop_profiling(33, LAUNCH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(34); checkCudaErrors(cudaMemset(cluster_offset, 0, (size_ + 1) * sizeof(int))); stop_profiling(34, HTOD); start_profiling_cpu_time(); block_x2 = (cluster_num > BLOCK_SIZE_X) ? BLOCK_SIZE_X : cluster_num; grid_x2 = (cluster_num - 1) / block_x2 + 1; stop_cpu_profiling(); request_scheduling(35); clusterMark << < grid_x2, block_x2 >> > (cluster_list, cluster_offset, cluster_num); stop_profiling(35, LAUNCH); start_profiling_cpu_time(); old_cluster_num = cluster_num; exclusiveScan(cluster_offset, size_ + 1, &cluster_num); stop_cpu_profiling(); request_scheduling(36); clusterCollector << < grid_x2, block_x2 >> > (cluster_list, new_cluster_list, cluster_offset, old_cluster_num); stop_profiling(36, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMalloc(&new_cluster_matrix, cluster_num * cluster_num * sizeof(int))); stop_cpu_profiling(); request_scheduling(37); checkCudaErrors(cudaMemset(new_cluster_matrix, 0, cluster_num * cluster_num * sizeof(int))); stop_profiling(37, HTOD); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(38); rebuildClusterMatrix << < grid_x2, block_x2 >> > (cluster_matrix, cluster_list, new_cluster_matrix, cluster_offset, old_cluster_num, cluster_num); stop_profiling(38, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); stop_cpu_profiling(); request_scheduling(39); checkCudaErrors(cudaFree(cluster_matrix)); stop_profiling(39, HTOD); start_profiling_cpu_time(); cluster_matrix = new_cluster_matrix; tmp = cluster_list; cluster_list = new_cluster_list; new_cluster_list = tmp; } } while (*changed && max_base_row < cluster_num); cluster_num_ = cluster_num; //Reset all cluster indexes to make them start from 0 stop_cpu_profiling(); request_scheduling(40); resetClusterIndexes << < grid_x, block_x >> > (cluster_indices_, cluster_offset, size_); stop_profiling(40, LAUNCH); start_profiling_cpu_time(); checkCudaErrors(cudaDeviceSynchronize()); stop_cpu_profiling(); request_scheduling(41); checkCudaErrors(cudaMemcpy(cluster_indices_host_, cluster_indices_, size_ * sizeof(int), cudaMemcpyDeviceToHost)); stop_profiling(41, DTOH); start_profiling_cpu_time(); // slice_cnt = 3; // int quotient = size_/slice_cnt; // int remainder = size_%slice_cnt; // for(int slice_id = 0; slice_id < slice_cnt; slice_id++){ // int start_idx = slice_id * quotient; // int size = // stop_cpu_profiling(); // request_scheduling(36); // stop_profiling(36, DTOH); // start_profiling_cpu_time(); // } /////////////////////////////////////////////////////////////////// stop_cpu_profiling(); request_scheduling(42); checkCudaErrors(cudaFree(cluster_matrix)); stop_profiling(42, DTOH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(43); checkCudaErrors(cudaFree(cluster_list)); stop_profiling(43, DTOH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(44); checkCudaErrors(cudaFree(new_cluster_list)); stop_profiling(44, DTOH); start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(45); checkCudaErrors(cudaFree(cluster_offset)); stop_profiling(45, DTOH); 
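// Device-side buffers have been released above; the pinned host flags ('changed' and, in the
// non-SERIAL build, 'changed_diag') are released next with cudaFreeHost.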
start_profiling_cpu_time(); stop_cpu_profiling(); request_scheduling(46); checkCudaErrors(cudaFreeHost(changed)); stop_profiling(46, DTOH); start_profiling_cpu_time(); #ifndef SERIAL stop_cpu_profiling(); request_scheduling(47); checkCudaErrors(cudaFreeHost(changed_diag)); stop_profiling(47, DTOH); start_profiling_cpu_time(); #endif } /* Collect points that belong to same clusters and put them together. * * The output is a vector whose each element contains indexes of points * that belong to a same clusters. */ std::vector <GpuEuclideanCluster::GClusterIndex> GpuEuclideanCluster::getOutput() { std::vector <GClusterIndex> cluster_indices(cluster_num_); for (unsigned int i = 0; i < cluster_indices.size(); i++) cluster_indices[i].index_value = -1; for (int i = 0; i < size_; i++) { cluster_indices[cluster_indices_host_[i]].points_in_cluster.push_back(i); cluster_indices[cluster_indices_host_[i]].index_value = cluster_indices_host_[i]; } for (unsigned int i = 0; i < cluster_indices.size();) { int number_of_pts = cluster_indices[i].points_in_cluster.size(); if (number_of_pts < min_cluster_pts_ || number_of_pts > max_cluster_pts_) cluster_indices.erase(cluster_indices.begin() + i); else i++; } return cluster_indices; } /* Generate sparse points. * The number of points is fixed at 10000. * Cannot afford more (e.g. 100 000 points) since * GPU memory is not enough for a matrix with 10 billions cells. */ GpuEuclideanCluster::SamplePointListXYZ GpuEuclideanCluster::generateSample() { GpuEuclideanCluster::SamplePointListXYZ output; output.size = 10000; output.x = (float *) malloc(sizeof(float) * output.size); output.y = (float *) malloc(sizeof(float) * output.size); output.z = (float *) malloc(sizeof(float) * output.size); output.x[0] = 0; output.y[0] = 0; output.z[0] = 0; for (int i = 1; i < output.size; i++) { output.x[i] = (i % 3 == 0) ? output.x[i - 1] + threshold_ + 1 : output.x[i - 1]; output.y[i] = (i % 3 == 1) ? output.y[i - 1] + threshold_ + 1 : output.y[i - 1]; output.z[i] = (i % 3 == 2) ? 
output.z[i - 1] + threshold_ + 1 : output.z[i - 1]; } return output; } GpuEuclideanCluster::~GpuEuclideanCluster() { checkCudaErrors(cudaFree(x_)); checkCudaErrors(cudaFree(y_)); checkCudaErrors(cudaFree(z_)); checkCudaErrors(cudaFree(cluster_indices_)); free(cluster_indices_host_); } /* GPU Profiling */ void start_profiling_execution_time(){ if(GPU_PROFILING == 1) cudaEventRecord(e_event_start, 0); } void start_profiling_response_time(){ if(GPU_PROFILING == 1) cudaEventRecord(r_event_start, 0); } void start_profiling_cpu_time(){ if(GPU_PROFILING == 1){ cpu_id++; gettimeofday(&startTime, NULL); } } void stop_profiling(int id, int type){ if(GPU_PROFILING == 1){ float e_time, r_time; char gpu_id_buf[BUFFER_SIZE]; if(gpu_scheduling_flag_==1){ sched_info_->scheduling_flag = 0; sched_info_->state = NONE; } cudaEventRecord(e_event_stop, 0); cudaEventRecord(r_event_stop, 0); cudaEventSynchronize(e_event_stop); cudaEventSynchronize(r_event_stop); cudaEventElapsedTime(&e_time, e_event_start, e_event_stop); cudaEventElapsedTime(&r_time, r_event_start, r_event_stop); e_time = MS2US(e_time); r_time = MS2US(r_time); // write_data(gid, time, type); sprintf(gpu_id_buf,"g%d",id); //write_profiling_data(id, e_time, r_time, type); write_profiling_data(gpu_id_buf, e_time, r_time, type); // gid++; } } void stop_cpu_profiling(){ if(GPU_PROFILING == 1){ long long int elapsedTime; char cpu_id_buf[BUFFER_SIZE]; gettimeofday(&endTime, NULL); elapsedTime = ((long long int)(endTime.tv_sec - startTime.tv_sec)) * 1000000ll + (endTime.tv_usec - startTime.tv_usec); sprintf(cpu_id_buf,"e%d",cpu_id); write_cpu_profiling_data(cpu_id_buf,elapsedTime); } } void write_profiling_data(const char* id, float e_time, float r_time, int type){ if(GPU_PROFILING == 1){ fprintf(execution_time_fp, "%s, %f, %d\n", id, e_time, type); fprintf(response_time_fp, "%s, %f, %d\n", id, r_time, type); fprintf(remain_time_fp, "%s, %llu\n", id, absolute_deadline_ - get_current_time_us()); } } void write_cpu_profiling_data(const char *id, long long int c_time){ if(GPU_PROFILING == 1){ fprintf(execution_time_fp, "%s, %02d\n", id, c_time); fprintf(response_time_fp, "%s, %02d\n", id, c_time); } } void write_dummy_line(){ if(GPU_PROFILING == 1){ fprintf(execution_time_fp, "-1, -1, -1\n"); fflush(execution_time_fp); fprintf(response_time_fp, "-1, -1, -1\n"); fflush(response_time_fp); fprintf(remain_time_fp, "-1, -1\n"); fflush(remain_time_fp); cpu_id = 0; } } void initialize_file(const char execution_time_filename[], const char response_time_filename[], const char remain_time_filename[]){ if(GPU_PROFILING == 1){ execution_time_fp = fopen(execution_time_filename, "w+"); fprintf(execution_time_fp, "ID, TIME, TYPE\n"); response_time_fp = fopen(response_time_filename, "w+"); fprintf(response_time_fp, "ID, TIME, TYPE\n"); remain_time_fp = fopen(remain_time_filename, "w+"); fprintf(remain_time_fp, "ID, TIME\n"); } } void close_file(){ if(GPU_PROFILING == 1){ fclose(execution_time_fp); fclose(response_time_fp); fclose(remain_time_fp); } } void sig_handler(int signum){ if(signum == SIGUSR1 || signum == SIGUSR2){ is_scheduled_ = 1; return; } else termination(); } void termination(){ if(gpu_scheduling_flag_==1){ sched_info_->state = STOP; shmdt(sched_info_); } if(remove(task_filename_)){ printf("Cannot remove file %s\n", task_filename_); exit(1); } exit(0); } unsigned long long get_current_time_us(){ struct timespec ts; unsigned long long current_time; clock_gettime(CLOCK_REALTIME, &ts); current_time = ts.tv_sec%10000 * 1000000 + ts.tv_nsec/1000; return 
current_time; } void us_sleep(unsigned long long us){ struct timespec ts; ts.tv_sec = us/1000000; ts.tv_nsec = us%1000000*1000; nanosleep(&ts, NULL); return; } void initialize_signal_handler(){ signal(SIGINT, sig_handler); signal(SIGTSTP, sig_handler); signal(SIGQUIT, sig_handler); signal(SIGUSR1, sig_handler); signal(SIGUSR2, sig_handler); } void create_task_file(){ FILE* task_fp; task_fp = fopen(task_filename_, "w"); if(task_fp == NULL){ printf("Cannot create task file at %s\n", task_filename_); exit(1); } fprintf(task_fp, "%d\n", getpid()); fprintf(task_fp, "%d", key_id_); fclose(task_fp); } void get_scheduler_pid(){ FILE* scheduler_fp; printf("Wait the scheduler...\n"); while(1){ scheduler_fp = fopen("/tmp/np_edf_scheduler", "r"); if(scheduler_fp) break; } while(1){ fscanf(scheduler_fp, "%d", &scheduler_pid_); if(scheduler_pid_ != 0) break; } printf("Scheduler pid: %d\n", scheduler_pid_); fclose(scheduler_fp); } void initialize_sched_info(){ if(gpu_scheduling_flag_!=1) return; FILE* sm_key_fp; sm_key_fp = fopen("/tmp/sm_key", "r"); if(sm_key_fp == NULL){ printf("Cannot open /tmp/sm_key\n"); termination(); } key_ = ftok("/tmp/sm_key", key_id_); shmid_ = shmget(key_, sizeof(SchedInfo), 0666|IPC_CREAT); sched_info_ = (SchedInfo*)shmat(shmid_, 0, 0); sched_info_->pid = getpid(); sched_info_->state = NONE; sched_info_->scheduling_flag = 0; } void init_scheduling(char* task_filename, const char* deadline_filename, int key_id){ if(gpu_scheduling_flag_!=1) return; // Get deadline list get_deadline_list(deadline_filename); // Initialize key id for shared memory key_id_ = key_id; // Initialize signal handler initialize_signal_handler(); // Create task file sprintf(task_filename_, "%s", task_filename); create_task_file(); // Get scheduler pid get_scheduler_pid(); // Initialize scheduling information (shared memory data) initialize_sched_info(); sigemptyset(&sigset_); sigaddset(&sigset_, SIGUSR1); sigaddset(&sigset_, SIGUSR2); sigprocmask(SIG_BLOCK, &sigset_, NULL); // sigwait(&sigset_, &sig_); // kill(scheduler_pid_, SIGUSR2); // sigprocmask(SIG_UNBLOCK, &sigset_, NULL); printf("Task [%d] is ready to work\n", getpid()); // sigaddset(&sigset_, SIGUSR1); // sigprocmask(SIG_BLOCK, &sigset_, NULL); } void request_scheduling(int id){ if(gpu_scheduling_flag_ == 1){ unsigned long long relative_deadline = deadline_list_[id]; if(identical_deadline_ != 0) sched_info_->deadline = absolute_deadline_; else sched_info_->deadline = get_current_time_us() + relative_deadline; sched_info_->state = WAIT; // printf("Request schedule - deadline: %llu\n", sched_info_->deadline); } start_profiling_response_time(); if(gpu_scheduling_flag_ == 1){ while(1){ kill(scheduler_pid_, SIGUSR1); // if(!sigwait(&sigset_, &sig_)) break; // if(is_scheduled_ == 1) break; if(sched_info_->scheduling_flag == 1) break; } } start_profiling_execution_time(); if(gpu_scheduling_flag_==1){ sched_info_->state = RUN; sched_info_->deadline = 0; } } void get_deadline_list(const char* filename){ FILE* fp; fp = fopen(filename, "r"); if(fp==NULL){ fprintf(stderr, "Cannot find file %s\n", filename); exit(1); } char buf[1024]; long long int deadline; for(int i = 0; i < sizeof(deadline_list_)/sizeof(long long int); i++){ fgets(buf, 1024, fp); strtok(buf, "\n"); sscanf(buf, "%*s, %llu", &deadline); deadline_list_[i] = deadline; } } void set_identical_deadline(unsigned long long identical_deadline){ identical_deadline_ = identical_deadline; } void set_absolute_deadline(){ absolute_deadline_ = get_current_time_us() + identical_deadline_; } void 
set_slicing_flag(int flag){ slicing_flag_ = flag; } void set_gpu_scheduling_flag(int gpu_scheduling_flag){ gpu_scheduling_flag_ = gpu_scheduling_flag; }
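The merge loop above compacts surviving clusters with a mark -> exclusive-scan -> collect sequence (clusterMark, exclusiveScan, clusterCollector). Below is a minimal sketch of that step using Thrust's exclusive_scan rather than the project's own exclusiveScan helper; the array sizes and mark values are made up for illustration and are not taken from the file.

#include <cstdio>
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>

// cluster_offset[i] holds 1 if cluster i survives merging, 0 otherwise.
// An exclusive scan turns the marks into compacted destination slots, and the
// element one past the end gives the new cluster count.
int main() {
    const int n = 8;                                   // stands in for old_cluster_num
    int h_marks[n + 1] = {1, 0, 1, 1, 0, 0, 1, 0, 0};  // trailing 0 so the scan yields the total

    int *d_marks;
    cudaMalloc(&d_marks, (n + 1) * sizeof(int));
    cudaMemcpy(d_marks, h_marks, (n + 1) * sizeof(int), cudaMemcpyHostToDevice);

    thrust::device_ptr<int> p(d_marks);
    thrust::exclusive_scan(p, p + n + 1, p);           // in place: marks -> offsets

    int new_cluster_num = 0;
    cudaMemcpy(&new_cluster_num, d_marks + n, sizeof(int), cudaMemcpyDeviceToHost);
    printf("new cluster count = %d\n", new_cluster_num);   // prints 4 for the marks above

    cudaFree(d_marks);
    return 0;
}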
4bd6ecceafeff8fc37134f7de9d4bc8ea15e8f23.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by op2.m on 19-Oct-2012 16:21:04 // // user function __device__ #include "dotPV.h" // CUDA kernel function __global__ void op_cuda_dotPV( double *arg0, double *arg1, double *arg2, int offset_s, int set_size ) { double arg2_l[1]; for (int d=0; d<1; d++) arg2_l[d]=ZERO_double; // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { // user-supplied kernel call dotPV( arg0+n, arg1+n, arg2_l ); } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg2[d+blockIdx.x*1],arg2_l[d]); } // host stub function void op_par_loop_dotPV(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2 ){ double *arg2h = (double *)arg2.data; int nargs = 3; op_arg args[3]; args[0] = arg0; args[1] = arg1; args[2] = arg2; if (OP_diags>2) { printf(" kernel routine w/o indirection: dotPV\n"); } op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(4); OP_kernels[4].name = name; OP_kernels[4].count += 1; if (set->size >0) { op_timers_core(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_4 int nthread = OP_BLOCK_SIZE_4; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg2.data = OP_reduct_h + reduct_bytes; arg2.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg2.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); hipLaunchKernelGGL(( op_cuda_dotPV), dim3(nblocks),dim3(nthread),nshared, 0, (double *) arg0.data_d, (double *) arg1.data_d, (double *) arg2.data_d, offset_s, set->size ); cutilSafeCall(hipDeviceSynchronize()); cutilCheckMsg("op_cuda_dotPV execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg2h[d] = arg2h[d] + ((double *)arg2.data)[d+b*1]; arg2.data = (char *)arg2h; op_mpi_reduce(&arg2,arg2h); } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[4].time += wall_t2 - wall_t1; OP_kernels[4].transfer += (float)set->size * arg0.size; OP_kernels[4].transfer += (float)set->size * arg1.size; }
4bd6ecceafeff8fc37134f7de9d4bc8ea15e8f23.cu
// // auto-generated by op2.m on 19-Oct-2012 16:21:04 // // user function __device__ #include "dotPV.h" // CUDA kernel function __global__ void op_cuda_dotPV( double *arg0, double *arg1, double *arg2, int offset_s, int set_size ) { double arg2_l[1]; for (int d=0; d<1; d++) arg2_l[d]=ZERO_double; // process set elements for (int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x) { // user-supplied kernel call dotPV( arg0+n, arg1+n, arg2_l ); } // global reductions for(int d=0; d<1; d++) op_reduction<OP_INC>(&arg2[d+blockIdx.x*1],arg2_l[d]); } // host stub function void op_par_loop_dotPV(char const *name, op_set set, op_arg arg0, op_arg arg1, op_arg arg2 ){ double *arg2h = (double *)arg2.data; int nargs = 3; op_arg args[3]; args[0] = arg0; args[1] = arg1; args[2] = arg2; if (OP_diags>2) { printf(" kernel routine w/o indirection: dotPV\n"); } op_mpi_halo_exchanges(set, nargs, args); // initialise timers double cpu_t1, cpu_t2, wall_t1=0, wall_t2=0; op_timing_realloc(4); OP_kernels[4].name = name; OP_kernels[4].count += 1; if (set->size >0) { op_timers_core(&cpu_t1, &wall_t1); // set CUDA execution parameters #ifdef OP_BLOCK_SIZE_4 int nthread = OP_BLOCK_SIZE_4; #else // int nthread = OP_block_size; int nthread = 128; #endif int nblocks = 200; // transfer global reduction data to GPU int maxblocks = nblocks; int reduct_bytes = 0; int reduct_size = 0; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); reduct_size = MAX(reduct_size,sizeof(double)); reallocReductArrays(reduct_bytes); reduct_bytes = 0; arg2.data = OP_reduct_h + reduct_bytes; arg2.data_d = OP_reduct_d + reduct_bytes; for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) ((double *)arg2.data)[d+b*1] = ZERO_double; reduct_bytes += ROUND_UP(maxblocks*1*sizeof(double)); mvReductArraysToDevice(reduct_bytes); // work out shared memory requirements per element int nshared = 0; // execute plan int offset_s = nshared*OP_WARPSIZE; nshared = MAX(nshared*nthread,reduct_size*nthread); op_cuda_dotPV<<<nblocks,nthread,nshared>>>( (double *) arg0.data_d, (double *) arg1.data_d, (double *) arg2.data_d, offset_s, set->size ); cutilSafeCall(cudaDeviceSynchronize()); cutilCheckMsg("op_cuda_dotPV execution failed\n"); // transfer global reduction data back to CPU mvReductArraysToHost(reduct_bytes); for (int b=0; b<maxblocks; b++) for (int d=0; d<1; d++) arg2h[d] = arg2h[d] + ((double *)arg2.data)[d+b*1]; arg2.data = (char *)arg2h; op_mpi_reduce(&arg2,arg2h); } op_mpi_set_dirtybit(nargs, args); // update kernel record op_timers_core(&cpu_t2, &wall_t2); OP_kernels[4].time += wall_t2 - wall_t1; OP_kernels[4].transfer += (float)set->size * arg0.size; OP_kernels[4].transfer += (float)set->size * arg1.size; }
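The .hip and .cu listings above differ only in the mechanical rewrites applied by hipify: the triple-chevron launch becomes hipLaunchKernelGGL, and cuda* runtime calls become hip* calls. The standalone sketch below illustrates that mapping on a toy kernel; it is not OP2's generated code.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(double *x, double a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

int main() {
    const int n = 1024;
    double *d_x;
    cudaMalloc(&d_x, n * sizeof(double));
    cudaMemset(d_x, 0, n * sizeof(double));

    // CUDA launch, as written in the .cu listing above:
    scale<<<(n + 127) / 128, 128, 0, 0>>>(d_x, 2.0, n);
    // After hipify, the .hip listing expresses the same launch as:
    //   hipLaunchKernelGGL(scale, dim3((n + 127) / 128), dim3(128), 0, 0, d_x, 2.0, n);
    // and cudaDeviceSynchronize()/cudaMalloc() become hipDeviceSynchronize()/hipMalloc().

    cudaDeviceSynchronize();
    cudaFree(d_x);
    return 0;
}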
73631ed86f14f196f12a851c3eb051e280f53158.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>

__global__ void K() {
    // Original: if (condition) atomicInc(&counter, 1000000);
    //unsigned val = __ballot(condition);    // leader.
    //unsigned wcount = __popc(val);
    //if (threadIdx.x % 32 == 0) printf("%d\n", __popc(val));
    printf("%d\n", __ffs(0xF0000000));
}

int main() {
    hipLaunchKernelGGL(( K), dim3(1), dim3(1), 0, 0, );
    hipDeviceSynchronize();
    return 0;
}
73631ed86f14f196f12a851c3eb051e280f53158.cu
#include <stdio.h>
#include <cuda.h>

__global__ void K() {
    // Original: if (condition) atomicInc(&counter, 1000000);
    //unsigned val = __ballot(condition);    // leader.
    //unsigned wcount = __popc(val);
    //if (threadIdx.x % 32 == 0) printf("%d\n", __popc(val));
    printf("%d\n", __ffs(0xF0000000));
}

int main() {
    K<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
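The commented-out lines in this kernel are sketching warp-aggregated atomics: vote with a ballot, count the voters with __popc, and let one leader lane (found via __ffs) perform a single atomic add for the whole warp instead of one atomic per thread. Below is an independent, complete sketch of that pattern using the newer __ballot_sync form; the kernel name and the odd-number example are mine, not part of the file above.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void countOdds(const int *data, int n, unsigned int *counter) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    bool condition = (i < n) && (data[i] % 2 != 0);

    unsigned mask = __ballot_sync(0xFFFFFFFFu, condition);  // one bit per lane with condition true
    if (condition) {
        int leader = __ffs(mask) - 1;                        // lowest set bit = leader lane
        if ((threadIdx.x % 32) == leader)
            atomicAdd(counter, __popc(mask));                // leader adds the warp's whole count
    }
}

int main() {
    const int n = 1 << 20;
    int *d_data;
    unsigned int *d_counter, h_counter = 0;
    cudaMalloc(&d_data, n * sizeof(int));
    cudaMalloc(&d_counter, sizeof(unsigned int));
    cudaMemset(d_counter, 0, sizeof(unsigned int));

    int *h_data = new int[n];
    for (int i = 0; i < n; ++i) h_data[i] = i;               // known answer: n/2 odd values
    cudaMemcpy(d_data, h_data, n * sizeof(int), cudaMemcpyHostToDevice);

    countOdds<<<(n + 255) / 256, 256>>>(d_data, n, d_counter);
    cudaMemcpy(&h_counter, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    printf("odd count = %u (expected %d)\n", h_counter, n / 2);

    delete[] h_data;
    cudaFree(d_data);
    cudaFree(d_counter);
    return 0;
}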
293d7e5f9167ae7630c19cdaa942a0fde489044e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <sys/time.h> #define TILE_DIM 32 using namespace std; __global__ void matmul(double *a, double* b, double *c, int aw, int bw) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0.0; for (int i = 0; i < aw; i++) { sum += a[row*aw+i] * b[i*bw+col]; } c[row*bw+col] = sum; } __global__ void matmul_shared(double *a, double* b, double *c, int aw, int bw) { // (*) create 2D shared memory arrays of size (TILE_DIM x TILE_DIM) for matrices (a) and (b) __shared__ double aTile[TILE_DIM][TILE_DIM], bTile[TILE_DIM][TILE_DIM]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0.0; // This loop is necessary since (aw) and (bh) are multiple of TILE_DIM for (int ia=0; ia<aw; ia+=TILE_DIM) { // copy block data of iteration (ia) to shared memory aTile[threadIdx.y][threadIdx.x] = a[row*aw + ia + threadIdx.x]; bTile[threadIdx.y][threadIdx.x] = b[(ia+threadIdx.y)*bw+col]; // (*) synchronize if necessary __syncthreads(); // (*) do multiplication for (int i = 0; i < TILE_DIM; i++) { sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x]; } // (*) synchronize if necessary __syncthreads(); } c[row*bw+col] = sum; } int main() { time_t sTime = time(NULL); timeval tt1, tt2; int ms; double fms; int ah=2560; int aw=2560; int bh=aw; int bw=2560; // host arrays double *a = (double*)malloc(ah*aw*sizeof(double)); double *b = (double*)malloc(bh*bw*sizeof(double)); double *c = (double*)malloc(ah*bw*sizeof(double)); for (int i=0;i<ah;i++) for (int j=0;j<aw;j++) a[i*ah+j] = (double)(i+j); for (int i=0;i<bh;i++) for (int j=0;j<bw;j++) b[i*bh+j] = (double)(i-j); // device arrays double *a_dev; hipMalloc((void**) &a_dev, ah*aw * sizeof(double)); double *b_dev; hipMalloc((void**) &b_dev, bh*bw * sizeof(double)); double *c_dev; hipMalloc((void**) &c_dev, ah*bw * sizeof(double)); // copy to device hipMemcpy(a_dev, a, ah*aw * sizeof(double) , hipMemcpyHostToDevice); hipMemcpy(b_dev, b, bh*bw * sizeof(double) , hipMemcpyHostToDevice); // kernel run dim3 nBlocks(bw/TILE_DIM, ah/TILE_DIM, 1); dim3 nThreads(TILE_DIM, TILE_DIM, 1); gettimeofday( &tt1, NULL ); //matmul <<< nBlocks, nThreads >>> (a_dev, b_dev, c_dev, aw, bw); hipLaunchKernelGGL(( matmul_shared) , dim3(nBlocks), dim3(nThreads) , 0, 0, a_dev, b_dev, c_dev, aw, bw); hipDeviceSynchronize(); gettimeofday( &tt2, NULL ); // copy from device hipMemcpy(c, c_dev, ah*bw * sizeof(double) , hipMemcpyDeviceToHost); // timing ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = ((double)ms)/1000000.0; cout << "Comp time = " << fms << endl; double dNumOps = 2.0 * (double)aw * (double)aw * (double)bw; double gflops = 1.0e-9 * dNumOps/fms; cout << "GFlops = " << gflops << endl; cout << "value check = " << c[145] << endl; }
293d7e5f9167ae7630c19cdaa942a0fde489044e.cu
#include <iostream> #include <sys/time.h> #define TILE_DIM 32 using namespace std; __global__ void matmul(double *a, double* b, double *c, int aw, int bw) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0.0; for (int i = 0; i < aw; i++) { sum += a[row*aw+i] * b[i*bw+col]; } c[row*bw+col] = sum; } __global__ void matmul_shared(double *a, double* b, double *c, int aw, int bw) { // (*) create 2D shared memory arrays of size (TILE_DIM x TILE_DIM) for matrices (a) and (b) __shared__ double aTile[TILE_DIM][TILE_DIM], bTile[TILE_DIM][TILE_DIM]; int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; double sum = 0.0; // This loop is necessary since (aw) and (bh) are multiple of TILE_DIM for (int ia=0; ia<aw; ia+=TILE_DIM) { // copy block data of iteration (ia) to shared memory aTile[threadIdx.y][threadIdx.x] = a[row*aw + ia + threadIdx.x]; bTile[threadIdx.y][threadIdx.x] = b[(ia+threadIdx.y)*bw+col]; // (*) synchronize if necessary __syncthreads(); // (*) do multiplication for (int i = 0; i < TILE_DIM; i++) { sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x]; } // (*) synchronize if necessary __syncthreads(); } c[row*bw+col] = sum; } int main() { time_t sTime = time(NULL); timeval tt1, tt2; int ms; double fms; int ah=2560; int aw=2560; int bh=aw; int bw=2560; // host arrays double *a = (double*)malloc(ah*aw*sizeof(double)); double *b = (double*)malloc(bh*bw*sizeof(double)); double *c = (double*)malloc(ah*bw*sizeof(double)); for (int i=0;i<ah;i++) for (int j=0;j<aw;j++) a[i*ah+j] = (double)(i+j); for (int i=0;i<bh;i++) for (int j=0;j<bw;j++) b[i*bh+j] = (double)(i-j); // device arrays double *a_dev; cudaMalloc((void**) &a_dev, ah*aw * sizeof(double)); double *b_dev; cudaMalloc((void**) &b_dev, bh*bw * sizeof(double)); double *c_dev; cudaMalloc((void**) &c_dev, ah*bw * sizeof(double)); // copy to device cudaMemcpy(a_dev, a, ah*aw * sizeof(double) , cudaMemcpyHostToDevice); cudaMemcpy(b_dev, b, bh*bw * sizeof(double) , cudaMemcpyHostToDevice); // kernel run dim3 nBlocks(bw/TILE_DIM, ah/TILE_DIM, 1); dim3 nThreads(TILE_DIM, TILE_DIM, 1); gettimeofday( &tt1, NULL ); //matmul <<< nBlocks, nThreads >>> (a_dev, b_dev, c_dev, aw, bw); matmul_shared <<< nBlocks, nThreads >>> (a_dev, b_dev, c_dev, aw, bw); cudaThreadSynchronize(); gettimeofday( &tt2, NULL ); // copy from device cudaMemcpy(c, c_dev, ah*bw * sizeof(double) , cudaMemcpyDeviceToHost); // timing ms = (tt2.tv_sec - tt1.tv_sec); ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec); fms = ((double)ms)/1000000.0; cout << "Comp time = " << fms << endl; double dNumOps = 2.0 * (double)aw * (double)aw * (double)bw; double gflops = 1.0e-9 * dNumOps/fms; cout << "GFlops = " << gflops << endl; cout << "value check = " << c[145] << endl; }
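The host code above times the kernel with gettimeofday() bracketed by an explicit device synchronize. An alternative is CUDA event timing, which measures the launch on the GPU timeline directly; the sketch below shows only that timing pattern on a toy kernel and is not a modification of the matmul program.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void scaleRow(double *m, int w, double s) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < w) m[i] *= s;
}

int main() {
    const int w = 1 << 20;
    double *d_m;
    cudaMalloc(&d_m, w * sizeof(double));
    cudaMemset(d_m, 0, w * sizeof(double));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    scaleRow<<<(w + 255) / 256, 256>>>(d_m, w, 2.0);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);              // waits for the kernel; no separate device sync needed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time = %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_m);
    return 0;
}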
d4614ffa2f5147b7e8cd2719218a81e08d8760a7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> //#include <torch/tensor.h> #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include "common.h" #include "device_tensor.h" namespace { template <typename DType, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = gradOutput[batch][plane][n]; DType c = ScalarConvert<Acctype, DType>::to(input[batch][plane][n] - mean); return Float2<DType, Acctype>(g, g * c); } const Acctype mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; template <typename DType, typename Acctype> struct SumOp { __device__ SumOp(DeviceTensor<DType, 3> i) : input(i){} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = input[batch][plane][n]; return Float2<DType, Acctype>(g, g * g); } DType mean; DeviceTensor<DType, 3> input; }; // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename DType> __global__ void BatchNorm_Forward_kernel ( DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; output[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Backward_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradMean, DeviceTensor<DType, 1> gradStd, bool train) { /* declarations of the variables */ /* Get the index and channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (train && threadIdx.x == 0) { gradMean[c] = - gradOutputSum * gamma[c] * invstd; gradStd[c] = - dotP * 
gamma[c] * invstd * invstd; } if (gradinput.numElements() > 0) { for (int batch = 0; batch < gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP * invstd; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void Sum_Square_Forward_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> sum, DeviceTensor<DType, 1> square) { int c = blockIdx.x; /* main operation */ SumOp<DType, DType> g(input); Float2<DType, DType> res = reduce<Float2<DType, DType>, SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c); DType xsum = res.v1; DType xsquare = res.v2; if (threadIdx.x == 0) { sum[c] = xsum; square[c] = xsquare; } } template <typename DType> __global__ void Sum_Square_Backward_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> gradSum, DeviceTensor<DType, 1> gradSquare) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] * input[batch][c][x]; } } } } // namespcae at::Tensor BatchNorm_Forward_CUDA( const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_) { auto output_ = at::zeros_like(input_); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, stream, output, input, mean, std, gamma, beta); })); AT_ASSERT(hipGetLastError() == hipSuccess); return output_; } std::vector<at::Tensor> BatchNorm_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_, bool train) { /* outputs*/ at::Tensor gradinput_ = at::zeros_like(input_); at::Tensor gradgamma_ = at::zeros_like(gamma_); at::Tensor gradbeta_ = at::zeros_like(beta_); at::Tensor gradMean_ = at::zeros_like(mean_); at::Tensor gradStd_ = at::zeros_like(std_); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_); 
DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_); DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_); /* kernel function */ hipLaunchKernelGGL(( BatchNorm_Backward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradoutput, input, gradinput, gradgamma, gradbeta, mean, std, gamma, beta, gradMean, gradStd, train); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> Sum_Square_Forward_CUDA( const at::Tensor input_) { /* outputs */ at::Tensor sum_ = at::zeros({input_.size(1)}, input_.options()); at::Tensor square_ = at::zeros({input_.size(1)}, input_.options()); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_); DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_); /* kernel function */ hipLaunchKernelGGL(( Sum_Square_Forward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, input, sum, square); })); AT_ASSERT(hipGetLastError() == hipSuccess); return {sum_, square_}; } at::Tensor Sum_Square_Backward_CUDA( const at::Tensor input_, const at::Tensor gradSum_, const at::Tensor gradSquare_) { /* outputs */ at::Tensor gradInput_ = at::zeros_like(input_); /* cuda utils*/ hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_); DeviceTensor<scalar_t, 1> gradSquare =devicetensor<scalar_t, 1>(gradSquare_); /* kernel function */ hipLaunchKernelGGL(( Sum_Square_Backward_kernel<scalar_t>) , dim3(blocks), dim3(threads), 0, stream, gradInput, input, gradSum, gradSquare); })); AT_ASSERT(hipGetLastError() == hipSuccess); return gradInput_; }
d4614ffa2f5147b7e8cd2719218a81e08d8760a7.cu
#include <vector> //#include <torch/tensor.h> #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include "common.h" #include "device_tensor.h" namespace { template <typename DType, typename Acctype, typename DeviceTensor3> struct GradOp { __device__ GradOp(Acctype m, const DeviceTensor3 i, const DeviceTensor3 g) : mean(m), input(i), gradOutput(g) {} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = gradOutput[batch][plane][n]; DType c = ScalarConvert<Acctype, DType>::to(input[batch][plane][n] - mean); return Float2<DType, Acctype>(g, g * c); } const Acctype mean; const DeviceTensor3 input; const DeviceTensor3 gradOutput; }; template <typename DType, typename Acctype> struct SumOp { __device__ SumOp(DeviceTensor<DType, 3> i) : input(i){} __device__ __forceinline__ Float2<DType, Acctype> operator()(int batch, int plane, int n) { DType g = input[batch][plane][n]; return Float2<DType, Acctype>(g, g * g); } DType mean; DeviceTensor<DType, 3> input; }; // Sum across (batch, x/y/z) applying Op() pointwise template<typename T, typename Op, typename DeviceTensor3> __device__ T reduce(Op op, DeviceTensor3 tensor, int plane) { T sum = (T)0; for (int batch = 0; batch < tensor.getSize(0); ++batch) { for (int x = threadIdx.x; x < tensor.getSize(2); x += blockDim.x) { sum += op(batch, plane, x); } } // sum over NumThreads within a warp sum = warpSum(sum); // 'transpose', and reduce within warp again __shared__ T shared[32]; __syncthreads(); if (threadIdx.x % WARP_SIZE == 0) { shared[threadIdx.x / WARP_SIZE] = sum; } if (threadIdx.x >= blockDim.x / WARP_SIZE && threadIdx.x < WARP_SIZE) { // zero out the other entries in shared shared[threadIdx.x] = (T)0; } __syncthreads(); if (threadIdx.x / WARP_SIZE == 0) { sum = warpSum(shared[threadIdx.x]); if (threadIdx.x == 0) { shared[0] = sum; } } __syncthreads(); // Everyone picks it up, should be broadcast into the whole gradInput return shared[0]; } template <typename DType> __global__ void BatchNorm_Forward_kernel ( DeviceTensor<DType, 3> output, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta) { int c = blockIdx.x; /* main operation */ for (int b = 0; b < input.getSize(0); ++b) { for (int x = threadIdx.x; x < input.getSize(2); x += blockDim.x) { DType inp = input[b][c][x]; output[b][c][x] = gamma[c] * (inp - mean[c]) / std[c] + beta[c]; } } } template <typename DType> __global__ void BatchNorm_Backward_kernel ( DeviceTensor<DType, 3> gradoutput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 3> gradinput, DeviceTensor<DType, 1> gradgamma, DeviceTensor<DType, 1> gradbeta, DeviceTensor<DType, 1> mean, DeviceTensor<DType, 1> std, DeviceTensor<DType, 1> gamma, DeviceTensor<DType, 1> beta, DeviceTensor<DType, 1> gradMean, DeviceTensor<DType, 1> gradStd, bool train) { /* declarations of the variables */ /* Get the index and channels */ int c = blockIdx.x; /* main operation */ GradOp<DType, DType, DeviceTensor<DType, 3>> g(mean[c], input, gradoutput); Float2<DType, DType> res = reduce<Float2<DType, DType>, GradOp<DType, DType, DeviceTensor<DType, 3>>, DeviceTensor<DType, 3>>(g, gradoutput, c); DType gradOutputSum = res.v1; DType dotP = res.v2; DType invstd = DType(1.0) / std[c]; DType gradScale = invstd * gamma[c]; if (train && threadIdx.x == 0) { gradMean[c] = - gradOutputSum * gamma[c] * invstd; gradStd[c] = - dotP * gamma[c] * invstd * invstd; } if (gradinput.numElements() > 0) { for (int batch = 0; batch < 
gradoutput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradoutput.getSize(2); x += blockDim.x) { gradinput[batch][c][x] = gradoutput[batch][c][x] * gradScale; } } } if (gradgamma.numElements() > 0) { if (threadIdx.x == 0) { gradgamma[c] += dotP * invstd; } } if (gradbeta.numElements() > 0) { if (threadIdx.x == 0) { gradbeta[c] += gradOutputSum; } } } template <typename DType> __global__ void Sum_Square_Forward_kernel ( DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> sum, DeviceTensor<DType, 1> square) { int c = blockIdx.x; /* main operation */ SumOp<DType, DType> g(input); Float2<DType, DType> res = reduce<Float2<DType, DType>, SumOp<DType, DType>, DeviceTensor<DType, 3>>(g, input, c); DType xsum = res.v1; DType xsquare = res.v2; if (threadIdx.x == 0) { sum[c] = xsum; square[c] = xsquare; } } template <typename DType> __global__ void Sum_Square_Backward_kernel ( DeviceTensor<DType, 3> gradInput, DeviceTensor<DType, 3> input, DeviceTensor<DType, 1> gradSum, DeviceTensor<DType, 1> gradSquare) { int c = blockIdx.x; /* main operation */ for (int batch = 0; batch < gradInput.getSize(0); ++batch) { for (int x = threadIdx.x; x < gradInput.getSize(2); x += blockDim.x) { gradInput[batch][c][x] = gradSum[c] + 2 * gradSquare[c] * input[batch][c][x]; } } } } // namespcae at::Tensor BatchNorm_Forward_CUDA( const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_) { auto output_ = at::zeros_like(input_); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> output = devicetensor<scalar_t, 3>(output_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); /* kernel function */ BatchNorm_Forward_kernel<scalar_t><<<blocks, threads, 0, stream>>>( output, input, mean, std, gamma, beta); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return output_; } std::vector<at::Tensor> BatchNorm_Backward_CUDA( const at::Tensor gradoutput_, const at::Tensor input_, const at::Tensor mean_, const at::Tensor std_, const at::Tensor gamma_, const at::Tensor beta_, bool train) { /* outputs*/ at::Tensor gradinput_ = at::zeros_like(input_); at::Tensor gradgamma_ = at::zeros_like(gamma_); at::Tensor gradbeta_ = at::zeros_like(beta_); at::Tensor gradMean_ = at::zeros_like(mean_); at::Tensor gradStd_ = at::zeros_like(std_); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "BatchNorm_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradoutput = devicetensor<scalar_t, 3>(gradoutput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 3> gradinput = devicetensor<scalar_t, 3>(gradinput_); DeviceTensor<scalar_t, 1> gradgamma = devicetensor<scalar_t, 1>(gradgamma_); DeviceTensor<scalar_t, 1> gradbeta = devicetensor<scalar_t, 1>(gradbeta_); DeviceTensor<scalar_t, 1> mean = devicetensor<scalar_t, 1>(mean_); DeviceTensor<scalar_t, 1> std = devicetensor<scalar_t, 1>(std_); 
DeviceTensor<scalar_t, 1> gamma = devicetensor<scalar_t, 1>(gamma_); DeviceTensor<scalar_t, 1> beta = devicetensor<scalar_t, 1>(beta_); DeviceTensor<scalar_t, 1> gradMean = devicetensor<scalar_t, 1>(gradMean_); DeviceTensor<scalar_t, 1> gradStd = devicetensor<scalar_t, 1>(gradStd_); /* kernel function */ BatchNorm_Backward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>( gradoutput, input, gradinput, gradgamma, gradbeta, mean, std, gamma, beta, gradMean, gradStd, train); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {gradinput_, gradMean_, gradStd_, gradgamma_, gradbeta_}; } std::vector<at::Tensor> Sum_Square_Forward_CUDA( const at::Tensor input_) { /* outputs */ at::Tensor sum_ = at::zeros({input_.size(1)}, input_.options()); at::Tensor square_ = at::zeros({input_.size(1)}, input_.options()); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_forward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> sum = devicetensor<scalar_t, 1>(sum_); DeviceTensor<scalar_t, 1> square = devicetensor<scalar_t, 1>(square_); /* kernel function */ Sum_Square_Forward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>(input, sum, square); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return {sum_, square_}; } at::Tensor Sum_Square_Backward_CUDA( const at::Tensor input_, const at::Tensor gradSum_, const at::Tensor gradSquare_) { /* outputs */ at::Tensor gradInput_ = at::zeros_like(input_); /* cuda utils*/ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 blocks(input_.size(1)); dim3 threads(getNumThreads(input_.size(2))); AT_DISPATCH_FLOATING_TYPES(input_.type(), "SumSquare_Backward_CUDA", ([&] { /* Device tensors */ DeviceTensor<scalar_t, 3> gradInput = devicetensor<scalar_t, 3>(gradInput_); DeviceTensor<scalar_t, 3> input = devicetensor<scalar_t, 3>(input_); DeviceTensor<scalar_t, 1> gradSum = devicetensor<scalar_t, 1>(gradSum_); DeviceTensor<scalar_t, 1> gradSquare =devicetensor<scalar_t, 1>(gradSquare_); /* kernel function */ Sum_Square_Backward_kernel<scalar_t> <<<blocks, threads, 0, stream>>>(gradInput, input, gradSum, gradSquare); })); AT_ASSERT(cudaGetLastError() == cudaSuccess); return gradInput_; }
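The reduce() helper in both listings above leans on warpSum() from common.h, which is not shown here. A typical shuffle-based warp sum, combined with the same warp-then-shared two-stage reduction shape, looks like the sketch below; this is an assumption about what warpSum does (with WARP_SIZE taken as 32), not the file's actual implementation.

#include <cstdio>
#include <cuda_runtime.h>

template <typename T>
__device__ T warpSumSketch(T val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xFFFFFFFFu, val, offset);
    return val;                              // lane 0 of each warp ends up with the warp total
}

__global__ void blockSum(const float *in, float *out, int n) {
    // Per-thread partial sum (grid-stride), then warp reduction, then a shared-memory
    // "transpose" and a second warp reduction -- the same shape as reduce() above.
    float sum = 0.f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        sum += in[i];

    sum = warpSumSketch(sum);

    __shared__ float shared[32];
    if (threadIdx.x % 32 == 0) shared[threadIdx.x / 32] = sum;
    __syncthreads();

    if (threadIdx.x < 32) {
        float v = (threadIdx.x < blockDim.x / 32) ? shared[threadIdx.x] : 0.f;
        v = warpSumSketch(v);
        if (threadIdx.x == 0) atomicAdd(out, v);
    }
}

int main() {
    const int n = 1 << 16;
    float *d_in, *d_out, h_out = 0.f;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemset(d_out, 0, sizeof(float));

    float *h_in = new float[n];
    for (int i = 0; i < n; ++i) h_in[i] = 1.0f;          // known answer: n
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);

    blockSum<<<64, 256>>>(d_in, d_out, n);
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %.0f (expected %d)\n", h_out, n);

    delete[] h_in;
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}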
0527cc6a75540b560560f9ff674a7a56e2e4ac30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "MatrixCoverGPU.cuh" __global__ void delete_rows_and_columns(int*dl_matrix, int* deleted_rows, int* deleted_cols, const int *search_depth, const int *selected_row_id, const int *total_dl_matrix_row_num, const int *total_dl_matrix_col_num, int * offset_col, int * offset_row, int * offset_matrix, const int graph_count) { for (int k = blockIdx.x; k< graph_count; k = k + gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_col_num[k]; i=i+blockDim.x) { if (dl_matrix[offset_matrix[k]+selected_row_id[k]*total_dl_matrix_col_num[k]+i] == 1 && deleted_cols[offset_col[k]+i] == 0)//we only delete rows that are not deleted or removed { deleted_cols[offset_col[k]+i] = search_depth[k]; for (int j = 0; j < total_dl_matrix_row_num[k]; j++) { if (dl_matrix[offset_matrix[k]+j*total_dl_matrix_col_num[k]+i] == 1 && deleted_rows[offset_row[k]+j] == 0) { atomicExch(offset_row[k]+deleted_rows+j, search_depth[k]); } } } } } } __global__ void init_vectors(int* vec, const int *vec_length, int* offset, const int graph_count) { for (int k=blockIdx.x; k<graph_count; k= k+gridDim.x) { for (int i = threadIdx.x; i < vec_length[k]; i = i + blockDim.x) { vec[offset[k]+i] = 0; } } } /* void get_largest_value_launcher(int* vec, hipcub::KeyValuePair<int, int> *argmax, int vec_length) { void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipcub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, vec, argmax, vec_length); // Allocate temporary storage hipMalloc(&d_temp_storage, temp_storage_bytes); // Run argmax-reduction hipcub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, vec, argmax, vec_length); hipFree(d_temp_storage); } */ __global__ void get_largest_value(int* vec, int *conflict_col_id, const int *vec_length, int *max, int *offset, const int graph_count) { for (int k=blockIdx.x; k<graph_count; k = k+gridDim.x) { for (int i = threadIdx.x; i< vec_length[k]; i = i + blockDim.x) { atomicMax(max+k, vec[offset[k]+i]); } for (int i = threadIdx.x; i< vec_length[k]; i = i + blockDim.x) { if (vec[offset[k]+i]==max) { conflict_col_id[k] = i; } } } } __global__ void init_vectors_reserved(int *vec, int *vec_length, int *offset, const int graph_count) { for (int k = blockIdx.x, k<graph_count; k = k+gridDim.x) { for (int i = threadIdx.x; i < vec_length[k]; i= i+ blockDim.x) { if (vec[offset[k]+i] != -1) { vec[offset[k]+i] = 0; } } } } __global__ void check_existance_of_candidate_rows(int* deleted_rows, int* row_group, const int *search_depth, int *token, int *total_dl_matrix_row_num, int* selected_row_id, int* offset_row, const int graph_count) { for (int k = blockIdx.x, k<graph_count; k = k+gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i + blockDim.x) { //std::cout<<deleted_rows[i]<<' '<<row_group[i]<<std::endl; if (deleted_rows[offset[k]+i] == 0 && row_group[offset[k]+i] == search_depth[k]) { //std::cout<<"Candidate Row Found...."<<std::endl; atomicExch(token+k, 1); atomicMin(selected_row_id+k, i); } } } } __global__ void get_vertex_row_group(int* row_group, int* dl_matrix, int* vertex_num, int* total_dl_matrix_row_num, int* total_dl_matrix_col_num, int * offset_row, int *offset_matrix, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i + blockDim.x) { for (int j = 0; j < vertex_num[k]; j++) { row_group[offset_row[k]+i]+= dl_matrix[offset_matrix[k] + i*total_dl_matrix_col_num[k]+j] 
* (j + 1); } } } } /* removed __global__ void select_row(int* deleted_rows, int* row_group, int* search_depth, int *total_dl_matrix_row_num, int* selected_row_id) { for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) { if (deleted_rows[i] == 0 && row_group[i] == search_depth) { atomicExch(selected_row_id, i); atomicMin(selected_row_id, i); } } __syncthreads(); } */ __global__ void recover_deleted_rows(int* deleted_rows, int *search_depth, int *total_dl_matrix_row_num, int* offset_row, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i=i+blockDim.x) { if (abs(deleted_rows[offset_row[k]+i]) > search_depth[k] || deleted_rows[offset_row[k]+i] == search_depth[k]) { deleted_rows[offset_row[k]+i] = 0; } } } } __global__ void recover_deleted_cols(int* deleted_cols, int *search_depth, int* total_dl_matrix_col_num, int *offset_col, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_col_num[k]; i=i+blockDim.x) { if (deleted_cols[offset_col[k]+i] >= search_depth[k]) { deleted_cols[offset_col[k]+i] = 0; } } } } __global__ void recover_results(int* results, int *search_depth, int *total_dl_matrix_row_num, int *offset_row, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i + blockDim.x) { if (results[offset_row[k]+i] == search_depth[k]) { results[offset_row[k]+i] = 0; } } } } __global__ void get_conflict_node_id(int* deleted_rows, int* row_group, int *search_depth, int *conflict_node_id, int *total_dl_matrix_row_num, int * offset_row, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i+ blockDim.x) { if (row_group[offset_row[k]+i] == search_depth[k] + 1 && deleted_rows[offset_row[k]+i] > conflict_node_id[k]) { atomicExch(conflict_node_id+k, deleted_rows[offset_row[k]+i]); } } } //__syncthreads(); } //problem __global__ void get_conflict_col(int* dl_matrix, int* deleted_rows, int* deleted_cols, int* row_group, int *conflict_node_id, int *search_depth, int *conflict_col_id, int *vertex_num, int *total_dl_matrix_row_num, int *total_dl_matrix_col_num, int * offset_col, int * offset_row, int * offset_matrix, const int graph_count) { //*conflict_col_id = 0; for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i=i+blockDim.x) { //find the conflict edge that connects current node and the most closest node. 
if (row_group[offset_row[k]+i] == search_depth[k] + 1 && deleted_rows[offset_row[k]+i] == conflict_node_id[k]) { for (int j = total_dl_matrix_col_num[k] - 1; j > vertex_num[k]; j--) { if (dl_matrix[offset_matrix[k]+i*total_dl_matrix_col_num[k]+j] * deleted_cols[offset_col[k]+j] == conflict_node_id[k]) { atomicExch(conflict_col_id+k, j); } } } } } __syncthreads(); } __global__ void remove_cols(int* deleted_cols, int* col_group, int *conflict_col_id, int *total_dl_matrix_col_num, int *offset_col, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_col_num[k]; i = i + blockDim.x) { if (col_group[offset_col[k]+i] == col_group[conflict_col_id[k]]) { deleted_cols[offset_col[k]+i] = -1; } } } } //+============================================================================================================================================= __global__ void print_vec(int *vec, int start, int vec_length) { for(int i=0; i<vec_length; i++) { printf("%d ", vec[start+i]); } printf("\n"); } /* __global__ void print_mat(int *mat[], int total_dl_matrix_row_num, int total_dl_matrix_col_num) { for(int i=0; i<total_dl_matrix_row_num; i++) { for(int j=0; j<total_dl_matrix_col_num; j++) { printf("%d ", mat[i][j]); } printf("\n"); } } */ __global__ void add_gpu(int *vector, int idx, int val) { vector[idx] = vector[idx] + val } __global__ void set_vector_value(int* vector, int idx, int val) { vector[idx]=val; } void mc_solver(int* dl_matrix, int* results, int* deleted_cols, int* deleted_rows, int* col_group, int* row_group, int* conflict_count, int *vertex_num, int *total_dl_matrix_row_num, int *total_dl_matrix_col_num, int * offset_col, int * offset_row, int * offset_matrix, const int graph_count) { //to be refreshed if one conflict reaches many counts int *search_depth = new int [graph_count]; int *selected_row_id = new int [graph_count]; int *vertex_num = new int [graph_count]; int *total_dl_matrix_col_num = new int [graph_count]; int *total_dl_matrix_row_num= new int [graph_count]; int *current_conflict_count= new int [graph_count]; int *conflict_node_id= new int [graph_count]; int *conflict_col_id= new int [graph_count]; int *existance_of_candidate_rows=new int[graph_count]; const int hard_conflict_threshold = 500; int *existance_of_candidate_rows_gpu; int *conflict_col_id_gpu; int *selected_row_id_gpu; int *conflict_node_id_gpu; hipMalloc(&existance_of_candidate_rows_gpu, sizeof(int)); hipMalloc(&selected_row_id_gpu, sizeof(int)); hipMalloc(&conflict_node_id_gpu, sizeof(int)); hipMalloc(&conflict_col_id_gpu, sizeof(int)); char brk; const int block_count = 1; const int thread_count = 32; //init lots of vectors hipLaunchKernelGGL(( init_vectors), dim3(block_count),dim3(thread_count), 0, 0, conflict_count, total_dl_matrix_col_num); hipLaunchKernelGGL(( init_vectors), dim3(block_count),dim3(thread_count), 0, 0, deleted_cols, total_dl_matrix_col_num); hipLaunchKernelGGL(( init_vectors), dim3(block_count),dim3(thread_count), 0, 0, deleted_rows, total_dl_matrix_row_num); hipLaunchKernelGGL(( init_vectors), dim3(block_count),dim3(thread_count), 0, 0, results, total_dl_matrix_row_num); //init_vectors<<<block_count,thread_count>>>(row_group, total_dl_matrix_row_num); //__syncthreads(); //get_vertex_row_group<<<block_count,thread_count >>>(row_group, dl_matrix, vertex_num, total_dl_matrix_row_num); //__syncthreads(); //print_mat<<<block_count,thread_count>>>(dl_matrix, total_dl_matrix_row_num, total_dl_matrix_col_num); hipLaunchKernelGGL(( 
print_vec), dim3(1),dim3(1), 0, 0, deleted_cols, total_dl_matrix_col_num_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( print_vec), dim3(1),dim3(1), 0, 0, deleted_rows, total_dl_matrix_row_num_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( print_vec), dim3(1),dim3(1), 0, 0, results, total_dl_matrix_row_num_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( print_vec), dim3(1),dim3(1), 0, 0, row_group, total_dl_matrix_row_num_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( print_vec), dim3(1),dim3(1), 0, 0, col_group, total_dl_matrix_col_num_gpu); hipDeviceSynchronize(); for (search_depth = 1; search_depth <= vertex_num;) { std::cout<<"search depth is "<<search_depth<<std::endl; std::cout<<"deleted_cols "<<std::endl; hipLaunchKernelGGL(( print_vec), dim3(1),dim3(1), 0, 0, deleted_cols, total_dl_matrix_col_num_gpu); hipDeviceSynchronize(); std::cout<<"deleted_rows "<<std::endl; hipLaunchKernelGGL(( print_vec), dim3(1),dim3(1), 0, 0, deleted_rows, total_dl_matrix_row_num_gpu); hipDeviceSynchronize(); std::cout<<"results "<<std::endl; hipLaunchKernelGGL(( print_vec), dim3(1),dim3(1), 0, 0, results, total_dl_matrix_row_num_gpu); hipDeviceSynchronize(); std::cin>>brk; hipMemset(existance_of_candidate_rows_gpu,0,sizeof(int)); hipMemset(selected_row_id_gpu,-1,sizeof(int)); //existance_of_candidate_rows=0; //selected_row_id=-1; hipLaunchKernelGGL(( check_existance_of_candidate_rows) , dim3(block_count), dim3(thread_count) , 0, 0, deleted_rows, row_group, search_depth, existance_of_candidate_rows_gpu, total_dl_matrix_row_num); //__syncthreads(); hipMemcpy(existance_of_candidate_rows, existance_of_candidate_rows_gpu, sizeof(int), hipMemcpyDeviceToHost); std::cout<<"check_existance_of_candidate_rows "<<std::endl; if (*existance_of_candidate_rows==1) { //check if there are candidate rows existing, if no, do backtrace hipLaunchKernelGGL(( select_row) , dim3(block_count), dim3(thread_count) , 0, 0, deleted_rows, row_group, search_depth, total_dl_matrix_row_num, selected_row_id_gpu); //select row and add to results hipMemcpy(selected_row_id, selected_row_id_gpu, sizeof(int), hipMemcpyDeviceToHost); std::cout<<"selected row id is "<<*selected_row_id<<std::endl; //__syncthreads(); //hipMemset(&results[*selected_row_id],search_depth,sizeof(int)); hipLaunchKernelGGL(( set_vector_value), dim3(1),dim3(1), 0, 0, results, *selected_row_id, search_depth); hipLaunchKernelGGL(( delete_rows_and_columns) , dim3(block_count), dim3(thread_count) , 0, 0, dl_matrix, deleted_rows, deleted_cols, search_depth, *selected_row_id, total_dl_matrix_row_num, total_dl_matrix_col_num); //delete covered rows and columns //__syncthreads(); //deleted_rows[*selected_row_id] = -search_depth; hipLaunchKernelGGL(( set_vector_value), dim3(1),dim3(1), 0, 0, deleted_rows, *selected_row_id, -search_depth); search_depth++; //next step //print_vec(deleted_cols, total_dl_matrix_col_num); //print_vec(deleted_rows, total_dl_matrix_row_num); //print_vec(conflict_count, total_dl_matrix_col_num); //print_vec(results, total_dl_matrix_row_num); continue; } else { //do backtrace search_depth--; if (search_depth > 0) { //conflict_node_id = get_conflict_node_id(deleted_rows, row_group, search_depth, total_dl_matrix_row_num); hipLaunchKernelGGL(( get_conflict_node_id) , dim3(block_count), dim3(thread_count) , 0, 0, deleted_rows, row_group, search_depth, conflict_node_id_gpu, total_dl_matrix_row_num); hipMemcpy(conflict_node_id, conflict_node_id_gpu, sizeof(int), hipMemcpyDeviceToHost); //__syncthreads(); hipLaunchKernelGGL(( get_conflict_col) , 
dim3(block_count), dim3(thread_count) , 0, 0, dl_matrix, deleted_rows, deleted_cols, row_group, *conflict_node_id, search_depth, conflict_col_id_gpu, vertex_num, total_dl_matrix_row_num, total_dl_matrix_col_num); hipMemcpy(conflict_col_id, conflict_col_id_gpu, sizeof(int), hipMemcpyDeviceToHost); //conflict_count[*conflict_col_id]++; //update conflict edge count hipLaunchKernelGGL(( add_gpu), dim3(1),dim3(1), 0, 0, &deleted_rows[*selected_row_id],1); hipLaunchKernelGGL(( recover_deleted_rows), dim3(block_count), dim3(thread_count) , 0, 0, deleted_rows, search_depth, total_dl_matrix_row_num); //recover deleted rows previously selected rows //__syncthreads(); hipLaunchKernelGGL(( recover_deleted_cols), dim3(block_count), dim3(thread_count) , 0, 0, deleted_cols, search_depth, total_dl_matrix_col_num); //recover deleted cols except afftected by previously selected rows //__syncthreads(); hipLaunchKernelGGL(( recover_results), dim3(block_count), dim3(thread_count) , 0, 0, results, search_depth, total_dl_matrix_row_num); //recover results //__syncthreads(); hipMemcpy(&current_conflict_count, &conflict_count[*conflict_col_id], sizeof(int), hipMemcpyDeviceToHost); if (current_conflict_count > hard_conflict_threshold) { search_depth = 1; hipLaunchKernelGGL(( init_vectors), dim3(block_count), dim3(thread_count) , 0, 0, conflict_count, total_dl_matrix_col_num); hipLaunchKernelGGL(( init_vectors_reserved), dim3(block_count), dim3(thread_count) , 0, 0, deleted_cols, total_dl_matrix_col_num); hipLaunchKernelGGL(( init_vectors), dim3(block_count), dim3(thread_count) , 0, 0, deleted_rows, total_dl_matrix_row_num); hipLaunchKernelGGL(( init_vectors), dim3(block_count), dim3(thread_count) , 0, 0, results, total_dl_matrix_row_num); //__syncthreads(); hipLaunchKernelGGL(( remove_cols), dim3(block_count), dim3(thread_count) , 0, 0, deleted_cols, col_group, *conflict_col_id, total_dl_matrix_col_num); //__syncthreads(); //deleted_cols[*conflict_col_id] = -1; hipMemset(&deleted_cols[*conflict_col_id],-1,sizeof(int)); } } else { //if all vertices are gone through, directly remove the edge with largest conflict count. search_depth = 1; hipLaunchKernelGGL(( get_largest_value), dim3(block_count), dim3(thread_count) , 0, 0, conflict_count, conflict_col_id_gpu, total_dl_matrix_col_num); hipMemcpy(conflict_col_id, conflict_col_id_gpu, sizeof(int), hipMemcpyDeviceToHost); //__syncthreads(); hipLaunchKernelGGL(( init_vectors), dim3(block_count), dim3(thread_count) , 0, 0, conflict_count, total_dl_matrix_col_num); hipLaunchKernelGGL(( init_vectors_reserved), dim3(block_count), dim3(thread_count) , 0, 0, deleted_cols, total_dl_matrix_col_num); hipLaunchKernelGGL(( init_vectors), dim3(block_count), dim3(thread_count) , 0, 0, deleted_rows, total_dl_matrix_row_num); hipLaunchKernelGGL(( init_vectors), dim3(block_count), dim3(thread_count) , 0, 0, results, total_dl_matrix_row_num); //__syncthreads(); hipLaunchKernelGGL(( remove_cols), dim3(block_count), dim3(thread_count) , 0, 0, deleted_cols, col_group, *conflict_col_id, total_dl_matrix_col_num); } //print_vec(deleted_cols, total_dl_matrix_col_num); //print_vec(deleted_rows, total_dl_matrix_row_num); //print_vec(conflict_count, total_dl_matrix_col_num); //print_vec(results, total_dl_matrix_row_num); } } hipFree(existance_of_candidate_rows_gpu); hipFree(selected_row_id_gpu); hipFree(conflict_col_id_gpu); hipFree(conflict_node_id_gpu); delete existance_of_candidate_rows; delete conflict_col_id; delete selected_row_id; delete conflict_node_id; }
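Every kernel in the listing above batches work the same way: blocks stride over graphs (k), threads stride over that graph's own rows or columns, and offset_row/offset_col/offset_matrix locate each graph's slice inside one packed allocation. The standalone sketch below mirrors only that indexing scheme (as in init_vectors); the kernel and variable names are mine, and the lengths/offsets are made-up example data.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void zeroPackedVectors(int *data, const int *len, const int *offset, int graph_count) {
    for (int k = blockIdx.x; k < graph_count; k += gridDim.x)       // one block per graph, strided
        for (int i = threadIdx.x; i < len[k]; i += blockDim.x)      // one thread per element, strided
            data[offset[k] + i] = 0;
}

int main() {
    const int graph_count = 3;
    int h_len[graph_count]    = {4, 7, 5};
    int h_offset[graph_count] = {0, 4, 11};         // prefix sums of h_len
    const int total = 16;

    int *d_data, *d_len, *d_offset;
    cudaMalloc(&d_data, total * sizeof(int));
    cudaMalloc(&d_len, graph_count * sizeof(int));
    cudaMalloc(&d_offset, graph_count * sizeof(int));
    cudaMemset(d_data, 0xFF, total * sizeof(int));                  // garbage to be overwritten
    cudaMemcpy(d_len, h_len, graph_count * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_offset, h_offset, graph_count * sizeof(int), cudaMemcpyHostToDevice);

    zeroPackedVectors<<<2, 32>>>(d_data, d_len, d_offset, graph_count);

    int h_data[total];
    cudaMemcpy(h_data, d_data, total * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < total; ++i) printf("%d ", h_data[i]);       // prints 16 zeros
    printf("\n");

    cudaFree(d_data); cudaFree(d_len); cudaFree(d_offset);
    return 0;
}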
0527cc6a75540b560560f9ff674a7a56e2e4ac30.cu
#include "MatrixCoverGPU.cuh" __global__ void delete_rows_and_columns(int*dl_matrix, int* deleted_rows, int* deleted_cols, const int *search_depth, const int *selected_row_id, const int *total_dl_matrix_row_num, const int *total_dl_matrix_col_num, int * offset_col, int * offset_row, int * offset_matrix, const int graph_count) { for (int k = blockIdx.x; k< graph_count; k = k + gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_col_num[k]; i=i+blockDim.x) { if (dl_matrix[offset_matrix[k]+selected_row_id[k]*total_dl_matrix_col_num[k]+i] == 1 && deleted_cols[offset_col[k]+i] == 0)//we only delete rows that are not deleted or removed { deleted_cols[offset_col[k]+i] = search_depth[k]; for (int j = 0; j < total_dl_matrix_row_num[k]; j++) { if (dl_matrix[offset_matrix[k]+j*total_dl_matrix_col_num[k]+i] == 1 && deleted_rows[offset_row[k]+j] == 0) { atomicExch(offset_row[k]+deleted_rows+j, search_depth[k]); } } } } } } __global__ void init_vectors(int* vec, const int *vec_length, int* offset, const int graph_count) { for (int k=blockIdx.x; k<graph_count; k= k+gridDim.x) { for (int i = threadIdx.x; i < vec_length[k]; i = i + blockDim.x) { vec[offset[k]+i] = 0; } } } /* void get_largest_value_launcher(int* vec, cub::KeyValuePair<int, int> *argmax, int vec_length) { void* d_temp_storage = NULL; size_t temp_storage_bytes = 0; cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, vec, argmax, vec_length); // Allocate temporary storage cudaMalloc(&d_temp_storage, temp_storage_bytes); // Run argmax-reduction cub::DeviceReduce::ArgMax(d_temp_storage, temp_storage_bytes, vec, argmax, vec_length); cudaFree(d_temp_storage); } */ __global__ void get_largest_value(int* vec, int *conflict_col_id, const int *vec_length, int *max, int *offset, const int graph_count) { for (int k=blockIdx.x; k<graph_count; k = k+gridDim.x) { for (int i = threadIdx.x; i< vec_length[k]; i = i + blockDim.x) { atomicMax(max+k, vec[offset[k]+i]); } for (int i = threadIdx.x; i< vec_length[k]; i = i + blockDim.x) { if (vec[offset[k]+i]==max) { conflict_col_id[k] = i; } } } } __global__ void init_vectors_reserved(int *vec, int *vec_length, int *offset, const int graph_count) { for (int k = blockIdx.x, k<graph_count; k = k+gridDim.x) { for (int i = threadIdx.x; i < vec_length[k]; i= i+ blockDim.x) { if (vec[offset[k]+i] != -1) { vec[offset[k]+i] = 0; } } } } __global__ void check_existance_of_candidate_rows(int* deleted_rows, int* row_group, const int *search_depth, int *token, int *total_dl_matrix_row_num, int* selected_row_id, int* offset_row, const int graph_count) { for (int k = blockIdx.x, k<graph_count; k = k+gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i + blockDim.x) { //std::cout<<deleted_rows[i]<<' '<<row_group[i]<<std::endl; if (deleted_rows[offset[k]+i] == 0 && row_group[offset[k]+i] == search_depth[k]) { //std::cout<<"Candidate Row Found...."<<std::endl; atomicExch(token+k, 1); atomicMin(selected_row_id+k, i); } } } } __global__ void get_vertex_row_group(int* row_group, int* dl_matrix, int* vertex_num, int* total_dl_matrix_row_num, int* total_dl_matrix_col_num, int * offset_row, int *offset_matrix, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i + blockDim.x) { for (int j = 0; j < vertex_num[k]; j++) { row_group[offset_row[k]+i]+= dl_matrix[offset_matrix[k] + i*total_dl_matrix_col_num[k]+j] * (j + 1); } } } } /* removed __global__ void select_row(int* deleted_rows, int* row_group, 
int* search_depth, int *total_dl_matrix_row_num, int* selected_row_id) { for (int i = threadIdx.x; i < total_dl_matrix_row_num; i = i + blockDim.x) { if (deleted_rows[i] == 0 && row_group[i] == search_depth) { atomicExch(selected_row_id, i); atomicMin(selected_row_id, i); } } __syncthreads(); } */ __global__ void recover_deleted_rows(int* deleted_rows, int *search_depth, int *total_dl_matrix_row_num, int* offset_row, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i=i+blockDim.x) { if (abs(deleted_rows[offset_row[k]+i]) > search_depth[k] || deleted_rows[offset_row[k]+i] == search_depth[k]) { deleted_rows[offset_row[k]+i] = 0; } } } } __global__ void recover_deleted_cols(int* deleted_cols, int *search_depth, int* total_dl_matrix_col_num, int *offset_col, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_col_num[k]; i=i+blockDim.x) { if (deleted_cols[offset_col[k]+i] >= search_depth[k]) { deleted_cols[offset_col[k]+i] = 0; } } } } __global__ void recover_results(int* results, int *search_depth, int *total_dl_matrix_row_num, int *offset_row, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i + blockDim.x) { if (results[offset_row[k]+i] == search_depth[k]) { results[offset_row[k]+i] = 0; } } } } __global__ void get_conflict_node_id(int* deleted_rows, int* row_group, int *search_depth, int *conflict_node_id, int *total_dl_matrix_row_num, int * offset_row, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i = i+ blockDim.x) { if (row_group[offset_row[k]+i] == search_depth[k] + 1 && deleted_rows[offset_row[k]+i] > conflict_node_id[k]) { atomicExch(conflict_node_id+k, deleted_rows[offset_row[k]+i]); } } } //__syncthreads(); } //problem __global__ void get_conflict_col(int* dl_matrix, int* deleted_rows, int* deleted_cols, int* row_group, int *conflict_node_id, int *search_depth, int *conflict_col_id, int *vertex_num, int *total_dl_matrix_row_num, int *total_dl_matrix_col_num, int * offset_col, int * offset_row, int * offset_matrix, const int graph_count) { //*conflict_col_id = 0; for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_row_num[k]; i=i+blockDim.x) { //find the conflict edge that connects current node and the most closest node. 
if (row_group[offset_row[k]+i] == search_depth[k] + 1 && deleted_rows[offset_row[k]+i] == conflict_node_id[k]) { for (int j = total_dl_matrix_col_num[k] - 1; j > vertex_num[k]; j--) { if (dl_matrix[offset_matrix[k]+i*total_dl_matrix_col_num[k]+j] * deleted_cols[offset_col[k]+j] == conflict_node_id[k]) { atomicExch(conflict_col_id+k, j); } } } } } __syncthreads(); } __global__ void remove_cols(int* deleted_cols, int* col_group, int *conflict_col_id, int *total_dl_matrix_col_num, int *offset_col, const int graph_count) { for (int k = blockIdx.x; k<graph_count; k = k+ gridDim.x) { for (int i = threadIdx.x; i < total_dl_matrix_col_num[k]; i = i + blockDim.x) { if (col_group[offset_col[k]+i] == col_group[conflict_col_id[k]]) { deleted_cols[offset_col[k]+i] = -1; } } } } //+============================================================================================================================================= __global__ void print_vec(int *vec, int start, int vec_length) { for(int i=0; i<vec_length; i++) { printf("%d ", vec[start+i]); } printf("\n"); } /* __global__ void print_mat(int *mat[], int total_dl_matrix_row_num, int total_dl_matrix_col_num) { for(int i=0; i<total_dl_matrix_row_num; i++) { for(int j=0; j<total_dl_matrix_col_num; j++) { printf("%d ", mat[i][j]); } printf("\n"); } } */ __global__ void add_gpu(int *vector, int idx, int val) { vector[idx] = vector[idx] + val } __global__ void set_vector_value(int* vector, int idx, int val) { vector[idx]=val; } void mc_solver(int* dl_matrix, int* results, int* deleted_cols, int* deleted_rows, int* col_group, int* row_group, int* conflict_count, int *vertex_num, int *total_dl_matrix_row_num, int *total_dl_matrix_col_num, int * offset_col, int * offset_row, int * offset_matrix, const int graph_count) { //to be refreshed if one conflict reaches many counts int *search_depth = new int [graph_count]; int *selected_row_id = new int [graph_count]; int *vertex_num = new int [graph_count]; int *total_dl_matrix_col_num = new int [graph_count]; int *total_dl_matrix_row_num= new int [graph_count]; int *current_conflict_count= new int [graph_count]; int *conflict_node_id= new int [graph_count]; int *conflict_col_id= new int [graph_count]; int *existance_of_candidate_rows=new int[graph_count]; const int hard_conflict_threshold = 500; int *existance_of_candidate_rows_gpu; int *conflict_col_id_gpu; int *selected_row_id_gpu; int *conflict_node_id_gpu; cudaMalloc(&existance_of_candidate_rows_gpu, sizeof(int)); cudaMalloc(&selected_row_id_gpu, sizeof(int)); cudaMalloc(&conflict_node_id_gpu, sizeof(int)); cudaMalloc(&conflict_col_id_gpu, sizeof(int)); char brk; const int block_count = 1; const int thread_count = 32; //init lots of vectors init_vectors<<<block_count,thread_count>>>(conflict_count, total_dl_matrix_col_num); init_vectors<<<block_count,thread_count>>>(deleted_cols, total_dl_matrix_col_num); init_vectors<<<block_count,thread_count>>>(deleted_rows, total_dl_matrix_row_num); init_vectors<<<block_count,thread_count>>>(results, total_dl_matrix_row_num); //init_vectors<<<block_count,thread_count>>>(row_group, total_dl_matrix_row_num); //__syncthreads(); //get_vertex_row_group<<<block_count,thread_count >>>(row_group, dl_matrix, vertex_num, total_dl_matrix_row_num); //__syncthreads(); //print_mat<<<block_count,thread_count>>>(dl_matrix, total_dl_matrix_row_num, total_dl_matrix_col_num); print_vec<<<1,1>>>(deleted_cols, total_dl_matrix_col_num_gpu); cudaDeviceSynchronize(); print_vec<<<1,1>>>(deleted_rows, total_dl_matrix_row_num_gpu); 
cudaDeviceSynchronize(); print_vec<<<1,1>>>(results, total_dl_matrix_row_num_gpu); cudaDeviceSynchronize(); print_vec<<<1,1>>>(row_group, total_dl_matrix_row_num_gpu); cudaDeviceSynchronize(); print_vec<<<1,1>>>(col_group, total_dl_matrix_col_num_gpu); cudaDeviceSynchronize(); for (search_depth = 1; search_depth <= vertex_num;) { std::cout<<"search depth is "<<search_depth<<std::endl; std::cout<<"deleted_cols "<<std::endl; print_vec<<<1,1>>>(deleted_cols, total_dl_matrix_col_num_gpu); cudaDeviceSynchronize(); std::cout<<"deleted_rows "<<std::endl; print_vec<<<1,1>>>(deleted_rows, total_dl_matrix_row_num_gpu); cudaDeviceSynchronize(); std::cout<<"results "<<std::endl; print_vec<<<1,1>>>(results, total_dl_matrix_row_num_gpu); cudaDeviceSynchronize(); std::cin>>brk; cudaMemset(existance_of_candidate_rows_gpu,0,sizeof(int)); cudaMemset(selected_row_id_gpu,-1,sizeof(int)); //existance_of_candidate_rows=0; //selected_row_id=-1; check_existance_of_candidate_rows <<<block_count, thread_count >>> (deleted_rows, row_group, search_depth, existance_of_candidate_rows_gpu, total_dl_matrix_row_num); //__syncthreads(); cudaMemcpy(existance_of_candidate_rows, existance_of_candidate_rows_gpu, sizeof(int), cudaMemcpyDeviceToHost); std::cout<<"check_existance_of_candidate_rows "<<std::endl; if (*existance_of_candidate_rows==1) { //check if there are candidate rows existing, if no, do backtrace select_row <<<block_count, thread_count >>> (deleted_rows, row_group, search_depth, total_dl_matrix_row_num, selected_row_id_gpu); //select row and add to results cudaMemcpy(selected_row_id, selected_row_id_gpu, sizeof(int), cudaMemcpyDeviceToHost); std::cout<<"selected row id is "<<*selected_row_id<<std::endl; //__syncthreads(); //cudaMemset(&results[*selected_row_id],search_depth,sizeof(int)); set_vector_value<<<1,1>>>(results, *selected_row_id, search_depth); delete_rows_and_columns <<<block_count, thread_count >>> (dl_matrix, deleted_rows, deleted_cols, search_depth, *selected_row_id, total_dl_matrix_row_num, total_dl_matrix_col_num); //delete covered rows and columns //__syncthreads(); //deleted_rows[*selected_row_id] = -search_depth; set_vector_value<<<1,1>>>(deleted_rows, *selected_row_id, -search_depth); search_depth++; //next step //print_vec(deleted_cols, total_dl_matrix_col_num); //print_vec(deleted_rows, total_dl_matrix_row_num); //print_vec(conflict_count, total_dl_matrix_col_num); //print_vec(results, total_dl_matrix_row_num); continue; } else { //do backtrace search_depth--; if (search_depth > 0) { //conflict_node_id = get_conflict_node_id(deleted_rows, row_group, search_depth, total_dl_matrix_row_num); get_conflict_node_id <<<block_count, thread_count >>> (deleted_rows, row_group, search_depth, conflict_node_id_gpu, total_dl_matrix_row_num); cudaMemcpy(conflict_node_id, conflict_node_id_gpu, sizeof(int), cudaMemcpyDeviceToHost); //__syncthreads(); get_conflict_col <<<block_count, thread_count >>> (dl_matrix, deleted_rows, deleted_cols, row_group, *conflict_node_id, search_depth, conflict_col_id_gpu, vertex_num, total_dl_matrix_row_num, total_dl_matrix_col_num); cudaMemcpy(conflict_col_id, conflict_col_id_gpu, sizeof(int), cudaMemcpyDeviceToHost); //conflict_count[*conflict_col_id]++; //update conflict edge count add_gpu<<<1,1>>>(&deleted_rows[*selected_row_id],1); recover_deleted_rows<<<block_count, thread_count >>>(deleted_rows, search_depth, total_dl_matrix_row_num); //recover deleted rows previously selected rows //__syncthreads(); recover_deleted_cols<<<block_count, thread_count >>>(deleted_cols, 
search_depth, total_dl_matrix_col_num); //recover deleted cols except afftected by previously selected rows //__syncthreads(); recover_results<<<block_count, thread_count >>>(results, search_depth, total_dl_matrix_row_num); //recover results //__syncthreads(); cudaMemcpy(&current_conflict_count, &conflict_count[*conflict_col_id], sizeof(int), cudaMemcpyDeviceToHost); if (current_conflict_count > hard_conflict_threshold) { search_depth = 1; init_vectors<<<block_count, thread_count >>>(conflict_count, total_dl_matrix_col_num); init_vectors_reserved<<<block_count, thread_count >>>(deleted_cols, total_dl_matrix_col_num); init_vectors<<<block_count, thread_count >>>(deleted_rows, total_dl_matrix_row_num); init_vectors<<<block_count, thread_count >>>(results, total_dl_matrix_row_num); //__syncthreads(); remove_cols<<<block_count, thread_count >>>(deleted_cols, col_group, *conflict_col_id, total_dl_matrix_col_num); //__syncthreads(); //deleted_cols[*conflict_col_id] = -1; cudaMemset(&deleted_cols[*conflict_col_id],-1,sizeof(int)); } } else { //if all vertices are gone through, directly remove the edge with largest conflict count. search_depth = 1; get_largest_value<<<block_count, thread_count >>>(conflict_count, conflict_col_id_gpu, total_dl_matrix_col_num); cudaMemcpy(conflict_col_id, conflict_col_id_gpu, sizeof(int), cudaMemcpyDeviceToHost); //__syncthreads(); init_vectors<<<block_count, thread_count >>>(conflict_count, total_dl_matrix_col_num); init_vectors_reserved<<<block_count, thread_count >>>(deleted_cols, total_dl_matrix_col_num); init_vectors<<<block_count, thread_count >>>(deleted_rows, total_dl_matrix_row_num); init_vectors<<<block_count, thread_count >>>(results, total_dl_matrix_row_num); //__syncthreads(); remove_cols<<<block_count, thread_count >>>(deleted_cols, col_group, *conflict_col_id, total_dl_matrix_col_num); } //print_vec(deleted_cols, total_dl_matrix_col_num); //print_vec(deleted_rows, total_dl_matrix_row_num); //print_vec(conflict_count, total_dl_matrix_col_num); //print_vec(results, total_dl_matrix_row_num); } } cudaFree(existance_of_candidate_rows_gpu); cudaFree(selected_row_id_gpu); cudaFree(conflict_col_id_gpu); cudaFree(conflict_node_id_gpu); delete existance_of_candidate_rows; delete conflict_col_id; delete selected_row_id; delete conflict_node_id; }
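The multi-graph kernels in this file index one flat buffer per quantity: offset_col[k], offset_row[k], and offset_matrix[k] give the first element belonging to graph k, and element (i, j) of graph k's dl_matrix lives at offset_matrix[k] + i*total_dl_matrix_col_num[k] + j. The short host-side sketch below illustrates that layout with made-up sizes; the names mirror the kernel parameters but nothing here is taken from the solver's real data.

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> rows = {2, 3};                   // rows per graph (illustrative)
    std::vector<int> cols = {4, 2};                   // cols per graph (illustrative)
    std::vector<int> offset_matrix(rows.size());
    int total = 0;
    for (size_t k = 0; k < rows.size(); ++k) {        // prefix sum of per-graph matrix sizes
        offset_matrix[k] = total;
        total += rows[k] * cols[k];
    }
    std::vector<int> dl_matrix(total, 0);             // all graphs share one flat, row-major buffer
    int k = 0, i = 1, j = 2;                          // element (1,2) of graph 0
    dl_matrix[offset_matrix[k] + i * cols[k] + j] = 1;
    printf("flat index = %d (total elements = %d)\n", offset_matrix[k] + i * cols[k] + j, total);  // prints 6 and 14
    return 0;
}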
c9be1289cc914c33c776d7ff056898a746644285.hip
// !!! This is a file automatically generated by hipify!!! #include <cudf/copying.hpp> #include <rmm/exec_policy.hpp> #include <rmm/cuda_stream_view.hpp> #include "MessageUtil.cuh" namespace ral { namespace communication { namespace messages { std::pair<int32_t, int32_t> getCharsColumnStartAndEnd(const cudf::strings_column_view & column){ cudf::size_type offset = column.offset(); cudf::column_view offsets_column = column.offsets(); int32_t chars_column_start, chars_column_end; hipMemcpy(&chars_column_start, (void*)(offsets_column.head<int32_t>() + offset), sizeof(int32_t), hipMemcpyDeviceToHost); hipMemcpy(&chars_column_end, (void*)(offsets_column.head<int32_t>() + offset + column.size()), sizeof(int32_t), hipMemcpyDeviceToHost); return {chars_column_start, chars_column_end}; } std::unique_ptr<cudf::column> getRebasedStringOffsets(const cudf::strings_column_view & column, int32_t chars_column_start){ cudf::size_type offset = column.offset(); cudf::column_view offsets_column = column.offsets(); // NOTE that the offsets column size is usually one more than the number of strings. It starts at 0 and ends at chars_column.size() auto new_offsets = cudf::allocate_like(offsets_column, column.size() + 1, cudf::mask_allocation_policy::NEVER); auto mutable_col = new_offsets->mutable_view(); cudf::copy_range_in_place(offsets_column, mutable_col, offset, offset + column.size() + 1, 0); rmm::cuda_stream_view stream; thrust::transform(rmm::exec_policy(stream), mutable_col.begin<int32_t>(), mutable_col.end<int32_t>(), mutable_col.begin<int32_t>(), [chars_column_start] __device__ (int32_t value){ return value - chars_column_start; }); return new_offsets; } } // namespace messages } // namespace communication } // namespace ral
c9be1289cc914c33c776d7ff056898a746644285.cu
#include <cudf/copying.hpp> #include <rmm/exec_policy.hpp> #include <rmm/cuda_stream_view.hpp> #include "MessageUtil.cuh" namespace ral { namespace communication { namespace messages { std::pair<int32_t, int32_t> getCharsColumnStartAndEnd(const cudf::strings_column_view & column){ cudf::size_type offset = column.offset(); cudf::column_view offsets_column = column.offsets(); int32_t chars_column_start, chars_column_end; cudaMemcpy(&chars_column_start, (void*)(offsets_column.head<int32_t>() + offset), sizeof(int32_t), cudaMemcpyDeviceToHost); cudaMemcpy(&chars_column_end, (void*)(offsets_column.head<int32_t>() + offset + column.size()), sizeof(int32_t), cudaMemcpyDeviceToHost); return {chars_column_start, chars_column_end}; } std::unique_ptr<cudf::column> getRebasedStringOffsets(const cudf::strings_column_view & column, int32_t chars_column_start){ cudf::size_type offset = column.offset(); cudf::column_view offsets_column = column.offsets(); // NOTE that the offsets column size is usually one more than the number of strings. It starts at 0 and ends at chars_column.size() auto new_offsets = cudf::allocate_like(offsets_column, column.size() + 1, cudf::mask_allocation_policy::NEVER); auto mutable_col = new_offsets->mutable_view(); cudf::copy_range_in_place(offsets_column, mutable_col, offset, offset + column.size() + 1, 0); rmm::cuda_stream_view stream; thrust::transform(rmm::exec_policy(stream), mutable_col.begin<int32_t>(), mutable_col.end<int32_t>(), mutable_col.begin<int32_t>(), [chars_column_start] __device__ (int32_t value){ return value - chars_column_start; }); return new_offsets; } } // namespace messages } // namespace communication } // namespace ral
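getRebasedStringOffsets above copies the sliced offsets and then subtracts chars_column_start from every entry with a device lambda, so the rebased offsets start at zero. The sketch below keeps only that thrust::transform step, dropping the cudf/rmm machinery and using made-up offsets; it needs nvcc's --extended-lambda flag for the device lambda.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/transform.h>

int main() {
    int h_offsets[] = {7, 9, 12, 20};                 // offsets of a sliced strings column (illustrative)
    thrust::device_vector<int> offsets(h_offsets, h_offsets + 4);
    int chars_column_start = offsets[0];              // device read, like the cudaMemcpy in the file
    thrust::transform(offsets.begin(), offsets.end(), offsets.begin(),
                      [chars_column_start] __device__ (int v) { return v - chars_column_start; });
    for (int i = 0; i < 4; ++i) printf("%d ", (int)offsets[i]);   // prints: 0 2 5 13
    printf("\n");
    return 0;
}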
903bfd751159bfa95704cdaef166b86b23cb8d8f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Alejandro Salmon F.D. A01201954 Lab1: Parallel PI */
// include path of your own cuda_runtime.h... if it's in the same folder just delete the ../
#include "../cuda_runtime.h"
#include <stdlib.h>
#include <stdio.h>

#define NO_RECTANGLES 1000000000
#define width 1.0/NO_RECTANGLES
#define BLOCKS_PER_GRID 12
#define THREADS_PER_BLOCK 512

// Graphic processor calculation of Pi in parallel way
__global__ void parallel_Pi(double *device_array) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int aux = tid;
    double mid;
    double height;
    while (aux < NO_RECTANGLES) {
        mid = (aux + 0.5) * width;
        height = 4.0 / (1.0 + mid * mid);
        device_array[tid] += height;
        aux += blockDim.x * gridDim.x;
    }
}

int main(void) {
    int size = THREADS_PER_BLOCK * BLOCKS_PER_GRID;
    // Device array and local array for getting pi values from gpu and copying them to host
    double *local_array;
    double *device_array;
    //allocate local memory to get values from gpu
    local_array = (double*) malloc(size * sizeof(double));
    // allocate memory in device (GPU)
    hipMalloc((void**) &device_array, size * sizeof(double));
    // hipMalloc does not zero the buffer and the kernel accumulates with +=, so clear the partial sums first
    hipMemset(device_array, 0, size * sizeof(double));
    hipLaunchKernelGGL(( parallel_Pi), dim3(BLOCKS_PER_GRID), dim3(THREADS_PER_BLOCK), 0, 0, device_array);
    // copy result from device to host so we can handle it
    hipMemcpy(local_array, device_array, size * sizeof(double), hipMemcpyDeviceToHost);
    double pi = 0;
    // Pi is sum of all values in local array multiplied by the specified width of the rectangles
    for(int i = 0; i < size; i++) {
        pi += local_array[i];
    }
    pi = pi * width;
    printf("Calculated pi value: %lf\n", pi);
    printf("YAY PARALLEL\n");
    // Free memory space in device and host
    free(local_array);
    hipFree(device_array);
    return 0;
}
903bfd751159bfa95704cdaef166b86b23cb8d8f.cu
/* Alejandro Salmon F.D. A01201954 Lab1: Parallel PI */
// include path of your own cuda_runtime.h... if it's in the same folder just delete the ../
#include "../cuda_runtime.h"
#include <stdlib.h>
#include <stdio.h>

#define NO_RECTANGLES 1000000000
#define width 1.0/NO_RECTANGLES
#define BLOCKS_PER_GRID 12
#define THREADS_PER_BLOCK 512

// Graphic processor calculation of Pi in parallel way
__global__ void parallel_Pi(double *device_array) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int aux = tid;
    double mid;
    double height;
    while (aux < NO_RECTANGLES) {
        mid = (aux + 0.5) * width;
        height = 4.0 / (1.0 + mid * mid);
        device_array[tid] += height;
        aux += blockDim.x * gridDim.x;
    }
}

int main(void) {
    int size = THREADS_PER_BLOCK * BLOCKS_PER_GRID;
    // Device array and local array for getting pi values from gpu and copying them to host
    double *local_array;
    double *device_array;
    //allocate local memory to get values from gpu
    local_array = (double*) malloc(size * sizeof(double));
    // allocate memory in device (GPU)
    cudaMalloc((void**) &device_array, size * sizeof(double));
    // cudaMalloc does not zero the buffer and the kernel accumulates with +=, so clear the partial sums first
    cudaMemset(device_array, 0, size * sizeof(double));
    parallel_Pi<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(device_array);
    // copy result from device to host so we can handle it
    cudaMemcpy(local_array, device_array, size * sizeof(double), cudaMemcpyDeviceToHost);
    double pi = 0;
    // Pi is sum of all values in local array multiplied by the specified width of the rectangles
    for(int i = 0; i < size; i++) {
        pi += local_array[i];
    }
    pi = pi * width;
    printf("Calculated pi value: %lf\n", pi);
    printf("YAY PARALLEL\n");
    // Free memory space in device and host
    free(local_array);
    cudaFree(device_array);
    return 0;
}
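Both versions of this program copy every per-thread partial sum back to the host and finish the accumulation on the CPU. An alternative, not in the original, is to reduce the partial sums on the device so only one double crosses the bus; the sketch below does that with a register accumulator in the kernel and thrust::reduce on the partial sums (sizes and names are illustrative, and the rectangle count is kept small for a quick run).

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>

// Grid-stride midpoint rule: each thread accumulates in a register and writes once.
__global__ void partial_sums(double *out, long long n, double w) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    double acc = 0.0;
    for (long long i = tid; i < n; i += (long long)blockDim.x * gridDim.x) {
        double mid = (i + 0.5) * w;
        acc += 4.0 / (1.0 + mid * mid);
    }
    out[tid] = acc;
}

int main() {
    const long long n = 1000000;
    const double w = 1.0 / n;
    const int threads = 256, blocks = 64;
    thrust::device_vector<double> partial(threads * blocks);
    partial_sums<<<blocks, threads>>>(thrust::raw_pointer_cast(partial.data()), n, w);
    double pi = thrust::reduce(partial.begin(), partial.end(), 0.0) * w;  // device-side sum, one value returned
    printf("pi ~= %f\n", pi);
    return 0;
}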
cd6780c43f489754d1a50d1a49400461d83f7e91.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <sys/types.h> #include <sys/times.h> #include <sys/time.h> #include <time.h> #define MAXN 8000 /* Max value of N */ int N; /* Matrix Dimension*/ int numThreads; /* Number of Threads */ /*Random*/ #define randm() 4|2[uid]&3 /*CUDA Function for calculating mean column-wise and then reducing each column's totals*/ /*This Function will be called Number of blocks times*/ __global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil) { extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads __shared__ float meansum;//shared 1D-matrix for storing mean total of each threads __shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block int eva_block,index; unsigned int thread_id = threadIdx.y;//Getting Id of thread unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix __syncthreads();//waiting for all threads mean[thread_id]=input[j];//Assigned each column element of matrix to each thread /*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/ for(int i=0;i<dim1;i+=numThread) { index=dim1*(numThread+thread_id+i);//calculating index of remaining element eva_block=index+blockIdx.x; if(eva_block < dim1*dim1) { mean[thread_id]+=input[index]; } } /*Reducing sum of each thread to final block sum*/ if(thread_id==0) { for(int i=0;i<numThread;i++) { meansum+=mean[thread_id+i]; } mean_out[blockIdx.x]=meansum/dim1;//Mean of block } __syncthreads(); sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block /*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/ for(int i=0;i<dim1;i+=numThread) { index=dim1*(numThread+thread_id+i); eva_block=index+blockIdx.x; if(eva_block < dim1*dim1) { sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0); } } /*Reducing SD Sum of each thread to final block SD sum*/ if(thread_id==0) { sdsum=0; for(int i=0;i<numThread;i++) { sdsum+=sd[thread_id+i];//calculating index of remaining element } sd_out[blockIdx.x]=sdsum/dim1;//SD of block } __syncthreads();//waiting for threads /*Normalization of each block data on basis of mean and sd of each block*/ output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x]; /*Reducing Normalized Sum for remaining elements*/ for(int i=0;i<eval_ceil;i++){ if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1) { output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes } } } /* returns a seed for srand based on the time */ unsigned int time_seed() { struct timeval t; struct timezone tzdummy; gettimeofday(&t, &tzdummy); return (unsigned int)(t.tv_usec); } /* Set the program parameters from the command-line arguments */ void parameters(int argc, char **argv) { int seed = 0; /* Random seed */ char uid[32]; /*User name */ /* Read command-line 
arguments */ srand(time_seed()); /* Randomize */ if (argc == 4) { seed = atoi(argv[3]); srand(seed); printf("Random seed = %i\n", seed); } if (argc >= 3) { N = atoi(argv[1]); numThreads = atoi(argv[2]); if (N < 1 || N > MAXN) { printf("N = %i is out of range.\n", N); exit(0); } /*Number of Threads should be less than or equal to 1024 else exit*/ if (numThreads > 1024) { printf("Number of threads cannot be more than %i.\n", 1024); exit(0); } } else { printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]); exit(0); } printf("\nMatrix dimension N = %i.\n", N); } int main(int argc, char **argv) { /* Timing variables */ hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */ struct timezone tzdummy; clock_t etstart2, etstop2; /* Elapsed times using times() */ unsigned long long usecstart, usecstop; struct tms cputstart, cputstop; /* CPU times for my processes */ /* Process program parameters */ parameters(argc, argv); float* Host_Input = new float [N * N];//Input Matrix float* Host_Output = new float [N * N];//Output Matrix int i,j; /*Initializing Input Matrix with random values*/ printf("\nInitializing...\n"); for(i=0;i<N;i++) { for(j=0;j<N;j++) { //Host_Input[j* N + i] = j+1; Host_Input[j* N + i] = (float)rand() / 32768.0; } } float* input;//Device Input Matrix float* output;//Device Output Matrix float* mean_out;//Device Mean Matrix float* sd_out;//Device SD Matrix size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix //allocated the device memory for source array hipMalloc(&input, matrix_size_2d); hipMemcpy(input, Host_Input, matrix_size_2d, hipMemcpyHostToDevice); //allocate the device memory for destination array hipMalloc(&output, matrix_size_2d); //allocate the device memory for mean array hipMalloc(&mean_out, matrix_size_1d); //allocate the device memory for sd array hipMalloc(&sd_out, matrix_size_1d); dim3 dimBlock; dim3 dimGrid; /* Designing Decisions for number of blocks and number of threads in each block */ if( N < numThreads) { dimBlock.x = 1; dimBlock.y = N; dimGrid.x = N; dimGrid.y = 1; } else { dimBlock.x = 1; dimBlock.y = numThreads; dimGrid.x = N; dimGrid.y = 1; } /* Start Clock */ printf("\nStarting clock.\n"); hipEventRecord(start); gettimeofday(&etstart,&tzdummy); etstart2 = times(&cputstart); double d_ceil=(double)N/(double)numThreads; int c=ceil(d_ceil); //printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c); //Calling CUDA Kernel Function For Normalizing Matrix hipLaunchKernelGGL(( Mean_SD_Norm), dim3(dimGrid), dim3(dimBlock), matrix_size_1d, 0, input,output,mean_out,sd_out,N,numThreads,c); hipDeviceSynchronize(); /* Stop Clock code below*/ hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); gettimeofday(&etstop, &tzdummy); etstop2 = times(&cputstop); printf("Stopped clock.\n"); /*Copying Output Device Matrix to Output Host Matrix*/ hipMemcpy(Host_Output, output, N * N * sizeof(float), hipMemcpyDeviceToHost); usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec; usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec; /* Display output */ /* if (N < 10) { printf("\nB1 =\n\t"); for (i= 0; i < N; i++) { for (j = 0; j < N; j++) { printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? 
", " : ";\n\t"); } } }*/ /* Display result time */ printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000); printf("Elapsed GPU Time = %g ms \n",milliseconds); printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6); float mean = N * log2((float)N) + N; float sd = N * log2((float)N) + (2*N) + (2*N*N); float norm = 2 * N * N; printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3)); //deallocate device memory below hipFree(input); hipFree(output); hipFree(mean_out); hipFree(sd_out); //deallocate Host Input and Host Output Matrix free(Host_Input); free(Host_Output); exit(0); }
cd6780c43f489754d1a50d1a49400461d83f7e91.cu
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <math.h> #include <sys/types.h> #include <sys/times.h> #include <sys/time.h> #include <time.h> #define MAXN 8000 /* Max value of N */ int N; /* Matrix Dimension*/ int numThreads; /* Number of Threads */ /*Random*/ #define randm() 4|2[uid]&3 /*CUDA Function for calculating mean column-wise and then reducing each column's totals*/ /*This Function will be called Number of blocks times*/ __global__ void Mean_SD_Norm(float* input,float* output ,float* mean_out,float* sd_out, int dim1, int numThread,int eval_ceil) { extern __shared__ float mean[];//shared 1D-matrix for storing temporary results for mean of each threads extern __shared__ float sd[];//shared 1D-matrix for storing temporary results for sd of each threads __shared__ float meansum;//shared 1D-matrix for storing mean total of each threads __shared__ float sdsum;//shared 1D-matrix for storing SD total of each threads int idx_x = blockIdx.x * blockDim.x + threadIdx.x;//Getting Thread X Index for Particular Block int idx_y = blockIdx.y * blockDim.y + threadIdx.y;//Getting Thread Y Index for Particular Block int eva_block,index; unsigned int thread_id = threadIdx.y;//Getting Id of thread unsigned int j = idx_y * dim1 + idx_x;//calculating index for input matrix __syncthreads();//waiting for all threads mean[thread_id]=input[j];//Assigned each column element of matrix to each thread /*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/ for(int i=0;i<dim1;i+=numThread) { index=dim1*(numThread+thread_id+i);//calculating index of remaining element eva_block=index+blockIdx.x; if(eva_block < dim1*dim1) { mean[thread_id]+=input[index]; } } /*Reducing sum of each thread to final block sum*/ if(thread_id==0) { for(int i=0;i<numThread;i++) { meansum+=mean[thread_id+i]; } mean_out[blockIdx.x]=meansum/dim1;//Mean of block } __syncthreads(); sd[thread_id] = powf(input[j] - mean_out[blockIdx.x], 2.0);//evaluating SD for each thread for particular block /*If Dimension is more than Threads then reduce the remaining elements to assigned elements*/ for(int i=0;i<dim1;i+=numThread) { index=dim1*(numThread+thread_id+i); eva_block=index+blockIdx.x; if(eva_block < dim1*dim1) { sd[thread_id]+=powf(input[index] - mean_out[blockIdx.x], 2.0); } } /*Reducing SD Sum of each thread to final block SD sum*/ if(thread_id==0) { sdsum=0; for(int i=0;i<numThread;i++) { sdsum+=sd[thread_id+i];//calculating index of remaining element } sd_out[blockIdx.x]=sdsum/dim1;//SD of block } __syncthreads();//waiting for threads /*Normalization of each block data on basis of mean and sd of each block*/ output[blockIdx.x*dim1+thread_id] = (input[thread_id+blockIdx.x*dim1] - mean_out[blockIdx.x]) / sd_out[blockIdx.x]; /*Reducing Normalized Sum for remaining elements*/ for(int i=0;i<eval_ceil;i++){ if((numThread+thread_id)+blockIdx.x*dim1 < dim1*dim1) { output[(numThread+thread_id)+blockIdx.x*dim1] = (input[(numThread+thread_id)+blockIdx.x*dim1] - mean_out[blockIdx.x])/sd_out[blockIdx.x];//Normalizing the Matrix Indexes } } } /* returns a seed for srand based on the time */ unsigned int time_seed() { struct timeval t; struct timezone tzdummy; gettimeofday(&t, &tzdummy); return (unsigned int)(t.tv_usec); } /* Set the program parameters from the command-line arguments */ void parameters(int argc, char **argv) { int seed = 0; /* Random seed */ char uid[32]; /*User name */ /* Read command-line arguments */ srand(time_seed()); /* Randomize */ if (argc == 4) { seed = atoi(argv[3]); 
srand(seed); printf("Random seed = %i\n", seed); } if (argc >= 3) { N = atoi(argv[1]); numThreads = atoi(argv[2]); if (N < 1 || N > MAXN) { printf("N = %i is out of range.\n", N); exit(0); } /*Number of Threads should be less than or equal to 1024 else exit*/ if (numThreads > 1024) { printf("Number of threads cannot be more than %i.\n", 1024); exit(0); } } else { printf("Usage: %s <matrix_dimension> <Number of Threads> [random seed]\n",argv[0]); exit(0); } printf("\nMatrix dimension N = %i.\n", N); } int main(int argc, char **argv) { /* Timing variables */ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */ struct timezone tzdummy; clock_t etstart2, etstop2; /* Elapsed times using times() */ unsigned long long usecstart, usecstop; struct tms cputstart, cputstop; /* CPU times for my processes */ /* Process program parameters */ parameters(argc, argv); float* Host_Input = new float [N * N];//Input Matrix float* Host_Output = new float [N * N];//Output Matrix int i,j; /*Initializing Input Matrix with random values*/ printf("\nInitializing...\n"); for(i=0;i<N;i++) { for(j=0;j<N;j++) { //Host_Input[j* N + i] = j+1; Host_Input[j* N + i] = (float)rand() / 32768.0; } } float* input;//Device Input Matrix float* output;//Device Output Matrix float* mean_out;//Device Mean Matrix float* sd_out;//Device SD Matrix size_t matrix_size_2d = N * N * sizeof(float);//Size of 2D Matrix size_t matrix_size_1d = N * sizeof(float);//Size of 1D Matrix //allocated the device memory for source array cudaMalloc(&input, matrix_size_2d); cudaMemcpy(input, Host_Input, matrix_size_2d, cudaMemcpyHostToDevice); //allocate the device memory for destination array cudaMalloc(&output, matrix_size_2d); //allocate the device memory for mean array cudaMalloc(&mean_out, matrix_size_1d); //allocate the device memory for sd array cudaMalloc(&sd_out, matrix_size_1d); dim3 dimBlock; dim3 dimGrid; /* Designing Decisions for number of blocks and number of threads in each block */ if( N < numThreads) { dimBlock.x = 1; dimBlock.y = N; dimGrid.x = N; dimGrid.y = 1; } else { dimBlock.x = 1; dimBlock.y = numThreads; dimGrid.x = N; dimGrid.y = 1; } /* Start Clock */ printf("\nStarting clock.\n"); cudaEventRecord(start); gettimeofday(&etstart,&tzdummy); etstart2 = times(&cputstart); double d_ceil=(double)N/(double)numThreads; int c=ceil(d_ceil); //printf("nt=%d\t c1=%ld\tc=%d\n",nt,c1,c); //Calling CUDA Kernel Function For Normalizing Matrix Mean_SD_Norm<<<dimGrid, dimBlock, matrix_size_1d>>>(input,output,mean_out,sd_out,N,numThreads,c); cudaDeviceSynchronize(); /* Stop Clock code below*/ cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); gettimeofday(&etstop, &tzdummy); etstop2 = times(&cputstop); printf("Stopped clock.\n"); /*Copying Output Device Matrix to Output Host Matrix*/ cudaMemcpy(Host_Output, output, N * N * sizeof(float), cudaMemcpyDeviceToHost); usecstart = (unsigned long long)etstart.tv_sec * 1000000 + etstart.tv_usec; usecstop = (unsigned long long)etstop.tv_sec * 1000000 + etstop.tv_usec; /* Display output */ /* if (N < 10) { printf("\nB1 =\n\t"); for (i= 0; i < N; i++) { for (j = 0; j < N; j++) { printf("%1.10f%s", Host_Output[i* N + j], (j < N-1) ? 
", " : ";\n\t"); } } }*/ /* Display result time */ printf("\nElapsed time CPU Time = %g ms.\n", (float)(usecstop - usecstart)/(float)1000); printf("Elapsed GPU Time = %g ms \n",milliseconds); printf("Effective Bandwidth in (GB/s): %f \n", (2*matrix_size_2d/milliseconds)/1e6); float mean = N * log2((float)N) + N; float sd = N * log2((float)N) + (2*N) + (2*N*N); float norm = 2 * N * N; printf("Effective Throughput in (GFLOPS/s): %f \n", ((mean+sd+norm)*1e-9)/(milliseconds*1e-3)); //deallocate device memory below cudaFree(input); cudaFree(output); cudaFree(mean_out); cudaFree(sd_out); //deallocate Host Input and Host Output Matrix free(Host_Input); free(Host_Output); exit(0); }
67c3312803abe41e71bc329ccf592cee89d5aee1.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { thrust::host_vector<int> host_in = thrust::host_vector<int>(idata, idata + n); thrust::host_vector<int> host_out = thrust::host_vector<int>(odata, odata + n); thrust::device_vector<int> dev_in = thrust::device_vector<int>(host_in); thrust::device_vector<int> dev_out = thrust::device_vector<int>(host_out); // TODO timer().startGpuTimer(); thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin()); timer().endGpuTimer(); thrust::copy(dev_out.begin(), dev_out.end(), odata); } } }
67c3312803abe41e71bc329ccf592cee89d5aee1.cu
#include <cuda.h> #include <cuda_runtime.h> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/scan.h> #include "common.h" #include "thrust.h" namespace StreamCompaction { namespace Thrust { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { thrust::host_vector<int> host_in = thrust::host_vector<int>(idata, idata + n); thrust::host_vector<int> host_out = thrust::host_vector<int>(odata, odata + n); thrust::device_vector<int> dev_in = thrust::device_vector<int>(host_in); thrust::device_vector<int> dev_out = thrust::device_vector<int>(host_out); // TODO timer().startGpuTimer(); thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin()); timer().endGpuTimer(); thrust::copy(dev_out.begin(), dev_out.end(), odata); } } }
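For reference, the wrapper above forwards to thrust::exclusive_scan, which shifts the running sum by one element so the first output is always 0. The tiny standalone program below prints the exclusive and inclusive scans of the same input to make that difference concrete.

#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/scan.h>

int main() {
    int h_in[] = {3, 1, 4, 1, 5};
    thrust::device_vector<int> in(h_in, h_in + 5), out(5);
    thrust::exclusive_scan(in.begin(), in.end(), out.begin());
    for (int i = 0; i < 5; ++i) printf("%d ", (int)out[i]);   // prints: 0 3 4 8 9
    printf("\n");
    thrust::inclusive_scan(in.begin(), in.end(), out.begin());
    for (int i = 0; i < 5; ++i) printf("%d ", (int)out[i]);   // prints: 3 4 8 9 14
    printf("\n");
    return 0;
}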
903a4f6c7c842573d461b8c8445cf9bc1a5bbd28.hip
// !!! This is a file automatically generated by hipify!!! /* * Compression using the parallel functions and GPU */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <limits.h> #include "../include/parallel.h" #include "../include/color.h" #define BLOCK_SIZE 1024 #define MIN_SCRATCH_SIZE 50 * 1024 * 1024 #define DEBUG 1 struct huffmanNode * huffmanTreeNode_head; struct huffmanDictionary huffmanDictionary; struct huffmanNode huffmanTreeNode[512]; unsigned char bitSequenceConstMemory[256][255]; unsigned int constMemoryFlag = 0; int main(int argc, char ** argv){ unsigned int distinctCharacterCount, mergedHuffmanNodes, inputFileLength; unsigned int frequency[256]; unsigned char * inputFileData, bitSequenceLength = 0, bitSequence[255]; unsigned int * compressedDataOffset, cpuTimeUsed; unsigned int integerOverflowFlag; long unsigned int memFree, memTotal, memRequired, memOffset, memData; int numberOfKernels; clock_t start, end; FILE * inputFile, * compressedFile; // check the arguments if(argc != 3){ printf("Arguments should be input file and output file"); return -1; } // read input file, get length and data inputFile = fopen(argv[1], "rb"); fseek(inputFile, 0, SEEK_END); inputFileLength = ftell(inputFile); fseek(inputFile, 0, SEEK_SET); inputFileData = (unsigned char *) malloc(inputFileLength * sizeof(unsigned char)); fread(inputFileData, sizeof(unsigned char), inputFileLength, inputFile); fclose(inputFile); // starting the clock, tick tick start = clock(); // find frequency of each symbols for(int i = 0; i < 256; i++) frequency[i] = 0; for(int i = 0; i < inputFileLength; i++) frequency[inputFileData[i]] ++; // initialize the nodes distinctCharacterCount = 0; for(int i = 0; i < 256; i++){ if(frequency[i] > 0){ huffmanTreeNode[distinctCharacterCount].frequency = frequency[i]; huffmanTreeNode[distinctCharacterCount].letter = i; huffmanTreeNode[distinctCharacterCount].left = NULL; huffmanTreeNode[distinctCharacterCount].right = NULL; distinctCharacterCount ++; } } // build the huffman tree for(int i = 0; i < distinctCharacterCount - 1; i++){ mergedHuffmanNodes = 2 * i; sortHuffmanTree(i, distinctCharacterCount, mergedHuffmanNodes); buildHuffmanTree(i, distinctCharacterCount, mergedHuffmanNodes); } if(distinctCharacterCount == 1){ huffmanTreeNode_head = & huffmanTreeNode[0]; } // build the huffman dictionary buildHuffmanDictionary(huffmanTreeNode_head, bitSequence, bitSequenceLength); // calculating memory requirements // gpu memory hipMemGetInfo(& memFree, & memTotal); // debug if(DEBUG) printf("Free Memory :: %lu \n", memFree); // offset array requirements memOffset = 0; for(int i = 0; i < 256; i++) memOffset += frequency[i] * huffmanDictionary.bitSequenceLength[i]; memOffset = memOffset % 8 == 0 ? memOffset : memOffset + 8 - memOffset % 8; // other memory requirements memData = inputFileLength + (inputFileLength + 1) * sizeof(unsigned int) + sizeof(huffmanDictionary); if(memFree - memData < MIN_SCRATCH_SIZE){ printf("\n%sExiting not enough memory on GPU :: \nMem Free :: %lu\nMin Required :: %lu\n", COLOR_ERROR, memFree, memData + MIN_SCRATCH_SIZE); return -1; } memRequired = memFree - memData - 10 * 1024 * 1024; numberOfKernels = ceil((double) memOffset / memRequired); integerOverflowFlag = memRequired + 255 <= UINT_MAX || memOffset + 255 <= UINT_MAX ? 
0 : 1; if(DEBUG){ printf("\n\t%sInput File Size :: %u\n\tOutput Size :: %lu\n\tNumber of Kernels :: %d\n\tInteger Overflow flag :: %d\n", COLOR_DEBUG, inputFileLength, memOffset / 8, numberOfKernels, integerOverflowFlag); } // generate offset data array compressedDataOffset = (unsigned int * ) malloc((inputFileLength + 1) * sizeof(unsigned int)); // launch kernel launchCudaHuffmanCompress(inputFileData, compressedDataOffset, inputFileLength, numberOfKernels, integerOverflowFlag, memRequired); // end the clock, tick tick end = clock(); // writing the compressed file to the output compressedFile = fopen(argv[2], "wb"); fwrite(& inputFileLength, sizeof(unsigned int), 1, compressedFile); fwrite(frequency, sizeof(unsigned int), 256, compressedFile); fwrite(inputFileData, sizeof(unsigned char), memOffset / 8, compressedFile); fclose(compressedFile); cpuTimeUsed = ((end - start)) * 1000 / CLOCKS_PER_SEC; printf("\n\nTime taken :: %d:%d s\n", cpuTimeUsed / 1000, cpuTimeUsed % 1000); free(inputFileData); free(compressedDataOffset); return 0; }
903a4f6c7c842573d461b8c8445cf9bc1a5bbd28.cu
/* * Compression using the parallel functions and GPU */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <limits.h> #include "../include/parallel.h" #include "../include/color.h" #define BLOCK_SIZE 1024 #define MIN_SCRATCH_SIZE 50 * 1024 * 1024 #define DEBUG 1 struct huffmanNode * huffmanTreeNode_head; struct huffmanDictionary huffmanDictionary; struct huffmanNode huffmanTreeNode[512]; unsigned char bitSequenceConstMemory[256][255]; unsigned int constMemoryFlag = 0; int main(int argc, char ** argv){ unsigned int distinctCharacterCount, mergedHuffmanNodes, inputFileLength; unsigned int frequency[256]; unsigned char * inputFileData, bitSequenceLength = 0, bitSequence[255]; unsigned int * compressedDataOffset, cpuTimeUsed; unsigned int integerOverflowFlag; long unsigned int memFree, memTotal, memRequired, memOffset, memData; int numberOfKernels; clock_t start, end; FILE * inputFile, * compressedFile; // check the arguments if(argc != 3){ printf("Arguments should be input file and output file"); return -1; } // read input file, get length and data inputFile = fopen(argv[1], "rb"); fseek(inputFile, 0, SEEK_END); inputFileLength = ftell(inputFile); fseek(inputFile, 0, SEEK_SET); inputFileData = (unsigned char *) malloc(inputFileLength * sizeof(unsigned char)); fread(inputFileData, sizeof(unsigned char), inputFileLength, inputFile); fclose(inputFile); // starting the clock, tick tick start = clock(); // find frequency of each symbols for(int i = 0; i < 256; i++) frequency[i] = 0; for(int i = 0; i < inputFileLength; i++) frequency[inputFileData[i]] ++; // initialize the nodes distinctCharacterCount = 0; for(int i = 0; i < 256; i++){ if(frequency[i] > 0){ huffmanTreeNode[distinctCharacterCount].frequency = frequency[i]; huffmanTreeNode[distinctCharacterCount].letter = i; huffmanTreeNode[distinctCharacterCount].left = NULL; huffmanTreeNode[distinctCharacterCount].right = NULL; distinctCharacterCount ++; } } // build the huffman tree for(int i = 0; i < distinctCharacterCount - 1; i++){ mergedHuffmanNodes = 2 * i; sortHuffmanTree(i, distinctCharacterCount, mergedHuffmanNodes); buildHuffmanTree(i, distinctCharacterCount, mergedHuffmanNodes); } if(distinctCharacterCount == 1){ huffmanTreeNode_head = & huffmanTreeNode[0]; } // build the huffman dictionary buildHuffmanDictionary(huffmanTreeNode_head, bitSequence, bitSequenceLength); // calculating memory requirements // gpu memory cudaMemGetInfo(& memFree, & memTotal); // debug if(DEBUG) printf("Free Memory :: %lu \n", memFree); // offset array requirements memOffset = 0; for(int i = 0; i < 256; i++) memOffset += frequency[i] * huffmanDictionary.bitSequenceLength[i]; memOffset = memOffset % 8 == 0 ? memOffset : memOffset + 8 - memOffset % 8; // other memory requirements memData = inputFileLength + (inputFileLength + 1) * sizeof(unsigned int) + sizeof(huffmanDictionary); if(memFree - memData < MIN_SCRATCH_SIZE){ printf("\n%sExiting not enough memory on GPU :: \nMem Free :: %lu\nMin Required :: %lu\n", COLOR_ERROR, memFree, memData + MIN_SCRATCH_SIZE); return -1; } memRequired = memFree - memData - 10 * 1024 * 1024; numberOfKernels = ceil((double) memOffset / memRequired); integerOverflowFlag = memRequired + 255 <= UINT_MAX || memOffset + 255 <= UINT_MAX ? 
0 : 1; if(DEBUG){ printf("\n\t%sInput File Size :: %u\n\tOutput Size :: %lu\n\tNumber of Kernels :: %d\n\tInteger Overflow flag :: %d\n", COLOR_DEBUG, inputFileLength, memOffset / 8, numberOfKernels, integerOverflowFlag); } // generate offset data array compressedDataOffset = (unsigned int * ) malloc((inputFileLength + 1) * sizeof(unsigned int)); // launch kernel launchCudaHuffmanCompress(inputFileData, compressedDataOffset, inputFileLength, numberOfKernels, integerOverflowFlag, memRequired); // end the clock, tick tick end = clock(); // writing the compressed file to the output compressedFile = fopen(argv[2], "wb"); fwrite(& inputFileLength, sizeof(unsigned int), 1, compressedFile); fwrite(frequency, sizeof(unsigned int), 256, compressedFile); fwrite(inputFileData, sizeof(unsigned char), memOffset / 8, compressedFile); fclose(compressedFile); cpuTimeUsed = ((end - start)) * 1000 / CLOCKS_PER_SEC; printf("\n\nTime taken :: %d:%d s\n", cpuTimeUsed / 1000, cpuTimeUsed % 1000); free(inputFileData); free(compressedDataOffset); return 0; }
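Before launching, the host code above budgets GPU memory with cudaMemGetInfo: it subtracts the space needed for the input, the offset array, and the dictionary from the free memory and refuses to run unless at least MIN_SCRATCH_SIZE remains for kernel scratch. The sketch below isolates that check; the required size is made up for illustration.

#include <cstdio>
#include <cuda_runtime.h>

int main() {
    size_t memFree = 0, memTotal = 0;
    cudaMemGetInfo(&memFree, &memTotal);                 // free and total device memory in bytes
    const size_t MIN_SCRATCH = 50ull * 1024 * 1024;      // same 50 MB floor as the compressor
    size_t required = 123ull * 1024 * 1024;              // stand-in for input + offset arrays + dictionary
    printf("free %zu MB of %zu MB\n", memFree >> 20, memTotal >> 20);
    if (memFree < required + MIN_SCRATCH) {
        printf("not enough GPU memory: need %zu MB plus scratch\n", required >> 20);
        return 1;
    }
    printf("budget ok, %zu MB left for kernel scratch\n", (memFree - required) >> 20);
    return 0;
}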
74559321610054b34ea9a3b054ca5f0180a2e78e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <sys/time.h> /***********************************************/ /* for debug: check the output */ /***********************************************/ void write_output(float *arr, int size, const char *filename) { FILE *fp; if((fp = fopen(filename, "w+")) == NULL) { fprintf(stderr, "File write error!\n"); } int i; for(i = 0; i < size; i++) { fprintf(fp, "%f ", arr[i]); if( i%10 == 0) fprintf(fp, "\n"); } fprintf(fp, "\n"); fclose(fp); } //device memory pointers static int *nd1_velD; static int *nd1_txyD; static int *nd1_txzD; static int *nd1_tyyD; static int *nd1_tyzD; static float *rhoD; static float *drvh1D; static float *drti1D; static float *drth1D; static float *damp1_xD; static float *damp1_yD; static int *idmat1D; static float *dxi1D; static float *dyi1D; static float *dzi1D; static float *dxh1D; static float *dyh1D; static float *dzh1D; static float *t1xxD; static float *t1xyD; static float *t1xzD; static float *t1yyD; static float *t1yzD; static float *t1zzD; static float *t1xx_pxD; static float *t1xy_pxD; static float *t1xz_pxD; static float *t1yy_pxD; static float *qt1xx_pxD; static float *qt1xy_pxD; static float *qt1xz_pxD; static float *qt1yy_pxD; static float *t1xx_pyD; static float *t1xy_pyD; static float *t1yy_pyD; static float *t1yz_pyD; static float *qt1xx_pyD; static float *qt1xy_pyD; static float *qt1yy_pyD; static float *qt1yz_pyD; static float *qt1xxD; static float *qt1xyD; static float *qt1xzD; static float *qt1yyD; static float *qt1yzD; static float *qt1zzD; static float *clamdaD; static float *cmuD; static float *epdtD; static float *qwpD; static float *qwsD; static float *qwt1D; static float *qwt2D; static float *v1xD; //output static float *v1yD; static float *v1zD; static float *v1x_pxD; static float *v1y_pxD; static float *v1z_pxD; static float *v1x_pyD; static float *v1y_pyD; static float *v1z_pyD; //for inner_II--------------------------------------------------------- static int *nd2_velD; static int *nd2_txyD; //int[18] static int *nd2_txzD; //int[18] static int *nd2_tyyD; //int[18] static int *nd2_tyzD; //int[18] static float *drvh2D; static float *drti2D; static float *drth2D; //float[mw2_pml1,0:1] static int *idmat2D; static float *damp2_xD; static float *damp2_yD; static float *damp2_zD; static float *dxi2D; static float *dyi2D; static float *dzi2D; static float *dxh2D; static float *dyh2D; static float *dzh2D; static float *t2xxD; static float *t2xyD; static float *t2xzD; static float *t2yyD; static float *t2yzD; static float *t2zzD; static float *qt2xxD; static float *qt2xyD; static float *qt2xzD; static float *qt2yyD; static float *qt2yzD; static float *qt2zzD; static float *t2xx_pxD; static float *t2xy_pxD; static float *t2xz_pxD; static float *t2yy_pxD; static float *qt2xx_pxD; static float *qt2xy_pxD; static float *qt2xz_pxD; static float *qt2yy_pxD; static float *t2xx_pyD; static float *t2xy_pyD; static float *t2yy_pyD; static float *t2yz_pyD; static float *qt2xx_pyD; static float *qt2xy_pyD; static float *qt2yy_pyD; static float *qt2yz_pyD; static float *t2xx_pzD; static float *t2xz_pzD; static float *t2yz_pzD; static float *t2zz_pzD; static float *qt2xx_pzD; static float *qt2xz_pzD; static float *qt2yz_pzD; static float *qt2zz_pzD; static float *v2xD; //output static float *v2yD; static float *v2zD; static float *v2x_pxD; static float *v2y_pxD; static float *v2z_pxD; static float *v2x_pyD; static float *v2y_pyD; 
static float *v2z_pyD; static float *v2x_pzD; static float *v2y_pzD; static float *v2z_pzD; #define CHECK_ERROR(err, str) \ if (err != hipSuccess) \ {\ printf("Error in \"%s\", %s\n", str, hipGetErrorString(err)); \ } //debug---------------------- float totalTimeH2DV, totalTimeD2HV; float totalTimeH2DS, totalTimeD2HS; float totalTimeCompV, totalTimeCompS; float tmpTime; struct timeval t1, t2; int procID; //-------------------------------- //!XSC-------------------------------------------------------------------- #define drvh1(i, j) drvh1M[(i) - 1 + (j) * mw1_pml1] #define drti1(i, j) drti1M[(i) - 1 + (j) * mw1_pml1] #define drth1(i, j) drth1M[(i) - 1 + (j) * mw1_pml1] #define damp1_x(i, j, k) damp1_xM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lbx0) * nytop)] #define damp1_y(i, j, k) damp1_yM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lby0) * nxtop)] #define idmat1(i, j, k) idmat1M[(i) + (nztop + 2) * ((j) - 1 + ((k) - 1) * (nxtop + 1))] #define v1x(i, j, k) v1xM[(i) + (nztop + 2) * ((j) + 1 + (k) * (nxtop + 3))] #define v1y(i, j, k) v1yM[(i) + (nztop + 2) * ((j) + ((k) + 1) * (nxtop + 3))] #define v1z(i, j, k) v1zM[(i) + (nztop + 2) * ((j) + (k) * (nxtop + 3))] //nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml #define v1x_px(i, j, k) v1x_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))] #define v1y_px(i, j, k) v1y_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))] #define v1z_px(i, j, k) v1z_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))] #define v1x_py(i, j, k) v1x_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define v1y_py(i, j, k) v1y_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define v1z_py(i, j, k) v1z_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define dxi1(i, j) dxi1M[((j) - 1) * 4 + (i) - 1] #define dyi1(i, j) dyi1M[((j) - 1) * 4 + (i) - 1] #define dzi1(i, j) dzi1M[((j) - 1) * 4 + (i) - 1] #define dxh1(i, j) dxh1M[((j) - 1) * 4 + (i) - 1] #define dyh1(i, j) dyh1M[((j) - 1) * 4 + (i) - 1] #define dzh1(i, j) dzh1M[((j) - 1) * 4 + (i) - 1] #define t1xx(i, j, k) t1xxM[(i) - 1 + nztop * ((j) + ((k) - 1) * (nxtop + 3))] #define t1xy(i, j, k) t1xyM[(i) - 1 + nztop * ((j) + 1 + ((k) + 1) * (nxtop + 3))] #define t1xz(i, j, k) t1xzM[(i) - 1 + (nztop + 1) * ((j) + 1 + ((k) - 1) * (nxtop + 3))] #define t1yy(i, j, k) t1yyM[(i) - 1 + nztop * (((j) - 1) + (k) * nxtop)] #define t1yz(i, j, k) t1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) + 1) * nxtop)] #define t1zz(i, j, k) t1zzM[(i) - 1 + nztop * ((j) - 1 + ((k) - 1) * nxtop)] //nti = (lbx(2) - lbx(1) + 1) * mw1_pml + lbx(2) //nth = (lbx(2) - lbx(1) + 1) * mw1_pml + 1 - lbx(1) #define t1xx_px(i, j, k) t1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] #define t1xy_px(i, j, k) t1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))] #define t1xz_px(i, j, k) t1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))] #define t1yy_px(i, j, k) t1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] #define qt1xx_px(i, j, k) qt1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] #define qt1xy_px(i, j, k) qt1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))] #define qt1xz_px(i, j, k) qt1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))] #define qt1yy_px(i, j, k) qt1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] //nti = (lby(2) - lby(1) + 1) * mw1_pml + lby(2) //nth = (lby(2) - lby(1) + 1) * mw1_pml + 1 - lby(1) #define t1xx_py(i, j, k) t1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define t1xy_py(i, j, k) t1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define t1yy_py(i, j, k) 
t1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define t1yz_py(i, j, k) t1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))] #define qt1xx_py(i, j, k) qt1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define qt1xy_py(i, j, k) qt1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define qt1yy_py(i, j, k) qt1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define qt1yz_py(i, j, k) qt1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))] #define qt1xx(i, j, k) qt1xxM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define qt1xy(i, j, k) qt1xyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define qt1xz(i, j, k) qt1xzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))] #define qt1yy(i, j, k) qt1yyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define qt1yz(i, j, k) qt1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))] #define qt1zz(i, j, k) qt1zzM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define rho(i) rhoM[(i) - 1] #define clamda(i) clamdaM[(i) - 1] #define cmu(i) cmuM[(i) - 1] #define epdt(i) epdtM[(i) - 1] #define qwp(i) qwpM[(i) - 1] #define qws(i) qwsM[(i) - 1] #define qwt1(i) qwt1M[(i) - 1] #define qwt2(i) qwt2M[(i) - 1] //for inner_II #define drvh2(i, j) drvh2M[(i) - 1 + (j) * mw2_pml1] #define drti2(i, j) drti2M[(i) - 1 + (j) * mw2_pml1] #define drth2(i, j) drth2M[(i) - 1 + (j) * mw2_pml1] #define idmat2(i, j, k) idmat2M[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * (nxbtm + 1))] #define damp2_x(i, j, k) damp2_xM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lbx0) * nybtm)] #define damp2_y(i, j, k) damp2_yM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lby0) * nxbtm)] #define damp2_z(i, j) damp2_zM[(i) - 1 + nxbtm * ((j) - 1)] #define dxi2(i, j) dxi2M[(i) - 1 + 4 * ((j) - 1)] #define dyi2(i, j) dyi2M[(i) - 1 + 4 * ((j) - 1)] #define dzi2(i, j) dzi2M[(i) - 1 + 4 * ((j) - 1)] #define dxh2(i, j) dxh2M[(i) - 1 + 4 * ((j) - 1)] #define dyh2(i, j) dyh2M[(i) - 1 + 4 * ((j) - 1)] #define dzh2(i, j) dzh2M[(i) - 1 + 4 * ((j) - 1)] #define t2xx(i, j, k) t2xxM[(i) - 1 + nzbtm * ((j) + ((k) - 1) * (nxbtm + 3))] #define t2xy(i, j, k) t2xyM[(i) - 1 + nzbtm * ((j) + 1 + ((k) + 1) * (nxbtm + 3))] #define t2xz(i, j, k) t2xzM[(i) + (nzbtm + 1) * ((j) + 1 + ((k) - 1) * (nxbtm + 3))] #define t2yy(i, j, k) t2yyM[(i) - 1 + nzbtm * (((j) - 1) + (k) * nxbtm)] #define t2yz(i, j, k) t2yzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) + 1) * nxbtm)] #define t2zz(i, j, k) t2zzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * nxbtm)] #define qt2xx(i, j, k) qt2xxM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2xy(i, j, k) qt2xyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2xz(i, j, k) qt2xzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2yy(i, j, k) qt2yyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2yz(i, j, k) qt2yzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2zz(i, j, k) qt2zzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] //nti = (lbx(2) - lbx(1) + 1) * mw2_pml + lbx(2) //nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1) #define t2xx_px(i, j, k) t2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))] #define t2xy_px(i, j, k) t2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))] #define t2xz_px(i, j, k) t2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))] #define t2yy_px(i, j, k) t2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))] #define t2xx_py(i, j, k) t2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define t2xy_py(i, j, k) t2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) 
- 1))] #define t2yy_py(i, j, k) t2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define t2yz_py(i, j, k) t2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define t2xx_pz(i, j, k) t2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))] #define t2xz_pz(i, j, k) t2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))] #define t2yz_pz(i, j, k) t2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))] #define t2zz_pz(i, j, k) t2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2xx_px(i, j, k) qt2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))] #define qt2xy_px(i, j, k) qt2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))] #define qt2xz_px(i, j, k) qt2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))] #define qt2yy_px(i, j, k) qt2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))] #define qt2xx_py(i, j, k) qt2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2xy_py(i, j, k) qt2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2yy_py(i, j, k) qt2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2yz_py(i, j, k) qt2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2xx_pz(i, j, k) qt2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2xz_pz(i, j, k) qt2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2yz_pz(i, j, k) qt2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))] #define qt2zz_pz(i, j, k) qt2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))] #define v2x(i, j, k) v2xM[(i) + (nzbtm + 1) * ((j) + 1 + (nxbtm + 3) * (k))] #define v2y(i, j, k) v2yM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * ((k) + 1))] #define v2z(i, j, k) v2zM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * (k))] //nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml #define v2x_px(i, j, k) v2x_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))] #define v2y_px(i, j, k) v2y_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))] #define v2z_px(i, j, k) v2z_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))] #define v2x_py(i, j, k) v2x_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define v2y_py(i, j, k) v2y_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define v2z_py(i, j, k) v2z_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))] #define v2x_pz(i, j, k) v2x_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))] #define v2y_pz(i, j, k) v2y_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))] #define v2z_pz(i, j, k) v2z_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))] __global__ void velocity_inner_IC(int nztop, int nztm1, float ca, int *nd1_vel, float *rhoM, int *idmat1M, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, int nxtop, //dimension # int nytop, float *v1xM, //output float *v1yM, float *v1zM); __global__ void velocity_inner_IIC(float ca, int *nd2_vel, float *rhoM, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, int *idmat2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int nxbtm, //dimension #s int nybtm, int nzbtm, float *v2x, //output float *v2y, float *v2z); __global__ void vel_PmlX_IC(float ca, int lbx0, int lbx1, int *nd1_vel, float *rhoM, float *drvh1, float *drti1, float *damp1_x, int *idmat1, float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1, float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, int mw1_pml1, 
//dimension # int mw1_pml, int nxtop, int nytop, int nztop, float *v1x, //output float *v1y, float *v1z, float *v1x_px, float *v1y_px, float *v1z_px); __global__ void vel_PmlY_IC(int nztop, float ca, int lby0, int lby1, int *nd1_vel, float *rhoM, float *drvh1, float *drti1, int *idmat1, float *damp1_y, float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1, float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, int mw1_pml1, //dimension #s int mw1_pml, int nxtop, int nytop, float *v1x, //output float *v1y, float *v1z, float *v1x_py, float *v1y_py, float *v1z_py); __global__ void vel_PmlX_IIC(int nzbm1, float ca, int lbx0, int lbx1, int *nd2_vel, float *drvh2, float *drti2, float *rhoM, float *damp2_x, int *idmat2, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2x, //output float *v2y, float *v2z, float *v2x_px, float *v2y_px, float *v2z_px); __global__ void vel_PmlY_IIC(int nzbm1, float ca, int lby0, int lby1, int *nd2_vel, float *drvh2, float *drti2, float *rhoM, float *damp2_y, int *idmat2, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int mw2_pml1, int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2x, //output float *v2y, float *v2z, float *v2x_py, float *v2y_py, float *v2z_py); __global__ void vel_PmlZ_IIC(int nzbm1, float ca, int *nd2_vel, float *drvh2, float *drti2, float *rhoM, float *damp2_z, int *idmat2, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2x, //output float *v2y, float *v2z, float *v2x_pz, float *v2y_pz, float *v2z_pz); #ifdef __cplusplus extern "C" { #endif extern void compute_velocityCDebug( int *nztop, int *nztm1, float *ca, int *lbx, int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, float *damp1_xM, float *damp1_yM, int *idmat1M,float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM, float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM, int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M, int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp, float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM, float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM, float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1, int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml, int *nxbtm, int *nybtm, int *nzbtm); extern void compute_stressCDebug(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml, int *mw1_pml1, int *lbx, int *lby, int *nd1_txy, int *nd1_txz, int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float 
*qwt2M, float *dxh1M, float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM, float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM, float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM, float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp, int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz, int *nd2_tyy, int *nd2_tyz, int *idmat2M, float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM, float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M, float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM, float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM, float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM, float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM, void **v2xMp, void **v2yMp, void **v2zMp, int *myid); void set_deviceC(int *deviceID) { hipSetDevice(*deviceID); printf("[CUDA] device set success!\n"); } //=========================================================================== void allocate_gpu_memC(int *lbx, int *lby, int *nmat, //dimension #, int int *mw1_pml1, //int int *mw2_pml1, //int int *nxtop, //int int *nytop, //int int *nztop, int *mw1_pml, //int int *mw2_pml, //int int *nxbtm, //int int *nybtm, //int int *nzbtm, int *nzbm1, int *nll) { printf("[CUDA] allocation ..............."); int nv2, nti, nth; hipError_t cudaRes; // printf("lbx[1] = %d, lbx[0] = %d\n", lbx[1], lbx[0]); // printf("lby[1] = %d, lby[0] = %d\n", lby[1], lby[0]); // printf("nmat = %d\n", *nmat); // printf("mw1_pml1 = %d, mw2_pml1 = %d\n", *mw1_pml1, *mw2_pml1); // printf("mw1_pml = %d, mw2_pml = %d\n", *mw1_pml, *mw2_pml); // printf("nxtop = %d, nytop = %d, nztop = %d\n", *nxtop, *nytop, *nztop); // printf("nxbtm = %d, nybtm = %d, nzbtm = %d\n", *nxbtm, *nybtm, *nzbtm); // printf("nzbm1 = %d, nll = %d\n", *nzbm1, *nll); //debug----------------- totalTimeH2DV = 0.0f; totalTimeD2HV = 0.0f; totalTimeH2DS = 0.0f; totalTimeD2HS = 0.0f; totalTimeCompV = 0.0f; totalTimeCompS = 0.0f; //for inner_I cudaRes = hipMalloc((void **)&nd1_velD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_vel"); cudaRes = hipMalloc((void **)&nd1_txyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txy"); cudaRes = hipMalloc((void **)&nd1_txzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txz"); cudaRes = hipMalloc((void **)&nd1_tyyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyy"); cudaRes = hipMalloc((void **)&nd1_tyzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyz"); cudaRes = hipMalloc((void **)&rhoD, sizeof(float) * (*nmat)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, rho"); cudaRes = hipMalloc((void **)&drvh1D, sizeof(float) * (*mw1_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate 
Device Memory1, drvh1"); cudaRes = hipMalloc((void **)&drti1D, sizeof(float) * (*mw1_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, drti1"); cudaRes = hipMalloc((void **)&drth1D, sizeof(float) * (*mw1_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, drth1"); if (lbx[1] >= lbx[0]) { cudaRes = hipMalloc((void **)&damp1_xD, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_x"); } if (lby[1] >= lby[0]) { cudaRes = hipMalloc((void **)&damp1_yD, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_y"); } cudaRes = hipMalloc((void **)&idmat1D, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, idmat1"); cudaRes = hipMalloc((void **)&dxi1D, sizeof(float) * 4 * (*nxtop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxi1"); cudaRes = hipMalloc((void **)&dyi1D, sizeof(float) * 4 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyi1"); cudaRes = hipMalloc((void **)&dzi1D, sizeof(float) * 4 * (*nztop + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzi1"); cudaRes = hipMalloc((void **)&dxh1D, sizeof(float) * 4 * (*nxtop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxh1"); cudaRes = hipMalloc((void **)&dyh1D, sizeof(float) * 4 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyh1"); cudaRes = hipMalloc((void **)&dzh1D, sizeof(float) * 4 * (*nztop + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzh1"); cudaRes = hipMalloc((void **)&t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx"); cudaRes = hipMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy"); cudaRes = hipMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz"); cudaRes = hipMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy"); cudaRes = hipMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz"); cudaRes = hipMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz"); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0]; hipMalloc((void **)&t1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop)); hipMalloc((void **)&t1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop)); hipMalloc((void **)&t1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop)); hipMalloc((void **)&t1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop)); hipMalloc((void **)&qt1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop)); hipMalloc((void **)&qt1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop)); hipMalloc((void **)&qt1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop)); hipMalloc((void **)&qt1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop)); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0]; hipMalloc((void **)&t1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); hipMalloc((void **)&t1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth); hipMalloc((void **)&t1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); hipMalloc((void 
**)&t1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth); hipMalloc((void **)&qt1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); hipMalloc((void **)&qt1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth); hipMalloc((void **)&qt1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); hipMalloc((void **)&qt1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth); } hipMalloc((void **)&qt1xxD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); hipMalloc((void **)&qt1xyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); hipMalloc((void **)&qt1xzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop)); hipMalloc((void **)&qt1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); hipMalloc((void **)&qt1yzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop)); hipMalloc((void **)&qt1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); hipMalloc((void **)&clamdaD, sizeof(float) * (*nmat)); hipMalloc((void **)&cmuD, sizeof(float) * (*nmat)); hipMalloc((void **)&epdtD, sizeof(float) * (*nll)); hipMalloc((void **)&qwpD, sizeof(float) * (*nmat)); hipMalloc((void **)&qwsD, sizeof(float) * (*nmat)); hipMalloc((void **)&qwt1D, sizeof(float) * (*nll)); hipMalloc((void **)&qwt2D, sizeof(float) * (*nll)); cudaRes = hipMalloc((void **)&v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x"); cudaRes = hipMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y"); cudaRes = hipMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z"); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml); cudaRes = hipMalloc((void **)&v1x_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_px"); cudaRes = hipMalloc((void **)&v1y_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_px"); cudaRes = hipMalloc((void **)&v1z_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml); cudaRes = hipMalloc((void **)&v1x_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_py"); cudaRes = hipMalloc((void **)&v1y_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_py"); cudaRes = hipMalloc((void **)&v1z_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_py"); } //for inner_II----------------------------------------------------------------------------------------- cudaRes = hipMalloc((void **)&nd2_velD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_vel"); cudaRes = hipMalloc((void **)&nd2_txyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txy"); cudaRes = hipMalloc((void **)&nd2_txzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txz"); cudaRes = hipMalloc((void **)&nd2_tyyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyy"); cudaRes = hipMalloc((void **)&nd2_tyzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyz"); cudaRes = hipMalloc((void **)&drvh2D, sizeof(float) * (*mw2_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory, drvh2"); cudaRes = hipMalloc((void **)&drti2D, sizeof(float) * (*mw2_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory, 
drti2"); cudaRes = hipMalloc((void **)&drth2D, sizeof(float) * (*mw2_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory, drth2"); cudaRes = hipMalloc((void **)&idmat2D, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory, idmat2"); if (lbx[1] >= lbx[0]) { cudaRes = hipMalloc((void **)&damp2_xD, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_x"); } if (lby[1] >= lby[0]) { cudaRes = hipMalloc((void **)&damp2_yD, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_y"); } cudaRes = hipMalloc((void **)&damp2_zD, sizeof(float) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_z"); cudaRes = hipMalloc((void **)&dxi2D, sizeof(float) * 4 * (*nxbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dxi2"); cudaRes = hipMalloc((void **)&dyi2D, sizeof(float) * 4 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dyi2"); cudaRes = hipMalloc((void **)&dzi2D, sizeof(float) * 4 * (*nzbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dzi2"); cudaRes = hipMalloc((void **)&dxh2D, sizeof(float) * 4 * (*nxbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dxh2"); cudaRes = hipMalloc((void **)&dyh2D, sizeof(float) * 4 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dyh2"); cudaRes = hipMalloc((void **)&dzh2D, sizeof(float) * 4 * (*nzbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dzh2"); cudaRes = hipMalloc((void **)&t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xx"); cudaRes = hipMalloc((void **)&t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xy"); cudaRes = hipMalloc((void **)&t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xz"); cudaRes = hipMalloc((void **)&t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yy"); cudaRes = hipMalloc((void **)&t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yz"); cudaRes = hipMalloc((void **)&t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2zz"); hipMalloc((void **)&qt2xxD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2xyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2xzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2yzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2zzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0]; hipMalloc((void **)&t2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm)); hipMalloc((void **)&t2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); hipMalloc((void **)&t2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); hipMalloc((void **)&t2yy_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm)); hipMalloc((void **)&qt2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm)); hipMalloc((void **)&qt2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); hipMalloc((void **)&qt2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); hipMalloc((void **)&qt2yy_pxD, 
sizeof(float) * (*nzbtm) * nti * (*nybtm)); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0]; hipMalloc((void **)&t2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); hipMalloc((void **)&t2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); hipMalloc((void **)&t2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); hipMalloc((void **)&t2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); hipMalloc((void **)&qt2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); hipMalloc((void **)&qt2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); hipMalloc((void **)&qt2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); hipMalloc((void **)&qt2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); } hipMalloc((void **)&t2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&t2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&t2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&t2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&qt2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); hipMalloc((void **)&v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3)); hipMalloc((void **)&v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3)); hipMalloc((void **)&v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3)); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml); cudaRes = hipMalloc((void **)&v2x_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_px"); cudaRes = hipMalloc((void **)&v2y_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_px"); cudaRes = hipMalloc((void **)&v2z_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml); cudaRes = hipMalloc((void **)&v2x_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_py"); cudaRes = hipMalloc((void **)&v2y_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_py"); cudaRes = hipMalloc((void **)&v2z_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_py"); } cudaRes = hipMalloc((void **)&v2x_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_pz"); cudaRes = hipMalloc((void **)&v2y_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_pz"); cudaRes = hipMalloc((void **)&v2z_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_pz"); printf("done!\n"); return; } void cpy_h2d_velocityInputsCOneTime(int *lbx, int *lby, int *nd1_vel, float *rho, float *drvh1, float *drti1, float *damp1_x, float *damp1_y, int *idmat1, float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1, float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, float *v1x_px, float *v1y_px, float *v1z_px, float *v1x_py, float *v1y_py, float *v1z_py, int *nd2_vel, float 
*drvh2, float *drti2, int *idmat2, float *damp2_x, float *damp2_y, float *damp2_z, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, float *v2x_px, float *v2y_px, float *v2z_px, float *v2x_py, float *v2y_py, float *v2z_py, float *v2x_pz, float *v2y_pz, float *v2z_pz, int *nmat, //dimension #, int int *mw1_pml1, //int int *mw2_pml1, //int int *nxtop, //int int *nytop, //int int *nztop, int *mw1_pml, //int int *mw2_pml, //int int *nxbtm, //int int *nybtm, //int int *nzbtm, int *nzbm1) { printf("[CUDA] initial h2d cpy for velocity ........"); hipError_t cudaRes; int nv2; // int i; // for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++) // { // printf("%f ", t2xy[i]); // } // printf("\n"); //for inner_I cudaRes = hipMemcpy(nd1_velD, nd1_vel, sizeof(int) * 18, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd1_vel"); cudaRes = hipMemcpy(rhoD, rho, sizeof(float) * (*nmat), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, rho"); cudaRes = hipMemcpy(drvh1D, drvh1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh1"); cudaRes = hipMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti1"); if (lbx[1] >= lbx[0]) { cudaRes = hipMemcpy(damp1_xD, damp1_x, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_x"); } if (lby[1] >= lby[0]) { cudaRes = hipMemcpy(damp1_yD, damp1_y, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_y"); } cudaRes = hipMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat1"); cudaRes = hipMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi1"); cudaRes = hipMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi1"); cudaRes = hipMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi1"); cudaRes = hipMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh1"); cudaRes = hipMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh1"); cudaRes = hipMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzh1"); cudaRes = hipMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx"); cudaRes = hipMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy"); cudaRes = hipMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz"); cudaRes = hipMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, 
t1yy"); cudaRes = hipMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz"); cudaRes = hipMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz"); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml); cudaRes = hipMemcpy(v1x_pxD, v1x_px, sizeof(float) * (*nztop) * nv2 * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_px"); cudaRes = hipMemcpy(v1y_pxD, v1y_px, sizeof(float) * (*nztop) * nv2 * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_px"); cudaRes = hipMemcpy(v1z_pxD, v1z_px, sizeof(float) * (*nztop) * nv2 * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml); cudaRes = hipMemcpy(v1x_pyD, v1x_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_py"); cudaRes = hipMemcpy(v1y_pyD, v1y_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_py"); cudaRes = hipMemcpy(v1z_pyD, v1z_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_py"); } //for inner_II cudaRes = hipMemcpy(nd2_velD, nd2_vel, sizeof(int) * 18, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd2_vel"); cudaRes = hipMemcpy(drvh2D, drvh2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh2"); cudaRes = hipMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti2"); cudaRes = hipMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat2"); if (lbx[1] >= lbx[0]) { cudaRes = hipMemcpy(damp2_xD, damp2_x, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_x"); } if (lby[1] >= lby[0]) { cudaRes = hipMemcpy(damp2_yD, damp2_y, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_y"); } cudaRes = hipMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_z"); cudaRes = hipMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi2"); cudaRes = hipMemcpy(dyi2D, dyi2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi2"); cudaRes = hipMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi2"); cudaRes = hipMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh2"); cudaRes = hipMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh2"); cudaRes = hipMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, 
dzh2"); cudaRes = hipMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx"); cudaRes = hipMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy"); cudaRes = hipMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz"); cudaRes = hipMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy"); cudaRes = hipMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz"); cudaRes = hipMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz"); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml); cudaRes = hipMemcpy(v2x_pxD, v2x_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_px"); cudaRes = hipMemcpy(v2y_pxD, v2y_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_px"); cudaRes = hipMemcpy(v2z_pxD, v2z_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml); cudaRes = hipMemcpy(v2x_pyD, v2x_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_py"); cudaRes = hipMemcpy(v2y_pyD, v2y_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_py"); cudaRes = hipMemcpy(v2z_pyD, v2z_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_py"); } cudaRes = hipMemcpy(v2x_pzD, v2x_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_pz"); cudaRes = hipMemcpy(v2y_pzD, v2y_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_pz"); cudaRes = hipMemcpy(v2z_pzD, v2z_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_pz"); printf("done!\n"); return; } void cpy_h2d_velocityInputsC(float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] h2d cpy for input .........."); hipError_t cudaRes; //for inner_I cudaRes = hipMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx"); cudaRes = hipMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy"); cudaRes = hipMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz"); cudaRes = 
hipMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yy"); cudaRes = hipMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz"); cudaRes = hipMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz"); //for inner_II cudaRes = hipMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx"); cudaRes = hipMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy"); cudaRes = hipMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz"); cudaRes = hipMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy"); cudaRes = hipMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz"); cudaRes = hipMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz"); printf("done!\n"); return; } //===================================================================== void cpy_h2d_stressInputsCOneTime(int *lbx, int *lby, int *nd1_txy, int *nd1_txz, int *nd1_tyy, int *nd1_tyz, float *drti1, float *drth1, float *damp1_x, float *damp1_y, int *idmat1, float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1, float *v1x, float *v1y, float *v1z, float *t1xx_px, float *t1xy_px, float *t1xz_px, float *t1yy_px, float *qt1xx_px, float *qt1xy_px, float *qt1xz_px, float *qt1yy_px, float *t1xx_py, float *t1xy_py, float *t1yy_py, float *t1yz_py, float *qt1xx_py, float *qt1xy_py, float *qt1yy_py, float *qt1yz_py, float *qt1xx, float *qt1xy, float *qt1xz, float *qt1yy, float *qt1yz, float *qt1zz, float *clamda, float *cmu, float *epdt, float *qwp, float *qws, float *qwt1, float *qwt2, int *nd2_txy, int *nd2_txz, int *nd2_tyy, int *nd2_tyz, float *drti2, float *drth2, int *idmat2, float *damp2_x, float *damp2_y, float *damp2_z, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *v2x, float *v2y, float *v2z, float *qt2xx, float *qt2xy, float *qt2xz, float *qt2yy, float *qt2yz, float *qt2zz, float *t2xx_px, float *t2xy_px, float *t2xz_px, float *t2yy_px, float *qt2xx_px, float *qt2xy_px, float *qt2xz_px, float *qt2yy_px, float *t2xx_py, float *t2xy_py, float *t2yy_py, float *t2yz_py, float *qt2xx_py, float *qt2xy_py, float *qt2yy_py, float *qt2yz_py, float *t2xx_pz, float *t2xz_pz, float *t2yz_pz, float *t2zz_pz, float *qt2xx_pz, float *qt2xz_pz, float *qt2yz_pz, float *qt2zz_pz, int *nmat, //dimension #, int int *mw1_pml1, //int int *mw2_pml1, //int int *nxtop, //int int *nytop, //int int *nztop, int *mw1_pml, //int int *mw2_pml, //int int *nxbtm, //int int *nybtm, //int int *nzbtm, int *nll) { printf("[CUDA] initial h2d cpy for stress ..........."); hipError_t cudaRes; int nti, nth; //for inner_I cudaRes = hipMemcpy(nd1_txyD, nd1_txy, sizeof(int) * 18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(nd1_txzD, nd1_txz, sizeof(int) * 
18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(nd1_tyyD, nd1_tyy, sizeof(int) * 18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(nd1_tyzD, nd1_tyz, sizeof(int) * 18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti1"); cudaRes = hipMemcpy(drth1D, drth1, sizeof(float) * (*mw1_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth1"); if (lbx[1] >= lbx[0]) { cudaRes = hipMemcpy(damp1_xD, damp1_x, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_x"); } if (lby[1] >= lby[0]) { cudaRes = hipMemcpy(damp1_yD, damp1_y, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_y"); } cudaRes = hipMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat1"); cudaRes = hipMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi1"); cudaRes = hipMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi1"); cudaRes = hipMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi1"); cudaRes = hipMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh1"); cudaRes = hipMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh1"); cudaRes = hipMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh1"); hipMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); hipMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); hipMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0]; hipMemcpy(t1xx_pxD, t1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(t1xy_pxD, t1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), hipMemcpyHostToDevice); hipMemcpy(t1xz_pxD, t1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), hipMemcpyHostToDevice); hipMemcpy(t1yy_pxD, t1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1xx_pxD, qt1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1xy_pxD, qt1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1xz_pxD, qt1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1yy_pxD, qt1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), hipMemcpyHostToDevice); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0]; hipMemcpy(t1xx_pyD, t1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice); hipMemcpy(t1xy_pyD, t1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, hipMemcpyHostToDevice); hipMemcpy(t1yy_pyD, t1yy_py, 
sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice); hipMemcpy(t1yz_pyD, t1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, hipMemcpyHostToDevice); hipMemcpy(qt1xx_pyD, qt1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice); hipMemcpy(qt1xy_pyD, qt1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, hipMemcpyHostToDevice); hipMemcpy(qt1yy_pyD, qt1yy_py, sizeof(float) * (*nztop) * (*nxtop) * nti, hipMemcpyHostToDevice); hipMemcpy(qt1yz_pyD, qt1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, hipMemcpyHostToDevice); } hipMemcpy(qt1xxD, qt1xx, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1xyD, qt1xy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1xzD, qt1xz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1yyD, qt1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1yzD, qt1yz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(qt1zzD, qt1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); hipMemcpy(clamdaD, clamda, sizeof(float) * (*nmat), hipMemcpyHostToDevice); hipMemcpy(cmuD, cmu, sizeof(float) * (*nmat), hipMemcpyHostToDevice); hipMemcpy(epdtD, epdt, sizeof(float) * (*nll), hipMemcpyHostToDevice); hipMemcpy(qwpD, qwp, sizeof(float) * (*nmat), hipMemcpyHostToDevice); hipMemcpy(qwsD, qws, sizeof(float) * (*nmat), hipMemcpyHostToDevice); hipMemcpy(qwt1D, qwt1, sizeof(float) * (*nll), hipMemcpyHostToDevice); hipMemcpy(qwt2D, qwt2, sizeof(float) * (*nll), hipMemcpyHostToDevice); //for inner_II cudaRes = hipMemcpy(nd2_txyD, nd2_txy, sizeof(int) * 18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(nd2_txzD, nd2_txz, sizeof(int) * 18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(nd2_tyyD, nd2_tyy, sizeof(int) * 18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(nd2_tyzD, nd2_tyz, sizeof(int) * 18, hipMemcpyHostToDevice); cudaRes = hipMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti2"); cudaRes = hipMemcpy(drth2D, drth2, sizeof(float) * (*mw2_pml1) * 2, hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth2"); cudaRes = hipMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat2"); if (lbx[1] >= lbx[0]) { cudaRes = hipMemcpy(damp2_xD, damp2_x, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_x"); } if (lby[1] >= lby[0]) { cudaRes = hipMemcpy(damp2_yD, damp2_y, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_y"); } cudaRes = hipMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_z"); cudaRes = hipMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi2"); cudaRes = hipMemcpy(dyi2D, dyi2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi2"); cudaRes = hipMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi2"); cudaRes = hipMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), 
hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh2"); cudaRes = hipMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh2"); cudaRes = hipMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh2"); hipMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); hipMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); hipMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); hipMemcpy(qt2xxD, qt2xx, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2xyD, qt2xy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2xzD, qt2xz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2yyD, qt2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2yzD, qt2yz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2zzD, qt2zz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0]; hipMemcpy(t2xx_pxD, t2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(t2xy_pxD, t2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(t2xz_pxD, t2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(t2yy_pxD, t2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2xx_pxD, qt2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2xy_pxD, qt2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2xz_pxD, qt2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2yy_pxD, qt2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), hipMemcpyHostToDevice); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0]; hipMemcpy(t2xx_pyD, t2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice); hipMemcpy(t2xy_pyD, t2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice); hipMemcpy(t2yy_pyD, t2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice); hipMemcpy(t2yz_pyD, t2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice); hipMemcpy(qt2xx_pyD, qt2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice); hipMemcpy(qt2xy_pyD, qt2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice); hipMemcpy(qt2yy_pyD, qt2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, hipMemcpyHostToDevice); hipMemcpy(qt2yz_pyD, qt2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, hipMemcpyHostToDevice); } hipMemcpy(t2xx_pzD, t2xx_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(t2xz_pzD, t2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(t2yz_pzD, t2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(t2zz_pzD, t2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2xx_pzD, qt2xx_pz, sizeof(float) * (*mw2_pml) 
* (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2xz_pzD, qt2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2yz_pzD, qt2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); hipMemcpy(qt2zz_pzD, qt2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); printf("done!\n"); return; } void cpy_h2d_stressInputsC(float *v1x, float *v1y, float *v1z, float *v2x, float *v2y, float *v2z, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] h2d cpy for input .............."); hipError_t cudaRes; //for inner_I hipMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); hipMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); hipMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); //for inner_II hipMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); hipMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); hipMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); printf("done!\n"); return; } //===================================================================== void cpy_h2d_velocityOutputsC(float *v1x, float *v1y, float *v1z, float *v2x, float *v2y, float *v2z, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] h2d cpy for output ........."); hipError_t cudaRes; //for inner_I cudaRes = hipMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x"); cudaRes = hipMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y"); cudaRes = hipMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z"); //for inner_II cudaRes = hipMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x"); cudaRes = hipMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y"); cudaRes = hipMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z"); printf("done!\n"); return; } //===================================================================== void cpy_d2h_velocityOutputsC(float *v1x, float *v1y, float *v1z, float *v2x, float *v2y, float *v2z, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] d2h cpy for output ........."); hipError_t cudaRes; //for inner_I cudaRes = hipMemcpy(v1x, v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1x"); cudaRes = hipMemcpy(v1y, v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1y"); cudaRes = hipMemcpy(v1z, v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, 
"outputDataCopyDeviceToHost1, v1z"); //for inner_II cudaRes = hipMemcpy(v2x, v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2x"); cudaRes = hipMemcpy(v2y, v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2y"); cudaRes = hipMemcpy(v2z, v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, vzz"); printf("done!\n"); return; } void cpy_h2d_stressOutputsC(float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] h2d cpy for output .............."); hipError_t cudaRes; int nth, nti; cudaRes = hipMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xx"); cudaRes = hipMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xy"); cudaRes = hipMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xz"); cudaRes = hipMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yy"); cudaRes = hipMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yz"); cudaRes = hipMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1zz"); //for inner_II cudaRes = hipMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xx"); cudaRes = hipMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xy"); cudaRes = hipMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xz"); cudaRes = hipMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yy"); cudaRes = hipMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yz"); cudaRes = hipMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2zz"); printf("done!\n"); return; } void cpy_d2h_stressOutputsC(float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] stress cpy d2h for output ....."); // printf("\nnxtop=%d, nytop=%d, nztop=%d\n", *nxtop, *nytop, *nztop); // printf("nxbtm=%d, nybtm=%d, nzbtm=%d\n", *nxbtm, *nybtm, *nzbtm); hipError_t cudaRes; cudaRes = hipMemcpy(t1xx, t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * 
(*nytop), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xx"); cudaRes = hipMemcpy(t1xy, t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xy"); cudaRes = hipMemcpy(t1xz, t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xz"); cudaRes = hipMemcpy(t1yy, t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yy"); cudaRes = hipMemcpy(t1yz, t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yz"); cudaRes = hipMemcpy(t1zz, t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1zz"); cudaRes = hipMemcpy(t2xx, t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xx"); cudaRes = hipMemcpy(t2xy, t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xy"); cudaRes = hipMemcpy(t2xz, t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xz"); cudaRes = hipMemcpy(t2yy, t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yy"); cudaRes = hipMemcpy(t2yz, t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yz"); cudaRes = hipMemcpy(t2zz, t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), hipMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2zz"); printf("done!\n"); // int i; // for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++) // { // printf("%f ", t2xx[i]); // } // printf("\n"); return; } void free_device_memC(int *lbx, int *lby) { //debug--------------------------------------------------- printf("[CUDA] id = %d, vel, H2D =, %.3f, D2H =, %.3f, comp =, %.3f\n", procID, totalTimeH2DV, totalTimeD2HV, totalTimeCompV); printf("[CUDA] id = %d, str, H2D =, %.3f, D2H =, %.3f, comp =, %.3f\n", procID, totalTimeH2DS, totalTimeD2HS, totalTimeCompS); //------------------------------------------------- hipFree(nd1_velD); hipFree(nd1_txyD); hipFree(nd1_txzD); hipFree(nd1_tyyD); hipFree(nd1_tyzD); hipFree(rhoD); hipFree(drvh1D); hipFree(drti1D); hipFree(drth1D); hipFree(idmat1D); hipFree(dxi1D); hipFree(dyi1D); hipFree(dzi1D); hipFree(dxh1D); hipFree(dyh1D); hipFree(dzh1D); hipFree(t1xxD); hipFree(t1xyD); hipFree(t1xzD); hipFree(t1yyD); hipFree(t1yzD); hipFree(t1zzD); hipFree(v1xD); //output hipFree(v1yD); hipFree(v1zD); if (lbx[1] >= lbx[0]) { hipFree(damp1_xD); hipFree(t1xx_pxD); hipFree(t1xy_pxD); hipFree(t1xz_pxD); hipFree(t1yy_pxD); hipFree(qt1xx_pxD); hipFree(qt1xy_pxD); hipFree(qt1xz_pxD); hipFree(qt1yy_pxD); hipFree(v1x_pxD); hipFree(v1y_pxD); hipFree(v1z_pxD); } if (lby[1] >= lby[0]) { hipFree(damp1_yD); hipFree(t1xx_pyD); hipFree(t1xy_pyD); hipFree(t1yy_pyD); hipFree(t1yz_pyD); hipFree(qt1xx_pyD); hipFree(qt1xy_pyD); hipFree(qt1yy_pyD); hipFree(qt1yz_pyD); hipFree(v1x_pyD); hipFree(v1y_pyD); hipFree(v1z_pyD); } hipFree(qt1xxD); hipFree(qt1xyD); hipFree(qt1xzD); hipFree(qt1yyD); hipFree(qt1yzD); hipFree(qt1zzD); 
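    /* Teardown mirrors allocate_gpu_memC: the material (clamda/cmu) and attenuation-related
       tables are released next, followed by the inner_II (bottom-grid) buffers; the x- and
       y-PML work arrays are freed only when the corresponding lbx/lby range was active at
       allocation time. A hedged sketch of how each release could also be checked
       (hypothetical helper, not used elsewhere in this file):

           static void freeOnDeviceChecked(void *ptr, const char *tag) {
               hipError_t res = hipFree(ptr);
               CHECK_ERROR(res, tag);
           }
    */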
hipFree(clamdaD); hipFree(cmuD); hipFree(epdtD); hipFree(qwpD); hipFree(qwsD); hipFree(qwt1D); hipFree(qwt2D); //------------------------------------- hipFree(nd2_velD); hipFree(nd2_txyD); hipFree(nd2_txzD); hipFree(nd2_tyyD); hipFree(nd2_tyzD); hipFree(drvh2D); hipFree(drti2D); hipFree(drth2D); hipFree(idmat2D); hipFree(damp2_zD); hipFree(dxi2D); hipFree(dyi2D); hipFree(dzi2D); hipFree(dxh2D); hipFree(dyh2D); hipFree(dzh2D); hipFree(t2xxD); hipFree(t2xyD); hipFree(t2xzD); hipFree(t2yyD); hipFree(t2yzD); hipFree(t2zzD); hipFree(qt2xxD); hipFree(qt2xyD); hipFree(qt2xzD); hipFree(qt2yyD); hipFree(qt2yzD); hipFree(qt2zzD); if (lbx[1] >= lbx[0]) { hipFree(damp2_xD); hipFree(t2xx_pxD); hipFree(t2xy_pxD); hipFree(t2xz_pxD); hipFree(t2yy_pxD); hipFree(qt2xx_pxD); hipFree(qt2xy_pxD); hipFree(qt2xz_pxD); hipFree(qt2yy_pxD); hipFree(v2x_pxD); hipFree(v2y_pxD); hipFree(v2z_pxD); } if (lby[1] >= lby[0]) { hipFree(damp2_yD); hipFree(t2xx_pyD); hipFree(t2xy_pyD); hipFree(t2yy_pyD); hipFree(t2yz_pyD); hipFree(qt2xx_pyD); hipFree(qt2xy_pyD); hipFree(qt2yy_pyD); hipFree(qt2yz_pyD); hipFree(v2x_pyD); hipFree(v2y_pyD); hipFree(v2z_pyD); } hipFree(t2xx_pzD); hipFree(t2xz_pzD); hipFree(t2yz_pzD); hipFree(t2zz_pzD); hipFree(qt2xx_pzD); hipFree(qt2xz_pzD); hipFree(qt2yz_pzD); hipFree(qt2zz_pzD); hipFree(v2xD); //output hipFree(v2yD); hipFree(v2zD); hipFree(v2x_pzD); hipFree(v2y_pzD); hipFree(v2z_pzD); printf("[CUDA] memory space is freed.\n"); return; } void compute_velocityC(int *nztop, int *nztm1, float *ca, int *lbx, int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, float *damp1_xM, float *damp1_yM, int *idmat1M, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM, float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM, int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M, int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp, float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM, float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM, float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1, int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml, int *nxbtm, int *nybtm, int *nzbtm, int *myid) { printf("[CUDA] velocity computation:\n"); //difine the dimensions of different kernels int blockSizeX = 8; int blockSizeY = 8; float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM; // extract specific input/output pointers v1xM=(float *) *v1xMp; v1yM=(float *) *v1yMp; v1zM=(float *) *v1zMp; v2xM=(float *) *v2xMp; v2yM=(float *) *v2yMp; v2zM=(float *) *v2zMp; procID = *myid; gettimeofday(&t1, NULL); cpy_h2d_velocityInputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); cpy_h2d_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeH2DV += tmpTime; gettimeofday(&t1, NULL); dim3 dimBlock(blockSizeX, blockSizeY); int gridSizeX1 = (nd1_vel[3] - nd1_vel[2])/blockSizeX + 1; int gridSizeY1 = (nd1_vel[9] - nd1_vel[8])/blockSizeY + 1; dim3 dimGrid1(gridSizeX1, 
gridSizeY1); // printf("myid = %d, grid1 = (%d, %d)\n", *myid, gridSizeX1, gridSizeY1); //CUDA code hipLaunchKernelGGL(( velocity_inner_IC), dim3(dimGrid1), dim3(dimBlock), 0, 0, *nztop, *nztm1, *ca, nd1_velD, rhoD, idmat1D, dxi1D, dyi1D, dzi1D, dxh1D, dyh1D, dzh1D, t1xxD, t1xyD, t1xzD, t1yyD, t1yzD, t1zzD, *nxtop, //dimension # *nytop, v1xD, //output v1yD, v1zD); int gridSizeX2 = (nd1_vel[5] - nd1_vel[0])/blockSizeX + 1; int gridSizeY2 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid2(gridSizeX2, gridSizeY2); // printf("myid = %d, grid2 = (%d, %d)\n", *myid, gridSizeX2, gridSizeY2); if (lbx[1] >= lbx[0]) { hipLaunchKernelGGL(( vel_PmlX_IC), dim3(dimGrid2), dim3(dimBlock), 0, 0, *ca, lbx[0], lbx[1], nd1_velD, rhoD, drvh1D, drti1D, damp1_xD, idmat1D, dxi1D, dyi1D, dzi1D, dxh1D, dyh1D, dzh1D, t1xxD, t1xyD, t1xzD, t1yyD, t1yzD, t1zzD, *mw1_pml1, //dimension # *mw1_pml, *nxtop, *nytop, *nztop, v1xD, //output v1yD, v1zD, v1x_pxD, v1y_pxD, v1z_pxD); } int gridSizeX3 = (lby[1] - lby[0])/blockSizeX + 1; int gridSizeY3 = (nd1_vel[11] - nd1_vel[6])/blockSizeY + 1; dim3 dimGrid3(gridSizeX3, gridSizeY3); // printf("myid = %d, grid3 = (%d, %d)\n", *myid, gridSizeX3, gridSizeY3); if (lby[1] >= lby[0]) { /*vel_PmlY_IC<<<dimGrid3, dimBlock>>>(*nztop, *ca, lby[0], lby[1], nd1_velD, rhoD, drvh1D, drti1D, idmat1D, damp1_yD, dxi1D, dyi1D, dzi1D, dxh1D, dyh1D, dzh1D, t1xxD, t1xyD, t1xzD, t1yyD, t1yzD, t1zzD, *mw1_pml1, //dimension #s *mw1_pml, *nxtop, *nytop, v1xD, //output v1yD, v1zD, v1x_pyD, v1y_pyD, v1z_pyD); */ } int gridSizeX4 = (nd2_vel[3] - nd2_vel[2])/blockSizeX + 1; int gridSizeY4 = (nd2_vel[9] - nd2_vel[8])/blockSizeY + 1; dim3 dimGrid4(gridSizeX4, gridSizeY4); // printf("myid = %d, grid4 = (%d, %d)\n", *myid, gridSizeX4, gridSizeY4); hipLaunchKernelGGL(( velocity_inner_IIC), dim3(dimGrid4), dim3(dimBlock), 0, 0, *ca, nd2_velD, rhoD, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, idmat2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *nxbtm, *nybtm, *nzbtm, v2xD, //output v2yD, v2zD); int gridSizeX5 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1; int gridSizeY5 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid5(gridSizeX5, gridSizeY5); // printf("myid = %d, grid5 = (%d, %d)\n", *myid, gridSizeX5, gridSizeY5); if (lbx[1] >= lbx[0]) { hipLaunchKernelGGL(( vel_PmlX_IIC), dim3(dimGrid5), dim3(dimBlock), 0, 0, *nzbm1, *ca, lbx[0], lbx[1], nd2_velD, drvh2D, drti2D, rhoD, damp2_xD, idmat2D, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *mw2_pml1, //dimension #s *mw2_pml, *nxbtm, *nybtm, *nzbtm, v2xD, //output v2yD, v2zD, v2x_pxD, v2y_pxD, v2z_pxD); } int gridSizeX6 = (lby[1] - lby[0])/blockSizeX + 1; int gridSizeY6 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1; dim3 dimGrid6(gridSizeX6, gridSizeY6); // printf("myid = %d, grid = (%d, %d)\n", *myid, gridSizeX6, gridSizeY6); if (lby[1] >= lby[0]) { hipLaunchKernelGGL(( vel_PmlY_IIC), dim3(dimGrid6), dim3(dimBlock), 0, 0, *nzbm1, *ca, lby[0], lby[1], nd2_velD, drvh2D, drti2D, rhoD, damp2_yD, idmat2D, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *mw2_pml1, //dimension #s *mw2_pml, *nxbtm, *nybtm, *nzbtm, v2xD, //output v2yD, v2zD, v2x_pyD, v2y_pyD, v2z_pyD); } int gridSizeX7 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1; int gridSizeY7 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1; dim3 dimGrid7(gridSizeX7, gridSizeY7); // printf("myid = %d, grid7 = (%d, %d)\n", *myid, gridSizeX7, gridSizeY7); hipLaunchKernelGGL(( vel_PmlZ_IIC), dim3(dimGrid7), dim3(dimBlock), 0, 0, *nzbm1, *ca, nd2_velD, drvh2D, drti2D, 
rhoD, damp2_zD, idmat2D, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *mw2_pml1, //dimension #s *mw2_pml, *nxbtm, *nybtm, *nzbtm, v2xD, //output v2yD, v2zD, v2x_pzD, v2y_pzD, v2z_pzD); hipDeviceSynchronize(); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeCompV += tmpTime; gettimeofday(&t1, NULL); cpy_d2h_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeD2HV += tmpTime; /*int size = (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); write_output(v1xM, size, "OUTPUT_ARRAYS/v1xM.txt"); write_output(v1yM, size, "OUTPUT_ARRAYS/v1yM.txt"); write_output(v1zM, size, "OUTPUT_ARRAYS/v1zM.txt"); size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3); write_output(v2xM, size, "OUTPUT_ARRAYS/v2xM.txt"); write_output(v2yM, size, "OUTPUT_ARRAYS/v2yM.txt"); write_output(v2zM, size, "OUTPUT_ARRAYS/v2zM.txt"); */ return; } #ifdef __cplusplus } #endif __global__ void velocity_inner_IC(int nztop, int nztm1, float ca, int *nd1_vel, float *rhoM, int *idmat1M, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, int nxtop, //dimension # int nytop, float *v1xM, //output float *v1yM, float *v1zM) { int i, j, k, k3; float dtxz, dtyz, dtzz; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd1_vel[8]; if (j > nd1_vel[3] || i > nd1_vel[9]) { return; } for (k3 = 1; k3 <= 3; k3++) { k=k3; if(k3==3) k=nztop; if(k==1) { dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j); dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j); dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j) -35./24.*t1zz(k+1,i,j)+ 21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j)); } else if(k==2) { dtxz=dzi1(2,k)*t1xz(2,i,j)+dzi1(3,k)*t1xz(3,i,j)+dzi1(4,k)*t1xz(4,i,j); dtyz=dzi1(2,k)*t1yz(2,i,j)+dzi1(3,k)*t1yz(3,i,j)+dzi1(4,k)*t1yz(4,i,j); dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j) +29./24.*t1zz(k,i,j)- 3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j)); } else { dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j)); dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j)); dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j)); } v1x(k,i,j)=v1x(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))* (dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+ dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+ dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+ dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+dtxz); v1y(k,i,j)=v1y(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))* (dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+ dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+ dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+ dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+dtyz); v1z(k,i,j)=v1z(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))* (dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+ dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+ dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+ dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1)+dtzz); } for (k = 3; k <=nztm1; k++) { v1x(k,i,j)=v1x(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))* (dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+ dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+ dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+ dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+ dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+ 
dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j)); v1y(k,i,j)=v1y(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))* (dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+ dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+ dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+ dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+ dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j)); v1z(k,i,j)=v1z(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))* (dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+ dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+ dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+ dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1)+ dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+ dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j)); } return; } //----------------------------------------------------------------------- __global__ void velocity_inner_IIC(float ca, int *nd2_vel, float *rhoM, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, int *idmat2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int nxbtm, //dimension #s int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM) { int i, j, k; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[8]; if (j > nd2_vel[3] || i > nd2_vel[9]) { return; } //for (j = nd2_vel(3); j <= nd2_vel(4); j++) //for (j = nd2_vel[2]; j <= nd2_vel[3]; j++) //{ //for (i = nd2_vel[8]; i <= nd2_vel[9]; i++) //{ k=1; v2x(k,i,j)=v2x(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+ dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+ dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j)); v2y(k,i,j)=v2y(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+ dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+ dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j)); v2z(k,i,j)=v2z(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+ dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+ dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j))); //for (k = 2; k <= nd2_vel(16); k++) for (k = 2; k <= nd2_vel[15]; k++) { v2x(k,i,j)=v2x(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+ dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+ dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j)); v2y(k,i,j)=v2y(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+ dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+ dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j)); v2z(k,i,j)=v2z(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+ 
dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+ dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+ dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j)); } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlX_IC(float ca, int lbx0, int lbx1, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, float *damp1_xM, int *idmat1M, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, int mw1_pml1, //dimension # int mw1_pml, int nxtop, int nytop, int nztop, float *v1xM, //output float *v1yM, float *v1zM, float *v1x_pxM, float *v1y_pxM, float *v1z_pxM) { // !Compute the velocities in region of PML-x-I // use grid_node_comm // use wave_field_comm // implicit NONE int i,j,k,lb,ib,kb; float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz, vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz,dtxy,dtyy,dtzy; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; //int nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml; int nv2x=(lbx1 - lbx0 + 1) * mw1_pml; //if ( lbx(1)>lbx(2) ) return; if (lbx0 > lbx1) { return; } if (j > nd1_vel[5] || lb > lbx1) { return; } //calculate the value of ib ib = 0; for (k = lbx0; k < lb; k++) { for (i = nd1_vel[6+4*k]; i <= nd1_vel[7+4*k]; i++) { ib++; } } //for (j = nd1_vel(1); j <= nd1_vel(6); j++) //for (j = nd1_vel[0]; j <= nd1_vel[5]; j++) //{ //ib=0; //for (lb = lbx(1); lb <= lbx(2); lb++) //for (lb = lbx[0]; lb <= lbx[1]; lb++) //{ kb=0; //for (i = nd1_vel(7+4*lb); i <= nd1_vel(8+4*lb); i++) for (i = nd1_vel[6+4*lb]; i <= nd1_vel[7+4*lb]; i++) { kb=kb+1; ib=ib+1; rth=drvh1(kb,lb); rti=drti1(kb,lb); for (k = 1; k <= nztop; k++) { damp0=damp1_x(k,j,lb); dmpx2=1./(1.+rth*damp0); dmpx1=dmpx2*2.-1.; dmpyz2=1./(1.+rti*damp0); dmpyz1=dmpyz2*2.-1.; ro1=rho(idmat1(k,i,j)); rox=0.5*(ro1+rho(idmat1(k,i+1,j))); roy=0.5*(ro1+rho(idmat1(k,i,j+1))); roz=0.5*(ro1+rho(idmat1(k-1,i,j))); vtmpx=v1x(k,i,j)-v1x_px(k,ib,j); vtmpy=v1y(k,i,j)-v1y_px(k,ib,j); vtmpz=v1z(k,i,j)-v1z_px(k,ib,j); //if(j>nd1_vel(2) && j<nd1_vel(5)) if(j>nd1_vel[1] && j<nd1_vel[4]) { dtxy=dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+ dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1); dtyy=dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+ dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2); dtzy=dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+ dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1); if(k==1) { dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j); dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j); dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+ 21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j)); } else if(k==2) { dtxz=dzi1(2,k)*t1xz(k,i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(2,k)*t1yz(k,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)- 3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j)); } else if(k==nztop) { dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j)); dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j)); dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j)); } else { dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+ dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j); } 
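	// Split-field PML update in x: each velocity component is carried as
	//   v1 = (regular part) + v1*_px,
	// and below the stored PML part is advanced with the damped recursion
	//   v1x_px <- dmpx1*v1x_px + dmpx2*rox*dxi1(2,i)/ca*(t1xx(k,i,j)-t1xx(k,i+1,j)),
	// where dmpx2 = 1/(1+rth*damp0) and dmpx1 = 2*dmpx2-1 are computed at the top of the k loop.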
vtmpx=vtmpx+(dtxy+dtxz)*rox; vtmpy=vtmpy+(dtyy+dtyz)*roy; vtmpz=vtmpz+(dtzy+dtzz)*roz; } v1x_px(k,ib,j)=v1x_px(k,ib,j)*dmpx1+dmpx2*rox* dxi1(2,i)/ca*(t1xx(k,i,j)-t1xx(k,i+1,j)); v1x(k,i,j)=vtmpx+v1x_px(k,ib,j); v1y_px(k,ib,j)=v1y_px(k,ib,j)*dmpyz1+dmpyz2*roy* dxh1(2,i)/ca*(t1xy(k,i-1,j)-t1xy(k,i,j)); v1y(k,i,j)=vtmpy+v1y_px(k,ib,j); v1z_px(k,ib,j)=v1z_px(k,ib,j)*dmpyz1+dmpyz2*roz* dxh1(2,i)/ca*(t1xz(k,i-1,j)-t1xz(k,i,j)); v1z(k,i,j)=vtmpz+v1z_px(k,ib,j); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlY_IC(int nztop, float ca, int lby0, int lby1, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, int *idmat1M, float *damp1_yM, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, int mw1_pml1, //dimension #s int mw1_pml, int nxtop, int nytop, float *v1xM, //output float *v1yM, float *v1zM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM) { int i,j,k,lb,jb,kb, jbIni; float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz, dtxz,dtyz,dtzz,vtmpx,vtmpy,vtmpz; //if( lby(1)>lby(2) ) if( lby0>lby1 ) return; lb = blockDim.x * blockIdx.x + threadIdx.x + lby0; i = blockDim.y * blockIdx.y + threadIdx.y + nd1_vel[6]; if (lb > lby1 || i > nd1_vel[11]) { return; } jbIni = 0; for (k = lby0; k < lb; i++) { for (j = nd1_vel[4*k]; j <= nd1_vel[1+4*lb]; j++) { jbIni++; } } jb = jbIni; kb = 0; //for (lb = lby(1); lb <= lby(2); lb++) //for (lb = lby0; lb <= lby1; lb++) //{ // kb=0; // //for (i = nd1_vel(7); i <= nd1_vel(12); i++) // for (i = nd1_vel[6]; i <= nd1_vel[11]; i++) // { //for (j = nd1_vel(1+4*lb); j <= nd1_vel(2+4*lb); j++) for (j = nd1_vel[4*lb]; j <= nd1_vel[1+4*lb]; j++) { kb=kb+1; jb=jb+1; rth=drvh1(kb,lb); rti=drti1(kb,lb); for (k = 1; k <= nztop; k++) { damp0=damp1_y(k,i,lb); dmpy2=1./(1.+rth*damp0); dmpy1=dmpy2*2.-1.; dmpxz2=1./(1.+rti*damp0); dmpxz1=dmpxz2*2.-1.; ro1=rho(idmat1(k,i,j)); rox=0.5*(ro1+rho(idmat1(k,i+1,j))); roy=0.5*(ro1+rho(idmat1(k,i,j+1))); roz=0.5*(ro1+rho(idmat1(k-1,i,j))); if(k==1) { dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j); dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j); dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+ 21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j)); } else if(k==2) { dtxz=dzi1(2,k)*t1xz(k,i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(2,k)*t1yz(k,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)- 3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j)); } else if(k==nztop) { dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j)); dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j)); dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j)); } else { dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+ dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j); } vtmpx=v1x(k,i,j)-v1x_py(k,i,jb)+dtxz*rox; vtmpy=v1y(k,i,j)-v1y_py(k,i,jb)+dtyz*roy; vtmpz=v1z(k,i,j)-v1z_py(k,i,jb)+dtzz*roz; //if(i>nd1_vel(8) && i<nd1_vel(11)) if(i>nd1_vel[7] && i<nd1_vel[10]) { vtmpx=vtmpx+ rox*(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+ dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)); vtmpy=vtmpy+ roy*(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+ dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)); 
vtmpz=vtmpz+ roz*(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+ dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)); } v1x_py(k,i,jb)=v1x_py(k,i,jb)*dmpxz1+dmpxz2* rox*dyh1(2,j)/ca*(t1xy(k,i,j-1)-t1xy(k,i,j)); v1x(k,i,j)=vtmpx+v1x_py(k,i,jb); v1y_py(k,i,jb)=v1y_py(k,i,jb)*dmpy1+dmpy2* roy*dyi1(2,j)/ca*(t1yy(k,i,j)-t1yy(k,i,j+1)); v1y(k,i,j)=vtmpy+v1y_py(k,i,jb); v1z_py(k,i,jb)=v1z_py(k,i,jb)*dmpxz1+dmpxz2* roz*dyh1(2,j)/ca*(t1yz(k,i,j-1)-t1yz(k,i,j)); v1z(k,i,j)=vtmpz+v1z_py(k,i,jb); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlX_IIC(int nzbm1, float ca, int lbx0, int lbx1, int *nd2_vel, float *drvh2M, float *drti2M, float *rhoM, float *damp2_xM, int *idmat2M, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM, float *v2x_pxM, float *v2y_pxM, float *v2z_pxM) { int i,j,k,lb,ib,kb; float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz, vtmpx,vtmpy,vtmpz,dtxy,dtyy,dtzy,dtxz,dtyz,dtzz; //int nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml; int nv2y = (lbx1 - lbx0 + 1) * mw2_pml; //if ( lbx(1)>lbx(2) ) return; if ( lbx0>lbx1 ) return; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_vel[5] || lb > lbx1) { return; } ib = 0; for (k = lbx0; k < lb; k++) { for (i = nd2_vel[6+4*k]; i < nd2_vel[7+4*k]; i++) { ib++; } } //for (j = nd2_vel(1); j <= nd2_vel(6); j++) //for (j = nd2_vel[0]; j <= nd2_vel[5]; j++) //{ //ib=0; //for (lb = lbx(1); lb <= lbx(2); lb++) //for (lb = lbx0; lb <= lbx1; lb++) //{ kb=0; //for (i = nd2_vel(7+4*lb); i <= nd2_vel(8+4*lb); i++) for (i = nd2_vel[6+4*lb]; i <= nd2_vel[7+4*lb]; i++) { kb=kb+1; ib=ib+1; rth=drvh2(kb,lb); rti=drti2(kb,lb); for (k = 1; k <= nzbm1; k++) { damp0=damp2_x(k,j,lb); dmpx2=1./(1.+rth*damp0); dmpx1=dmpx2*2.-1.; dmpyz2=1./(1.+rti*damp0); dmpyz1=dmpyz2*2.-1.; ro1=rho(idmat2(k,i,j)); rox=0.5*(ro1+rho(idmat2(k,i+1,j))); roy=0.5*(ro1+rho(idmat2(k,i,j+1))); roz=0.5*(ro1+rho(idmat2(k-1,i,j))); vtmpx=v2x(k,i,j)-v2x_px(k,ib,j); vtmpy=v2y(k,i,j)-v2y_px(k,ib,j); vtmpz=v2z(k,i,j)-v2z_px(k,ib,j); //if(j>nd2_vel(2) && j<nd2_vel(5)) if(j>nd2_vel[1] && j<nd2_vel[4]) { dtxy=dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1); dtyy=dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2); dtzy=dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1); if(k==1) { dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j)); dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j)); dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)); } //else if(k<nd2_vel(17)) else if(k<nd2_vel[16]) { dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j); dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j); dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+ dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j); } else { dtxz=0.0; dtyz=0.0; dtzz=0.0; } vtmpx=vtmpx+(dtxy+dtxz)*rox; vtmpy=vtmpy+(dtyy+dtyz)*roy; vtmpz=vtmpz+(dtzy+dtzz)*roz; } v2x_px(k,ib,j)=v2x_px(k,ib,j)*dmpx1+dmpx2* rox*dxi2(2,i)/ca*(t2xx(k,i,j)-t2xx(k,i+1,j)); v2x(k,i,j)=vtmpx+v2x_px(k,ib,j); v2y_px(k,ib,j)=v2y_px(k,ib,j)*dmpyz1+dmpyz2* 
roy*dxh2(2,i)/ca*(t2xy(k,i-1,j)-t2xy(k,i,j)); v2y(k,i,j)=vtmpy+v2y_px(k,ib,j); v2z_px(k,ib,j)=v2z_px(k,ib,j)*dmpyz1+dmpyz2* roz*dxh2(2,i)/ca*(t2xz(k,i-1,j)-t2xz(k,i,j)); v2z(k,i,j)=vtmpz+v2z_px(k,ib,j); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlY_IIC(int nzbm1, float ca, int lby0, int lby1, int *nd2_vel, float *drvh2M, float *drti2M, float *rhoM, float *damp2_yM, int *idmat2M, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int mw2_pml1, int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM, float *v2x_pyM, float *v2y_pyM, float *v2z_pyM) { int i,j,k,lb,jb,kb, jbIni; float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz, vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz; //if( lby(1)>lby(2) ) return; if( lby0>lby1 ) { return; } lb = blockIdx.x * blockDim.x + threadIdx.x + lby0; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6]; if (lb > lby1 || i > nd2_vel[11]) { return; } jbIni = 0; for (j = lby0; j < lb; j++) { for (k = nd2_vel[4*j]; k <= nd2_vel[1+4*j]; k++) { jbIni++; } } jb = jbIni; kb = 0; //for (lb = lby(1); lb <= lby(2); lb++) //for (lb = lby0; lb <= lby1; lb++) //{ //kb=0; //for (i = nd2_vel(7); i <= nd2_vel(12); i++) //for (i = nd2_vel[6]; i <= nd2_vel[11]; i++) //{ //for (j = nd2_vel(1+4*lb); j <= nd2_vel(2+4*lb); j++) for (j = nd2_vel[4*lb]; j <= nd2_vel[1+4*lb]; j++) { kb=kb+1; jb=jb+1; rth=drvh2(kb,lb); rti=drti2(kb,lb); for (k = 1; k <= nzbm1; k++) { damp0=damp2_y(k,i,lb); dmpy2=1./(1.+rth*damp0); dmpy1=dmpy2*2.-1.0; dmpxz2=1./(1.+rti*damp0); dmpxz1=dmpxz2*2.-1.; ro1=rho(idmat2(k,i,j)); rox=0.5*(ro1+rho(idmat2(k,i+1,j))); roy=0.5*(ro1+rho(idmat2(k,i,j+1))); roz=0.5*(ro1+rho(idmat2(k-1,i,j))); vtmpx=v2x(k,i,j)-v2x_py(k,i,jb); vtmpy=v2y(k,i,j)-v2y_py(k,i,jb); vtmpz=v2z(k,i,j)-v2z_py(k,i,jb); //if(k<nd2_vel(17)) if(k<nd2_vel[16]) { if(k>1) { dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j); dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j); dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+ dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j); } else { dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j)); dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j)); dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)); } //if(i>nd2_vel(8) && i<nd2_vel(11)) if(i>nd2_vel[7] && i<nd2_vel[10]) { vtmpx=vtmpx+rox*(dtxz+ dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)); vtmpy=vtmpy+roy*(dtyz+ dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)); vtmpz=vtmpz+roz*(dtzz+ dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)); } else { vtmpx=vtmpx+rox*dtxz; vtmpy=vtmpy+roy*dtyz; vtmpz=vtmpz+roz*dtzz; } } else { //if(i>nd2_vel(8) && i<nd2_vel(11)) if(i>nd2_vel[7] && i<nd2_vel[10]) { vtmpx=vtmpx+rox* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)); vtmpy=vtmpy+ roy* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)); vtmpz=vtmpz+ roz* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)); } } v2x_py(k,i,jb)=v2x_py(k,i,jb)*dmpxz1+dmpxz2*rox* dyh2(2,j)/ca*(t2xy(k,i,j-1)-t2xy(k,i,j)); v2x(k,i,j)=vtmpx+v2x_py(k,i,jb); 
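	// The y and z components follow the same scheme: v2y_py advances with the dmpy1/dmpy2 pair
	// and the one-sided dyi2(2,j) difference of t2yy, while v2z_py (like v2x_py above) uses
	// dmpxz1/dmpxz2 with the dyh2(2,j) difference of t2yz.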
v2y_py(k,i,jb)=v2y_py(k,i,jb)*dmpy1+dmpy2*roy* dyi2(2,j)/ca*(t2yy(k,i,j)-t2yy(k,i,j+1)); v2y(k,i,j)=vtmpy+v2y_py(k,i,jb); v2z_py(k,i,jb)=v2z_py(k,i,jb)*dmpxz1+dmpxz2*roz* dyh2(2,j)/ca*(t2yz(k,i,j-1)-t2yz(k,i,j)); v2z(k,i,j)=vtmpz+v2z_py(k,i,jb); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlZ_IIC(int nzbm1, float ca, int *nd2_vel, float *drvh2M, float *drti2M, float *rhoM, float *damp2_zM, int *idmat2M, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM, float *v2x_pzM, float *v2y_pzM, float *v2z_pzM) { int i,j,k,kb; float damp0,dmpz2,dmpz1,dmpxy2,dmpxy1,ro1,rox,roy,roz,vtmpx,vtmpy,vtmpz; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6]; if (j > nd2_vel[5] || i > nd2_vel[11]) { return; } //for (j = nd2_vel(1); j <= nd2_vel(6); j++) //for (j = nd2_vel[0]; j <= nd2_vel[5]; j++) //{ //for (i = nd2_vel(7); i <= nd2_vel(12); i++) //for (i = nd2_vel[6]; i <= nd2_vel[11]; i++) //{ kb=0; damp0=damp2_z(i,j); //for (k = nd2_vel(17); k <= nzbm1; k++) for (k = nd2_vel[16]; k <= nzbm1; k++) { kb=kb+1; dmpz2=1./(1.+damp0*drti2(kb,1)); dmpz1=dmpz2*2.-1.; dmpxy2=1./(1.+damp0*drvh2(kb,1)); dmpxy1=dmpxy2*2.-1.; ro1=rho(idmat2(k,i,j)); rox=0.5*(ro1+rho(idmat2(k,i+1,j))); roy=0.5*(ro1+rho(idmat2(k,i,j+1))); roz=0.5*(ro1+rho(idmat2(k-1,i,j))); vtmpx=v2x(k,i,j)-v2x_pz(kb,i,j); vtmpy=v2y(k,i,j)-v2y_pz(kb,i,j); vtmpz=v2z(k,i,j)-v2z_pz(kb,i,j); //if(j>nd2_vel(2) && j<nd2_vel(5) && // i>nd2_vel(8) && i<nd2_vel(11)) if(j>nd2_vel[1] && j<nd2_vel[4] && i>nd2_vel[7] && i<nd2_vel[10]) { vtmpx=vtmpx+rox* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+ dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)); vtmpy=vtmpy+roy* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+ dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)); vtmpz=vtmpz+roz* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+ dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)); } v2x_pz(kb,i,j)=v2x_pz(kb,i,j)*dmpxy1+dmpxy2*rox* dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j)); v2x(k,i,j)=vtmpx+v2x_pz(kb,i,j); v2y_pz(kb,i,j)=v2y_pz(kb,i,j)*dmpxy1+dmpxy2*roy* dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j)); v2y(k,i,j)=vtmpy+v2y_pz(kb,i,j); v2z_pz(kb,i,j)=v2z_pz(kb,i,j)*dmpz1+dmpz2*roz* dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)); v2z(k,i,j)=vtmpz+v2z_pz(kb,i,j); } //} //} return; } //stress computation---------------------------------------------- __global__ void stress_norm_xy_IC(int nxb1, int nyb1, int nxtop, int nztop, int *nd1_tyy, int *idmat1M, float ca, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M, float *dyh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1yyM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1yyM, float *qt1zzM, float *v1xM, float *v1yM, float *v1zM) { int i,j,k,jkq,kodd,inod,irw; float sxx,syy,szz,sxy,qxx,qyy,qzz,qxy,cusxy,sss,cl,sm2,pm,et,et1,wtp,wts; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[2]; i = 
blockIdx.y * blockDim.y + threadIdx.y + nd1_tyy[8]; if (j > nd1_tyy[3] || i > nd1_tyy[9]) { return; } // for (j = nd1_tyy[2]; j <= nd1_tyy[3]; j++) // { kodd = 2 * ((j + nyb1) & 1) + 1; // for (i = nd1_tyy[8]; i <= nd1_tyy[9]; i++) // { jkq=((i+nxb1) & 1) + kodd; for (k = nd1_tyy[12]; k <= nd1_tyy[17]; k++) { sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+ dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j); syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+ dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1); sxy=dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i, j)+ dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j)+ dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j )+ dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2); if(k==1) { szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)- 9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.0; } else if(k==nztop) { szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j)); } else { szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+ dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j); } inod=idmat1(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; cusxy=sxy/(1./sm2+.5/cmu(idmat1(k,i+1,j+1))); sss=sxx+syy+szz; irw=jkq+4*(k&1); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxx=qt1xx(k,i,j); qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1; t1xx(k,i,j)=t1xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j); qyy=qt1yy(k,i,j); qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1; t1yy(k,i,j)=t1yy(k,i,j)+sm2*syy+cl*sss-qyy-qt1yy(k,i,j); qzz=qt1zz(k,i,j); qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1; t1zz(k,i,j)=t1zz(k,i,j)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j); qxy=qt1xy(k,i,j); qt1xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1; t1xy(k,i,j)=t1xy(k,i,j)+cusxy-qxy-qt1xy(k,i,j); } // } // } return; } //----------------------------------------------------------------------------- __global__ void stress_xz_yz_IC(int nxb1, int nyb1, int nxtop, int nytop, int nztop, int *nd1_tyz, int *idmat1M, float ca, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dyi1M, float *dzh1M, float *v1xM, float *v1yM, float *v1zM, float *t1xzM, float *t1yzM, float *qt1xzM, float *qt1yzM) // Compute stress-XZand YZ component in Region I // use grid_node_comm // use wave_field_comm // implicit NONE // real, parameter:: tfr1=-577./528./ca,tfr2=201./176./ca, & // tfr3=-9./176./ca, tfr4=1./528./ca { // float tfr1 = -577./528./ca; // float tfr2 = 201./176./ca; // float tfr3 = -9./176./ca; // float tfr4=1./528./ca; int i,j,k,kodd,inod,jkq,irw; float dvzx,dvzy,dvxz,dvyz,sm,cusxz,cusyz,et,et1,dmws,qxz,qyz; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd1_tyz[8]; if (j > nd1_tyz[3] || i > nd1_tyz[9]) { return; } // for (j=nd1_tyz[2]; j <=nd1_tyz[3]; j++) // //do j=nd1_tyz(3),nd1_tyz(4) // { //kodd=2*mod(j+nyb1,2)+1 kodd=2*((j+nyb1)&1)+1; // for (i=nd1_tyz[8]; i<=nd1_tyz[9]; i++) // //do i=nd1_tyz(9),nd1_tyz(10) // { //jkq=mod(i+nxb1,2)+kodd jkq=((i+nxb1)&1)+kodd; for (k=nd1_tyz[12]; k<=nd1_tyz[17]; k++) //do k=nd1_tyz(13),nd1_tyz(18) { dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+ dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j); dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+ dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2); if(k<nztop) { dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+ dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j); dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+ dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j); } else { dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j)); 
dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j)); } inod=idmat1(k,i,j); sm=cmu(inod); cusxz=(dvzx+dvxz)/(.5/sm+.5/cmu(idmat1(k-1,i+1,j))); cusyz=(dvzy+dvyz)/(.5/sm+.5/cmu(idmat1(k-1,i,j+1))); //irw=jkq+4*mod(k,2); irw=jkq+4*(k&1); et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxz=qt1xz(k,i,j); qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1; t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j); qyz=qt1yz(k,i,j); qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1; t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j); } // } // } return; } __global__ void stress_resetVars(int ny1p1, int nx1p1, int nxtop, int nytop, int nztop, float *t1xzM, float *t1yzM) { int i, j; j = blockIdx.x * blockDim.x + threadIdx.x - 1; i = blockIdx.y * blockDim.y + threadIdx.y + 1; if (j <= ny1p1 && i <= nxtop) { t1yz(1, i, j) = 0.0f; } // for (j=-1; j<=ny1p1; j++) // { // for (i = 1; i <= nxtop; i++) // { // t1yz(1,i,j)=0.0; // } // } j = j + 2; i = i - 2; if (j <= nytop && i <= nx1p1) { t1xz(1, i, j) = 0.0; } // for (j=1; j <= nytop; j++) // { // for (i=-1; i <=nx1p1; i++) // { // t1xz(1,i,j)=0.0; // } // } return; } //------------------------------------------------------------------------------------ __global__ void stress_norm_PmlX_IC(int nxb1, int nyb1, int nxtop, int nytop, int nztop, int mw1_pml, int mw1_pml1, int lbx0, int lbx1, int *nd1_tyy, int *idmat1M, float ca, float *drti1M, float *damp1_xM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dzi1M, float *dxh1M, float *dyh1M, float *v1xM, float *v1yM, float *v1zM, float *t1xxM, float *t1yyM, float *t1zzM, float *t1xx_pxM, float *t1yy_pxM, float *qt1xxM, float *qt1yyM, float *qt1zzM, float *qt1xx_pxM, float *qt1yy_pxM) // Compute the velocity of PML-x-I region // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw // real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; int nti; //if (lbx[0] > lbx[1]) return; //if ( lbx(1)>lbx(2) ) return; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_tyy[5] || lb > lbx1) { return; } nti = (lbx1 - lbx0 + 1) * mw1_pml + lbx0; // for (j=nd1_tyy[0]; j <= nd1_tyy[5]; j++) // //do j=nd1_tyy(1),nd1_tyy(6) // { kodd=2*((j+nyb1)&1)+1; ib=0; for (k = lbx0; k < lb; k++) { ib++; } // for (lb=lbx[0]; lb <=lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i = nd1_tyy[6+4*lb]; i <= nd1_tyy[7+4*lb]; i++) //do i=nd1_tyy(7+4*lb),nd1_tyy(8+4*lb) { kb=kb+1; ib=ib+1; rti=drti1(kb,lb); jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++) //do k=nd1_tyy(13),nd1_tyy(18) { damp2=1./(1.+damp1_x(k,j,lb)*rti); damp1=damp2*2.0-1.; inod=idmat1(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxx=t1xx(k,i,j)-t1xx_px(k,ib,j); taoyy=t1yy(k,i,j)-t1yy_px(k,ib,j); taozz=t1zz(k,i,j)-t1yy_px(k,ib,j); if(j>nd1_tyy[1] && j<nd1_tyy[4]) { //if(j>nd1_tyy(2) .and. 
j<nd1_tyy(5)) { syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+ dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1); if(k==1) { szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)- 9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.; } else if(k==nztop) { szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j)); } else { szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+ dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j); } sss=syy+szz; qxx=qt1xx(k,i,j); qt1xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1; taoxx=taoxx+cl*sss-qxx-qt1xx(k,i,j); qyy=qt1yy(k,i,j); qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1; taoyy=taoyy+sm2*syy+cl*sss-qyy-qt1yy(k,i,j); qzz=qt1zz(k,i,j); qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1; taozz=taozz+sm2*szz+cl*sss-qzz-qt1zz(k,i,j); } sxx=dxh1(2,i)/ca*(v1x(k,i-1,j)-v1x(k,i,j)); qxx=qt1xx_px(k,ib,j); qt1xx_px(k,ib,j)=qxx*et+wtp*sxx*et1; t1xx_px(k,ib,j)=damp1*t1xx_px(k,ib,j)+ damp2*(pm*sxx-qxx-qt1xx_px(k,ib,j)); t1xx(k,i,j)=taoxx+t1xx_px(k,ib,j); qyy=qt1yy_px(k,ib,j); qt1yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1; t1yy_px(k,ib,j)=damp1*t1yy_px(k,ib,j)+ damp2*(cl*sxx-qyy-qt1yy_px(k,ib,j)); t1yy(k,i,j)=taoyy+t1yy_px(k,ib,j); t1zz(k,i,j)=taozz+t1yy_px(k,ib,j); } } // } // } return; } __global__ void stress_norm_PmlY_IC(int nxb1, int nyb1, int mw1_pml1, int nxtop, int nztop, int lby0, int lby1, int *nd1_tyy, int *idmat1M, float ca, float *drti1M, float *damp1_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M, float *dyh1M, float *dzi1M, float *t1xxM, float *t1yyM, float *t1zzM, float *qt1xxM, float *qt1yyM, float *qt1zzM, float *t1xx_pyM, float *t1yy_pyM, float *qt1xx_pyM, float *qt1yy_pyM, float *v1xM, float *v1yM, float *v1zM) // Compute the velocity of PML-x-I region // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw // real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; //if(lby[0]>lby[1]) return; //if(lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_tyy[11] || lb > lby1) { return; } // for (i = nd1_tyy[6]; i <= nd1_tyy[11]; i++) // //do i=nd1_tyy(7),nd1_tyy(12) // { jb = 0; for (k = 0; k < lb; k++) { for (j = nd1_tyy[4*k]; j <= nd1_tyy[1+4*k]; j++) { jb++; } } // for (lb=lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j = nd1_tyy[4*lb]; j <= nd1_tyy[1+4*lb]; j++) //do j=nd1_tyy(1+4*lb),nd1_tyy(2+4*lb) { kb=kb+1; jb=jb+1; rti=drti1(kb,lb); kodd=2 * ((j + nyb1) & 1) + 1; //kodd=2*mod(j+nyb1,2)+1 jkq = ((i + nxb1) & 1) + kodd; //jkq=mod(i+nxb1,2)+kodd for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++) //do k=nd1_tyy(13),nd1_tyy(18) { damp2=1./(1.+damp1_y(k,i,lb)*rti); damp1=damp2*2.-1.; inod=idmat1(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; //irw=jkq+4*mod(k,2) irw=jkq + 4 * (k & 1); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); if (i>nd1_tyy[7] && i<nd1_tyy[10]) { //if(i>nd1_tyy(8) .and. 
i<nd1_tyy(11)) then sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+ dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j); } else { sxx=0.0; } if(k==1) { szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)- 9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.; } else if(k==nztop) { szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j)); } else { szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+ dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j); } sss=sxx+szz; qxx=qt1xx(k,i,j); qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1; taoxx=t1xx(k,i,j)-t1xx_py(k,i,jb)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j); qyy=qt1yy(k,i,j); qt1yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1; taoyy=t1yy(k,i,j)-t1yy_py(k,i,jb)+cl*sss-qyy-qt1yy(k,i,j); qzz=qt1zz(k,i,j); qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1; taozz=t1zz(k,i,j)-t1xx_py(k,i,jb)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j); syy=dyh1(2,j)/ca*(v1y(k,i,j-1)-v1y(k,i,j)); qxx=qt1xx_py(k,i,jb); qt1xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1; t1xx_py(k,i,jb)=damp1*t1xx_py(k,i,jb)+ damp2*(cl*syy-qxx-qt1xx_py(k,i,jb)); t1xx(k,i,j)=taoxx+t1xx_py(k,i,jb); t1zz(k,i,j)=taozz+t1xx_py(k,i,jb); qyy=qt1yy_py(k,i,jb); qt1yy_py(k,i,jb)=qyy*et+wtp*syy*et1; t1yy_py(k,i,jb)=damp1*t1yy_py(k,i,jb)+ damp2*(pm*syy-qyy-qt1yy_py(k,i,jb)); t1yy(k,i,j)=taoyy+t1yy_py(k,i,jb); } } // } // } return; } __global__ void stress_xy_PmlX_IC(int nxb1, int nyb1, int mw1_pml, int mw1_pml1, int nxtop, int nytop, int nztop, int lbx0, int lbx1, int *nd1_txy, int *idmat1M, float ca, float *drth1M, float *damp1_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dyi1M, float *t1xyM, float *qt1xyM, float *t1xy_pxM, float *qt1xy_pxM, float *v1xM, float *v1yM) // Compute the Stress-xy at region of PML-x-I // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw // real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1; int nth; nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_txy[5] || lb > lbx1) { return; } ib = 0; for (k = lbx0; k < lb; k++) { for (i = nd1_txy[6+4*k]; i <= nd1_txy[7+4*k]; i++) { ib++; } } //if (lbx[0] > lbx[1]) return; //if ( lbx(1)>lbx(2) ) return // for (j = nd1_txy[0]; j <= nd1_txy[5]; j++) // //do j=nd1_txy(1),nd1_txy(6) // { kodd = 2 * ((j + nyb1) & 1) + 1; //kodd=2*mod(j+nyb1,2)+1 // ib=0; // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i = nd1_txy[6+4*lb]; i <= nd1_txy[7+4*lb]; i++) //do i=nd1_txy(7+4*lb),nd1_txy(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth1(kb,lb); jkq=((i + nxb1) & 1) + kodd; //jkq=mod(i+nxb1,2)+kodd; for (k = nd1_txy[12]; k <= nd1_txy[17]; k++) //do k=nd1_txy(13),nd1_txy(18) { damp2=1./(1.+damp1_x(k,j,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1))); irw=jkq + 4 * (k & 1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t1xy(k,i,j)-t1xy_px(k,ib,j); if(j > nd1_txy[1] && j<nd1_txy[4]) { //if(j>nd1_txy(2) .and. 
j<nd1_txy(5)) then cusxy=(dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j)+ dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2))*sm; qxy=qt1xy(k,i,j); qt1xy(k,i,j)=qxy*et+dmws*cusxy*et1; taoxy=taoxy+cusxy-qxy-qt1xy(k,i,j); } cusxy=sm*dxi1(2,i)/ca*(v1y(k,i,j)-v1y(k,i+1,j)); qxy=qt1xy_px(k,ib,j); qt1xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1; t1xy_px(k,ib,j)=damp1*t1xy_px(k,ib,j)+ damp2*(cusxy-qxy-qt1xy_px(k,ib,j)); t1xy(k,i,j)=taoxy+t1xy_px(k,ib,j); } } // } // } return; } __global__ void stress_xy_PmlY_IC(int nxb1, int nyb1, int mw1_pml1, int nxtop, int nztop, int lby0, int lby1, int *nd1_txy, int *idmat1M, float ca, float *drth1M, float *damp1_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dyi1M, float *t1xyM, float *qt1xyM, float *t1xy_pyM, float *qt1xy_pyM, float *v1xM, float *v1yM) //Compute the Stress-xy at region of PML-y-I //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_txy[11] || lb > lby1) { return; } // for (i = nd1_txy[6]; i <= nd1_txy[11]; i++) // //do i=nd1_txy(7),nd1_txy(12) // { jb=0; for (k = lby0; k < lb; k++) { for (j = nd1_txy[4*k]; j <= nd1_txy[1 + 4 * k]; j++) { kb++; } } // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1), lby(2) // { kb=0; for (j = nd1_txy[4*lb]; j <= nd1_txy[1 + 4 * lb]; j++) //do j=nd1_txy(1+4*lb),nd1_txy(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth1(kb,lb); kodd=2 * ((j + nyb1) & 1) + 1; //kodd=2*mod(j+nyb1,2)+1; jkq=((i + nxb1) & 1) + kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_txy[12]; k <= nd1_txy[17]; k++) //do k=nd1_txy(13),nd1_txy(18) { damp2=1./(1.+damp1_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t1xy(k,i,j)-t1xy_py(k,i,jb); if(i > nd1_txy[7] && i<nd1_txy[10]) { //if(i>nd1_txy(8) .and. 
i<nd1_txy(11)) then cusyx=(dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i,j)+ dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j))*sm; qxy=qt1xy(k,i,j); qt1xy(k,i,j)=qxy*et+dmws*cusyx*et1; taoxy=taoxy+cusyx-qxy-qt1xy(k,i,j); } cusyx=sm*dyi1(2,j)/ca*(v1x(k,i,j)-v1x(k,i,j+1)); qxy=qt1xy_py(k,i,jb); qt1xy_py(k,i,jb)=qxy*et+dmws*cusyx*et1; t1xy_py(k,i,jb)=damp1*t1xy_py(k,i,jb)+ damp2*(cusyx-qxy-qt1xy_py(k,i,jb)); t1xy(k,i,j)=taoxy+t1xy_py(k,i,jb); } } // } // } return; } __global__ void stress_xz_PmlX_IC(int nxb1, int nyb1, int nxtop, int nytop, int nztop, int mw1_pml, int mw1_pml1, int lbx0, int lbx1, int *nd1_txz, int *idmat1M, float ca, float *drth1M, float *damp1_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dzh1M, float *t1xzM, float *qt1xzM, float *t1xz_pxM, float *qt1xz_pxM, float *v1xM, float *v1zM) //Compute the stress-xz at PML-x-I region //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1; int nth; //if (lbx[0] > lbx[1]) return; //if ( lbx(1)>lbx(2) ) return nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_txz[5] || lb > lbx1) { return; } // for (j = nd1_txz[0]; j <= nd1_txz[5]; j++) // //do j=nd1_txz(1),nd1_txz(6) // { kodd=2 * ((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1 ib=0; for (k = lbx0; k < lb; k++) { for (i = nd1_txz[6+4*k]; i <= nd1_txz[7+4*k]; i++) { ib++; } } // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i = nd1_txz[6+4*lb]; i <= nd1_txz[7+4*lb]; i++) //do i=nd1_txz(7+4*lb),nd1_txz(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth1(kb,lb); jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_txz[12]; k <= nd1_txz[17]; k++) //do k=nd1_txz(13),nd1_txz(18) { damp2=1./(1.+damp1_x(k,j,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); if(k<nztop) { dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+ dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j); } else { dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j)); } cusxz=dvxz*sm; qxz=qt1xz(k,i,j); qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1; taoxz=t1xz(k,i,j)-t1xz_px(k,ib,j)+cusxz-qxz-qt1xz(k,i,j); cusxz=sm*dxi1(2,i)/ca*(v1z(k,i,j)-v1z(k,i+1,j)); qxz=qt1xz_px(k,ib,j); qt1xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1; t1xz_px(k,ib,j)=damp1*t1xz_px(k,ib,j)+ damp2*(cusxz-qxz-qt1xz_px(k,ib,j)); t1xz(k,i,j)=taoxz+t1xz_px(k,ib,j); } } // } // } return; } __global__ void stress_xz_PmlY_IC(int nxb1, int nyb1, int nxtop, int nztop, int lby0, int lby1, int *nd1_txz, int *idmat1M, float ca, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dzh1M, float *t1xzM, float *qt1xzM, float *v1xM, float *v1zM) //Compute the stress-xz at PML-y-I region //use grid_node_comm //use wave_field_comm //implicit NONE { int i,j,k,lb,kodd,jkq,inod,irw; float cusxz,dvxz,dvzx,qxz,sm,dmws,et,et1; //if (lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[8]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_txz[9] || lb > lby1) { return; } // for (i = nd1_txz[8]; i <= nd1_txz[9]; i++) // //do i=nd1_txz(9),nd1_txz(10) // { // for (lb=lby[0]; lb <= lby[1]; 
lb++) // //do lb=lby(1),lby(2) // { for (j = nd1_txz[4*lb]; j <= nd1_txz[1+4*lb]; j++) //do j=nd1_txz(1+4*lb),nd1_txz(2+4*lb) { kodd=2 * ((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1 jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_txz[12]; k <= nd1_txz[17]; k++) //do k=nd1_txz(13),nd1_txz(18) { inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+ dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j); if(k<nztop) { dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+ dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j); } else { dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j)); } cusxz=(dvzx+dvxz)*sm; qxz=qt1xz(k,i,j); qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1; t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j); } } // } // } return; } __global__ void stress_yz_PmlX_IC(int nxb1, int nyb1, int nztop, int nxtop, int lbx0, int lbx1, int *nd1_tyz, int *idmat1M, float ca, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi1M, float *dzh1M, float *t1yzM, float *qt1yzM, float *v1yM, float *v1zM) //Compute the stress-yz at PML-x-I region //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1; //if(lbx[0] > lbx[1]) return; //if(lbx(1)>lbx(2) ) return j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_tyz[3] || lb > lbx1) { return; } // for (j = nd1_tyz[2]; j <= nd1_tyz[3]; j++) // //do j=nd1_tyz(3),nd1_tyz(4) // { kodd=2 * ((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1 // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { for (i = nd1_tyz[6+4*lb]; i <= nd1_tyz[7+4*lb]; i++) //do i=nd1_tyz(7+4*lb),nd1_tyz(8+4*lb) { jkq = ((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_tyz[12]; k <= nd1_tyz[17]; k++) //do k=nd1_tyz(13),nd1_tyz(18) { inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+ dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2); if(k<nztop) { dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+ dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j); } else { dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j)); } cusyz=(dvzy+dvyz)*sm; qyz=qt1yz(k,i,j); qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1; t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j); } } // } // } return; } __global__ void stress_yz_PmlY_IC(int nxb1, int nyb1, int mw1_pml1, int nxtop, int nztop, int lby0, int lby1, int *nd1_tyz, int *idmat1M, float ca, float *drth1M, float *damp1_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi1M, float *dzh1M, float *t1yzM, float *qt1yzM, float *t1yz_pyM, float *qt1yz_pyM, float *v1yM, float *v1zM) //Compute the stress-yz at PML-y-I region //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_tyz[11] || lb > lby1) { return; 
} // for (i = nd1_tyz[6]; i <= nd1_tyz[11]; i++) // //do i=nd1_tyz(7),nd1_tyz(12) // { jb=0; for (k = lby0; k < lb; k++) { for (j = nd1_tyz[4*k]; j <= nd1_tyz[1+4*k]; j++) { jb++; } } // for (lb=lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j = nd1_tyz[4*lb]; j <= nd1_tyz[1+4*lb]; j++) //do j=nd1_tyz(1+4*lb),nd1_tyz(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth1(kb,lb); kodd=2*((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1; jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k=nd1_tyz[12]; k <= nd1_tyz[17]; k++) //do k=nd1_tyz(13),nd1_tyz(18) { damp2=1./(1.+damp1_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); if(k<nztop) { dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+ dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j); } else { dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j)); } cusyz=dvyz*sm; qyz=qt1yz(k,i,j); qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1; taoyz=t1yz(k,i,j)-t1yz_py(k,i,jb)+cusyz-qyz-qt1yz(k,i,j); cusyz=sm*dyi1(2,j)/ca*(v1z(k,i,j)-v1z(k,i,j+1)); qyz=qt1yz_py(k,i,jb); qt1yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1; t1yz_py(k,i,jb)=damp1*t1yz_py(k,i,jb)+ damp2*(cusyz-qyz-qt1yz_py(k,i,jb)); t1yz(k,i,j)=taoyz+t1yz_py(k,i,jb); } } // } // } return; } __global__ void stress_norm_xy_II(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int *nd2_tyy, int *idmat2M, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *t2xxM, float *t2xyM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2xyM, float *qt2yyM, float *qt2zzM, float *dxh2M, float *dyh2M, float *dxi2M, float *dyi2M, float *dzi2M, float *v2xM, float *v2yM, float *v2zM) // Compute stress-Norm and XY component in Region II // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,kodd,inod,jkq,irw // real:: sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy, & // cl,sm2,et,et1,dmws,pm,wtp,wts { int i,j,k,kodd,inod,jkq,irw; float sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy,cl,sm2,et,et1,dmws,pm,wtp,wts; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[8]; if (j > nd2_tyy[3] || i > nd2_tyy[9]) { return; } // for (j=nd2_tyy[2]; j <= nd2_tyy[3]; j++) // //do j=nd2_tyy(3),nd2_tyy(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_tyy[8]; i <= nd2_tyy[9]; i++) // //do i=nd2_tyy(9),nd2_tyy(10) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_tyy[12]; k <= nd2_tyy[15]; k++) //do k=nd2_tyy(13),nd2_tyy(16) { sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+ dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1); sxy=dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+ dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+ dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+ dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2); szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+ dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j); sss=sxx+syy+szz; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; cusxy=sxy/(1./sm2+.5/cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1; t2xx(k,i,j)=t2xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); 
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1; t2yy(k,i,j)=t2yy(k,i,j)+sm2*syy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1; t2zz(k,i,j)=t2zz(k,i,j)+sm2*szz+cl*sss-qzz-qt2zz(k,i,j); qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1; t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j); } // } // } return; } //call stress_xz_yz_II __global__ void stress_xz_yz_IIC(int nxb2, int nyb2, int nztop, int nxbtm, int nzbtm, int *nd2_tyz, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *dzh2M, float *t2xzM, float *t2yzM, float *qt2xzM, float *qt2yzM, float *v2xM, float *v2yM, float *v2zM) //Compute stress-XZ and YZ component in the Region II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,kodd,inod,jkq,irw //real:: qxz,qyz,cusxz,cusyz,sm,et,et1,dmws { int i,j,k,kodd,inod,jkq,irw; float qxz,qyz,cusxz,cusyz,sm,et,et1,dmws; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[8]; if (j > nd2_tyz[3] || i > nd2_tyz[9]) { return; } // for (j = nd2_tyz[2]; j <= nd2_tyz[3]; j++) // //do j=nd2_tyz(3),nd2_tyz(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_tyz[8]; i <= nd2_tyz[9]; i++) // //do i=nd2_tyz(9),nd2_tyz(10) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k=nd2_tyz[12]; k <= nd2_tyz[15]; k++) //do k=nd2_tyz(13),nd2_tyz(16) { inod=idmat2(k,i,j); sm=cmu(inod); cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+ dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j)+ dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+ dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j))/ (.5/sm+.5/cmu(idmat2(k-1,i+1,j))); cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+ dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+ dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+ dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/ (.5/sm+.5/cmu(idmat2(k-1,i,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j); qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j); } // } // } return; } //call stress_norm_PmlX_II __global__ void stress_norm_PmlX_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nztop, int nxbtm, int nybtm, int nzbtm, int lbx0, int lbx1, int *nd2_tyy, int *idmat2M, float ca, float *drti2M, float *damp2_xM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh2M, float *dyh2M, float *dzi2M, float *t2xxM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2yyM, float *qt2zzM, float *t2xx_pxM, float *t2yy_pxM, float *qt2xx_pxM, float *qt2yy_pxM, float *v2xM, float *v2yM, float *v2zM) //Compute the Stress-norm at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; int nti; //if(lbx[0] > lbx[1]) return; //if( lbx(1)>lbx(2) ) return nti = (lbx1 - lbx0 + 1) * mw2_pml + lbx1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_tyy[5] || lb > lbx1) { return; } ib = 0; for 
(k = lbx0; k < lb; k++) { for (i=nd2_tyy[6+4*k]; i <= nd2_tyy[7+4*k]; i++) { ib++; } } // for (j=nd2_tyy[0]; j <= nd2_tyy[5]; j++) // //do j=nd2_tyy(1),nd2_tyy(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // ib=0; // for (lb=lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i=nd2_tyy[6+4*lb]; i <= nd2_tyy[7+4*lb]; i++) //do i=nd2_tyy(7+4*lb),nd2_tyy(8+4*lb) { kb=kb+1; ib=ib+1; rti=drti2(kb,lb); jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd; for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++) //do k=nd2_tyy(13),nd2_tyy(18) { damp2=1./(1.+damp2_x(k,j,lb)*rti); damp1=damp2*2.0-1.0; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxx=t2xx(k,i,j)-t2xx_px(k,ib,j); taoyy=t2yy(k,i,j)-t2yy_px(k,ib,j); taozz=t2zz(k,i,j)-t2yy_px(k,ib,j); if(j>nd2_tyy[1] && j<nd2_tyy[4]) { //if(j>nd2_tyy(2) .and. j<nd2_tyy(5)) { syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+ dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1); if(k<nd2_tyy[16]) { //if(k<nd2_tyy(17)) { szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+ dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j); } else { szz=0.0; } sss=syy+szz; qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1; taoxx=taoxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1; taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1; taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j); } sxx=dxh2(2,i)/ca*(v2x(k,i-1,j)-v2x(k,i,j)); qxx=qt2xx_px(k,ib,j); qt2xx_px(k,ib,j)=qxx*et+wtp*sxx*et1; t2xx_px(k,ib,j)=damp1*t2xx_px(k,ib,j)+ damp2*(pm*sxx-qxx-qt2xx_px(k,ib,j)); t2xx(k,i,j)=taoxx+t2xx_px(k,ib,j); qyy=qt2yy_px(k,ib,j); qt2yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1; t2yy_px(k,ib,j)=damp1*t2yy_px(k,ib,j)+ damp2*(cl*sxx-qyy-qt2yy_px(k,ib,j)); t2yy(k,i,j)=taoyy+t2yy_px(k,ib,j); t2zz(k,i,j)=taozz+t2yy_px(k,ib,j); } } // } // } return; } __global__ void stress_norm_PmlY_II(int nxb2, int nyb2, int nztop, int nxbtm, int nzbtm, int mw2_pml1, int lby0, int lby1, int *nd2_tyy, int *idmat2M, float ca, float *drti2M, float *damp2_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh2M, float *dyh2M, float *dzi2M, float *t2xxM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2yyM, float *qt2zzM, float *t2xx_pyM, float *t2yy_pyM, float *qt2xx_pyM, float *qt2yy_pyM, float *v2xM, float *v2yM, float *v2zM) //Compute the stress-norm at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; //if( lby[0] > lby[1] ) return; //if( lby(1)>lby(2) ) return; i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_tyy[11] || lb > lby1) { return; } jb = 0; for (k = lby0; k < lb; k++) { for (j=nd2_tyy[4*k]; j <= nd2_tyy[1+4*k]; j++) { jb++; } } // for (i = nd2_tyy[6]; i <= nd2_tyy[11]; i++) // //do i=nd2_tyy(7),nd2_tyy(12) // { // jb=0; // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j=nd2_tyy[4*lb]; j <= nd2_tyy[1+4*lb]; j++) //do j=nd2_tyy(1+4*lb),nd2_tyy(2+4*lb) { 
kb=kb+1; jb=jb+1; rti=drti2(kb,lb); kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1; jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++) //do k=nd2_tyy(13),nd2_tyy(18) { damp2=1./(1.+damp2_y(k,i,lb)*rti); damp1=damp2*2.0-1.; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxx=t2xx(k,i,j)-t2xx_py(k,i,jb); taoyy=t2yy(k,i,j)-t2yy_py(k,i,jb); taozz=t2zz(k,i,j)-t2xx_py(k,i,jb); if(k<nd2_tyy[16]) { //if(k<nd2_tyy(17)) then szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+ dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j); if(i>nd2_tyy[7] && i<nd2_tyy[10]) { //if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) { sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); } else { sxx=0.0; } sss=sxx+szz; qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1; taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1; taoyy=taoyy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1; taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j); } else { if(i>nd2_tyy[7] && i<nd2_tyy[10]) { //if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) then sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+wtp*sxx*et1; taoxx=taoxx+pm*sxx-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp-wts)*sxx*et1; taoyy=taoyy+cl*sxx-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp-wts)*sxx*et1; taozz=taozz+cl*sxx-qzz-qt2zz(k,i,j); } } syy=dyh2(2,j)/ca*(v2y(k,i,j-1)-v2y(k,i,j)); qxx=qt2xx_py(k,i,jb); qt2xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1; t2xx_py(k,i,jb)=damp1*t2xx_py(k,i,jb)+damp2*(cl*syy-qxx-qt2xx_py(k,i,jb)); t2xx(k,i,j)=taoxx+t2xx_py(k,i,jb); t2zz(k,i,j)=taozz+t2xx_py(k,i,jb); qyy=qt2yy_py(k,i,jb); qt2yy_py(k,i,jb)=qyy*et+wtp*syy*et1; t2yy_py(k,i,jb)=damp1*t2yy_py(k,i,jb)+damp2*(pm*syy-qyy-qt2yy_py(k,i,jb)); t2yy(k,i,j)=taoyy+t2yy_py(k,i,jb); } } // } // } return; } __global__ void stress_norm_PmlZ_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nztop, int nxbtm, int nzbtm, int *nd2_tyy, int *idmat2M, float ca, float *damp2_zM, float *drth2M, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh2M, float *dyh2M, float *dzi2M, float *t2xxM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2yyM, float *qt2zzM, float *t2xx_pzM, float *t2zz_pzM, float *qt2xx_pzM, float *qt2zz_pzM, float *v2xM, float *v2yM, float *v2zM) //Compute the stress-norm at region of PML-z-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kb,kodd,jkq,inod,irw //real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[6]; if (j > nd2_tyy[5] || i > nd2_tyy[11]) { return; } // for (j = nd2_tyy[0]; j <= nd2_tyy[5]; j++) // //do j=nd2_tyy(1),nd2_tyy(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i=nd2_tyy[6]; i <= nd2_tyy[11]; i++) // //do i=nd2_tyy(7),nd2_tyy(12) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd kb=0; for (k = nd2_tyy[16]; k <= nd2_tyy[17]; k++) 
//do k=nd2_tyy(17),nd2_tyy(18) { kb=kb+1; damp2=1./(1.+damp2_z(i,j)*drth2(kb,1)); damp1=damp2*2.-1.; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxx=t2xx(k,i,j)-t2xx_pz(kb,i,j); taoyy=t2yy(k,i,j)-t2xx_pz(kb,i,j); taozz=t2zz(k,i,j)-t2zz_pz(kb,i,j); if(i>nd2_tyy[7] && i<nd2_tyy[10] && j>nd2_tyy[1] && j<nd2_tyy[4]) { //if(i>nd2_tyy(8) .and. i<nd2_tyy(11) .and. & // j>nd2_tyy(2) .and. j<nd2_tyy(5)) then sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+ dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1); sss=sxx+syy; qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*syy)*et1; taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*sxx)*et1; taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp-wts)*sss*et1; taozz=taozz+cl*sss-qzz-qt2zz(k,i,j); } szz=dzi2(2,k)/ca*(v2z(k,i,j)-v2z(k+1,i,j)); qxx=qt2xx_pz(kb,i,j); qt2xx_pz(kb,i,j)=qxx*et+(wtp-wts)*szz*et1; t2xx_pz(kb,i,j)=damp1*t2xx_pz(kb,i,j)+ damp2*(cl*szz-qxx-qt2xx_pz(kb,i,j)); t2xx(k,i,j)=taoxx+t2xx_pz(kb,i,j); t2yy(k,i,j)=taoyy+t2xx_pz(kb,i,j); qzz=qt2zz_pz(kb,i,j); qt2zz_pz(kb,i,j)=qzz*et+wtp*szz*et1; t2zz_pz(kb,i,j)=damp1*t2zz_pz(kb,i,j)+ damp2*(pm*szz-qzz-qt2zz_pz(kb,i,j)); t2zz(k,i,j)=taozz+t2zz_pz(kb,i,j); } // } // } return; } __global__ void stress_xy_PmlX_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nxbtm, int nybtm, int nzbtm, int nztop, int lbx0, int lbx1, int *nd2_txy, int *idmat2M, float ca, float *drth2M, float *damp2_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *t2xyM, float *qt2xyM, float *t2xy_pxM, float *qt2xy_pxM, float *v2xM, float *v2yM) //Compute the Stress-xy at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1; int nth; //if(lbx[0] > lbx[1]) return; nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0; //nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1) j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_txy[5] || lb > lbx1) { return; } ib = 0; for (k = lbx0; k < lb; k++) { for (i=nd2_txy[6+4*k]; i <= nd2_txy[7+4*k]; i++) { ib++; } } // for (j = nd2_txy[0]; j <= nd2_txy[5]; j++) // //do j=nd2_txy(1),nd2_txy(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // ib=0; // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i=nd2_txy[6+4*lb]; i <= nd2_txy[7+4*lb]; i++) //do i=nd2_txy(7+4*lb),nd2_txy(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth2(kb,lb); jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txy[12]; k <= nd2_txy[17]; k++) //do k=nd2_txy(13),nd2_txy(18) { damp2=1./(1.+damp2_x(k,j,lb)*rth); damp1=damp2*2.0-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t2xy(k,i,j)-t2xy_px(k,ib,j); if(j > nd2_txy[1] && j<nd2_txy[4]) { //if(j>nd2_txy(2) .and. 
j<nd2_txy(5)) then cusxy=(dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j)+ dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm; qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1; taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j); } cusxy=sm*dxi2(2,i)/ca*(v2y(k,i,j)-v2y(k,i+1,j)); qxy=qt2xy_px(k,ib,j); qt2xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1; t2xy_px(k,ib,j)=damp1*t2xy_px(k,ib,j)+ damp2*(cusxy-qxy-qt2xy_px(k,ib,j)); t2xy(k,i,j)=taoxy+t2xy_px(k,ib,j); } } // } // } return; } __global__ void stress_xy_PmlY_IIC(int nxb2, int nyb2, int mw2_pml1, int nztop, int nxbtm, int nzbtm, int lby0, int lby1, int *nd2_txy, int *idmat2M, float ca, float *drth2M, float *damp2_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *t2xyM, float *qt2xyM, float *t2xy_pyM, float *qt2xy_pyM, float *v2xM, float *v2yM) //Compute the Stress-xy at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_txy[11] || lb > lby1) { return; } jb = 0; for (k = lby0; k < lb; k++) { for (j=nd2_txy[4*k]; j <= nd2_txy[1+4*k]; j++) { jb++; } } // for (i = nd2_txy[6]; i <= nd2_txy[11]; i++) // //do i=nd2_txy(7),nd2_txy(12) // { // jb=0; // for (lb=lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j=nd2_txy[4*lb]; j <= nd2_txy[1+4*lb]; j++) //do j=nd2_txy(1+4*lb),nd2_txy(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth2(kb,lb); kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txy[12]; k <= nd2_txy[17]; k++) //do k=nd2_txy(13),nd2_txy(18) { damp2=1./(1.+damp2_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t2xy(k,i,j)-t2xy_py(k,i,jb); if(i>nd2_txy[7] && i<nd2_txy[10]) { //if(i>nd2_txy(8) .and. 
i<nd2_txy(11)) then cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i,j)+ dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j))*sm; qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1; taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j); } cusxy=sm*dyi2(2,j)/ca*(v2x(k,i,j)-v2x(k,i,j+1)); qxy=qt2xy_py(k,i,jb); qt2xy_py(k,i,jb)=qxy*et+dmws*cusxy*et1; t2xy_py(k,i,jb)=damp1*t2xy_py(k,i,jb)+ damp2*(cusxy-qxy-qt2xy_py(k,i,jb)); t2xy(k,i,j)=taoxy+t2xy_py(k,i,jb); } } // } // } return; } __global__ void stress_xy_PmlZ_II(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int *nd2_txy, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *t2xyM, float *qt2xyM, float *v2xM, float *v2yM) //Compute the Stress-xy at region of PML-z-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: cusxy,qxy,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float cusxy,qxy,sm,dmws,et,et1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txy[8]; if (j > nd2_txy[3] || i > nd2_txy[9]) { return; } // for (j = nd2_txy[2]; j <= nd2_txy[3]; j++) // //do j=nd2_txy(3),nd2_txy(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_txy[8]; i <= nd2_txy[9]; i++) // //do i=nd2_txy(9),nd2_txy(10) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k=nd2_txy[16]; k <= nd2_txy[17]; k++) //do k=nd2_txy(17),nd2_txy(18) { inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2); et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+ dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+ dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+ dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm; qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1; t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j); } // } // } return; } __global__ void stress_xz_PmlX_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nxbtm, int nybtm, int nzbtm, int nztop, int lbx0, int lbx1, int *nd2_txz, int *idmat2M, float ca, float *drth2M, float *damp2_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dzh2M, float *t2xzM, float *qt2xzM, float *t2xz_pxM, float *qt2xz_pxM, float *v2xM, float *v2zM) //Compute the stress-xz at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1; int nth; //if(lbx[0] > lbx[1]) return; nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_txz[5] || lb > lbx1) { return; } ib=0; for (k = lbx0; k < lb; k++) { for (i=nd2_txz[6+4*k]; i <= nd2_txz[7+4*k]; i++) { ib++; } } // for (j = nd2_txz[0]; j <= nd2_txz[5]; j++) // //do j=nd2_txz(1),nd2_txz(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // ib=0; // for (lb=lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i=nd2_txz[6+4*lb]; i <= nd2_txz[7+4*lb]; i++) //do i=nd2_txz(7+4*lb),nd2_txz(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth2(kb,lb); jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txz[12]; k <= nd2_txz[17]; k++) //do k=nd2_txz(13),nd2_txz(18) { damp2=1./(1.+damp2_x(k,j,lb)*rth); damp1=damp2*2.-1.; 
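// damp2 = 1/(1 + d*rth) scales the new PML contribution and damp1 = 2*damp2 - 1 is
// the matching recursion coefficient, so the split stress below is advanced as
//   t2xz_px = damp1*t2xz_px + damp2*(cusxz - qxz - qt2xz_px);
// every PML kernel in this file uses the same damp1/damp2 pair.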
inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxz=t2xz(k,i,j)-t2xz_px(k,ib,j); if(k < nd2_txz[16]) { //if(k<nd2_txz(17)) then cusxz=(dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+ dzh2(3,k)*v2x(k,i,j)+dzh2(4,k)*v2x(k+1,i,j))*sm; qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j); } cusxz=sm*dxi2(2,i)/ca*(v2z(k,i,j)-v2z(k,i+1,j)); qxz=qt2xz_px(k,ib,j); qt2xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1; t2xz_px(k,ib,j)=damp1*t2xz_px(k,ib,j)+ damp2*(cusxz-qxz-qt2xz_px(k,ib,j)); t2xz(k,i,j)=taoxz+t2xz_px(k,ib,j); } } // } // } return; } __global__ void stress_xz_PmlY_IIC(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int lby0, int lby1, int *nd2_txz, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dzh2M, float *v2xM, float *v2zM, float *t2xzM, float *qt2xzM) //Compute the stress-xz at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[8]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_txz[9] || lb > lby1) { return; } // for (i = nd2_txz[8]; i <= nd2_txz[9]; i++) // //do i=nd2_txz(9),nd2_txz(10) // { // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { for (j=nd2_txz[4*lb]; j <= nd2_txz[1+4*lb]; j++) //do j=nd2_txz(1+4*lb),nd2_txz(2+4*lb) { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txz[12]; k <= nd2_txz[15]; k++) //do k=nd2_txz(13),nd2_txz(16) { inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); dvzx=dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+ dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j); dvxz=dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+ dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j); cusxz=(dvzx+dvxz)*sm; qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j); } } // } // } return; } __global__ void stress_xz_PmlZ_IIC(int nxb2, int nyb2, int mw2_pml1, int nxbtm, int nzbtm, int nztop, int *nd2_txz, int *idmat2M, float ca, float *drti2M, float *damp2_zM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dzh2M, float *t2xzM, float *qt2xzM, float *t2xz_pzM, float *qt2xz_pzM, float *v2xM, float *v2zM) //Compute the stress-xz at region of PML-z-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kb,kodd,jkq,inod,irw //real:: taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,kb,kodd,jkq,inod,irw; float taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txz[6]; if (j > nd2_txz[5] || i > nd2_txz[11]) { return; } // for (j = nd2_txz[0]; j <= nd2_txz[5]; j++) // //do j=nd2_txz(1),nd2_txz(6) // { kodd = 2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_txz[6]; i <= nd2_txz[11]; i++) // //do i=nd2_txz(7),nd2_txz(12) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd kb=0; for (k = nd2_txz[16]; k <= nd2_txz[17]; 
k++) //do k=nd2_txz(17),nd2_txz(18) { kb=kb+1; damp2=1./(1.+damp2_z(i,j)*drti2(kb,1)); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxz=t2xz(k,i,j)-t2xz_pz(kb,i,j); if(i > nd2_txz[7] && i<nd2_txz[10]) { //if(i>nd2_txz(8) .and. i<nd2_txz(11)) then cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+ dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j))*sm; qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j); } cusxz=sm*dzh2(2,k)/ca*(v2x(k-1,i,j)-v2x(k,i,j)); qxz=qt2xz_pz(kb,i,j); qt2xz_pz(kb,i,j)=qxz*et+dmws*cusxz*et1; t2xz_pz(kb,i,j)=damp1*t2xz_pz(kb,i,j)+ damp2*(cusxz-qxz-qt2xz_pz(kb,i,j)); t2xz(k,i,j)=taoxz+t2xz_pz(kb,i,j); } // } // } return; } //call stress_yz_PmlX_II __global__ void stress_yz_PmlX_IIC(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int lbx0, int lbx1, int *nd2_tyz, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi2M, float *dzh2M, float *t2yzM, float *qt2yzM, float *v2yM, float *v2zM) //Compute the stress-yz at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: cusyz,qyz,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float cusyz,qyz,sm,dmws,et,et1; //if(lbx[0] > lbx[1]) return; //if( lbx(1)>lbx(2) ) return j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_tyz[3] || lb > lbx1) { return; } // for (j=nd2_tyz[2]; j <= nd2_tyz[3]; j++) // //do j=nd2_tyz(3),nd2_tyz(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { for (i = nd2_tyz[6+4*lb]; i <= nd2_tyz[7+4*lb]; i++) //do i=nd2_tyz(7+4*lb),nd2_tyz(8+4*lb) { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_tyz[12]; k <= nd2_tyz[15]; k++) //do k=nd2_tyz(13),nd2_tyz(16) { inod=idmat2(k,i,j); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+ dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+ dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+ dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/ (.5/cmu(inod)+.5/cmu(idmat2(k-1,i,j+1))); qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j); } } // } // } return; } //call stress_yz_PmlY_II __global__ void stress_yz_PmlY_IIC(int nxb2, int nyb2, int mw2_pml1, int nxbtm, int nzbtm, int nztop, int lby0, int lby1, int *nd2_tyz, int *idmat2M, float ca, float *drth2M, float *damp2_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi2M, float *dzh2M, float *t2yzM, float *qt2yzM, float *t2yz_pyM, float *qt2yz_pyM, float *v2yM, float *v2zM) //Compute the stress-yz at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_tyz[11] || lb > lby1) { return; } jb = 0; for (k = lby0; k < lb; k++) { for (j = nd2_tyz[4*k]; j <= nd2_tyz[1+4*k]; j++) { 
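// Count the j indices of every lower lb slab to recover this thread's jb offset into
// the PML split-field arrays (t2yz_py, qt2yz_py); the original serial loops, kept in
// the comments below, accumulated jb across the outer lb loop, which the flat 2-D
// thread mapping cannot carry incrementally.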
jb++; } } // for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++) // //do i=nd2_tyz(7),nd2_tyz(12) // { // jb=0; // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j = nd2_tyz[4*lb]; j <= nd2_tyz[1+4*lb]; j++) //do j=nd2_tyz(1+4*lb),nd2_tyz(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth2(kb,lb); kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 jkq = ((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_tyz[12]; k <= nd2_tyz[17]; k++) //do k=nd2_tyz(13),nd2_tyz(18) { damp2=1./(1.+damp2_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoyz=t2yz(k,i,j)-t2yz_py(k,i,jb); if(k<nd2_tyz[16]) { //if(k<nd2_tyz(17)) { cusyz=(dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+ dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))*sm; qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j); } cusyz=sm*dyi2(2,j)/ca*(v2z(k,i,j)-v2z(k,i,j+1)); qyz=qt2yz_py(k,i,jb); qt2yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1; t2yz_py(k,i,jb)=damp1*t2yz_py(k,i,jb)+ damp2*(cusyz-qyz-qt2yz_py(k,i,jb)); t2yz(k,i,j)=taoyz+t2yz_py(k,i,jb); } } // } // } return; } //call stress_yz_PmlZ_II __global__ void stress_yz_PmlZ_IIC(int nxb2, int nyb2, int mw2_pml1, int nxbtm, int nzbtm, int nztop, int *nd2_tyz, int *idmat2M, float ca, float *drti2M, float *damp2_zM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi2M, float *dzh2M, float *t2yzM, float *qt2yzM, float *t2yz_pzM, float *qt2yz_pzM, float *v2yM, float *v2zM) //Compute the stress-yz at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kb,kodd,jkq,inod,irw //real:: taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,kb,kodd,jkq,inod,irw; float taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[6]; if (j > nd2_tyz[5] || i > nd2_tyz[11]) { return; } // for (j = nd2_tyz[0]; j <= nd2_tyz[5]; j++) // //do j=nd2_tyz(1),nd2_tyz(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++) // //do i=nd2_tyz(7),nd2_tyz(12) // { jkq = ((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd kb=0; for (k = nd2_tyz[16]; k <= nd2_tyz[17]; k++) //do k=nd2_tyz(17),nd2_tyz(18) { kb=kb+1; damp2=1./(1.+damp2_z(i,j)*drti2(kb,1)); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2); et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoyz=t2yz(k,i,j)-t2yz_pz(kb,i,j); if (j > nd2_tyz[1] && j<nd2_tyz[4]) { //if(j>nd2_tyz(2) .and. 
j<nd2_tyz(5)) then cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j)+ dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2))*sm; qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j); } cusyz=sm*dzh2(2,k)/ca*(v2y(k-1,i,j)-v2y(k,i,j)); qyz=qt2yz_pz(kb,i,j); qt2yz_pz(kb,i,j)=qyz*et+dmws*cusyz*et1; t2yz_pz(kb,i,j)=damp1*t2yz_pz(kb,i,j)+ damp2*(cusyz-qyz-qt2yz_pz(kb,i,j)); t2yz(k,i,j)=taoyz+t2yz_pz(kb,i,j); } // } // } return; } #ifdef __cplusplus extern "C" { #endif void compute_stressC(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml, int *mw1_pml1, int *nmat, int *nll, int *lbx, int *lby, int *nd1_txy, int *nd1_txz, int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M, float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM, float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM, float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM, float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp, int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz, int *nd2_tyy, int *nd2_tyz, int *idmat2M, float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM, float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M, float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM, float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM, float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM, float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM, void **v2xMp, void **v2yMp, void **v2zMp, int *myid) { printf("[CUDA] stress computation:\n"); float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM; int blockSizeX = 8; int blockSizeY = 8; dim3 dimBlock(blockSizeX, blockSizeY); v1xM = (float *) *v1xMp; v1yM = (float *) *v1yMp; v1zM = (float *) *v1zMp; v2xM = (float *) *v2xMp; v2yM = (float *) *v2yMp; v2zM = (float *) *v2zMp; gettimeofday(&t1, NULL); cpy_h2d_stressInputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); cpy_h2d_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeH2DS += tmpTime; gettimeofday(&t1, NULL); int gridSizeX1 = (nd1_tyy[3] - nd1_tyy[2])/blockSizeX + 1; int gridSizeY1 = (nd1_tyy[9] - nd1_tyy[8])/blockSizeY + 1; dim3 dimGrid1(gridSizeX1, gridSizeY1); hipLaunchKernelGGL(( stress_norm_xy_IC), dim3(dimGrid1), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *nxtop, *nztop, nd1_tyyD, idmat1D, *ca, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, 
dxh1D, dyh1D, dxi1D, dyi1D, dzi1D, t1xxD, t1xyD, t1yyD, t1zzD, qt1xxD, qt1xyD, qt1yyD, qt1zzD, v1xD, v1yD, v1zD); int gridSizeX2 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1; int gridSizeY2 = (nd1_tyz[9] - nd1_tyz[8])/blockSizeY + 1; dim3 dimGrid2(gridSizeX2, gridSizeY2); hipLaunchKernelGGL(( stress_xz_yz_IC), dim3(dimGrid2), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *nxtop, *nytop, *nztop, nd1_tyzD, idmat1D, *ca, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dyi1D, dzh1D, v1xD, v1yD, v1zD, t1xzD, t1yzD, qt1xzD, qt1yzD); int gridSizeX3Temp1 = ((*ny1p1) + 1)/blockSizeX + 1; int gridSizeX3Temp2 = ((*nytop) - 1)/blockSizeX + 1; int gridSizeY3Temp1 = ((*nxtop) - 1)/blockSizeY + 1; int gridSizeY3Temp2 = ((*nx1p1) + 1)/blockSizeY + 1; int gridSizeX3 = (gridSizeX3Temp1 > gridSizeX3Temp2) ? gridSizeX3Temp1 : gridSizeX3Temp2; int gridSizeY3 = (gridSizeY3Temp1 > gridSizeY3Temp2) ? gridSizeY3Temp1 : gridSizeY3Temp2; dim3 dimGrid3(gridSizeX3, gridSizeY3); hipLaunchKernelGGL(( stress_resetVars), dim3(dimGrid3), dim3(dimBlock), 0, 0, *ny1p1, *nx1p1, *nxtop, *nytop, *nztop, t1xzD, t1yzD); if (lbx[1] >= lbx[0]) { int gridSizeX4 = (nd1_tyy[5] - nd1_tyy[0])/blockSizeX + 1; int gridSizeY4 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid4(gridSizeX4, gridSizeY4); hipLaunchKernelGGL(( stress_norm_PmlX_IC), dim3(dimGrid4), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *nxtop, *nytop, *nztop, *mw1_pml, *mw1_pml1, lbx[0], lbx[1], nd1_tyyD, idmat1D, *ca, drti1D, damp1_xD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dzi1D, dxh1D, dyh1D, v1xD, v1yD, v1zD, t1xxD, t1yyD, t1zzD, t1xx_pxD, t1yy_pxD, qt1xxD, qt1yyD, qt1zzD, qt1xx_pxD, qt1yy_pxD); } if (lby[1] >= lby[0]) { int gridSizeX5 = (nd1_tyy[11] - nd1_tyy[6])/blockSizeX + 1; int gridSizeY5 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid5(gridSizeX5, gridSizeY5); hipLaunchKernelGGL(( stress_norm_PmlY_IC), dim3(dimGrid5), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *mw1_pml1, *nxtop, *nztop, lby[0], lby[1], nd1_tyyD, idmat1D, *ca, drti1D, damp1_yD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh1D, dyh1D, dzi1D, t1xxD, t1yyD, t1zzD, qt1xxD, qt1yyD, qt1zzD, t1xx_pyD, t1yy_pyD, qt1xx_pyD, qt1yy_pyD, v1xD, v1yD, v1zD); } if (lbx[1] >= lbx[0]) { int gridSizeX6 = (nd1_txy[5] - nd1_txy[0])/blockSizeX + 1; int gridSizeY6 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid6(gridSizeX6, gridSizeY6); hipLaunchKernelGGL(( stress_xy_PmlX_IC), dim3(dimGrid6), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *mw1_pml, *mw1_pml1, *nxtop, *nytop, *nztop, lbx[0], lbx[1], nd1_txyD, idmat1D, *ca, drth1D, damp1_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dyi1D, t1xyD, qt1xyD, t1xy_pxD, qt1xy_pxD, v1xD, v1yD); } if (lby[1] >= lby[0]) { int gridSizeX7 = (nd1_txy[11] - nd1_txy[6])/blockSizeX + 1; int gridSizeY7 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid7(gridSizeX7, gridSizeY7); hipLaunchKernelGGL(( stress_xy_PmlY_IC), dim3(dimGrid7), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *mw1_pml1, *nxtop, *nztop, lby[0], lby[1], nd1_txyD, idmat1D, *ca, drth1D, damp1_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dyi1D, t1xyD, qt1xyD, t1xy_pyD, qt1xy_pyD, v1xD, v1yD); } if (lbx[1] >= lbx[0]) { int gridSizeX8 = (nd1_txz[5] - nd1_txz[0])/blockSizeX + 1; int gridSizeY8 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid8(gridSizeX8, gridSizeY8); hipLaunchKernelGGL(( stress_xz_PmlX_IC), dim3(dimGrid8), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *nxtop, *nytop, *nztop, *mw1_pml, *mw1_pml1, lbx[0], lbx[1], nd1_txzD, idmat1D, *ca, drth1D, damp1_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dzh1D, t1xzD, qt1xzD, t1xz_pxD, qt1xz_pxD, v1xD, v1zD); } if (lby[1] >= 
lby[0]) { int gridSizeX9 = (nd1_txz[9] - nd1_txz[8])/blockSizeX + 1; int gridSizeY9 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid9(gridSizeX9, gridSizeY9); hipLaunchKernelGGL(( stress_xz_PmlY_IC), dim3(dimGrid9), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *nxtop, *nztop, lby[0], lby[1], nd1_txzD, idmat1D, *ca, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dzh1D, t1xzD, qt1xzD, v1xD, v1zD); } if (lbx[1] >= lbx[0]) { int gridSizeX10 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1; int gridSizeY10 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid10(gridSizeX10, gridSizeY10); hipLaunchKernelGGL(( stress_yz_PmlX_IC), dim3(dimGrid10), dim3(dimBlock), 0, 0, *nxb1, *nyb1, *nztop, *nxtop, lbx[0], lbx[1], nd1_tyzD, idmat1D, *ca, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi1D, dzh1D, t1yzD, qt1yzD, v1yD, v1zD); } if (lby[1] >= lby[0]) { int gridSizeX11 = (nd1_tyz[11] - nd1_tyz[6])/blockSizeX + 1; int gridSizeY11 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid11(gridSizeX11, gridSizeY11); hipLaunchKernelGGL(( stress_yz_PmlY_IC), dim3(dimGrid11),dim3(dimBlock), 0, 0, *nxb1, *nyb1, *mw1_pml1, *nxtop, *nztop, lby[0], lby[1], nd1_tyzD, idmat1D, *ca, drth1D, damp1_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi1D, dzh1D, t1yzD, qt1yzD, t1yz_pyD, qt1yz_pyD, v1yD, v1zD); } int gridSizeX12 = (nd2_tyy[3] - nd2_tyy[2])/blockSizeX + 1; int gridSizeY12 = (nd2_tyy[9] - nd2_tyy[8])/blockSizeY + 1; dim3 dimGrid12(gridSizeX12, gridSizeY12); hipLaunchKernelGGL(( stress_norm_xy_II), dim3(dimGrid12), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, nd2_tyyD, idmat2D, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, t2xxD, t2xyD, t2yyD, t2zzD, qt2xxD, qt2xyD, qt2yyD, qt2zzD, dxh2D, dyh2D, dxi2D, dyi2D, dzi2D, v2xD, v2yD, v2zD); int gridSizeX13 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1; int gridSizeY13 = (nd2_tyz[9] - nd2_tyz[8])/blockSizeY + 1; dim3 dimGrid13(gridSizeX13, gridSizeY13); hipLaunchKernelGGL(( stress_xz_yz_IIC), dim3(dimGrid13), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *nztop, *nxbtm, *nzbtm, nd2_tyzD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, dzh2D, t2xzD, t2yzD, qt2xzD, qt2yzD, v2xD, v2yD, v2zD); if (lbx[1] >= lbx[0]) { int gridSizeX14 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1; int gridSizeY14 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid14(gridSizeX14, gridSizeY14); hipLaunchKernelGGL(( stress_norm_PmlX_IIC), dim3(dimGrid14), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nztop, *nxbtm, *nybtm, *nzbtm, lbx[0], lbx[1], nd2_tyyD, idmat2D, *ca, drti2D, damp2_xD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh2D, dyh2D, dzi2D, t2xxD, t2yyD, t2zzD, qt2xxD, qt2yyD, qt2zzD, t2xx_pxD, t2yy_pxD, qt2xx_pxD, qt2yy_pxD, v2xD, v2yD, v2zD); } if (lby[1] >= lby[0]) { int gridSizeX15 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeX + 1; int gridSizeY15 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid15(gridSizeX15, gridSizeY15); hipLaunchKernelGGL(( stress_norm_PmlY_II), dim3(dimGrid15), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *nztop, *nxbtm, *nzbtm, *mw2_pml1, lby[0], lby[1], nd2_tyyD, idmat2D, *ca, drti2D, damp2_yD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh2D, dyh2D, dzi2D, t2xxD, t2yyD, t2zzD, qt2xxD, qt2yyD, qt2zzD, t2xx_pyD, t2yy_pyD, qt2xx_pyD, qt2yy_pyD, v2xD, v2yD, v2zD); } int gridSizeX16 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1; int gridSizeY16 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeY + 1; dim3 dimGrid16(gridSizeX16, gridSizeY16); hipLaunchKernelGGL(( stress_norm_PmlZ_IIC), dim3(dimGrid16), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nztop, *nxbtm, *nzbtm, nd2_tyyD, idmat2D, *ca, damp2_zD, 
drth2D, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh2D, dyh2D, dzi2D, t2xxD, t2yyD, t2zzD, qt2xxD, qt2yyD, qt2zzD, t2xx_pzD, t2zz_pzD, qt2xx_pzD, qt2zz_pzD, v2xD, v2yD, v2zD); if (lbx[1] >= lbx[0]) { int gridSizeX17 = (nd2_txy[5] - nd2_txy[0])/blockSizeX + 1; int gridSizeY17 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid17(gridSizeX17, gridSizeY17); hipLaunchKernelGGL(( stress_xy_PmlX_IIC), dim3(dimGrid17), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nxbtm, *nybtm, *nzbtm, *nztop, lbx[0], lbx[1], nd2_txyD, idmat2D, *ca, drth2D, damp2_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, t2xyD, qt2xyD, t2xy_pxD, qt2xy_pxD, v2xD, v2yD); } if (lby[1] >= lby[0]) { int gridSizeX18 = (nd2_txy[11] - nd2_txy[6])/blockSizeX + 1; int gridSizeY18 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid18(gridSizeX18, gridSizeY18); hipLaunchKernelGGL(( stress_xy_PmlY_IIC), dim3(dimGrid18), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml1, *nztop, *nxbtm, *nzbtm, lby[0], lby[1], nd2_txyD, idmat2D, *ca, drth2D, damp2_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, t2xyD, qt2xyD, t2xy_pyD, qt2xy_pyD, v2xD, v2yD); } int gridSizeX19 = (nd2_txy[3] - nd2_txy[2])/blockSizeX + 1; int gridSizeY19 = (nd2_txy[9] - nd2_txy[8])/blockSizeY + 1; dim3 dimGrid19(gridSizeX19, gridSizeY19); hipLaunchKernelGGL(( stress_xy_PmlZ_II), dim3(dimGrid19), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, nd2_txyD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, t2xyD, qt2xyD, v2xD, v2yD); if (lbx[1] >= lbx[0]) { int gridSizeX20 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1; int gridSizeY20 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid20(gridSizeX20, gridSizeY20); hipLaunchKernelGGL(( stress_xz_PmlX_IIC), dim3(dimGrid20), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nxbtm, *nybtm, *nzbtm, *nztop, lbx[0], lbx[1], nd2_txzD, idmat2D, *ca, drth2D, damp2_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dzh2D, t2xzD, qt2xzD, t2xz_pxD, qt2xz_pxD, v2xD, v2zD); } if (lby[1] >= lby[0]) { int gridSizeX21 = (nd2_txz[9] - nd2_txz[8])/blockSizeX + 1; int gridSizeY21 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid21(gridSizeX21, gridSizeY21); hipLaunchKernelGGL(( stress_xz_PmlY_IIC), dim3(dimGrid21), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, lby[0], lby[1], nd2_txzD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dzh2D, v2xD, v2zD, t2xzD, qt2xzD); } int gridSizeX22 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1; int gridSizeY22 = (nd2_txz[11] - nd2_txz[6])/blockSizeY + 1; dim3 dimGrid22(gridSizeX22, gridSizeY22); hipLaunchKernelGGL(( stress_xz_PmlZ_IIC), dim3(dimGrid22), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml1, *nxbtm, *nzbtm, *nztop, nd2_txzD, idmat2D, *ca, drti2D, damp2_zD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dzh2D, t2xzD, qt2xzD, t2xz_pzD, qt2xz_pzD, v2xD, v2zD); if (lbx[1] >= lbx[0]) { int gridSizeX23 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1; int gridSizeY23 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid23(gridSizeX23, gridSizeY23); hipLaunchKernelGGL(( stress_yz_PmlX_IIC), dim3(dimGrid23), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, lbx[0], lbx[1], nd2_tyzD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi2D, dzh2D, t2yzD, qt2yzD, v2yD, v2zD); } if (lby[1] >= lby[0]) { int gridSizeX24 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeX + 1; int gridSizeY24 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid24(gridSizeX24, gridSizeY24); hipLaunchKernelGGL(( stress_yz_PmlY_IIC), dim3(dimGrid24), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml1, *nxbtm, *nzbtm, *nztop, lby[0], 
lby[1], nd2_tyzD, idmat2D, *ca, drth2D, damp2_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi2D, dzh2D, t2yzD, qt2yzD, t2yz_pyD, qt2yz_pyD, v2yD, v2zD); } int gridSizeX25 = (nd2_tyz[5] - nd2_tyz[0])/blockSizeX + 1; int gridSizeY25 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeY + 1; dim3 dimGrid25(gridSizeX25, gridSizeY25); hipLaunchKernelGGL(( stress_yz_PmlZ_IIC), dim3(dimGrid25), dim3(dimBlock), 0, 0, *nxb2, *nyb2, *mw2_pml1, *nxbtm, *nzbtm, *nztop, nd2_tyzD, idmat2D, *ca, drti2D, damp2_zD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi2D, dzh2D, t2yzD, qt2yzD, t2yz_pzD, qt2yz_pzD, v2yD, v2zD); hipDeviceSynchronize(); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeCompS += tmpTime; gettimeofday(&t1, NULL); cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeD2HS += tmpTime; /*int size = (*nztop) * (*nxtop + 3) * (*nytop); write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM.txt"); size = (*nztop) * (*nxtop + 3) * (*nytop + 3); write_output(t1xyM, size, "OUTPUT_ARRAYS/t1xyM.txt"); size = (*nztop + 1) * (*nxtop + 3) * (*nytop); write_output(t1xzM, size, "OUTPUT_ARRAYS/t1xzM.txt"); size = (*nztop) * (*nxtop) * (*nytop + 3); write_output(t1yyM, size, "OUTPUT_ARRAYS/t1yyM.txt"); size = (*nztop + 1) * (*nxtop) * (*nytop + 3); write_output(t1yzM, size, "OUTPUT_ARRAYS/t1yzM.txt"); size = (*nztop) * (*nxtop) * (*nytop); write_output(t1zzM, size, "OUTPUT_ARRAYS/t1zzM.txt"); size = (*nzbtm) * (*nxbtm + 3) * (*nybtm); write_output(t2xxM, size, "OUTPUT_ARRAYS/t2xxM.txt"); size = (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3); write_output(t2xyM, size, "OUTPUT_ARRAYS/t2xyM.txt"); size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm); write_output(t2xzM, size, "OUTPUT_ARRAYS/t2xzM.txt"); size = (*nzbtm) * (*nxbtm) * (*nybtm + 3); write_output(t2yyM, size, "OUTPUT_ARRAYS/t2yyM.txt"); size = (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3); write_output(t2yzM, size, "OUTPUT_ARRAYS/t2yzM.txt"); size = (*nzbtm + 1) * (*nxbtm) * (*nybtm); write_output(t2zzM, size, "OUTPUT_ARRAYS/t2zzM.txt"); */ /*************** correctness *******************/ /* FILE *fp; // cudaRes = hipMalloc((void **)&v1xD, sizeof(float) * y(*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x"); // cudaRes = hipMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y"); // cudaRes = hipMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z"); const char* filename = "v1x.txt"; const char* filename1 = "v1y.txt"; const char* filename2 = "v1z.txt"; int i; if((fp = fopen(filename, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ ) { fprintf(fp, "%f ", v1xM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename1, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ ) { fprintf(fp, "%f ", v1yM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename2, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ ) { fprintf(fp, "%f ", v1zM[i]); } fprintf(fp, "\n"); fclose(fp); // cudaRes = hipMalloc((void **)&t1xxD, 
sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx"); // cudaRes = hipMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy"); // cudaRes = hipMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz"); // cudaRes = hipMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy"); // cudaRes = hipMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz"); // cudaRes = hipMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz"); const char* filename3 = "x_t1xx.txt"; const char* filename4 = "x_t1xy.txt"; const char* filename5 = "x_t1xz.txt"; if((fp = fopen(filename3, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop); i++ ) { fprintf(fp, "%f ", t1xxM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename4, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop+3); i++ ) { fprintf(fp, "%f ", t1xyM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename5, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop+1) * (*nxtop + 3) * (*nytop); i++ ) { fprintf(fp, "%f ", t1xzM[i]); } fprintf(fp, "\n"); fclose(fp); */ return; } #ifdef __cplusplus } #endif
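/*
 * Illustrative sketch only (not part of the original solver): it isolates the launch
 * pattern that compute_stressC above applies to every stress kernel -- a 2-D grid sized
 * as (hi - lo)/blockSize + 1 blocks per axis over an inclusive index window [lo, hi],
 * with each thread mapping back to (j, i) and returning early when either index
 * overshoots its bound. The kernel name, the window values and the output layout here
 * are placeholders chosen only to make the sketch self-contained and runnable.
 */
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void window_guard_demo(int jlo, int jhi, int ilo, int ihi,
                                  float *out, int pitch)
{
    /* same mapping as the stress kernels: x covers j, y covers i */
    int j = blockIdx.x * blockDim.x + threadIdx.x + jlo;
    int i = blockIdx.y * blockDim.y + threadIdx.y + ilo;
    if (j > jhi || i > ihi) return;   /* guard the ragged last blocks */
    out[(i - ilo) * pitch + (j - jlo)] = (float)(i * 1000 + j);
}

int main(void)
{
    const int jlo = 3, jhi = 42, ilo = 7, ihi = 29;  /* placeholder window bounds */
    const int blockSizeX = 8, blockSizeY = 8;        /* block shape used by compute_stressC */
    const int nj = jhi - jlo + 1, ni = ihi - ilo + 1;

    float *outD;
    hipMalloc((void **)&outD, sizeof(float) * ni * nj);

    int gridSizeX = (jhi - jlo) / blockSizeX + 1;    /* same rounding as the host code */
    int gridSizeY = (ihi - ilo) / blockSizeY + 1;
    dim3 dimBlock(blockSizeX, blockSizeY);
    dim3 dimGrid(gridSizeX, gridSizeY);
    hipLaunchKernelGGL(window_guard_demo, dimGrid, dimBlock, 0, 0,
                       jlo, jhi, ilo, ihi, outD, nj);
    hipDeviceSynchronize();

    printf("covered a %d x %d window with %d x %d blocks of %d x %d threads\n",
           nj, ni, gridSizeX, gridSizeY, blockSizeX, blockSizeY);
    hipFree(outD);
    return 0;
}
/* The per-thread guard is what lets the host use the simple "+ 1" rounding even when
 * the window size is not a multiple of the block size. */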
74559321610054b34ea9a3b054ca5f0180a2e78e.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> /***********************************************/ /* for debug: check the output */ /***********************************************/ void write_output(float *arr, int size, const char *filename) { FILE *fp; if((fp = fopen(filename, "w+")) == NULL) { fprintf(stderr, "File write error!\n"); } int i; for(i = 0; i < size; i++) { fprintf(fp, "%f ", arr[i]); if( i%10 == 0) fprintf(fp, "\n"); } fprintf(fp, "\n"); fclose(fp); } //device memory pointers static int *nd1_velD; static int *nd1_txyD; static int *nd1_txzD; static int *nd1_tyyD; static int *nd1_tyzD; static float *rhoD; static float *drvh1D; static float *drti1D; static float *drth1D; static float *damp1_xD; static float *damp1_yD; static int *idmat1D; static float *dxi1D; static float *dyi1D; static float *dzi1D; static float *dxh1D; static float *dyh1D; static float *dzh1D; static float *t1xxD; static float *t1xyD; static float *t1xzD; static float *t1yyD; static float *t1yzD; static float *t1zzD; static float *t1xx_pxD; static float *t1xy_pxD; static float *t1xz_pxD; static float *t1yy_pxD; static float *qt1xx_pxD; static float *qt1xy_pxD; static float *qt1xz_pxD; static float *qt1yy_pxD; static float *t1xx_pyD; static float *t1xy_pyD; static float *t1yy_pyD; static float *t1yz_pyD; static float *qt1xx_pyD; static float *qt1xy_pyD; static float *qt1yy_pyD; static float *qt1yz_pyD; static float *qt1xxD; static float *qt1xyD; static float *qt1xzD; static float *qt1yyD; static float *qt1yzD; static float *qt1zzD; static float *clamdaD; static float *cmuD; static float *epdtD; static float *qwpD; static float *qwsD; static float *qwt1D; static float *qwt2D; static float *v1xD; //output static float *v1yD; static float *v1zD; static float *v1x_pxD; static float *v1y_pxD; static float *v1z_pxD; static float *v1x_pyD; static float *v1y_pyD; static float *v1z_pyD; //for inner_II--------------------------------------------------------- static int *nd2_velD; static int *nd2_txyD; //int[18] static int *nd2_txzD; //int[18] static int *nd2_tyyD; //int[18] static int *nd2_tyzD; //int[18] static float *drvh2D; static float *drti2D; static float *drth2D; //float[mw2_pml1,0:1] static int *idmat2D; static float *damp2_xD; static float *damp2_yD; static float *damp2_zD; static float *dxi2D; static float *dyi2D; static float *dzi2D; static float *dxh2D; static float *dyh2D; static float *dzh2D; static float *t2xxD; static float *t2xyD; static float *t2xzD; static float *t2yyD; static float *t2yzD; static float *t2zzD; static float *qt2xxD; static float *qt2xyD; static float *qt2xzD; static float *qt2yyD; static float *qt2yzD; static float *qt2zzD; static float *t2xx_pxD; static float *t2xy_pxD; static float *t2xz_pxD; static float *t2yy_pxD; static float *qt2xx_pxD; static float *qt2xy_pxD; static float *qt2xz_pxD; static float *qt2yy_pxD; static float *t2xx_pyD; static float *t2xy_pyD; static float *t2yy_pyD; static float *t2yz_pyD; static float *qt2xx_pyD; static float *qt2xy_pyD; static float *qt2yy_pyD; static float *qt2yz_pyD; static float *t2xx_pzD; static float *t2xz_pzD; static float *t2yz_pzD; static float *t2zz_pzD; static float *qt2xx_pzD; static float *qt2xz_pzD; static float *qt2yz_pzD; static float *qt2zz_pzD; static float *v2xD; //output static float *v2yD; static float *v2zD; static float *v2x_pxD; static float *v2y_pxD; static float *v2z_pxD; static float *v2x_pyD; static float *v2y_pyD; static float *v2z_pyD; static float *v2x_pzD; static float *v2y_pzD; static float 
*v2z_pzD; #define CHECK_ERROR(err, str) \ if (err != cudaSuccess) \ {\ printf("Error in \"%s\", %s\n", str, cudaGetErrorString(err)); \ } //debug---------------------- float totalTimeH2DV, totalTimeD2HV; float totalTimeH2DS, totalTimeD2HS; float totalTimeCompV, totalTimeCompS; float tmpTime; struct timeval t1, t2; int procID; //-------------------------------- //!XSC-------------------------------------------------------------------- #define drvh1(i, j) drvh1M[(i) - 1 + (j) * mw1_pml1] #define drti1(i, j) drti1M[(i) - 1 + (j) * mw1_pml1] #define drth1(i, j) drth1M[(i) - 1 + (j) * mw1_pml1] #define damp1_x(i, j, k) damp1_xM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lbx0) * nytop)] #define damp1_y(i, j, k) damp1_yM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) - lby0) * nxtop)] #define idmat1(i, j, k) idmat1M[(i) + (nztop + 2) * ((j) - 1 + ((k) - 1) * (nxtop + 1))] #define v1x(i, j, k) v1xM[(i) + (nztop + 2) * ((j) + 1 + (k) * (nxtop + 3))] #define v1y(i, j, k) v1yM[(i) + (nztop + 2) * ((j) + ((k) + 1) * (nxtop + 3))] #define v1z(i, j, k) v1zM[(i) + (nztop + 2) * ((j) + (k) * (nxtop + 3))] //nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml #define v1x_px(i, j, k) v1x_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))] #define v1y_px(i, j, k) v1y_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))] #define v1z_px(i, j, k) v1z_pxM[(i) - 1 + nztop * ((j) - 1 + nv2x * ((k) - 1))] #define v1x_py(i, j, k) v1x_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define v1y_py(i, j, k) v1y_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define v1z_py(i, j, k) v1z_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define dxi1(i, j) dxi1M[((j) - 1) * 4 + (i) - 1] #define dyi1(i, j) dyi1M[((j) - 1) * 4 + (i) - 1] #define dzi1(i, j) dzi1M[((j) - 1) * 4 + (i) - 1] #define dxh1(i, j) dxh1M[((j) - 1) * 4 + (i) - 1] #define dyh1(i, j) dyh1M[((j) - 1) * 4 + (i) - 1] #define dzh1(i, j) dzh1M[((j) - 1) * 4 + (i) - 1] #define t1xx(i, j, k) t1xxM[(i) - 1 + nztop * ((j) + ((k) - 1) * (nxtop + 3))] #define t1xy(i, j, k) t1xyM[(i) - 1 + nztop * ((j) + 1 + ((k) + 1) * (nxtop + 3))] #define t1xz(i, j, k) t1xzM[(i) - 1 + (nztop + 1) * ((j) + 1 + ((k) - 1) * (nxtop + 3))] #define t1yy(i, j, k) t1yyM[(i) - 1 + nztop * (((j) - 1) + (k) * nxtop)] #define t1yz(i, j, k) t1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + ((k) + 1) * nxtop)] #define t1zz(i, j, k) t1zzM[(i) - 1 + nztop * ((j) - 1 + ((k) - 1) * nxtop)] //nti = (lbx(2) - lbx(1) + 1) * mw1_pml + lbx(2) //nth = (lbx(2) - lbx(1) + 1) * mw1_pml + 1 - lbx(1) #define t1xx_px(i, j, k) t1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] #define t1xy_px(i, j, k) t1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))] #define t1xz_px(i, j, k) t1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))] #define t1yy_px(i, j, k) t1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] #define qt1xx_px(i, j, k) qt1xx_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] #define qt1xy_px(i, j, k) qt1xy_pxM[(i) - 1 + nztop * ((j) - 1 + nth * ((k) - 1))] #define qt1xz_px(i, j, k) qt1xz_pxM[(i) - 1 + (nztop + 1) * ((j) - 1 + nth * ((k) - 1))] #define qt1yy_px(i, j, k) qt1yy_pxM[(i) - 1 + nztop * ((j) - 1 + nti * ((k) - 1))] //nti = (lby(2) - lby(1) + 1) * mw1_pml + lby(2) //nth = (lby(2) - lby(1) + 1) * mw1_pml + 1 - lby(1) #define t1xx_py(i, j, k) t1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define t1xy_py(i, j, k) t1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define t1yy_py(i, j, k) t1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))] #define t1yz_py(i, j, k) 
t1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xx_py(i, j, k) qt1xx_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xy_py(i, j, k) qt1xy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yy_py(i, j, k) qt1yy_pyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yz_py(i, j, k) qt1yz_pyM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xx(i, j, k) qt1xxM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xy(i, j, k) qt1xyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1xz(i, j, k) qt1xzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yy(i, j, k) qt1yyM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1yz(i, j, k) qt1yzM[(i) - 1 + (nztop + 1) * ((j) - 1 + nxtop * ((k) - 1))]
#define qt1zz(i, j, k) qt1zzM[(i) - 1 + nztop * ((j) - 1 + nxtop * ((k) - 1))]
#define rho(i) rhoM[(i) - 1]
#define clamda(i) clamdaM[(i) - 1]
#define cmu(i) cmuM[(i) - 1]
#define epdt(i) epdtM[(i) - 1]
#define qwp(i) qwpM[(i) - 1]
#define qws(i) qwsM[(i) - 1]
#define qwt1(i) qwt1M[(i) - 1]
#define qwt2(i) qwt2M[(i) - 1]

//for inner_II
#define drvh2(i, j) drvh2M[(i) - 1 + (j) * mw2_pml1]
#define drti2(i, j) drti2M[(i) - 1 + (j) * mw2_pml1]
#define drth2(i, j) drth2M[(i) - 1 + (j) * mw2_pml1]
#define idmat2(i, j, k) idmat2M[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * (nxbtm + 1))]
#define damp2_x(i, j, k) damp2_xM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lbx0) * nybtm)]
#define damp2_y(i, j, k) damp2_yM[(i) - 1 + nzbtm * ((j) - 1 + ((k) - lby0) * nxbtm)]
#define damp2_z(i, j) damp2_zM[(i) - 1 + nxbtm * ((j) - 1)]
#define dxi2(i, j) dxi2M[(i) - 1 + 4 * ((j) - 1)]
#define dyi2(i, j) dyi2M[(i) - 1 + 4 * ((j) - 1)]
#define dzi2(i, j) dzi2M[(i) - 1 + 4 * ((j) - 1)]
#define dxh2(i, j) dxh2M[(i) - 1 + 4 * ((j) - 1)]
#define dyh2(i, j) dyh2M[(i) - 1 + 4 * ((j) - 1)]
#define dzh2(i, j) dzh2M[(i) - 1 + 4 * ((j) - 1)]
#define t2xx(i, j, k) t2xxM[(i) - 1 + nzbtm * ((j) + ((k) - 1) * (nxbtm + 3))]
#define t2xy(i, j, k) t2xyM[(i) - 1 + nzbtm * ((j) + 1 + ((k) + 1) * (nxbtm + 3))]
#define t2xz(i, j, k) t2xzM[(i) + (nzbtm + 1) * ((j) + 1 + ((k) - 1) * (nxbtm + 3))]
#define t2yy(i, j, k) t2yyM[(i) - 1 + nzbtm * (((j) - 1) + (k) * nxbtm)]
#define t2yz(i, j, k) t2yzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) + 1) * nxbtm)]
#define t2zz(i, j, k) t2zzM[(i) + (nzbtm + 1) * ((j) - 1 + ((k) - 1) * nxbtm)]
#define qt2xx(i, j, k) qt2xxM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xy(i, j, k) qt2xyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xz(i, j, k) qt2xzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yy(i, j, k) qt2yyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz(i, j, k) qt2yzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2zz(i, j, k) qt2zzM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]

//nti = (lbx(2) - lbx(1) + 1) * mw2_pml + lbx(2)
//nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1)
#define t2xx_px(i, j, k) t2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define t2xy_px(i, j, k) t2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define t2xz_px(i, j, k) t2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define t2yy_px(i, j, k) t2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define t2xx_py(i, j, k) t2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xy_py(i, j, k) t2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yy_py(i, j, k) t2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yz_py(i, j, k) t2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xx_pz(i, j, k) t2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2xz_pz(i, j, k) t2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2yz_pz(i, j, k) t2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define t2zz_pz(i, j, k) t2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xx_px(i, j, k) qt2xx_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define qt2xy_px(i, j, k) qt2xy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define qt2xz_px(i, j, k) qt2xz_pxM[(i) - 1 + nzbtm * ((j) - 1 + nth * ((k) - 1))]
#define qt2yy_px(i, j, k) qt2yy_pxM[(i) - 1 + nzbtm * ((j) - 1 + nti * ((k) - 1))]
#define qt2xx_py(i, j, k) qt2xx_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xy_py(i, j, k) qt2xy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yy_py(i, j, k) qt2yy_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz_py(i, j, k) qt2yz_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xx_pz(i, j, k) qt2xx_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2xz_pz(i, j, k) qt2xz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2yz_pz(i, j, k) qt2yz_pzM[(i) - 1 + mw2_pml1 * ((j) - 1 + nxbtm * ((k) - 1))]
#define qt2zz_pz(i, j, k) qt2zz_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2x(i, j, k) v2xM[(i) + (nzbtm + 1) * ((j) + 1 + (nxbtm + 3) * (k))]
#define v2y(i, j, k) v2yM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * ((k) + 1))]
#define v2z(i, j, k) v2zM[(i) + (nzbtm + 1) * ((j) + (nxbtm + 3) * (k))]

//nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml
#define v2x_px(i, j, k) v2x_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2y_px(i, j, k) v2y_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2z_px(i, j, k) v2z_pxM[(i) - 1 + nzbtm * ((j) - 1 + nv2y * ((k) - 1))]
#define v2x_py(i, j, k) v2x_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2y_py(i, j, k) v2y_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2z_py(i, j, k) v2z_pyM[(i) - 1 + nzbtm * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2x_pz(i, j, k) v2x_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2y_pz(i, j, k) v2y_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]
#define v2z_pz(i, j, k) v2z_pzM[(i) - 1 + mw2_pml * ((j) - 1 + nxbtm * ((k) - 1))]

__global__ void velocity_inner_IC(int nztop, int nztm1, float ca, int *nd1_vel, float *rhoM, int *idmat1M,
	float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M,
	float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM,
	int nxtop, //dimension #
	int nytop,
	float *v1xM, //output
	float *v1yM, float *v1zM);

__global__ void velocity_inner_IIC(float ca, int *nd2_vel, float *rhoM,
	float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, int *idmat2,
	float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz,
	int nxbtm, //dimension #s
	int nybtm, int nzbtm,
	float *v2x, //output
	float *v2y, float *v2z);

__global__ void vel_PmlX_IC(float ca, int lbx0, int lbx1, int *nd1_vel, float *rhoM,
	float *drvh1, float *drti1, float *damp1_x, int *idmat1,
	float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1,
	float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz,
	int mw1_pml1, //dimension #
	int mw1_pml, int nxtop, int nytop, int nztop,
	float *v1x, //output
	float
*v1y, float *v1z, float *v1x_px, float *v1y_px, float *v1z_px); __global__ void vel_PmlY_IC(int nztop, float ca, int lby0, int lby1, int *nd1_vel, float *rhoM, float *drvh1, float *drti1, int *idmat1, float *damp1_y, float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1, float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, int mw1_pml1, //dimension #s int mw1_pml, int nxtop, int nytop, float *v1x, //output float *v1y, float *v1z, float *v1x_py, float *v1y_py, float *v1z_py); __global__ void vel_PmlX_IIC(int nzbm1, float ca, int lbx0, int lbx1, int *nd2_vel, float *drvh2, float *drti2, float *rhoM, float *damp2_x, int *idmat2, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2x, //output float *v2y, float *v2z, float *v2x_px, float *v2y_px, float *v2z_px); __global__ void vel_PmlY_IIC(int nzbm1, float ca, int lby0, int lby1, int *nd2_vel, float *drvh2, float *drti2, float *rhoM, float *damp2_y, int *idmat2, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int mw2_pml1, int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2x, //output float *v2y, float *v2z, float *v2x_py, float *v2y_py, float *v2z_py); __global__ void vel_PmlZ_IIC(int nzbm1, float ca, int *nd2_vel, float *drvh2, float *drti2, float *rhoM, float *damp2_z, int *idmat2, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2x, //output float *v2y, float *v2z, float *v2x_pz, float *v2y_pz, float *v2z_pz); #ifdef __cplusplus extern "C" { #endif extern void compute_velocityCDebug( int *nztop, int *nztm1, float *ca, int *lbx, int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, float *damp1_xM, float *damp1_yM, int *idmat1M,float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM, float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM, int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M, int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp, float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM, float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM, float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1, int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml, int *nxbtm, int *nybtm, int *nzbtm); extern void compute_stressCDebug(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml, int *mw1_pml1, int *lbx, int *lby, int *nd1_txy, int *nd1_txz, int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M, float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float 
*dzi1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM, float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM, float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM, float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp, int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz, int *nd2_tyy, int *nd2_tyz, int *idmat2M, float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM, float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M, float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM, float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM, float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM, float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM, void **v2xMp, void **v2yMp, void **v2zMp, int *myid); void set_deviceC(int *deviceID) { cudaSetDevice(*deviceID); printf("[CUDA] device set success!\n"); } //=========================================================================== void allocate_gpu_memC(int *lbx, int *lby, int *nmat, //dimension #, int int *mw1_pml1, //int int *mw2_pml1, //int int *nxtop, //int int *nytop, //int int *nztop, int *mw1_pml, //int int *mw2_pml, //int int *nxbtm, //int int *nybtm, //int int *nzbtm, int *nzbm1, int *nll) { printf("[CUDA] allocation ..............."); int nv2, nti, nth; cudaError_t cudaRes; // printf("lbx[1] = %d, lbx[0] = %d\n", lbx[1], lbx[0]); // printf("lby[1] = %d, lby[0] = %d\n", lby[1], lby[0]); // printf("nmat = %d\n", *nmat); // printf("mw1_pml1 = %d, mw2_pml1 = %d\n", *mw1_pml1, *mw2_pml1); // printf("mw1_pml = %d, mw2_pml = %d\n", *mw1_pml, *mw2_pml); // printf("nxtop = %d, nytop = %d, nztop = %d\n", *nxtop, *nytop, *nztop); // printf("nxbtm = %d, nybtm = %d, nzbtm = %d\n", *nxbtm, *nybtm, *nzbtm); // printf("nzbm1 = %d, nll = %d\n", *nzbm1, *nll); //debug----------------- totalTimeH2DV = 0.0f; totalTimeD2HV = 0.0f; totalTimeH2DS = 0.0f; totalTimeD2HS = 0.0f; totalTimeCompV = 0.0f; totalTimeCompS = 0.0f; //for inner_I cudaRes = cudaMalloc((void **)&nd1_velD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_vel"); cudaRes = cudaMalloc((void **)&nd1_txyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txy"); cudaRes = cudaMalloc((void **)&nd1_txzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_txz"); cudaRes = cudaMalloc((void **)&nd1_tyyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyy"); cudaRes = cudaMalloc((void **)&nd1_tyzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory1, nd1_tyz"); cudaRes = cudaMalloc((void **)&rhoD, sizeof(float) * (*nmat)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, rho"); cudaRes = cudaMalloc((void **)&drvh1D, sizeof(float) * (*mw1_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, drvh1"); cudaRes = cudaMalloc((void **)&drti1D, sizeof(float) 
* (*mw1_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, drti1"); cudaRes = cudaMalloc((void **)&drth1D, sizeof(float) * (*mw1_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, drth1"); if (lbx[1] >= lbx[0]) { cudaRes = cudaMalloc((void **)&damp1_xD, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_x"); } if (lby[1] >= lby[0]) { cudaRes = cudaMalloc((void **)&damp1_yD, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, damp1_y"); } cudaRes = cudaMalloc((void **)&idmat1D, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, idmat1"); cudaRes = cudaMalloc((void **)&dxi1D, sizeof(float) * 4 * (*nxtop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxi1"); cudaRes = cudaMalloc((void **)&dyi1D, sizeof(float) * 4 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyi1"); cudaRes = cudaMalloc((void **)&dzi1D, sizeof(float) * 4 * (*nztop + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzi1"); cudaRes = cudaMalloc((void **)&dxh1D, sizeof(float) * 4 * (*nxtop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dxh1"); cudaRes = cudaMalloc((void **)&dyh1D, sizeof(float) * 4 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dyh1"); cudaRes = cudaMalloc((void **)&dzh1D, sizeof(float) * 4 * (*nztop + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, dzh1"); cudaRes = cudaMalloc((void **)&t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx"); cudaRes = cudaMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy"); cudaRes = cudaMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz"); cudaRes = cudaMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy"); cudaRes = cudaMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz"); cudaRes = cudaMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz"); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0]; cudaMalloc((void **)&t1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop)); cudaMalloc((void **)&t1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop)); cudaMalloc((void **)&t1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop)); cudaMalloc((void **)&t1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop)); cudaMalloc((void **)&qt1xx_pxD, sizeof(float) * (*nztop) * (nti) * (*nytop)); cudaMalloc((void **)&qt1xy_pxD, sizeof(float) * (*nztop) * nth * (*nytop)); cudaMalloc((void **)&qt1xz_pxD, sizeof(float) * (*nztop+1) * nth * (*nytop)); cudaMalloc((void **)&qt1yy_pxD, sizeof(float) * (*nztop) * nti * (*nytop)); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0]; cudaMalloc((void **)&t1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); cudaMalloc((void **)&t1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth); cudaMalloc((void **)&t1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); cudaMalloc((void **)&t1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth); 
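		// Note (added comment): the nti/nth plane counts used for these y-direction PML buffers
		// follow the formulas quoted next to the macro definitions above:
		//   nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1]
		//   nth = (lby[1] - lby[0] + 1) * (*mw1_pml) + 1 - lby[0]
		// A minimal worked example, assuming purely illustrative values lby = {0, 1} and
		// *mw1_pml = 5 (not taken from a real run):
		//   nti = (1 - 0 + 1) * 5 + 1     = 11   // planes for t1xx_py / t1yy_py (and their q-counterparts)
		//   nth = (1 - 0 + 1) * 5 + 1 - 0 = 11   // planes for t1xy_py / t1yz_py (and their q-counterparts)
		// so each buffer above holds nztop (or nztop+1 for the z-staggered fields) * nxtop * (nti or nth) floats.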
cudaMalloc((void **)&qt1xx_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); cudaMalloc((void **)&qt1xy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nth); cudaMalloc((void **)&qt1yy_pyD, sizeof(float) * (*nztop) * (*nxtop) * nti); cudaMalloc((void **)&qt1yz_pyD, sizeof(float) * (*nztop+1) * (*nxtop) * nth); } cudaMalloc((void **)&qt1xxD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); cudaMalloc((void **)&qt1xyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); cudaMalloc((void **)&qt1xzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop)); cudaMalloc((void **)&qt1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); cudaMalloc((void **)&qt1yzD, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop)); cudaMalloc((void **)&qt1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); cudaMalloc((void **)&clamdaD, sizeof(float) * (*nmat)); cudaMalloc((void **)&cmuD, sizeof(float) * (*nmat)); cudaMalloc((void **)&epdtD, sizeof(float) * (*nll)); cudaMalloc((void **)&qwpD, sizeof(float) * (*nmat)); cudaMalloc((void **)&qwsD, sizeof(float) * (*nmat)); cudaMalloc((void **)&qwt1D, sizeof(float) * (*nll)); cudaMalloc((void **)&qwt2D, sizeof(float) * (*nll)); cudaRes = cudaMalloc((void **)&v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x"); cudaRes = cudaMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y"); cudaRes = cudaMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z"); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml); cudaRes = cudaMalloc((void **)&v1x_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_px"); cudaRes = cudaMalloc((void **)&v1y_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_px"); cudaRes = cudaMalloc((void **)&v1z_pxD, sizeof(float) * (*nztop) * nv2 * (*nytop)); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml); cudaRes = cudaMalloc((void **)&v1x_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x_py"); cudaRes = cudaMalloc((void **)&v1y_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y_py"); cudaRes = cudaMalloc((void **)&v1z_pyD, sizeof(float) * (*nztop) * (*nxtop) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z_py"); } //for inner_II----------------------------------------------------------------------------------------- cudaRes = cudaMalloc((void **)&nd2_velD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_vel"); cudaRes = cudaMalloc((void **)&nd2_txyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txy"); cudaRes = cudaMalloc((void **)&nd2_txzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_txz"); cudaRes = cudaMalloc((void **)&nd2_tyyD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyy"); cudaRes = cudaMalloc((void **)&nd2_tyzD, sizeof(int) * 18); CHECK_ERROR(cudaRes, "Allocate Device Memory, nd2_tyz"); cudaRes = cudaMalloc((void **)&drvh2D, sizeof(float) * (*mw2_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory, drvh2"); cudaRes = cudaMalloc((void **)&drti2D, sizeof(float) * (*mw2_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory, drti2"); cudaRes = 
cudaMalloc((void **)&drth2D, sizeof(float) * (*mw2_pml1) * 2); CHECK_ERROR(cudaRes, "Allocate Device Memory, drth2"); cudaRes = cudaMalloc((void **)&idmat2D, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory, idmat2"); if (lbx[1] >= lbx[0]) { cudaRes = cudaMalloc((void **)&damp2_xD, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_x"); } if (lby[1] >= lby[0]) { cudaRes = cudaMalloc((void **)&damp2_yD, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1)); CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_y"); } cudaRes = cudaMalloc((void **)&damp2_zD, sizeof(float) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, damp2_z"); cudaRes = cudaMalloc((void **)&dxi2D, sizeof(float) * 4 * (*nxbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dxi2"); cudaRes = cudaMalloc((void **)&dyi2D, sizeof(float) * 4 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dyi2"); cudaRes = cudaMalloc((void **)&dzi2D, sizeof(float) * 4 * (*nzbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dzi2"); cudaRes = cudaMalloc((void **)&dxh2D, sizeof(float) * 4 * (*nxbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dxh2"); cudaRes = cudaMalloc((void **)&dyh2D, sizeof(float) * 4 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dyh2"); cudaRes = cudaMalloc((void **)&dzh2D, sizeof(float) * 4 * (*nzbtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, dzh2"); cudaRes = cudaMalloc((void **)&t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xx"); cudaRes = cudaMalloc((void **)&t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xy"); cudaRes = cudaMalloc((void **)&t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2xz"); cudaRes = cudaMalloc((void **)&t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yy"); cudaRes = cudaMalloc((void **)&t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2yz"); cudaRes = cudaMalloc((void **)&t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, t2zz"); cudaMalloc((void **)&qt2xxD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2xyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2xzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2yzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2zzD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm)); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0]; cudaMalloc((void **)&t2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm)); cudaMalloc((void **)&t2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); cudaMalloc((void **)&t2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); cudaMalloc((void **)&t2yy_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm)); cudaMalloc((void **)&qt2xx_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm)); cudaMalloc((void **)&qt2xy_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); cudaMalloc((void **)&qt2xz_pxD, sizeof(float) * (*nzbtm) * nth * (*nybtm)); cudaMalloc((void 
**)&qt2yy_pxD, sizeof(float) * (*nzbtm) * nti * (*nybtm)); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0]; cudaMalloc((void **)&t2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); cudaMalloc((void **)&t2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); cudaMalloc((void **)&t2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); cudaMalloc((void **)&t2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); cudaMalloc((void **)&qt2xx_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); cudaMalloc((void **)&qt2xy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); cudaMalloc((void **)&qt2yy_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nti); cudaMalloc((void **)&qt2yz_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nth); } cudaMalloc((void **)&t2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&t2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&t2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&t2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2xx_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2xz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2yz_pzD, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&qt2zz_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); cudaMalloc((void **)&v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3)); cudaMalloc((void **)&v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3)); cudaMalloc((void **)&v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3)); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml); cudaRes = cudaMalloc((void **)&v2x_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_px"); cudaRes = cudaMalloc((void **)&v2y_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_px"); cudaRes = cudaMalloc((void **)&v2z_pxD, sizeof(float) * (*nzbtm) * nv2 * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml); cudaRes = cudaMalloc((void **)&v2x_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_py"); cudaRes = cudaMalloc((void **)&v2y_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_py"); cudaRes = cudaMalloc((void **)&v2z_pyD, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_py"); } cudaRes = cudaMalloc((void **)&v2x_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2x_pz"); cudaRes = cudaMalloc((void **)&v2y_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2y_pz"); cudaRes = cudaMalloc((void **)&v2z_pzD, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm)); CHECK_ERROR(cudaRes, "Allocate Device Memory, v2z_pz"); printf("done!\n"); return; } void cpy_h2d_velocityInputsCOneTime(int *lbx, int *lby, int *nd1_vel, float *rho, float *drvh1, float *drti1, float *damp1_x, float *damp1_y, int *idmat1, float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1, float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, float *v1x_px, float *v1y_px, float *v1z_px, float *v1x_py, float 
*v1y_py, float *v1z_py, int *nd2_vel, float *drvh2, float *drti2, int *idmat2, float *damp2_x, float *damp2_y, float *damp2_z, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, float *v2x_px, float *v2y_px, float *v2z_px, float *v2x_py, float *v2y_py, float *v2z_py, float *v2x_pz, float *v2y_pz, float *v2z_pz, int *nmat, //dimension #, int int *mw1_pml1, //int int *mw2_pml1, //int int *nxtop, //int int *nytop, //int int *nztop, int *mw1_pml, //int int *mw2_pml, //int int *nxbtm, //int int *nybtm, //int int *nzbtm, int *nzbm1) { printf("[CUDA] initial h2d cpy for velocity ........"); cudaError_t cudaRes; int nv2; // int i; // for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++) // { // printf("%f ", t2xy[i]); // } // printf("\n"); //for inner_I cudaRes = cudaMemcpy(nd1_velD, nd1_vel, sizeof(int) * 18, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd1_vel"); cudaRes = cudaMemcpy(rhoD, rho, sizeof(float) * (*nmat), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, rho"); cudaRes = cudaMemcpy(drvh1D, drvh1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh1"); cudaRes = cudaMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti1"); if (lbx[1] >= lbx[0]) { cudaRes = cudaMemcpy(damp1_xD, damp1_x, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_x"); } if (lby[1] >= lby[0]) { cudaRes = cudaMemcpy(damp1_yD, damp1_y, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp1_y"); } cudaRes = cudaMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat1"); cudaRes = cudaMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi1"); cudaRes = cudaMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi1"); cudaRes = cudaMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi1"); cudaRes = cudaMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh1"); cudaRes = cudaMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh1"); cudaRes = cudaMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzh1"); cudaRes = cudaMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx"); cudaRes = cudaMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy"); cudaRes = cudaMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz"); cudaRes = cudaMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), 
cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yy"); cudaRes = cudaMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz"); cudaRes = cudaMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz"); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw1_pml); cudaRes = cudaMemcpy(v1x_pxD, v1x_px, sizeof(float) * (*nztop) * nv2 * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_px"); cudaRes = cudaMemcpy(v1y_pxD, v1y_px, sizeof(float) * (*nztop) * nv2 * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_px"); cudaRes = cudaMemcpy(v1z_pxD, v1z_px, sizeof(float) * (*nztop) * nv2 * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw1_pml); cudaRes = cudaMemcpy(v1x_pyD, v1x_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x_py"); cudaRes = cudaMemcpy(v1y_pyD, v1y_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y_py"); cudaRes = cudaMemcpy(v1z_pyD, v1z_py, sizeof(float) * (*nztop) * (*nxtop) * nv2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z_py"); } //for inner_II cudaRes = cudaMemcpy(nd2_velD, nd2_vel, sizeof(int) * 18, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, nd2_vel"); cudaRes = cudaMemcpy(drvh2D, drvh2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drvh2"); cudaRes = cudaMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, drti2"); cudaRes = cudaMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, idmat2"); if (lbx[1] >= lbx[0]) { cudaRes = cudaMemcpy(damp2_xD, damp2_x, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_x"); } if (lby[1] >= lby[0]) { cudaRes = cudaMemcpy(damp2_yD, damp2_y, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_y"); } cudaRes = cudaMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, damp2_z"); cudaRes = cudaMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxi2"); cudaRes = cudaMemcpy(dyi2D, dyi2, sizeof(float) * 4 * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyi2"); cudaRes = cudaMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzi2"); cudaRes = cudaMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dxh2"); cudaRes = cudaMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dyh2"); cudaRes = 
cudaMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, dzh2"); cudaRes = cudaMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx"); cudaRes = cudaMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy"); cudaRes = cudaMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz"); cudaRes = cudaMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy"); cudaRes = cudaMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz"); cudaRes = cudaMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz"); if (lbx[1] >= lbx[0]) { nv2 = (lbx[1] - lbx[0] + 1) * (*mw2_pml); cudaRes = cudaMemcpy(v2x_pxD, v2x_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_px"); cudaRes = cudaMemcpy(v2y_pxD, v2y_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_px"); cudaRes = cudaMemcpy(v2z_pxD, v2z_px, sizeof(float) * (*nzbtm) * nv2 * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_px"); } if (lby[1] >= lby[0]) { nv2 = (lby[1] - lby[0] + 1) * (*mw2_pml); cudaRes = cudaMemcpy(v2x_pyD, v2x_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_py"); cudaRes = cudaMemcpy(v2y_pyD, v2y_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_py"); cudaRes = cudaMemcpy(v2z_pyD, v2z_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nv2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_py"); } cudaRes = cudaMemcpy(v2x_pzD, v2x_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x_pz"); cudaRes = cudaMemcpy(v2y_pzD, v2y_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y_pz"); cudaRes = cudaMemcpy(v2z_pzD, v2z_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z_pz"); printf("done!\n"); return; } void cpy_h2d_velocityInputsC(float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] h2d cpy for input .........."); cudaError_t cudaRes; //for inner_I cudaRes = cudaMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xx"); cudaRes = cudaMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xy"); cudaRes = 
cudaMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1xz"); cudaRes = cudaMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yy"); cudaRes = cudaMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1yz"); cudaRes = cudaMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t1zz"); //for inner_II cudaRes = cudaMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xx"); cudaRes = cudaMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xy"); cudaRes = cudaMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2xz"); cudaRes = cudaMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yy"); cudaRes = cudaMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2yz"); cudaRes = cudaMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice1, t2zz"); printf("done!\n"); return; } //===================================================================== void cpy_h2d_stressInputsCOneTime(int *lbx, int *lby, int *nd1_txy, int *nd1_txz, int *nd1_tyy, int *nd1_tyz, float *drti1, float *drth1, float *damp1_x, float *damp1_y, int *idmat1, float *dxi1, float *dyi1, float *dzi1, float *dxh1, float *dyh1, float *dzh1, float *v1x, float *v1y, float *v1z, float *t1xx_px, float *t1xy_px, float *t1xz_px, float *t1yy_px, float *qt1xx_px, float *qt1xy_px, float *qt1xz_px, float *qt1yy_px, float *t1xx_py, float *t1xy_py, float *t1yy_py, float *t1yz_py, float *qt1xx_py, float *qt1xy_py, float *qt1yy_py, float *qt1yz_py, float *qt1xx, float *qt1xy, float *qt1xz, float *qt1yy, float *qt1yz, float *qt1zz, float *clamda, float *cmu, float *epdt, float *qwp, float *qws, float *qwt1, float *qwt2, int *nd2_txy, int *nd2_txz, int *nd2_tyy, int *nd2_tyz, float *drti2, float *drth2, int *idmat2, float *damp2_x, float *damp2_y, float *damp2_z, float *dxi2, float *dyi2, float *dzi2, float *dxh2, float *dyh2, float *dzh2, float *v2x, float *v2y, float *v2z, float *qt2xx, float *qt2xy, float *qt2xz, float *qt2yy, float *qt2yz, float *qt2zz, float *t2xx_px, float *t2xy_px, float *t2xz_px, float *t2yy_px, float *qt2xx_px, float *qt2xy_px, float *qt2xz_px, float *qt2yy_px, float *t2xx_py, float *t2xy_py, float *t2yy_py, float *t2yz_py, float *qt2xx_py, float *qt2xy_py, float *qt2yy_py, float *qt2yz_py, float *t2xx_pz, float *t2xz_pz, float *t2yz_pz, float *t2zz_pz, float *qt2xx_pz, float *qt2xz_pz, float *qt2yz_pz, float *qt2zz_pz, int *nmat, //dimension #, int int *mw1_pml1, //int int *mw2_pml1, //int int *nxtop, //int int *nytop, //int int *nztop, int *mw1_pml, //int int *mw2_pml, //int int *nxbtm, //int int *nybtm, //int int *nzbtm, int *nll) { printf("[CUDA] initial h2d cpy for stress 
..........."); cudaError_t cudaRes; int nti, nth; //for inner_I cudaRes = cudaMemcpy(nd1_txyD, nd1_txy, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(nd1_txzD, nd1_txz, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(nd1_tyyD, nd1_tyy, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(nd1_tyzD, nd1_tyz, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(drti1D, drti1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti1"); cudaRes = cudaMemcpy(drth1D, drth1, sizeof(float) * (*mw1_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth1"); if (lbx[1] >= lbx[0]) { cudaRes = cudaMemcpy(damp1_xD, damp1_x, sizeof(float) * (*nztop + 1) * (*nytop) * (lbx[1] - lbx[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_x"); } if (lby[1] >= lby[0]) { cudaRes = cudaMemcpy(damp1_yD, damp1_y, sizeof(float) * (*nztop + 1) * (*nxtop) * (lby[1] - lby[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp1_y"); } cudaRes = cudaMemcpy(idmat1D, idmat1, sizeof(int) * (*nztop + 2) * (*nxtop + 1) * (*nytop + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat1"); cudaRes = cudaMemcpy(dxi1D, dxi1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi1"); cudaRes = cudaMemcpy(dyi1D, dyi1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi1"); cudaRes = cudaMemcpy(dzi1D, dzi1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi1"); cudaRes = cudaMemcpy(dxh1D, dxh1, sizeof(float) * 4 * (*nxtop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh1"); cudaRes = cudaMemcpy(dyh1D, dyh1, sizeof(float) * 4 * (*nytop), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh1"); cudaRes = cudaMemcpy(dzh1D, dzh1, sizeof(float) * 4 * (*nztop + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh1"); cudaMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); cudaMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); cudaMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw1_pml) + 1 - lbx[0]; cudaMemcpy(t1xx_pxD, t1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(t1xy_pxD, t1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(t1xz_pxD, t1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(t1yy_pxD, t1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1xx_pxD, qt1xx_px, sizeof(float) * (*nztop) * (nti) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1xy_pxD, qt1xy_px, sizeof(float) * (*nztop) * nth * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1xz_pxD, qt1xz_px, sizeof(float) * (*nztop+1) * nth * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1yy_pxD, qt1yy_px, sizeof(float) * (*nztop) * nti * (*nytop), cudaMemcpyHostToDevice); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw1_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * 
(*mw1_pml) + 1 - lby[0]; cudaMemcpy(t1xx_pyD, t1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice); cudaMemcpy(t1xy_pyD, t1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, cudaMemcpyHostToDevice); cudaMemcpy(t1yy_pyD, t1yy_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice); cudaMemcpy(t1yz_pyD, t1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, cudaMemcpyHostToDevice); cudaMemcpy(qt1xx_pyD, qt1xx_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice); cudaMemcpy(qt1xy_pyD, qt1xy_py, sizeof(float) * (*nztop) * (*nxtop) * nth, cudaMemcpyHostToDevice); cudaMemcpy(qt1yy_pyD, qt1yy_py, sizeof(float) * (*nztop) * (*nxtop) * nti, cudaMemcpyHostToDevice); cudaMemcpy(qt1yz_pyD, qt1yz_py, sizeof(float) * (*nztop+1) * (*nxtop) * nth, cudaMemcpyHostToDevice); } cudaMemcpy(qt1xxD, qt1xx, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1xyD, qt1xy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1xzD, qt1xz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1yyD, qt1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1yzD, qt1yz, sizeof(float) * (*nztop+1) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(qt1zzD, qt1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice); cudaMemcpy(clamdaD, clamda, sizeof(float) * (*nmat), cudaMemcpyHostToDevice); cudaMemcpy(cmuD, cmu, sizeof(float) * (*nmat), cudaMemcpyHostToDevice); cudaMemcpy(epdtD, epdt, sizeof(float) * (*nll), cudaMemcpyHostToDevice); cudaMemcpy(qwpD, qwp, sizeof(float) * (*nmat), cudaMemcpyHostToDevice); cudaMemcpy(qwsD, qws, sizeof(float) * (*nmat), cudaMemcpyHostToDevice); cudaMemcpy(qwt1D, qwt1, sizeof(float) * (*nll), cudaMemcpyHostToDevice); cudaMemcpy(qwt2D, qwt2, sizeof(float) * (*nll), cudaMemcpyHostToDevice); //for inner_II cudaRes = cudaMemcpy(nd2_txyD, nd2_txy, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(nd2_txzD, nd2_txz, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(nd2_tyyD, nd2_tyy, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(nd2_tyzD, nd2_tyz, sizeof(int) * 18, cudaMemcpyHostToDevice); cudaRes = cudaMemcpy(drti2D, drti2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drti2"); cudaRes = cudaMemcpy(drth2D, drth2, sizeof(float) * (*mw2_pml1) * 2, cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, drth2"); cudaRes = cudaMemcpy(idmat2D, idmat2, sizeof(int) * (*nzbtm + 1) * (*nxbtm + 1) * (*nybtm +1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, idmat2"); if (lbx[1] >= lbx[0]) { cudaRes = cudaMemcpy(damp2_xD, damp2_x, sizeof(float) * (*nzbtm) * (*nybtm) * (lbx[1] - lbx[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_x"); } if (lby[1] >= lby[0]) { cudaRes = cudaMemcpy(damp2_yD, damp2_y, sizeof(float) * (*nzbtm) * (*nxbtm) * (lby[1] - lby[0] + 1), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_y"); } cudaRes = cudaMemcpy(damp2_zD, damp2_z, sizeof(float) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, damp2_z"); cudaRes = cudaMemcpy(dxi2D, dxi2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxi2"); cudaRes = cudaMemcpy(dyi2D, dyi2, sizeof(float) * 4 * 
(*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyi2"); cudaRes = cudaMemcpy(dzi2D, dzi2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzi2"); cudaRes = cudaMemcpy(dxh2D, dxh2, sizeof(float) * 4 * (*nxbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dxh2"); cudaRes = cudaMemcpy(dyh2D, dyh2, sizeof(float) * 4 * (*nybtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dyh2"); cudaRes = cudaMemcpy(dzh2D, dzh2, sizeof(float) * 4 * (*nzbtm), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "InputDataCopyHostToDevice, dzh2"); cudaMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); cudaMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); cudaMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); cudaMemcpy(qt2xxD, qt2xx, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2xyD, qt2xy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2xzD, qt2xz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2yyD, qt2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2yzD, qt2yz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2zzD, qt2zz, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); if (lbx[1] >= lbx[0]) { nti = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + lbx[1]; nth = (lbx[1] - lbx[0] + 1) * (*mw2_pml) + 1 - lbx[0]; cudaMemcpy(t2xx_pxD, t2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(t2xy_pxD, t2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(t2xz_pxD, t2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(t2yy_pxD, t2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2xx_pxD, qt2xx_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2xy_pxD, qt2xy_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2xz_pxD, qt2xz_px, sizeof(float) * (*nzbtm) * nth * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2yy_pxD, qt2yy_px, sizeof(float) * (*nzbtm) * nti * (*nybtm), cudaMemcpyHostToDevice); } if (lby[1] >= lby[0]) { nti = (lby[1] - lby[0] + 1) * (*mw2_pml) + lby[1]; nth = (lby[1] - lby[0] + 1) * (*mw2_pml) + 1 - lby[0]; cudaMemcpy(t2xx_pyD, t2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice); cudaMemcpy(t2xy_pyD, t2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice); cudaMemcpy(t2yy_pyD, t2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice); cudaMemcpy(t2yz_pyD, t2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice); cudaMemcpy(qt2xx_pyD, qt2xx_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice); cudaMemcpy(qt2xy_pyD, qt2xy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice); cudaMemcpy(qt2yy_pyD, qt2yy_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nti, cudaMemcpyHostToDevice); cudaMemcpy(qt2yz_pyD, qt2yz_py, sizeof(float) * (*nzbtm) * (*nxbtm) * nth, cudaMemcpyHostToDevice); } cudaMemcpy(t2xx_pzD, t2xx_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); 
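	// Note (added comment): every transfer in this routine is the same host-to-device cudaMemcpy
	// followed, for some arrays, by a CHECK_ERROR on the returned status. A hypothetical helper
	// that captures the pattern could look like the sketch below; "copyH2D" is illustrative only
	// and is not part of this code base:
	//
	//   static void copyH2D(void *dst, const void *src, size_t bytes, const char *tag)
	//   {
	//       cudaError_t res = cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice);
	//       CHECK_ERROR(res, tag);
	//   }
	//   // usage: copyH2D(t2xz_pzD, t2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), "t2xz_pz");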
cudaMemcpy(t2xz_pzD, t2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(t2yz_pzD, t2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(t2zz_pzD, t2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2xx_pzD, qt2xx_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2xz_pzD, qt2xz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2yz_pzD, qt2yz_pz, sizeof(float) * (*mw2_pml1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); cudaMemcpy(qt2zz_pzD, qt2zz_pz, sizeof(float) * (*mw2_pml) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice); printf("done!\n"); return; } void cpy_h2d_stressInputsC(float *v1x, float *v1y, float *v1z, float *v2x, float *v2y, float *v2z, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] h2d cpy for input .............."); cudaError_t cudaRes; //for inner_I cudaMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); cudaMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); cudaMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); //for inner_II cudaMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); cudaMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); cudaMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); printf("done!\n"); return; } //===================================================================== void cpy_h2d_velocityOutputsC(float *v1x, float *v1y, float *v1z, float *v2x, float *v2y, float *v2z, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] h2d cpy for output ........."); cudaError_t cudaRes; //for inner_I cudaRes = cudaMemcpy(v1xD, v1x, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1x"); cudaRes = cudaMemcpy(v1yD, v1y, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1y"); cudaRes = cudaMemcpy(v1zD, v1z, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v1z"); //for inner_II cudaRes = cudaMemcpy(v2xD, v2x, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2x"); cudaRes = cudaMemcpy(v2yD, v2y, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2y"); cudaRes = cudaMemcpy(v2zD, v2z, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice); CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice1, v2z"); printf("done!\n"); return; } //===================================================================== void cpy_d2h_velocityOutputsC(float *v1x, float *v1y, float *v1z, float *v2x, float *v2y, float *v2z, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] d2h cpy for output ........."); cudaError_t cudaRes; //for inner_I cudaRes = cudaMemcpy(v1x, v1xD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * 
(*nytop + 3), cudaMemcpyDeviceToHost);
	CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1x");
	cudaRes = cudaMemcpy(v1y, v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyDeviceToHost);
	CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1y");
	cudaRes = cudaMemcpy(v1z, v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyDeviceToHost);
	CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v1z");

	//for inner_II
	cudaRes = cudaMemcpy(v2x, v2xD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost);
	CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2x");
	cudaRes = cudaMemcpy(v2y, v2yD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost);
	CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2y");
	cudaRes = cudaMemcpy(v2z, v2zD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost);
	CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost1, v2z");

	printf("done!\n");
	return;
}

void cpy_h2d_stressOutputsC(float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float *t1zz,
	float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz,
	int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm)
{
	printf("[CUDA] h2d cpy for output ..............");
	cudaError_t cudaRes;
	int nth, nti;

	cudaRes = cudaMemcpy(t1xxD, t1xx, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xx");
	cudaRes = cudaMemcpy(t1xyD, t1xy, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xy");
	cudaRes = cudaMemcpy(t1xzD, t1xz, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1xz");
	cudaRes = cudaMemcpy(t1yyD, t1yy, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yy");
	cudaRes = cudaMemcpy(t1yzD, t1yz, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1yz");
	cudaRes = cudaMemcpy(t1zzD, t1zz, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t1zz");

	//for inner_II
	cudaRes = cudaMemcpy(t2xxD, t2xx, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xx");
	cudaRes = cudaMemcpy(t2xyD, t2xy, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xy");
	cudaRes = cudaMemcpy(t2xzD, t2xz, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2xz");
	cudaRes = cudaMemcpy(t2yyD, t2yy, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yy");
	cudaRes = cudaMemcpy(t2yzD, t2yz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2yz");
	cudaRes = cudaMemcpy(t2zzD, t2zz, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyHostToDevice);
	CHECK_ERROR(cudaRes, "outputDataCopyHostToDevice, t2zz");

	printf("done!\n");
	return;
}

void cpy_d2h_stressOutputsC(float *t1xx, float *t1xy, float *t1xz, float *t1yy, float *t1yz, float
*t1zz, float *t2xx, float *t2xy, float *t2xz, float *t2yy, float *t2yz, float *t2zz, int *nxtop, int *nytop, int *nztop, int *nxbtm, int *nybtm, int *nzbtm) { printf("[CUDA] stress cpy d2h for output ....."); // printf("\nnxtop=%d, nytop=%d, nztop=%d\n", *nxtop, *nytop, *nztop); // printf("nxbtm=%d, nybtm=%d, nzbtm=%d\n", *nxbtm, *nybtm, *nzbtm); cudaError_t cudaRes; cudaRes = cudaMemcpy(t1xx, t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xx"); cudaRes = cudaMemcpy(t1xy, t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xy"); cudaRes = cudaMemcpy(t1xz, t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1xz"); cudaRes = cudaMemcpy(t1yy, t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yy"); cudaRes = cudaMemcpy(t1yz, t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1yz"); cudaRes = cudaMemcpy(t1zz, t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t1zz"); cudaRes = cudaMemcpy(t2xx, t2xxD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xx"); cudaRes = cudaMemcpy(t2xy, t2xyD, sizeof(float) * (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xy"); cudaRes = cudaMemcpy(t2xz, t2xzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2xz"); cudaRes = cudaMemcpy(t2yy, t2yyD, sizeof(float) * (*nzbtm) * (*nxbtm) * (*nybtm + 3), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yy"); cudaRes = cudaMemcpy(t2yz, t2yzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2yz"); cudaRes = cudaMemcpy(t2zz, t2zzD, sizeof(float) * (*nzbtm + 1) * (*nxbtm) * (*nybtm), cudaMemcpyDeviceToHost); CHECK_ERROR(cudaRes, "outputDataCopyDeviceToHost, t2zz"); printf("done!\n"); // int i; // for(i=0; i<(*nzbtm) * (*nxbtm + 3) * (*nybtm); i++) // { // printf("%f ", t2xx[i]); // } // printf("\n"); return; } void free_device_memC(int *lbx, int *lby) { //debug--------------------------------------------------- printf("[CUDA] id = %d, vel, H2D =, %.3f, D2H =, %.3f, comp =, %.3f\n", procID, totalTimeH2DV, totalTimeD2HV, totalTimeCompV); printf("[CUDA] id = %d, str, H2D =, %.3f, D2H =, %.3f, comp =, %.3f\n", procID, totalTimeH2DS, totalTimeD2HS, totalTimeCompS); //------------------------------------------------- cudaFree(nd1_velD); cudaFree(nd1_txyD); cudaFree(nd1_txzD); cudaFree(nd1_tyyD); cudaFree(nd1_tyzD); cudaFree(rhoD); cudaFree(drvh1D); cudaFree(drti1D); cudaFree(drth1D); cudaFree(idmat1D); cudaFree(dxi1D); cudaFree(dyi1D); cudaFree(dzi1D); cudaFree(dxh1D); cudaFree(dyh1D); cudaFree(dzh1D); cudaFree(t1xxD); cudaFree(t1xyD); cudaFree(t1xzD); cudaFree(t1yyD); cudaFree(t1yzD); cudaFree(t1zzD); cudaFree(v1xD); //output cudaFree(v1yD); cudaFree(v1zD); if (lbx[1] >= lbx[0]) { cudaFree(damp1_xD); cudaFree(t1xx_pxD); cudaFree(t1xy_pxD); cudaFree(t1xz_pxD); cudaFree(t1yy_pxD); 
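		// Note (added comment): these x-direction PML buffers exist only when lbx[1] >= lbx[0],
		// mirroring the conditional cudaMalloc calls in allocate_gpu_memC; freeing them outside
		// this guard would hand uninitialized pointers to cudaFree. A hedged sketch of the same
		// guard pattern for a single hypothetical buffer:
		//
		//   if (lbx[1] >= lbx[0]) {            // PML strip present on this rank
		//       cudaFree(some_pml_bufferD);    // "some_pml_bufferD" is illustrative only
		//   }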
cudaFree(qt1xx_pxD); cudaFree(qt1xy_pxD); cudaFree(qt1xz_pxD); cudaFree(qt1yy_pxD); cudaFree(v1x_pxD); cudaFree(v1y_pxD); cudaFree(v1z_pxD); } if (lby[1] >= lby[0]) { cudaFree(damp1_yD); cudaFree(t1xx_pyD); cudaFree(t1xy_pyD); cudaFree(t1yy_pyD); cudaFree(t1yz_pyD); cudaFree(qt1xx_pyD); cudaFree(qt1xy_pyD); cudaFree(qt1yy_pyD); cudaFree(qt1yz_pyD); cudaFree(v1x_pyD); cudaFree(v1y_pyD); cudaFree(v1z_pyD); } cudaFree(qt1xxD); cudaFree(qt1xyD); cudaFree(qt1xzD); cudaFree(qt1yyD); cudaFree(qt1yzD); cudaFree(qt1zzD); cudaFree(clamdaD); cudaFree(cmuD); cudaFree(epdtD); cudaFree(qwpD); cudaFree(qwsD); cudaFree(qwt1D); cudaFree(qwt2D); //------------------------------------- cudaFree(nd2_velD); cudaFree(nd2_txyD); cudaFree(nd2_txzD); cudaFree(nd2_tyyD); cudaFree(nd2_tyzD); cudaFree(drvh2D); cudaFree(drti2D); cudaFree(drth2D); cudaFree(idmat2D); cudaFree(damp2_zD); cudaFree(dxi2D); cudaFree(dyi2D); cudaFree(dzi2D); cudaFree(dxh2D); cudaFree(dyh2D); cudaFree(dzh2D); cudaFree(t2xxD); cudaFree(t2xyD); cudaFree(t2xzD); cudaFree(t2yyD); cudaFree(t2yzD); cudaFree(t2zzD); cudaFree(qt2xxD); cudaFree(qt2xyD); cudaFree(qt2xzD); cudaFree(qt2yyD); cudaFree(qt2yzD); cudaFree(qt2zzD); if (lbx[1] >= lbx[0]) { cudaFree(damp2_xD); cudaFree(t2xx_pxD); cudaFree(t2xy_pxD); cudaFree(t2xz_pxD); cudaFree(t2yy_pxD); cudaFree(qt2xx_pxD); cudaFree(qt2xy_pxD); cudaFree(qt2xz_pxD); cudaFree(qt2yy_pxD); cudaFree(v2x_pxD); cudaFree(v2y_pxD); cudaFree(v2z_pxD); } if (lby[1] >= lby[0]) { cudaFree(damp2_yD); cudaFree(t2xx_pyD); cudaFree(t2xy_pyD); cudaFree(t2yy_pyD); cudaFree(t2yz_pyD); cudaFree(qt2xx_pyD); cudaFree(qt2xy_pyD); cudaFree(qt2yy_pyD); cudaFree(qt2yz_pyD); cudaFree(v2x_pyD); cudaFree(v2y_pyD); cudaFree(v2z_pyD); } cudaFree(t2xx_pzD); cudaFree(t2xz_pzD); cudaFree(t2yz_pzD); cudaFree(t2zz_pzD); cudaFree(qt2xx_pzD); cudaFree(qt2xz_pzD); cudaFree(qt2yz_pzD); cudaFree(qt2zz_pzD); cudaFree(v2xD); //output cudaFree(v2yD); cudaFree(v2zD); cudaFree(v2x_pzD); cudaFree(v2y_pzD); cudaFree(v2z_pzD); printf("[CUDA] memory space is freed.\n"); return; } void compute_velocityC(int *nztop, int *nztm1, float *ca, int *lbx, int *lby, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, float *damp1_xM, float *damp1_yM, int *idmat1M, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, void **v1xMp, void **v1yMp, void **v1zMp, float *v1x_pxM, float *v1y_pxM, float *v1z_pxM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM, int *nzbm1, int *nd2_vel, float *drvh2M, float *drti2M, int *idmat2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, void **v2xMp, void **v2yMp, void **v2zMp, float *v2x_pxM, float *v2y_pxM, float *v2z_pxM, float *v2x_pyM, float *v2y_pyM, float *v2z_pyM, float *v2x_pzM, float *v2y_pzM, float *v2z_pzM, int *nmat, int *mw1_pml1, int *mw2_pml1, int *nxtop, int *nytop, int *mw1_pml, int *mw2_pml, int *nxbtm, int *nybtm, int *nzbtm, int *myid) { printf("[CUDA] velocity computation:\n"); //difine the dimensions of different kernels int blockSizeX = 8; int blockSizeY = 8; float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM; // extract specific input/output pointers v1xM=(float *) *v1xMp; v1yM=(float *) *v1yMp; v1zM=(float *) *v1zMp; v2xM=(float *) *v2xMp; v2yM=(float *) *v2yMp; v2zM=(float *) *v2zMp; procID = *myid; gettimeofday(&t1, NULL); 
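// Host-side timing convention used in this function: gettimeofday() pairs (t1, t2)
// bracket each phase, elapsed ms = 1000.0*(t2.tv_sec - t1.tv_sec) +
// (t2.tv_usec - t1.tv_usec)/1000.0 is accumulated into totalTimeH2DV /
// totalTimeCompV / totalTimeD2HV (the *S counters cover the stress path), and
// free_device_memC() prints the totals per process (procID) at shutdown.
// Grid sizes below follow gridSize = (upper - lower)/blockSize + 1 so that the
// inclusive index range [lower, upper] is covered; every kernel re-checks its
// bounds and returns early for threads outside the range.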
cpy_h2d_velocityInputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); cpy_h2d_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeH2DV += tmpTime; gettimeofday(&t1, NULL); dim3 dimBlock(blockSizeX, blockSizeY); int gridSizeX1 = (nd1_vel[3] - nd1_vel[2])/blockSizeX + 1; int gridSizeY1 = (nd1_vel[9] - nd1_vel[8])/blockSizeY + 1; dim3 dimGrid1(gridSizeX1, gridSizeY1); // printf("myid = %d, grid1 = (%d, %d)\n", *myid, gridSizeX1, gridSizeY1); //CUDA code velocity_inner_IC<<<dimGrid1, dimBlock>>>(*nztop, *nztm1, *ca, nd1_velD, rhoD, idmat1D, dxi1D, dyi1D, dzi1D, dxh1D, dyh1D, dzh1D, t1xxD, t1xyD, t1xzD, t1yyD, t1yzD, t1zzD, *nxtop, //dimension # *nytop, v1xD, //output v1yD, v1zD); int gridSizeX2 = (nd1_vel[5] - nd1_vel[0])/blockSizeX + 1; int gridSizeY2 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid2(gridSizeX2, gridSizeY2); // printf("myid = %d, grid2 = (%d, %d)\n", *myid, gridSizeX2, gridSizeY2); if (lbx[1] >= lbx[0]) { vel_PmlX_IC<<<dimGrid2, dimBlock>>>(*ca, lbx[0], lbx[1], nd1_velD, rhoD, drvh1D, drti1D, damp1_xD, idmat1D, dxi1D, dyi1D, dzi1D, dxh1D, dyh1D, dzh1D, t1xxD, t1xyD, t1xzD, t1yyD, t1yzD, t1zzD, *mw1_pml1, //dimension # *mw1_pml, *nxtop, *nytop, *nztop, v1xD, //output v1yD, v1zD, v1x_pxD, v1y_pxD, v1z_pxD); } int gridSizeX3 = (lby[1] - lby[0])/blockSizeX + 1; int gridSizeY3 = (nd1_vel[11] - nd1_vel[6])/blockSizeY + 1; dim3 dimGrid3(gridSizeX3, gridSizeY3); // printf("myid = %d, grid3 = (%d, %d)\n", *myid, gridSizeX3, gridSizeY3); if (lby[1] >= lby[0]) { /*vel_PmlY_IC<<<dimGrid3, dimBlock>>>(*nztop, *ca, lby[0], lby[1], nd1_velD, rhoD, drvh1D, drti1D, idmat1D, damp1_yD, dxi1D, dyi1D, dzi1D, dxh1D, dyh1D, dzh1D, t1xxD, t1xyD, t1xzD, t1yyD, t1yzD, t1zzD, *mw1_pml1, //dimension #s *mw1_pml, *nxtop, *nytop, v1xD, //output v1yD, v1zD, v1x_pyD, v1y_pyD, v1z_pyD); */ } int gridSizeX4 = (nd2_vel[3] - nd2_vel[2])/blockSizeX + 1; int gridSizeY4 = (nd2_vel[9] - nd2_vel[8])/blockSizeY + 1; dim3 dimGrid4(gridSizeX4, gridSizeY4); // printf("myid = %d, grid4 = (%d, %d)\n", *myid, gridSizeX4, gridSizeY4); velocity_inner_IIC<<<dimGrid4, dimBlock>>>(*ca, nd2_velD, rhoD, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, idmat2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *nxbtm, *nybtm, *nzbtm, v2xD, //output v2yD, v2zD); int gridSizeX5 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1; int gridSizeY5 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid5(gridSizeX5, gridSizeY5); // printf("myid = %d, grid5 = (%d, %d)\n", *myid, gridSizeX5, gridSizeY5); if (lbx[1] >= lbx[0]) { vel_PmlX_IIC<<<dimGrid5, dimBlock>>>(*nzbm1, *ca, lbx[0], lbx[1], nd2_velD, drvh2D, drti2D, rhoD, damp2_xD, idmat2D, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *mw2_pml1, //dimension #s *mw2_pml, *nxbtm, *nybtm, *nzbtm, v2xD, //output v2yD, v2zD, v2x_pxD, v2y_pxD, v2z_pxD); } int gridSizeX6 = (lby[1] - lby[0])/blockSizeX + 1; int gridSizeY6 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1; dim3 dimGrid6(gridSizeX6, gridSizeY6); // printf("myid = %d, grid = (%d, %d)\n", *myid, gridSizeX6, gridSizeY6); if (lby[1] >= lby[0]) { vel_PmlY_IIC<<<dimGrid6, dimBlock>>>(*nzbm1, *ca, lby[0], lby[1], nd2_velD, drvh2D, drti2D, rhoD, damp2_yD, idmat2D, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *mw2_pml1, //dimension #s *mw2_pml, *nxbtm, *nybtm, 
*nzbtm, v2xD, //output v2yD, v2zD, v2x_pyD, v2y_pyD, v2z_pyD); } int gridSizeX7 = (nd2_vel[5] - nd2_vel[0])/blockSizeX + 1; int gridSizeY7 = (nd2_vel[11] - nd2_vel[6])/blockSizeY + 1; dim3 dimGrid7(gridSizeX7, gridSizeY7); // printf("myid = %d, grid7 = (%d, %d)\n", *myid, gridSizeX7, gridSizeY7); vel_PmlZ_IIC<<<dimGrid7, dimBlock>>>(*nzbm1, *ca, nd2_velD, drvh2D, drti2D, rhoD, damp2_zD, idmat2D, dxi2D, dyi2D, dzi2D, dxh2D, dyh2D, dzh2D, t2xxD, t2xyD, t2xzD, t2yyD, t2yzD, t2zzD, *mw2_pml1, //dimension #s *mw2_pml, *nxbtm, *nybtm, *nzbtm, v2xD, //output v2yD, v2zD, v2x_pzD, v2y_pzD, v2z_pzD); cudaThreadSynchronize(); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeCompV += tmpTime; gettimeofday(&t1, NULL); cpy_d2h_velocityOutputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeD2HV += tmpTime; /*int size = (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); write_output(v1xM, size, "OUTPUT_ARRAYS/v1xM.txt"); write_output(v1yM, size, "OUTPUT_ARRAYS/v1yM.txt"); write_output(v1zM, size, "OUTPUT_ARRAYS/v1zM.txt"); size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm + 3); write_output(v2xM, size, "OUTPUT_ARRAYS/v2xM.txt"); write_output(v2yM, size, "OUTPUT_ARRAYS/v2yM.txt"); write_output(v2zM, size, "OUTPUT_ARRAYS/v2zM.txt"); */ return; } #ifdef __cplusplus } #endif __global__ void velocity_inner_IC(int nztop, int nztm1, float ca, int *nd1_vel, float *rhoM, int *idmat1M, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, int nxtop, //dimension # int nytop, float *v1xM, //output float *v1yM, float *v1zM) { int i, j, k, k3; float dtxz, dtyz, dtzz; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd1_vel[8]; if (j > nd1_vel[3] || i > nd1_vel[9]) { return; } for (k3 = 1; k3 <= 3; k3++) { k=k3; if(k3==3) k=nztop; if(k==1) { dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j); dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j); dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j) -35./24.*t1zz(k+1,i,j)+ 21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j)); } else if(k==2) { dtxz=dzi1(2,k)*t1xz(2,i,j)+dzi1(3,k)*t1xz(3,i,j)+dzi1(4,k)*t1xz(4,i,j); dtyz=dzi1(2,k)*t1yz(2,i,j)+dzi1(3,k)*t1yz(3,i,j)+dzi1(4,k)*t1yz(4,i,j); dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j) +29./24.*t1zz(k,i,j)- 3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j)); } else { dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j)); dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j)); dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j)); } v1x(k,i,j)=v1x(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))* (dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+ dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+ dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+ dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+dtxz); v1y(k,i,j)=v1y(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))* (dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+ dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+ dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+ dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+dtyz); v1z(k,i,j)=v1z(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))* (dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+ dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+ dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+ dyh1(3,j)*t1yz(k,i,j 
)+dyh1(4,j)*t1yz(k,i,j+1)+dtzz); } for (k = 3; k <=nztm1; k++) { v1x(k,i,j)=v1x(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i+1,j)))* (dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+ dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)+ dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+ dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1)+ dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j)); v1y(k,i,j)=v1y(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k,i,j+1)))* (dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+ dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)+ dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+ dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2)+ dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j)); v1z(k,i,j)=v1z(k,i,j)+ 0.5*(rho(idmat1(k,i,j))+rho(idmat1(k-1,i,j)))* (dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+ dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)+ dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+ dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1)+ dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+ dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j)); } return; } //----------------------------------------------------------------------- __global__ void velocity_inner_IIC(float ca, int *nd2_vel, float *rhoM, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, int *idmat2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int nxbtm, //dimension #s int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM) { int i, j, k; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[8]; if (j > nd2_vel[3] || i > nd2_vel[9]) { return; } //for (j = nd2_vel(3); j <= nd2_vel(4); j++) //for (j = nd2_vel[2]; j <= nd2_vel[3]; j++) //{ //for (i = nd2_vel[8]; i <= nd2_vel[9]; i++) //{ k=1; v2x(k,i,j)=v2x(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+ dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+ dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j)); v2y(k,i,j)=v2y(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+ dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+ dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j)); v2z(k,i,j)=v2z(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+ dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+ dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j))); //for (k = 2; k <= nd2_vel(16); k++) for (k = 2; k <= nd2_vel[15]; k++) { v2x(k,i,j)=v2x(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i+1,j)))* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+ dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)+ dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j )+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j)); v2y(k,i,j)=v2y(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k,i,j+1)))* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+ 
dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)+ dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j)); v2z(k,i,j)=v2z(k,i,j)+ 0.5*(rho(idmat2(k,i,j))+rho(idmat2(k-1,i,j)))* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+ dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)+ dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+ dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j)); } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlX_IC(float ca, int lbx0, int lbx1, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, float *damp1_xM, int *idmat1M, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, int mw1_pml1, //dimension # int mw1_pml, int nxtop, int nytop, int nztop, float *v1xM, //output float *v1yM, float *v1zM, float *v1x_pxM, float *v1y_pxM, float *v1z_pxM) { // !Compute the velocities in region of PML-x-I // use grid_node_comm // use wave_field_comm // implicit NONE int i,j,k,lb,ib,kb; float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz, vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz,dtxy,dtyy,dtzy; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_vel[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; //int nv2x=(lbx(2) - lbx(1) + 1) * mw1_pml; int nv2x=(lbx1 - lbx0 + 1) * mw1_pml; //if ( lbx(1)>lbx(2) ) return; if (lbx0 > lbx1) { return; } if (j > nd1_vel[5] || lb > lbx1) { return; } //calculate the value of ib ib = 0; for (k = lbx0; k < lb; k++) { for (i = nd1_vel[6+4*k]; i <= nd1_vel[7+4*k]; i++) { ib++; } } //for (j = nd1_vel(1); j <= nd1_vel(6); j++) //for (j = nd1_vel[0]; j <= nd1_vel[5]; j++) //{ //ib=0; //for (lb = lbx(1); lb <= lbx(2); lb++) //for (lb = lbx[0]; lb <= lbx[1]; lb++) //{ kb=0; //for (i = nd1_vel(7+4*lb); i <= nd1_vel(8+4*lb); i++) for (i = nd1_vel[6+4*lb]; i <= nd1_vel[7+4*lb]; i++) { kb=kb+1; ib=ib+1; rth=drvh1(kb,lb); rti=drti1(kb,lb); for (k = 1; k <= nztop; k++) { damp0=damp1_x(k,j,lb); dmpx2=1./(1.+rth*damp0); dmpx1=dmpx2*2.-1.; dmpyz2=1./(1.+rti*damp0); dmpyz1=dmpyz2*2.-1.; ro1=rho(idmat1(k,i,j)); rox=0.5*(ro1+rho(idmat1(k,i+1,j))); roy=0.5*(ro1+rho(idmat1(k,i,j+1))); roz=0.5*(ro1+rho(idmat1(k-1,i,j))); vtmpx=v1x(k,i,j)-v1x_px(k,ib,j); vtmpy=v1y(k,i,j)-v1y_px(k,ib,j); vtmpz=v1z(k,i,j)-v1z_px(k,ib,j); //if(j>nd1_vel(2) && j<nd1_vel(5)) if(j>nd1_vel[1] && j<nd1_vel[4]) { dtxy=dyh1(1,j)*t1xy(k,i,j-2)+dyh1(2,j)*t1xy(k,i,j-1)+ dyh1(3,j)*t1xy(k,i,j )+dyh1(4,j)*t1xy(k,i,j+1); dtyy=dyi1(1,j)*t1yy(k,i,j-1)+dyi1(2,j)*t1yy(k,i,j )+ dyi1(3,j)*t1yy(k,i,j+1)+dyi1(4,j)*t1yy(k,i,j+2); dtzy=dyh1(1,j)*t1yz(k,i,j-2)+dyh1(2,j)*t1yz(k,i,j-1)+ dyh1(3,j)*t1yz(k,i,j )+dyh1(4,j)*t1yz(k,i,j+1); if(k==1) { dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j); dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j); dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+ 21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j)); } else if(k==2) { dtxz=dzi1(2,k)*t1xz(k,i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(2,k)*t1yz(k,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)- 3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j)); } else if(k==nztop) { dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j)); dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j)); 
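// (k==1, k==2 and k==nztop are the boundary rows of region I: they use shortened
//  one-sided z-stencils, while the trailing else branch applies the regular
//  4-point dzi1/dzh1 interior stencil.)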
dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j)); } else { dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+ dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j); } vtmpx=vtmpx+(dtxy+dtxz)*rox; vtmpy=vtmpy+(dtyy+dtyz)*roy; vtmpz=vtmpz+(dtzy+dtzz)*roz; } v1x_px(k,ib,j)=v1x_px(k,ib,j)*dmpx1+dmpx2*rox* dxi1(2,i)/ca*(t1xx(k,i,j)-t1xx(k,i+1,j)); v1x(k,i,j)=vtmpx+v1x_px(k,ib,j); v1y_px(k,ib,j)=v1y_px(k,ib,j)*dmpyz1+dmpyz2*roy* dxh1(2,i)/ca*(t1xy(k,i-1,j)-t1xy(k,i,j)); v1y(k,i,j)=vtmpy+v1y_px(k,ib,j); v1z_px(k,ib,j)=v1z_px(k,ib,j)*dmpyz1+dmpyz2*roz* dxh1(2,i)/ca*(t1xz(k,i-1,j)-t1xz(k,i,j)); v1z(k,i,j)=vtmpz+v1z_px(k,ib,j); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlY_IC(int nztop, float ca, int lby0, int lby1, int *nd1_vel, float *rhoM, float *drvh1M, float *drti1M, int *idmat1M, float *damp1_yM, float *dxi1M, float *dyi1M, float *dzi1M, float *dxh1M, float *dyh1M, float *dzh1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, int mw1_pml1, //dimension #s int mw1_pml, int nxtop, int nytop, float *v1xM, //output float *v1yM, float *v1zM, float *v1x_pyM, float *v1y_pyM, float *v1z_pyM) { int i,j,k,lb,jb,kb, jbIni; float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz, dtxz,dtyz,dtzz,vtmpx,vtmpy,vtmpz; //if( lby(1)>lby(2) ) if( lby0>lby1 ) return; lb = blockDim.x * blockIdx.x + threadIdx.x + lby0; i = blockDim.y * blockIdx.y + threadIdx.y + nd1_vel[6]; if (lb > lby1 || i > nd1_vel[11]) { return; } jbIni = 0; for (k = lby0; k < lb; i++) { for (j = nd1_vel[4*k]; j <= nd1_vel[1+4*lb]; j++) { jbIni++; } } jb = jbIni; kb = 0; //for (lb = lby(1); lb <= lby(2); lb++) //for (lb = lby0; lb <= lby1; lb++) //{ // kb=0; // //for (i = nd1_vel(7); i <= nd1_vel(12); i++) // for (i = nd1_vel[6]; i <= nd1_vel[11]; i++) // { //for (j = nd1_vel(1+4*lb); j <= nd1_vel(2+4*lb); j++) for (j = nd1_vel[4*lb]; j <= nd1_vel[1+4*lb]; j++) { kb=kb+1; jb=jb+1; rth=drvh1(kb,lb); rti=drti1(kb,lb); for (k = 1; k <= nztop; k++) { damp0=damp1_y(k,i,lb); dmpy2=1./(1.+rth*damp0); dmpy1=dmpy2*2.-1.; dmpxz2=1./(1.+rti*damp0); dmpxz1=dmpxz2*2.-1.; ro1=rho(idmat1(k,i,j)); rox=0.5*(ro1+rho(idmat1(k,i+1,j))); roy=0.5*(ro1+rho(idmat1(k,i,j+1))); roz=0.5*(ro1+rho(idmat1(k-1,i,j))); if(k==1) { dtxz=(dzi1(3,k)-dzi1(1,k))*t1xz(2,i,j)+dzi1(4,k)*t1xz(3,i,j); dtyz=(dzi1(3,k)-dzi1(1,k))*t1yz(2,i,j)+dzi1(4,k)*t1yz(3,i,j); dtzz=dzh1(3,k)/ca*(35./8.*t1zz(k,i,j)-35./24.*t1zz(k+1,i,j)+ 21./40.*t1zz(k+2,i,j)-5./56.*t1zz(k+3,i,j)); } else if(k==2) { dtxz=dzi1(2,k)*t1xz(k,i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(2,k)*t1yz(k,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(3,k)/ca*(-31./24.*t1zz(k-1,i,j)+29./24.*t1zz(k,i,j)- 3./40.*t1zz(k+1,i,j)+1./168.*t1zz(k+2,i,j)); } else if(k==nztop) { dtxz=dzi1(2,k)/ca*(t1xz(k,i,j)-t1xz(k+1,i,j)); dtyz=dzi1(2,k)/ca*(t1yz(k,i,j)-t1yz(k+1,i,j)); dtzz=dzh1(2,k)/ca*(t1zz(k-1,i,j)-t1zz(k,i,j)); } else { dtxz=dzi1(1,k)*t1xz(k-1,i,j)+dzi1(2,k)*t1xz(k, i,j)+ dzi1(3,k)*t1xz(k+1,i,j)+dzi1(4,k)*t1xz(k+2,i,j); dtyz=dzi1(1,k)*t1yz(k-1,i,j)+dzi1(2,k)*t1yz(k ,i,j)+ dzi1(3,k)*t1yz(k+1,i,j)+dzi1(4,k)*t1yz(k+2,i,j); dtzz=dzh1(1,k)*t1zz(k-2,i,j)+dzh1(2,k)*t1zz(k-1,i,j)+ dzh1(3,k)*t1zz(k ,i,j)+dzh1(4,k)*t1zz(k+1,i,j); } vtmpx=v1x(k,i,j)-v1x_py(k,i,jb)+dtxz*rox; 
vtmpy=v1y(k,i,j)-v1y_py(k,i,jb)+dtyz*roy; vtmpz=v1z(k,i,j)-v1z_py(k,i,jb)+dtzz*roz; //if(i>nd1_vel(8) && i<nd1_vel(11)) if(i>nd1_vel[7] && i<nd1_vel[10]) { vtmpx=vtmpx+ rox*(dxi1(1,i)*t1xx(k,i-1,j)+dxi1(2,i)*t1xx(k,i, j)+ dxi1(3,i)*t1xx(k,i+1,j)+dxi1(4,i)*t1xx(k,i+2,j)); vtmpy=vtmpy+ roy*(dxh1(1,i)*t1xy(k,i-2,j)+dxh1(2,i)*t1xy(k,i-1,j)+ dxh1(3,i)*t1xy(k,i, j)+dxh1(4,i)*t1xy(k,i+1,j)); vtmpz=vtmpz+ roz*(dxh1(1,i)*t1xz(k,i-2,j)+dxh1(2,i)*t1xz(k,i-1,j)+ dxh1(3,i)*t1xz(k,i, j)+dxh1(4,i)*t1xz(k,i+1,j)); } v1x_py(k,i,jb)=v1x_py(k,i,jb)*dmpxz1+dmpxz2* rox*dyh1(2,j)/ca*(t1xy(k,i,j-1)-t1xy(k,i,j)); v1x(k,i,j)=vtmpx+v1x_py(k,i,jb); v1y_py(k,i,jb)=v1y_py(k,i,jb)*dmpy1+dmpy2* roy*dyi1(2,j)/ca*(t1yy(k,i,j)-t1yy(k,i,j+1)); v1y(k,i,j)=vtmpy+v1y_py(k,i,jb); v1z_py(k,i,jb)=v1z_py(k,i,jb)*dmpxz1+dmpxz2* roz*dyh1(2,j)/ca*(t1yz(k,i,j-1)-t1yz(k,i,j)); v1z(k,i,j)=vtmpz+v1z_py(k,i,jb); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlX_IIC(int nzbm1, float ca, int lbx0, int lbx1, int *nd2_vel, float *drvh2M, float *drti2M, float *rhoM, float *damp2_xM, int *idmat2M, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM, float *v2x_pxM, float *v2y_pxM, float *v2z_pxM) { int i,j,k,lb,ib,kb; float rth,rti,damp0,dmpx2,dmpx1,dmpyz2,dmpyz1,ro1,rox,roy,roz, vtmpx,vtmpy,vtmpz,dtxy,dtyy,dtzy,dtxz,dtyz,dtzz; //int nv2y = (lbx(2) - lbx(1) + 1) * mw2_pml; int nv2y = (lbx1 - lbx0 + 1) * mw2_pml; //if ( lbx(1)>lbx(2) ) return; if ( lbx0>lbx1 ) return; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_vel[5] || lb > lbx1) { return; } ib = 0; for (k = lbx0; k < lb; k++) { for (i = nd2_vel[6+4*k]; i < nd2_vel[7+4*k]; i++) { ib++; } } //for (j = nd2_vel(1); j <= nd2_vel(6); j++) //for (j = nd2_vel[0]; j <= nd2_vel[5]; j++) //{ //ib=0; //for (lb = lbx(1); lb <= lbx(2); lb++) //for (lb = lbx0; lb <= lbx1; lb++) //{ kb=0; //for (i = nd2_vel(7+4*lb); i <= nd2_vel(8+4*lb); i++) for (i = nd2_vel[6+4*lb]; i <= nd2_vel[7+4*lb]; i++) { kb=kb+1; ib=ib+1; rth=drvh2(kb,lb); rti=drti2(kb,lb); for (k = 1; k <= nzbm1; k++) { damp0=damp2_x(k,j,lb); dmpx2=1./(1.+rth*damp0); dmpx1=dmpx2*2.-1.; dmpyz2=1./(1.+rti*damp0); dmpyz1=dmpyz2*2.-1.; ro1=rho(idmat2(k,i,j)); rox=0.5*(ro1+rho(idmat2(k,i+1,j))); roy=0.5*(ro1+rho(idmat2(k,i,j+1))); roz=0.5*(ro1+rho(idmat2(k-1,i,j))); vtmpx=v2x(k,i,j)-v2x_px(k,ib,j); vtmpy=v2y(k,i,j)-v2y_px(k,ib,j); vtmpz=v2z(k,i,j)-v2z_px(k,ib,j); //if(j>nd2_vel(2) && j<nd2_vel(5)) if(j>nd2_vel[1] && j<nd2_vel[4]) { dtxy=dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1); dtyy=dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2); dtzy=dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1); if(k==1) { dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j)); dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j)); dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)); } //else if(k<nd2_vel(17)) else if(k<nd2_vel[16]) { dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j); dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j); 
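// (Rows above the z-damping strip, k < nd2_vel[16], keep their z-derivative terms
//  here; for deeper rows the else branch below zeroes dtxz/dtyz/dtzz, and those
//  rows are advanced by vel_PmlZ_IIC through the v2*_pz memory variables.)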
dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+ dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j); } else { dtxz=0.0; dtyz=0.0; dtzz=0.0; } vtmpx=vtmpx+(dtxy+dtxz)*rox; vtmpy=vtmpy+(dtyy+dtyz)*roy; vtmpz=vtmpz+(dtzy+dtzz)*roz; } v2x_px(k,ib,j)=v2x_px(k,ib,j)*dmpx1+dmpx2* rox*dxi2(2,i)/ca*(t2xx(k,i,j)-t2xx(k,i+1,j)); v2x(k,i,j)=vtmpx+v2x_px(k,ib,j); v2y_px(k,ib,j)=v2y_px(k,ib,j)*dmpyz1+dmpyz2* roy*dxh2(2,i)/ca*(t2xy(k,i-1,j)-t2xy(k,i,j)); v2y(k,i,j)=vtmpy+v2y_px(k,ib,j); v2z_px(k,ib,j)=v2z_px(k,ib,j)*dmpyz1+dmpyz2* roz*dxh2(2,i)/ca*(t2xz(k,i-1,j)-t2xz(k,i,j)); v2z(k,i,j)=vtmpz+v2z_px(k,ib,j); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlY_IIC(int nzbm1, float ca, int lby0, int lby1, int *nd2_vel, float *drvh2M, float *drti2M, float *rhoM, float *damp2_yM, int *idmat2M, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int mw2_pml1, int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM, float *v2x_pyM, float *v2y_pyM, float *v2z_pyM) { int i,j,k,lb,jb,kb, jbIni; float rth,rti,damp0,dmpy2,dmpy1,dmpxz2,dmpxz1,ro1,rox,roy,roz, vtmpx,vtmpy,vtmpz,dtxz,dtyz,dtzz; //if( lby(1)>lby(2) ) return; if( lby0>lby1 ) { return; } lb = blockIdx.x * blockDim.x + threadIdx.x + lby0; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6]; if (lb > lby1 || i > nd2_vel[11]) { return; } jbIni = 0; for (j = lby0; j < lb; j++) { for (k = nd2_vel[4*j]; k <= nd2_vel[1+4*j]; k++) { jbIni++; } } jb = jbIni; kb = 0; //for (lb = lby(1); lb <= lby(2); lb++) //for (lb = lby0; lb <= lby1; lb++) //{ //kb=0; //for (i = nd2_vel(7); i <= nd2_vel(12); i++) //for (i = nd2_vel[6]; i <= nd2_vel[11]; i++) //{ //for (j = nd2_vel(1+4*lb); j <= nd2_vel(2+4*lb); j++) for (j = nd2_vel[4*lb]; j <= nd2_vel[1+4*lb]; j++) { kb=kb+1; jb=jb+1; rth=drvh2(kb,lb); rti=drti2(kb,lb); for (k = 1; k <= nzbm1; k++) { damp0=damp2_y(k,i,lb); dmpy2=1./(1.+rth*damp0); dmpy1=dmpy2*2.-1.0; dmpxz2=1./(1.+rti*damp0); dmpxz1=dmpxz2*2.-1.; ro1=rho(idmat2(k,i,j)); rox=0.5*(ro1+rho(idmat2(k,i+1,j))); roy=0.5*(ro1+rho(idmat2(k,i,j+1))); roz=0.5*(ro1+rho(idmat2(k-1,i,j))); vtmpx=v2x(k,i,j)-v2x_py(k,i,jb); vtmpy=v2y(k,i,j)-v2y_py(k,i,jb); vtmpz=v2z(k,i,j)-v2z_py(k,i,jb); //if(k<nd2_vel(17)) if(k<nd2_vel[16]) { if(k>1) { dtxz=dzi2(1,k)*t2xz(k-1,i,j)+dzi2(2,k)*t2xz(k,i,j)+ dzi2(3,k)*t2xz(k+1,i,j)+dzi2(4,k)*t2xz(k+2,i,j); dtyz=dzi2(1,k)*t2yz(k-1,i,j)+dzi2(2,k)*t2yz(k,i,j)+ dzi2(3,k)*t2yz(k+1,i,j)+dzi2(4,k)*t2yz(k+2,i,j); dtzz=dzh2(1,k)*t2zz(k-2,i,j)+dzh2(2,k)*t2zz(k-1,i,j)+ dzh2(3,k)*t2zz(k, i,j)+dzh2(4,k)*t2zz(k+1,i,j); } else { dtxz=dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j)); dtyz=dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j)); dtzz=dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)); } //if(i>nd2_vel(8) && i<nd2_vel(11)) if(i>nd2_vel[7] && i<nd2_vel[10]) { vtmpx=vtmpx+rox*(dtxz+ dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)); vtmpy=vtmpy+roy*(dtyz+ dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)); vtmpz=vtmpz+roz*(dtzz+ dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)); } else { vtmpx=vtmpx+rox*dtxz; vtmpy=vtmpy+roy*dtyz; vtmpz=vtmpz+roz*dtzz; } } else { //if(i>nd2_vel(8) && i<nd2_vel(11)) if(i>nd2_vel[7] && i<nd2_vel[10]) { vtmpx=vtmpx+rox* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ 
dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)); vtmpy=vtmpy+ roy* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)); vtmpz=vtmpz+ roz* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)); } } v2x_py(k,i,jb)=v2x_py(k,i,jb)*dmpxz1+dmpxz2*rox* dyh2(2,j)/ca*(t2xy(k,i,j-1)-t2xy(k,i,j)); v2x(k,i,j)=vtmpx+v2x_py(k,i,jb); v2y_py(k,i,jb)=v2y_py(k,i,jb)*dmpy1+dmpy2*roy* dyi2(2,j)/ca*(t2yy(k,i,j)-t2yy(k,i,j+1)); v2y(k,i,j)=vtmpy+v2y_py(k,i,jb); v2z_py(k,i,jb)=v2z_py(k,i,jb)*dmpxz1+dmpxz2*roz* dyh2(2,j)/ca*(t2yz(k,i,j-1)-t2yz(k,i,j)); v2z(k,i,j)=vtmpz+v2z_py(k,i,jb); } } //} //} return; } //----------------------------------------------------------------------- __global__ void vel_PmlZ_IIC(int nzbm1, float ca, int *nd2_vel, float *drvh2M, float *drti2M, float *rhoM, float *damp2_zM, int *idmat2M, float *dxi2M, float *dyi2M, float *dzi2M, float *dxh2M, float *dyh2M, float *dzh2M, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, int mw2_pml1, //dimension #s int mw2_pml, int nxbtm, int nybtm, int nzbtm, float *v2xM, //output float *v2yM, float *v2zM, float *v2x_pzM, float *v2y_pzM, float *v2z_pzM) { int i,j,k,kb; float damp0,dmpz2,dmpz1,dmpxy2,dmpxy1,ro1,rox,roy,roz,vtmpx,vtmpy,vtmpz; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_vel[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_vel[6]; if (j > nd2_vel[5] || i > nd2_vel[11]) { return; } //for (j = nd2_vel(1); j <= nd2_vel(6); j++) //for (j = nd2_vel[0]; j <= nd2_vel[5]; j++) //{ //for (i = nd2_vel(7); i <= nd2_vel(12); i++) //for (i = nd2_vel[6]; i <= nd2_vel[11]; i++) //{ kb=0; damp0=damp2_z(i,j); //for (k = nd2_vel(17); k <= nzbm1; k++) for (k = nd2_vel[16]; k <= nzbm1; k++) { kb=kb+1; dmpz2=1./(1.+damp0*drti2(kb,1)); dmpz1=dmpz2*2.-1.; dmpxy2=1./(1.+damp0*drvh2(kb,1)); dmpxy1=dmpxy2*2.-1.; ro1=rho(idmat2(k,i,j)); rox=0.5*(ro1+rho(idmat2(k,i+1,j))); roy=0.5*(ro1+rho(idmat2(k,i,j+1))); roz=0.5*(ro1+rho(idmat2(k-1,i,j))); vtmpx=v2x(k,i,j)-v2x_pz(kb,i,j); vtmpy=v2y(k,i,j)-v2y_pz(kb,i,j); vtmpz=v2z(k,i,j)-v2z_pz(kb,i,j); //if(j>nd2_vel(2) && j<nd2_vel(5) && // i>nd2_vel(8) && i<nd2_vel(11)) if(j>nd2_vel[1] && j<nd2_vel[4] && i>nd2_vel[7] && i<nd2_vel[10]) { vtmpx=vtmpx+rox* (dxi2(1,i)*t2xx(k,i-1,j)+dxi2(2,i)*t2xx(k,i, j)+ dxi2(3,i)*t2xx(k,i+1,j)+dxi2(4,i)*t2xx(k,i+2,j)+ dyh2(1,j)*t2xy(k,i,j-2)+dyh2(2,j)*t2xy(k,i,j-1)+ dyh2(3,j)*t2xy(k,i,j )+dyh2(4,j)*t2xy(k,i,j+1)); vtmpy=vtmpy+roy* (dxh2(1,i)*t2xy(k,i-2,j)+dxh2(2,i)*t2xy(k,i-1,j)+ dxh2(3,i)*t2xy(k,i, j)+dxh2(4,i)*t2xy(k,i+1,j)+ dyi2(1,j)*t2yy(k,i,j-1)+dyi2(2,j)*t2yy(k,i,j)+ dyi2(3,j)*t2yy(k,i,j+1)+dyi2(4,j)*t2yy(k,i,j+2)); vtmpz=vtmpz+roz* (dxh2(1,i)*t2xz(k,i-2,j)+dxh2(2,i)*t2xz(k,i-1,j)+ dxh2(3,i)*t2xz(k,i, j)+dxh2(4,i)*t2xz(k,i+1,j)+ dyh2(1,j)*t2yz(k,i,j-2)+dyh2(2,j)*t2yz(k,i,j-1)+ dyh2(3,j)*t2yz(k,i,j )+dyh2(4,j)*t2yz(k,i,j+1)); } v2x_pz(kb,i,j)=v2x_pz(kb,i,j)*dmpxy1+dmpxy2*rox* dzi2(2,k)/ca*(t2xz(k,i,j)-t2xz(k+1,i,j)); v2x(k,i,j)=vtmpx+v2x_pz(kb,i,j); v2y_pz(kb,i,j)=v2y_pz(kb,i,j)*dmpxy1+dmpxy2*roy* dzi2(2,k)/ca*(t2yz(k,i,j)-t2yz(k+1,i,j)); v2y(k,i,j)=vtmpy+v2y_pz(kb,i,j); v2z_pz(kb,i,j)=v2z_pz(kb,i,j)*dmpz1+dmpz2*roz* dzh2(2,k)/ca*(t2zz(k-1,i,j)-t2zz(k,i,j)); v2z(k,i,j)=vtmpz+v2z_pz(kb,i,j); } //} //} return; } //stress computation---------------------------------------------- __global__ void stress_norm_xy_IC(int nxb1, int nyb1, int nxtop, int nztop, int *nd1_tyy, int *idmat1M, float ca, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float 
*qwt1M, float *qwt2M, float *dxh1M, float *dyh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1yyM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1yyM, float *qt1zzM, float *v1xM, float *v1yM, float *v1zM) { int i,j,k,jkq,kodd,inod,irw; float sxx,syy,szz,sxy,qxx,qyy,qzz,qxy,cusxy,sss,cl,sm2,pm,et,et1,wtp,wts; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd1_tyy[8]; if (j > nd1_tyy[3] || i > nd1_tyy[9]) { return; } // for (j = nd1_tyy[2]; j <= nd1_tyy[3]; j++) // { kodd = 2 * ((j + nyb1) & 1) + 1; // for (i = nd1_tyy[8]; i <= nd1_tyy[9]; i++) // { jkq=((i+nxb1) & 1) + kodd; for (k = nd1_tyy[12]; k <= nd1_tyy[17]; k++) { sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+ dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j); syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+ dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1); sxy=dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i, j)+ dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j)+ dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j )+ dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2); if(k==1) { szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)- 9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.0; } else if(k==nztop) { szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j)); } else { szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+ dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j); } inod=idmat1(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; cusxy=sxy/(1./sm2+.5/cmu(idmat1(k,i+1,j+1))); sss=sxx+syy+szz; irw=jkq+4*(k&1); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxx=qt1xx(k,i,j); qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1; t1xx(k,i,j)=t1xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j); qyy=qt1yy(k,i,j); qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1; t1yy(k,i,j)=t1yy(k,i,j)+sm2*syy+cl*sss-qyy-qt1yy(k,i,j); qzz=qt1zz(k,i,j); qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1; t1zz(k,i,j)=t1zz(k,i,j)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j); qxy=qt1xy(k,i,j); qt1xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1; t1xy(k,i,j)=t1xy(k,i,j)+cusxy-qxy-qt1xy(k,i,j); } // } // } return; } //----------------------------------------------------------------------------- __global__ void stress_xz_yz_IC(int nxb1, int nyb1, int nxtop, int nytop, int nztop, int *nd1_tyz, int *idmat1M, float ca, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dyi1M, float *dzh1M, float *v1xM, float *v1yM, float *v1zM, float *t1xzM, float *t1yzM, float *qt1xzM, float *qt1yzM) // Compute stress-XZand YZ component in Region I // use grid_node_comm // use wave_field_comm // implicit NONE // real, parameter:: tfr1=-577./528./ca,tfr2=201./176./ca, & // tfr3=-9./176./ca, tfr4=1./528./ca { // float tfr1 = -577./528./ca; // float tfr2 = 201./176./ca; // float tfr3 = -9./176./ca; // float tfr4=1./528./ca; int i,j,k,kodd,inod,jkq,irw; float dvzx,dvzy,dvxz,dvyz,sm,cusxz,cusyz,et,et1,dmws,qxz,qyz; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd1_tyz[8]; if (j > nd1_tyz[3] || i > nd1_tyz[9]) { return; } // for (j=nd1_tyz[2]; j <=nd1_tyz[3]; j++) // //do j=nd1_tyz(3),nd1_tyz(4) // { //kodd=2*mod(j+nyb1,2)+1 kodd=2*((j+nyb1)&1)+1; // for (i=nd1_tyz[8]; i<=nd1_tyz[9]; i++) // //do i=nd1_tyz(9),nd1_tyz(10) // { //jkq=mod(i+nxb1,2)+kodd jkq=((i+nxb1)&1)+kodd; for (k=nd1_tyz[12]; k<=nd1_tyz[17]; k++) //do k=nd1_tyz(13),nd1_tyz(18) { dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+ 
dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j); dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+ dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2); if(k<nztop) { dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+ dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j); dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+ dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j); } else { dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j)); dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j)); } inod=idmat1(k,i,j); sm=cmu(inod); cusxz=(dvzx+dvxz)/(.5/sm+.5/cmu(idmat1(k-1,i+1,j))); cusyz=(dvzy+dvyz)/(.5/sm+.5/cmu(idmat1(k-1,i,j+1))); //irw=jkq+4*mod(k,2); irw=jkq+4*(k&1); et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxz=qt1xz(k,i,j); qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1; t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j); qyz=qt1yz(k,i,j); qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1; t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j); } // } // } return; } __global__ void stress_resetVars(int ny1p1, int nx1p1, int nxtop, int nytop, int nztop, float *t1xzM, float *t1yzM) { int i, j; j = blockIdx.x * blockDim.x + threadIdx.x - 1; i = blockIdx.y * blockDim.y + threadIdx.y + 1; if (j <= ny1p1 && i <= nxtop) { t1yz(1, i, j) = 0.0f; } // for (j=-1; j<=ny1p1; j++) // { // for (i = 1; i <= nxtop; i++) // { // t1yz(1,i,j)=0.0; // } // } j = j + 2; i = i - 2; if (j <= nytop && i <= nx1p1) { t1xz(1, i, j) = 0.0; } // for (j=1; j <= nytop; j++) // { // for (i=-1; i <=nx1p1; i++) // { // t1xz(1,i,j)=0.0; // } // } return; } //------------------------------------------------------------------------------------ __global__ void stress_norm_PmlX_IC(int nxb1, int nyb1, int nxtop, int nytop, int nztop, int mw1_pml, int mw1_pml1, int lbx0, int lbx1, int *nd1_tyy, int *idmat1M, float ca, float *drti1M, float *damp1_xM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dzi1M, float *dxh1M, float *dyh1M, float *v1xM, float *v1yM, float *v1zM, float *t1xxM, float *t1yyM, float *t1zzM, float *t1xx_pxM, float *t1yy_pxM, float *qt1xxM, float *qt1yyM, float *qt1zzM, float *qt1xx_pxM, float *qt1yy_pxM) // Compute the velocity of PML-x-I region // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw // real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; int nti; //if (lbx[0] > lbx[1]) return; //if ( lbx(1)>lbx(2) ) return; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_tyy[5] || lb > lbx1) { return; } nti = (lbx1 - lbx0 + 1) * mw1_pml + lbx0; // for (j=nd1_tyy[0]; j <= nd1_tyy[5]; j++) // //do j=nd1_tyy(1),nd1_tyy(6) // { kodd=2*((j+nyb1)&1)+1; ib=0; for (k = lbx0; k < lb; k++) { ib++; } // for (lb=lbx[0]; lb <=lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i = nd1_tyy[6+4*lb]; i <= nd1_tyy[7+4*lb]; i++) //do i=nd1_tyy(7+4*lb),nd1_tyy(8+4*lb) { kb=kb+1; ib=ib+1; rti=drti1(kb,lb); jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++) //do k=nd1_tyy(13),nd1_tyy(18) { damp2=1./(1.+damp1_x(k,j,lb)*rti); damp1=damp2*2.0-1.; inod=idmat1(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); 
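// Split-field PML bookkeeping shared by the *_Pml* stress kernels: the stored
// stress is t1xx = taoxx + t1xx_px. Below, tao* is recovered by subtracting the
// PML memory part, updated with the regular (non-PML) derivative terms, and the
// memory part itself is advanced recursively with damp2 = 1./(1.+damp1_x(k,j,lb)*rti)
// and damp1 = 2.*damp2 - 1. The weights wtp/wts combine qwp/qws with qwt1/qwt2
// for the attenuation memory terms qt1* / qt1*_px subtracted from both parts.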
taoxx=t1xx(k,i,j)-t1xx_px(k,ib,j); taoyy=t1yy(k,i,j)-t1yy_px(k,ib,j); taozz=t1zz(k,i,j)-t1yy_px(k,ib,j); if(j>nd1_tyy[1] && j<nd1_tyy[4]) { //if(j>nd1_tyy(2) .and. j<nd1_tyy(5)) { syy=dyh1(1,j)*v1y(k,i,j-2)+dyh1(2,j)*v1y(k,i,j-1)+ dyh1(3,j)*v1y(k,i ,j)+dyh1(4,j)*v1y(k,i,j+1); if(k==1) { szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)- 9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.; } else if(k==nztop) { szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j)); } else { szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+ dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j); } sss=syy+szz; qxx=qt1xx(k,i,j); qt1xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1; taoxx=taoxx+cl*sss-qxx-qt1xx(k,i,j); qyy=qt1yy(k,i,j); qt1yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1; taoyy=taoyy+sm2*syy+cl*sss-qyy-qt1yy(k,i,j); qzz=qt1zz(k,i,j); qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1; taozz=taozz+sm2*szz+cl*sss-qzz-qt1zz(k,i,j); } sxx=dxh1(2,i)/ca*(v1x(k,i-1,j)-v1x(k,i,j)); qxx=qt1xx_px(k,ib,j); qt1xx_px(k,ib,j)=qxx*et+wtp*sxx*et1; t1xx_px(k,ib,j)=damp1*t1xx_px(k,ib,j)+ damp2*(pm*sxx-qxx-qt1xx_px(k,ib,j)); t1xx(k,i,j)=taoxx+t1xx_px(k,ib,j); qyy=qt1yy_px(k,ib,j); qt1yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1; t1yy_px(k,ib,j)=damp1*t1yy_px(k,ib,j)+ damp2*(cl*sxx-qyy-qt1yy_px(k,ib,j)); t1yy(k,i,j)=taoyy+t1yy_px(k,ib,j); t1zz(k,i,j)=taozz+t1yy_px(k,ib,j); } } // } // } return; } __global__ void stress_norm_PmlY_IC(int nxb1, int nyb1, int mw1_pml1, int nxtop, int nztop, int lby0, int lby1, int *nd1_tyy, int *idmat1M, float ca, float *drti1M, float *damp1_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M, float *dyh1M, float *dzi1M, float *t1xxM, float *t1yyM, float *t1zzM, float *qt1xxM, float *qt1yyM, float *qt1zzM, float *t1xx_pyM, float *t1yy_pyM, float *qt1xx_pyM, float *qt1yy_pyM, float *v1xM, float *v1yM, float *v1zM) // Compute the velocity of PML-x-I region // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw // real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; //if(lby[0]>lby[1]) return; //if(lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_tyy[11] || lb > lby1) { return; } // for (i = nd1_tyy[6]; i <= nd1_tyy[11]; i++) // //do i=nd1_tyy(7),nd1_tyy(12) // { jb = 0; for (k = 0; k < lb; k++) { for (j = nd1_tyy[4*k]; j <= nd1_tyy[1+4*k]; j++) { jb++; } } // for (lb=lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j = nd1_tyy[4*lb]; j <= nd1_tyy[1+4*lb]; j++) //do j=nd1_tyy(1+4*lb),nd1_tyy(2+4*lb) { kb=kb+1; jb=jb+1; rti=drti1(kb,lb); kodd=2 * ((j + nyb1) & 1) + 1; //kodd=2*mod(j+nyb1,2)+1 jkq = ((i + nxb1) & 1) + kodd; //jkq=mod(i+nxb1,2)+kodd for (k=nd1_tyy[12]; k <=nd1_tyy[17]; k++) //do k=nd1_tyy(13),nd1_tyy(18) { damp2=1./(1.+damp1_y(k,i,lb)*rti); damp1=damp2*2.-1.; inod=idmat1(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; //irw=jkq+4*mod(k,2) irw=jkq + 4 * (k & 1); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); if (i>nd1_tyy[7] && i<nd1_tyy[10]) { //if(i>nd1_tyy(8) .and. 
i<nd1_tyy(11)) then sxx=dxh1(1,i)*v1x(k,i-2,j)+dxh1(2,i)*v1x(k,i-1,j)+ dxh1(3,i)*v1x(k,i ,j)+dxh1(4,i)*v1x(k,i+1,j); } else { sxx=0.0; } if(k==1) { szz=dzi1(2,k)/ca*(22.*v1z(k,i,j)-17.*v1z(k+1,i,j)- 9.*v1z(k+2,i,j)+5.*v1z(k+3,i,j)-v1z(k+4,i,j))/24.; } else if(k==nztop) { szz=dzi1(2,k)/ca*(v1z(k,i,j)-v1z(k+1,i,j)); } else { szz=dzi1(1,k)*v1z(k-1,i,j)+dzi1(2,k)*v1z(k, i,j)+ dzi1(3,k)*v1z(k+1,i,j)+dzi1(4,k)*v1z(k+2,i,j); } sss=sxx+szz; qxx=qt1xx(k,i,j); qt1xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1; taoxx=t1xx(k,i,j)-t1xx_py(k,i,jb)+sm2*sxx+cl*sss-qxx-qt1xx(k,i,j); qyy=qt1yy(k,i,j); qt1yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1; taoyy=t1yy(k,i,j)-t1yy_py(k,i,jb)+cl*sss-qyy-qt1yy(k,i,j); qzz=qt1zz(k,i,j); qt1zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1; taozz=t1zz(k,i,j)-t1xx_py(k,i,jb)+sm2*szz+cl*sss-qzz-qt1zz(k,i,j); syy=dyh1(2,j)/ca*(v1y(k,i,j-1)-v1y(k,i,j)); qxx=qt1xx_py(k,i,jb); qt1xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1; t1xx_py(k,i,jb)=damp1*t1xx_py(k,i,jb)+ damp2*(cl*syy-qxx-qt1xx_py(k,i,jb)); t1xx(k,i,j)=taoxx+t1xx_py(k,i,jb); t1zz(k,i,j)=taozz+t1xx_py(k,i,jb); qyy=qt1yy_py(k,i,jb); qt1yy_py(k,i,jb)=qyy*et+wtp*syy*et1; t1yy_py(k,i,jb)=damp1*t1yy_py(k,i,jb)+ damp2*(pm*syy-qyy-qt1yy_py(k,i,jb)); t1yy(k,i,j)=taoyy+t1yy_py(k,i,jb); } } // } // } return; } __global__ void stress_xy_PmlX_IC(int nxb1, int nyb1, int mw1_pml, int mw1_pml1, int nxtop, int nytop, int nztop, int lbx0, int lbx1, int *nd1_txy, int *idmat1M, float ca, float *drth1M, float *damp1_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dyi1M, float *t1xyM, float *qt1xyM, float *t1xy_pxM, float *qt1xy_pxM, float *v1xM, float *v1yM) // Compute the Stress-xy at region of PML-x-I // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw // real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1; int nth; nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_txy[5] || lb > lbx1) { return; } ib = 0; for (k = lbx0; k < lb; k++) { for (i = nd1_txy[6+4*k]; i <= nd1_txy[7+4*k]; i++) { ib++; } } //if (lbx[0] > lbx[1]) return; //if ( lbx(1)>lbx(2) ) return // for (j = nd1_txy[0]; j <= nd1_txy[5]; j++) // //do j=nd1_txy(1),nd1_txy(6) // { kodd = 2 * ((j + nyb1) & 1) + 1; //kodd=2*mod(j+nyb1,2)+1 // ib=0; // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i = nd1_txy[6+4*lb]; i <= nd1_txy[7+4*lb]; i++) //do i=nd1_txy(7+4*lb),nd1_txy(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth1(kb,lb); jkq=((i + nxb1) & 1) + kodd; //jkq=mod(i+nxb1,2)+kodd; for (k = nd1_txy[12]; k <= nd1_txy[17]; k++) //do k=nd1_txy(13),nd1_txy(18) { damp2=1./(1.+damp1_x(k,j,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1))); irw=jkq + 4 * (k & 1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t1xy(k,i,j)-t1xy_px(k,ib,j); if(j > nd1_txy[1] && j<nd1_txy[4]) { //if(j>nd1_txy(2) .and. 
j<nd1_txy(5)) then cusxy=(dyi1(1,j)*v1x(k,i,j-1)+dyi1(2,j)*v1x(k,i,j)+ dyi1(3,j)*v1x(k,i,j+1)+dyi1(4,j)*v1x(k,i,j+2))*sm; qxy=qt1xy(k,i,j); qt1xy(k,i,j)=qxy*et+dmws*cusxy*et1; taoxy=taoxy+cusxy-qxy-qt1xy(k,i,j); } cusxy=sm*dxi1(2,i)/ca*(v1y(k,i,j)-v1y(k,i+1,j)); qxy=qt1xy_px(k,ib,j); qt1xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1; t1xy_px(k,ib,j)=damp1*t1xy_px(k,ib,j)+ damp2*(cusxy-qxy-qt1xy_px(k,ib,j)); t1xy(k,i,j)=taoxy+t1xy_px(k,ib,j); } } // } // } return; } __global__ void stress_xy_PmlY_IC(int nxb1, int nyb1, int mw1_pml1, int nxtop, int nztop, int lby0, int lby1, int *nd1_txy, int *idmat1M, float ca, float *drth1M, float *damp1_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dyi1M, float *t1xyM, float *qt1xyM, float *t1xy_pyM, float *qt1xy_pyM, float *v1xM, float *v1yM) //Compute the Stress-xy at region of PML-y-I //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxy,cusyx,qxy,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_txy[11] || lb > lby1) { return; } // for (i = nd1_txy[6]; i <= nd1_txy[11]; i++) // //do i=nd1_txy(7),nd1_txy(12) // { jb=0; for (k = lby0; k < lb; k++) { for (j = nd1_txy[4*k]; j <= nd1_txy[1 + 4 * k]; j++) { kb++; } } // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1), lby(2) // { kb=0; for (j = nd1_txy[4*lb]; j <= nd1_txy[1 + 4 * lb]; j++) //do j=nd1_txy(1+4*lb),nd1_txy(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth1(kb,lb); kodd=2 * ((j + nyb1) & 1) + 1; //kodd=2*mod(j+nyb1,2)+1; jkq=((i + nxb1) & 1) + kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_txy[12]; k <= nd1_txy[17]; k++) //do k=nd1_txy(13),nd1_txy(18) { damp2=1./(1.+damp1_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k,i+1,j+1))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t1xy(k,i,j)-t1xy_py(k,i,jb); if(i > nd1_txy[7] && i<nd1_txy[10]) { //if(i>nd1_txy(8) .and. 
i<nd1_txy(11)) then cusyx=(dxi1(1,i)*v1y(k,i-1,j)+dxi1(2,i)*v1y(k,i,j)+ dxi1(3,i)*v1y(k,i+1,j)+dxi1(4,i)*v1y(k,i+2,j))*sm; qxy=qt1xy(k,i,j); qt1xy(k,i,j)=qxy*et+dmws*cusyx*et1; taoxy=taoxy+cusyx-qxy-qt1xy(k,i,j); } cusyx=sm*dyi1(2,j)/ca*(v1x(k,i,j)-v1x(k,i,j+1)); qxy=qt1xy_py(k,i,jb); qt1xy_py(k,i,jb)=qxy*et+dmws*cusyx*et1; t1xy_py(k,i,jb)=damp1*t1xy_py(k,i,jb)+ damp2*(cusyx-qxy-qt1xy_py(k,i,jb)); t1xy(k,i,j)=taoxy+t1xy_py(k,i,jb); } } // } // } return; } __global__ void stress_xz_PmlX_IC(int nxb1, int nyb1, int nxtop, int nytop, int nztop, int mw1_pml, int mw1_pml1, int lbx0, int lbx1, int *nd1_txz, int *idmat1M, float ca, float *drth1M, float *damp1_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dzh1M, float *t1xzM, float *qt1xzM, float *t1xz_pxM, float *qt1xz_pxM, float *v1xM, float *v1zM) //Compute the stress-xz at PML-x-I region //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxz,cusxz,dvxz,qxz,rth,damp2,damp1,sm,dmws,et,et1; int nth; //if (lbx[0] > lbx[1]) return; //if ( lbx(1)>lbx(2) ) return nth = (lbx1 - lbx0 + 1) * mw1_pml + 1 - lbx0; j = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_txz[5] || lb > lbx1) { return; } // for (j = nd1_txz[0]; j <= nd1_txz[5]; j++) // //do j=nd1_txz(1),nd1_txz(6) // { kodd=2 * ((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1 ib=0; for (k = lbx0; k < lb; k++) { for (i = nd1_txz[6+4*k]; i <= nd1_txz[7+4*k]; i++) { ib++; } } // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i = nd1_txz[6+4*lb]; i <= nd1_txz[7+4*lb]; i++) //do i=nd1_txz(7+4*lb),nd1_txz(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth1(kb,lb); jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_txz[12]; k <= nd1_txz[17]; k++) //do k=nd1_txz(13),nd1_txz(18) { damp2=1./(1.+damp1_x(k,j,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); if(k<nztop) { dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+ dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j); } else { dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j)); } cusxz=dvxz*sm; qxz=qt1xz(k,i,j); qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1; taoxz=t1xz(k,i,j)-t1xz_px(k,ib,j)+cusxz-qxz-qt1xz(k,i,j); cusxz=sm*dxi1(2,i)/ca*(v1z(k,i,j)-v1z(k,i+1,j)); qxz=qt1xz_px(k,ib,j); qt1xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1; t1xz_px(k,ib,j)=damp1*t1xz_px(k,ib,j)+ damp2*(cusxz-qxz-qt1xz_px(k,ib,j)); t1xz(k,i,j)=taoxz+t1xz_px(k,ib,j); } } // } // } return; } __global__ void stress_xz_PmlY_IC(int nxb1, int nyb1, int nxtop, int nztop, int lby0, int lby1, int *nd1_txz, int *idmat1M, float ca, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi1M, float *dzh1M, float *t1xzM, float *qt1xzM, float *v1xM, float *v1zM) //Compute the stress-xz at PML-y-I region //use grid_node_comm //use wave_field_comm //implicit NONE { int i,j,k,lb,kodd,jkq,inod,irw; float cusxz,dvxz,dvzx,qxz,sm,dmws,et,et1; //if (lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_txz[8]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_txz[9] || lb > lby1) { return; } // for (i = nd1_txz[8]; i <= nd1_txz[9]; i++) // //do i=nd1_txz(9),nd1_txz(10) // { // for (lb=lby[0]; lb <= lby[1]; 
lb++) // //do lb=lby(1),lby(2) // { for (j = nd1_txz[4*lb]; j <= nd1_txz[1+4*lb]; j++) //do j=nd1_txz(1+4*lb),nd1_txz(2+4*lb) { kodd=2 * ((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1 jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_txz[12]; k <= nd1_txz[17]; k++) //do k=nd1_txz(13),nd1_txz(18) { inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i+1,j))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); dvzx=dxi1(1,i)*v1z(k,i-1,j)+dxi1(2,i)*v1z(k,i, j)+ dxi1(3,i)*v1z(k,i+1,j)+dxi1(4,i)*v1z(k,i+2,j); if(k<nztop) { dvxz=dzh1(1,k)*v1x(k-2,i,j)+dzh1(2,k)*v1x(k-1,i,j)+ dzh1(3,k)*v1x(k, i,j)+dzh1(4,k)*v1x(k+1,i,j); } else { dvxz=dzh1(2,k)/ca*(v1x(k-1,i,j)-v1x(k,i,j)); } cusxz=(dvzx+dvxz)*sm; qxz=qt1xz(k,i,j); qt1xz(k,i,j)=qxz*et+dmws*cusxz*et1; t1xz(k,i,j)=t1xz(k,i,j)+cusxz-qxz-qt1xz(k,i,j); } } // } // } return; } __global__ void stress_yz_PmlX_IC(int nxb1, int nyb1, int nztop, int nxtop, int lbx0, int lbx1, int *nd1_tyz, int *idmat1M, float ca, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi1M, float *dzh1M, float *t1yzM, float *qt1yzM, float *v1yM, float *v1zM) //Compute the stress-yz at PML-x-I region //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float cusyz,dvyz,dvzy,qyz,sm,dmws,et,et1; //if(lbx[0] > lbx[1]) return; //if(lbx(1)>lbx(2) ) return j = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[2]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd1_tyz[3] || lb > lbx1) { return; } // for (j = nd1_tyz[2]; j <= nd1_tyz[3]; j++) // //do j=nd1_tyz(3),nd1_tyz(4) // { kodd=2 * ((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1 // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { for (i = nd1_tyz[6+4*lb]; i <= nd1_tyz[7+4*lb]; i++) //do i=nd1_tyz(7+4*lb),nd1_tyz(8+4*lb) { jkq = ((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k = nd1_tyz[12]; k <= nd1_tyz[17]; k++) //do k=nd1_tyz(13),nd1_tyz(18) { inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); dvzy=dyi1(1,j)*v1z(k,i,j-1)+dyi1(2,j)*v1z(k,i,j )+ dyi1(3,j)*v1z(k,i,j+1)+dyi1(4,j)*v1z(k,i,j+2); if(k<nztop) { dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+ dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j); } else { dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j)); } cusyz=(dvzy+dvyz)*sm; qyz=qt1yz(k,i,j); qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1; t1yz(k,i,j)=t1yz(k,i,j)+cusyz-qyz-qt1yz(k,i,j); } } // } // } return; } __global__ void stress_yz_PmlY_IC(int nxb1, int nyb1, int mw1_pml1, int nxtop, int nztop, int lby0, int lby1, int *nd1_tyz, int *idmat1M, float ca, float *drth1M, float *damp1_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi1M, float *dzh1M, float *t1yzM, float *qt1yzM, float *t1yz_pyM, float *qt1yz_pyM, float *v1yM, float *v1zM) //Compute the stress-yz at PML-y-I region //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoyz,cusyz,dvyz,qyz,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd1_tyz[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd1_tyz[11] || lb > lby1) { return; 
} // for (i = nd1_tyz[6]; i <= nd1_tyz[11]; i++) // //do i=nd1_tyz(7),nd1_tyz(12) // { jb=0; for (k = lby0; k < lb; k++) { for (j = nd1_tyz[4*k]; j <= nd1_tyz[1+4*k]; j++) { jb++; } } // for (lb=lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j = nd1_tyz[4*lb]; j <= nd1_tyz[1+4*lb]; j++) //do j=nd1_tyz(1+4*lb),nd1_tyz(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth1(kb,lb); kodd=2*((j+nyb1)&1)+1; //kodd=2*mod(j+nyb1,2)+1; jkq=((i+nxb1)&1)+kodd; //jkq=mod(i+nxb1,2)+kodd for (k=nd1_tyz[12]; k <= nd1_tyz[17]; k++) //do k=nd1_tyz(13),nd1_tyz(18) { damp2=1./(1.+damp1_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat1(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat1(k-1,i,j+1))); irw=jkq+4*(k&1); //irw=jkq+4*mod(k,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); if(k<nztop) { dvyz=dzh1(1,k)*v1y(k-2,i,j)+dzh1(2,k)*v1y(k-1,i,j)+ dzh1(3,k)*v1y(k, i,j)+dzh1(4,k)*v1y(k+1,i,j); } else { dvyz=dzh1(2,k)/ca*(v1y(k-1,i,j)-v1y(k,i,j)); } cusyz=dvyz*sm; qyz=qt1yz(k,i,j); qt1yz(k,i,j)=qyz*et+dmws*cusyz*et1; taoyz=t1yz(k,i,j)-t1yz_py(k,i,jb)+cusyz-qyz-qt1yz(k,i,j); cusyz=sm*dyi1(2,j)/ca*(v1z(k,i,j)-v1z(k,i,j+1)); qyz=qt1yz_py(k,i,jb); qt1yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1; t1yz_py(k,i,jb)=damp1*t1yz_py(k,i,jb)+ damp2*(cusyz-qyz-qt1yz_py(k,i,jb)); t1yz(k,i,j)=taoyz+t1yz_py(k,i,jb); } } // } // } return; } __global__ void stress_norm_xy_II(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int *nd2_tyy, int *idmat2M, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *t2xxM, float *t2xyM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2xyM, float *qt2yyM, float *qt2zzM, float *dxh2M, float *dyh2M, float *dxi2M, float *dyi2M, float *dzi2M, float *v2xM, float *v2yM, float *v2zM) // Compute stress-Norm and XY component in Region II // use grid_node_comm // use wave_field_comm // implicit NONE // integer:: i,j,k,kodd,inod,jkq,irw // real:: sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy, & // cl,sm2,et,et1,dmws,pm,wtp,wts { int i,j,k,kodd,inod,jkq,irw; float sxx,syy,szz,sxy,sss,qxx,qyy,qzz,qxy,cusxy,cl,sm2,et,et1,dmws,pm,wtp,wts; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[8]; if (j > nd2_tyy[3] || i > nd2_tyy[9]) { return; } // for (j=nd2_tyy[2]; j <= nd2_tyy[3]; j++) // //do j=nd2_tyy(3),nd2_tyy(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_tyy[8]; i <= nd2_tyy[9]; i++) // //do i=nd2_tyy(9),nd2_tyy(10) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_tyy[12]; k <= nd2_tyy[15]; k++) //do k=nd2_tyy(13),nd2_tyy(16) { sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+ dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1); sxy=dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+ dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+ dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+ dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2); szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+ dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j); sss=sxx+syy+szz; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; cusxy=sxy/(1./sm2+.5/cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2); et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*(syy+szz))*et1; t2xx(k,i,j)=t2xx(k,i,j)+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); 
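// (same save/relax/subtract pattern as qxx above: keep the old memory value,
//  relax it toward the new strain combination, and remove both the old and the
//  updated memory value from the stress)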
qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*(sxx+szz))*et1; t2yy(k,i,j)=t2yy(k,i,j)+sm2*syy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*(sxx+syy))*et1; t2zz(k,i,j)=t2zz(k,i,j)+sm2*szz+cl*sss-qzz-qt2zz(k,i,j); qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+wts/sm2*cusxy*et1; t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j); } // } // } return; } //call stress_xz_yz_II __global__ void stress_xz_yz_IIC(int nxb2, int nyb2, int nztop, int nxbtm, int nzbtm, int *nd2_tyz, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *dzh2M, float *t2xzM, float *t2yzM, float *qt2xzM, float *qt2yzM, float *v2xM, float *v2yM, float *v2zM) //Compute stress-XZ and YZ component in the Region II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,kodd,inod,jkq,irw //real:: qxz,qyz,cusxz,cusyz,sm,et,et1,dmws { int i,j,k,kodd,inod,jkq,irw; float qxz,qyz,cusxz,cusyz,sm,et,et1,dmws; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[8]; if (j > nd2_tyz[3] || i > nd2_tyz[9]) { return; } // for (j = nd2_tyz[2]; j <= nd2_tyz[3]; j++) // //do j=nd2_tyz(3),nd2_tyz(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_tyz[8]; i <= nd2_tyz[9]; i++) // //do i=nd2_tyz(9),nd2_tyz(10) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k=nd2_tyz[12]; k <= nd2_tyz[15]; k++) //do k=nd2_tyz(13),nd2_tyz(16) { inod=idmat2(k,i,j); sm=cmu(inod); cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+ dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j)+ dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+ dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j))/ (.5/sm+.5/cmu(idmat2(k-1,i+1,j))); cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+ dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+ dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+ dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/ (.5/sm+.5/cmu(idmat2(k-1,i,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j); qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j); } // } // } return; } //call stress_norm_PmlX_II __global__ void stress_norm_PmlX_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nztop, int nxbtm, int nybtm, int nzbtm, int lbx0, int lbx1, int *nd2_tyy, int *idmat2M, float ca, float *drti2M, float *damp2_xM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh2M, float *dyh2M, float *dzi2M, float *t2xxM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2yyM, float *qt2zzM, float *t2xx_pxM, float *t2yy_pxM, float *qt2xx_pxM, float *qt2yy_pxM, float *v2xM, float *v2yM, float *v2zM) //Compute the Stress-norm at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; int nti; //if(lbx[0] > lbx[1]) return; //if( lbx(1)>lbx(2) ) return nti = (lbx1 - lbx0 + 1) * mw2_pml + lbx1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_tyy[5] || lb > lbx1) { return; } ib = 0; for 
(k = lbx0; k < lb; k++) { for (i=nd2_tyy[6+4*k]; i <= nd2_tyy[7+4*k]; i++) { ib++; } } // for (j=nd2_tyy[0]; j <= nd2_tyy[5]; j++) // //do j=nd2_tyy(1),nd2_tyy(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // ib=0; // for (lb=lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i=nd2_tyy[6+4*lb]; i <= nd2_tyy[7+4*lb]; i++) //do i=nd2_tyy(7+4*lb),nd2_tyy(8+4*lb) { kb=kb+1; ib=ib+1; rti=drti2(kb,lb); jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd; for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++) //do k=nd2_tyy(13),nd2_tyy(18) { damp2=1./(1.+damp2_x(k,j,lb)*rti); damp1=damp2*2.0-1.0; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxx=t2xx(k,i,j)-t2xx_px(k,ib,j); taoyy=t2yy(k,i,j)-t2yy_px(k,ib,j); taozz=t2zz(k,i,j)-t2yy_px(k,ib,j); if(j>nd2_tyy[1] && j<nd2_tyy[4]) { //if(j>nd2_tyy(2) .and. j<nd2_tyy(5)) { syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+ dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1); if(k<nd2_tyy[16]) { //if(k<nd2_tyy(17)) { szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+ dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j); } else { szz=0.0; } sss=syy+szz; qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp-wts)*sss*et1; taoxx=taoxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*szz)*et1; taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*syy)*et1; taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j); } sxx=dxh2(2,i)/ca*(v2x(k,i-1,j)-v2x(k,i,j)); qxx=qt2xx_px(k,ib,j); qt2xx_px(k,ib,j)=qxx*et+wtp*sxx*et1; t2xx_px(k,ib,j)=damp1*t2xx_px(k,ib,j)+ damp2*(pm*sxx-qxx-qt2xx_px(k,ib,j)); t2xx(k,i,j)=taoxx+t2xx_px(k,ib,j); qyy=qt2yy_px(k,ib,j); qt2yy_px(k,ib,j)=qyy*et+(wtp-wts)*sxx*et1; t2yy_px(k,ib,j)=damp1*t2yy_px(k,ib,j)+ damp2*(cl*sxx-qyy-qt2yy_px(k,ib,j)); t2yy(k,i,j)=taoyy+t2yy_px(k,ib,j); t2zz(k,i,j)=taozz+t2yy_px(k,ib,j); } } // } // } return; } __global__ void stress_norm_PmlY_II(int nxb2, int nyb2, int nztop, int nxbtm, int nzbtm, int mw2_pml1, int lby0, int lby1, int *nd2_tyy, int *idmat2M, float ca, float *drti2M, float *damp2_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh2M, float *dyh2M, float *dzi2M, float *t2xxM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2yyM, float *qt2zzM, float *t2xx_pyM, float *t2yy_pyM, float *qt2xx_pyM, float *qt2yy_pyM, float *v2xM, float *v2yM, float *v2zM) //Compute the stress-norm at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,rti,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; //if( lby[0] > lby[1] ) return; //if( lby(1)>lby(2) ) return; i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_tyy[11] || lb > lby1) { return; } jb = 0; for (k = lby0; k < lb; k++) { for (j=nd2_tyy[4*k]; j <= nd2_tyy[1+4*k]; j++) { jb++; } } // for (i = nd2_tyy[6]; i <= nd2_tyy[11]; i++) // //do i=nd2_tyy(7),nd2_tyy(12) // { // jb=0; // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j=nd2_tyy[4*lb]; j <= nd2_tyy[1+4*lb]; j++) //do j=nd2_tyy(1+4*lb),nd2_tyy(2+4*lb) { 
kb=kb+1; jb=jb+1; rti=drti2(kb,lb); kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1; jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k=nd2_tyy[12]; k <= nd2_tyy[17]; k++) //do k=nd2_tyy(13),nd2_tyy(18) { damp2=1./(1.+damp2_y(k,i,lb)*rti); damp1=damp2*2.0-1.; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxx=t2xx(k,i,j)-t2xx_py(k,i,jb); taoyy=t2yy(k,i,j)-t2yy_py(k,i,jb); taozz=t2zz(k,i,j)-t2xx_py(k,i,jb); if(k<nd2_tyy[16]) { //if(k<nd2_tyy(17)) then szz=dzi2(1,k)*v2z(k-1,i,j)+dzi2(2,k)*v2z(k, i,j)+ dzi2(3,k)*v2z(k+1,i,j)+dzi2(4,k)*v2z(k+2,i,j); if(i>nd2_tyy[7] && i<nd2_tyy[10]) { //if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) { sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); } else { sxx=0.0; } sss=sxx+szz; qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*szz)*et1; taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp-wts)*sss*et1; taoyy=taoyy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp*sss-wts*sxx)*et1; taozz=taozz+sm2*szz+cl*sss-qzz-qt2zz(k,i,j); } else { if(i>nd2_tyy[7] && i<nd2_tyy[10]) { //if(i>nd2_tyy(8) .and. i<nd2_tyy(11)) then sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+wtp*sxx*et1; taoxx=taoxx+pm*sxx-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp-wts)*sxx*et1; taoyy=taoyy+cl*sxx-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp-wts)*sxx*et1; taozz=taozz+cl*sxx-qzz-qt2zz(k,i,j); } } syy=dyh2(2,j)/ca*(v2y(k,i,j-1)-v2y(k,i,j)); qxx=qt2xx_py(k,i,jb); qt2xx_py(k,i,jb)=qxx*et+(wtp-wts)*syy*et1; t2xx_py(k,i,jb)=damp1*t2xx_py(k,i,jb)+damp2*(cl*syy-qxx-qt2xx_py(k,i,jb)); t2xx(k,i,j)=taoxx+t2xx_py(k,i,jb); t2zz(k,i,j)=taozz+t2xx_py(k,i,jb); qyy=qt2yy_py(k,i,jb); qt2yy_py(k,i,jb)=qyy*et+wtp*syy*et1; t2yy_py(k,i,jb)=damp1*t2yy_py(k,i,jb)+damp2*(pm*syy-qyy-qt2yy_py(k,i,jb)); t2yy(k,i,j)=taoyy+t2yy_py(k,i,jb); } } // } // } return; } __global__ void stress_norm_PmlZ_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nztop, int nxbtm, int nzbtm, int *nd2_tyy, int *idmat2M, float ca, float *damp2_zM, float *drth2M, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh2M, float *dyh2M, float *dzi2M, float *t2xxM, float *t2yyM, float *t2zzM, float *qt2xxM, float *qt2yyM, float *qt2zzM, float *t2xx_pzM, float *t2zz_pzM, float *qt2xx_pzM, float *qt2zz_pzM, float *v2xM, float *v2yM, float *v2zM) //Compute the stress-norm at region of PML-z-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kb,kodd,jkq,inod,irw //real:: taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz, & // damp2,damp1,cl,sm2,pm,et,et1,wtp,wts { int i,j,k,lb,kb,kodd,jkq,inod,irw; float taoxx,taoyy,taozz,sxx,syy,szz,sss,qxx,qyy,qzz,damp2,damp1,cl,sm2,pm,et,et1,wtp,wts; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyy[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyy[6]; if (j > nd2_tyy[5] || i > nd2_tyy[11]) { return; } // for (j = nd2_tyy[0]; j <= nd2_tyy[5]; j++) // //do j=nd2_tyy(1),nd2_tyy(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i=nd2_tyy[6]; i <= nd2_tyy[11]; i++) // //do i=nd2_tyy(7),nd2_tyy(12) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd kb=0; for (k = nd2_tyy[16]; k <= nd2_tyy[17]; k++) 
//do k=nd2_tyy(17),nd2_tyy(18) { kb=kb+1; damp2=1./(1.+damp2_z(i,j)*drth2(kb,1)); damp1=damp2*2.-1.; inod=idmat2(k,i,j); cl=clamda(inod); sm2=2.*cmu(inod); pm=cl+sm2; irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; wtp= pm*qwp(inod)*(qwp(inod)*qwt1(irw)+qwt2(irw)); wts=sm2*qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxx=t2xx(k,i,j)-t2xx_pz(kb,i,j); taoyy=t2yy(k,i,j)-t2xx_pz(kb,i,j); taozz=t2zz(k,i,j)-t2zz_pz(kb,i,j); if(i>nd2_tyy[7] && i<nd2_tyy[10] && j>nd2_tyy[1] && j<nd2_tyy[4]) { //if(i>nd2_tyy(8) .and. i<nd2_tyy(11) .and. & // j>nd2_tyy(2) .and. j<nd2_tyy(5)) then sxx=dxh2(1,i)*v2x(k,i-2,j)+dxh2(2,i)*v2x(k,i-1,j)+ dxh2(3,i)*v2x(k,i ,j)+dxh2(4,i)*v2x(k,i+1,j); syy=dyh2(1,j)*v2y(k,i,j-2)+dyh2(2,j)*v2y(k,i,j-1)+ dyh2(3,j)*v2y(k,i ,j)+dyh2(4,j)*v2y(k,i,j+1); sss=sxx+syy; qxx=qt2xx(k,i,j); qt2xx(k,i,j)=qxx*et+(wtp*sss-wts*syy)*et1; taoxx=taoxx+sm2*sxx+cl*sss-qxx-qt2xx(k,i,j); qyy=qt2yy(k,i,j); qt2yy(k,i,j)=qyy*et+(wtp*sss-wts*sxx)*et1; taoyy=taoyy+sm2*syy+cl*sss-qyy-qt2yy(k,i,j); qzz=qt2zz(k,i,j); qt2zz(k,i,j)=qzz*et+(wtp-wts)*sss*et1; taozz=taozz+cl*sss-qzz-qt2zz(k,i,j); } szz=dzi2(2,k)/ca*(v2z(k,i,j)-v2z(k+1,i,j)); qxx=qt2xx_pz(kb,i,j); qt2xx_pz(kb,i,j)=qxx*et+(wtp-wts)*szz*et1; t2xx_pz(kb,i,j)=damp1*t2xx_pz(kb,i,j)+ damp2*(cl*szz-qxx-qt2xx_pz(kb,i,j)); t2xx(k,i,j)=taoxx+t2xx_pz(kb,i,j); t2yy(k,i,j)=taoyy+t2xx_pz(kb,i,j); qzz=qt2zz_pz(kb,i,j); qt2zz_pz(kb,i,j)=qzz*et+wtp*szz*et1; t2zz_pz(kb,i,j)=damp1*t2zz_pz(kb,i,j)+ damp2*(pm*szz-qzz-qt2zz_pz(kb,i,j)); t2zz(k,i,j)=taozz+t2zz_pz(kb,i,j); } // } // } return; } __global__ void stress_xy_PmlX_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nxbtm, int nybtm, int nzbtm, int nztop, int lbx0, int lbx1, int *nd2_txy, int *idmat2M, float ca, float *drth2M, float *damp2_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *t2xyM, float *qt2xyM, float *t2xy_pxM, float *qt2xy_pxM, float *v2xM, float *v2yM) //Compute the Stress-xy at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1; int nth; //if(lbx[0] > lbx[1]) return; nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0; //nth = (lbx(2) - lbx(1) + 1) * mw2_pml + 1 - lbx(1) j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_txy[5] || lb > lbx1) { return; } ib = 0; for (k = lbx0; k < lb; k++) { for (i=nd2_txy[6+4*k]; i <= nd2_txy[7+4*k]; i++) { ib++; } } // for (j = nd2_txy[0]; j <= nd2_txy[5]; j++) // //do j=nd2_txy(1),nd2_txy(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // ib=0; // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i=nd2_txy[6+4*lb]; i <= nd2_txy[7+4*lb]; i++) //do i=nd2_txy(7+4*lb),nd2_txy(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth2(kb,lb); jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txy[12]; k <= nd2_txy[17]; k++) //do k=nd2_txy(13),nd2_txy(18) { damp2=1./(1.+damp2_x(k,j,lb)*rth); damp1=damp2*2.0-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t2xy(k,i,j)-t2xy_px(k,ib,j); if(j > nd2_txy[1] && j<nd2_txy[4]) { //if(j>nd2_txy(2) .and. 
j<nd2_txy(5)) then cusxy=(dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j)+ dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm; qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1; taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j); } cusxy=sm*dxi2(2,i)/ca*(v2y(k,i,j)-v2y(k,i+1,j)); qxy=qt2xy_px(k,ib,j); qt2xy_px(k,ib,j)=qxy*et+dmws*cusxy*et1; t2xy_px(k,ib,j)=damp1*t2xy_px(k,ib,j)+ damp2*(cusxy-qxy-qt2xy_px(k,ib,j)); t2xy(k,i,j)=taoxy+t2xy_px(k,ib,j); } } // } // } return; } __global__ void stress_xy_PmlY_IIC(int nxb2, int nyb2, int mw2_pml1, int nztop, int nxbtm, int nzbtm, int lby0, int lby1, int *nd2_txy, int *idmat2M, float ca, float *drth2M, float *damp2_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *t2xyM, float *qt2xyM, float *t2xy_pyM, float *qt2xy_pyM, float *v2xM, float *v2yM) //Compute the Stress-xy at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoxy,cusxy,qxy,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_txy[11] || lb > lby1) { return; } jb = 0; for (k = lby0; k < lb; k++) { for (j=nd2_txy[4*k]; j <= nd2_txy[1+4*k]; j++) { jb++; } } // for (i = nd2_txy[6]; i <= nd2_txy[11]; i++) // //do i=nd2_txy(7),nd2_txy(12) // { // jb=0; // for (lb=lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j=nd2_txy[4*lb]; j <= nd2_txy[1+4*lb]; j++) //do j=nd2_txy(1+4*lb),nd2_txy(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth2(kb,lb); kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txy[12]; k <= nd2_txy[17]; k++) //do k=nd2_txy(13),nd2_txy(18) { damp2=1./(1.+damp2_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxy=t2xy(k,i,j)-t2xy_py(k,i,jb); if(i>nd2_txy[7] && i<nd2_txy[10]) { //if(i>nd2_txy(8) .and. 
i<nd2_txy(11)) then cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i,j)+ dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j))*sm; qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1; taoxy=taoxy+cusxy-qxy-qt2xy(k,i,j); } cusxy=sm*dyi2(2,j)/ca*(v2x(k,i,j)-v2x(k,i,j+1)); qxy=qt2xy_py(k,i,jb); qt2xy_py(k,i,jb)=qxy*et+dmws*cusxy*et1; t2xy_py(k,i,jb)=damp1*t2xy_py(k,i,jb)+ damp2*(cusxy-qxy-qt2xy_py(k,i,jb)); t2xy(k,i,j)=taoxy+t2xy_py(k,i,jb); } } // } // } return; } __global__ void stress_xy_PmlZ_II(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int *nd2_txy, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dyi2M, float *t2xyM, float *qt2xyM, float *v2xM, float *v2yM) //Compute the Stress-xy at region of PML-z-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: cusxy,qxy,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float cusxy,qxy,sm,dmws,et,et1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txy[2]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txy[8]; if (j > nd2_txy[3] || i > nd2_txy[9]) { return; } // for (j = nd2_txy[2]; j <= nd2_txy[3]; j++) // //do j=nd2_txy(3),nd2_txy(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_txy[8]; i <= nd2_txy[9]; i++) // //do i=nd2_txy(9),nd2_txy(10) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k=nd2_txy[16]; k <= nd2_txy[17]; k++) //do k=nd2_txy(17),nd2_txy(18) { inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k,i+1,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2); et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); cusxy=(dxi2(1,i)*v2y(k,i-1,j)+dxi2(2,i)*v2y(k,i, j)+ dxi2(3,i)*v2y(k,i+1,j)+dxi2(4,i)*v2y(k,i+2,j)+ dyi2(1,j)*v2x(k,i,j-1)+dyi2(2,j)*v2x(k,i,j )+ dyi2(3,j)*v2x(k,i,j+1)+dyi2(4,j)*v2x(k,i,j+2))*sm; qxy=qt2xy(k,i,j); qt2xy(k,i,j)=qxy*et+dmws*cusxy*et1; t2xy(k,i,j)=t2xy(k,i,j)+cusxy-qxy-qt2xy(k,i,j); } // } // } return; } __global__ void stress_xz_PmlX_IIC(int nxb2, int nyb2, int mw2_pml, int mw2_pml1, int nxbtm, int nybtm, int nzbtm, int nztop, int lbx0, int lbx1, int *nd2_txz, int *idmat2M, float ca, float *drth2M, float *damp2_xM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dzh2M, float *t2xzM, float *qt2xzM, float *t2xz_pxM, float *qt2xz_pxM, float *v2xM, float *v2zM) //Compute the stress-xz at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,ib,kb,kodd,jkq,inod,irw //real:: taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,ib,kb,kodd,jkq,inod,irw; float taoxz,cusxz,qxz,rth,damp2,damp1,sm,dmws,et,et1; int nth; //if(lbx[0] > lbx[1]) return; nth = (lbx1 - lbx0 + 1) * mw2_pml + 1 - lbx0; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_txz[5] || lb > lbx1) { return; } ib=0; for (k = lbx0; k < lb; k++) { for (i=nd2_txz[6+4*k]; i <= nd2_txz[7+4*k]; i++) { ib++; } } // for (j = nd2_txz[0]; j <= nd2_txz[5]; j++) // //do j=nd2_txz(1),nd2_txz(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // ib=0; // for (lb=lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { kb=0; for (i=nd2_txz[6+4*lb]; i <= nd2_txz[7+4*lb]; i++) //do i=nd2_txz(7+4*lb),nd2_txz(8+4*lb) { kb=kb+1; ib=ib+1; rth=drth2(kb,lb); jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txz[12]; k <= nd2_txz[17]; k++) //do k=nd2_txz(13),nd2_txz(18) { damp2=1./(1.+damp2_x(k,j,lb)*rth); damp1=damp2*2.-1.; 
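// Note (hedged interpretation, added comment): this loop body implements a
// split-field PML update. The stress is separated into an interior part,
// taoxz = t2xz - t2xz_px, and the auxiliary PML part t2xz_px; only the auxiliary
// part is damped via damp1/damp2 before the two are recombined at the end as
// t2xz = taoxz + t2xz_px.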
inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxz=t2xz(k,i,j)-t2xz_px(k,ib,j); if(k < nd2_txz[16]) { //if(k<nd2_txz(17)) then cusxz=(dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+ dzh2(3,k)*v2x(k,i,j)+dzh2(4,k)*v2x(k+1,i,j))*sm; qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j); } cusxz=sm*dxi2(2,i)/ca*(v2z(k,i,j)-v2z(k,i+1,j)); qxz=qt2xz_px(k,ib,j); qt2xz_px(k,ib,j)=qxz*et+dmws*cusxz*et1; t2xz_px(k,ib,j)=damp1*t2xz_px(k,ib,j)+ damp2*(cusxz-qxz-qt2xz_px(k,ib,j)); t2xz(k,i,j)=taoxz+t2xz_px(k,ib,j); } } // } // } return; } __global__ void stress_xz_PmlY_IIC(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int lby0, int lby1, int *nd2_txz, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dzh2M, float *v2xM, float *v2zM, float *t2xzM, float *qt2xzM) //Compute the stress-xz at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float dvxz,dvzx,cusxz,qxz,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[8]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_txz[9] || lb > lby1) { return; } // for (i = nd2_txz[8]; i <= nd2_txz[9]; i++) // //do i=nd2_txz(9),nd2_txz(10) // { // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { for (j=nd2_txz[4*lb]; j <= nd2_txz[1+4*lb]; j++) //do j=nd2_txz(1+4*lb),nd2_txz(2+4*lb) { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_txz[12]; k <= nd2_txz[15]; k++) //do k=nd2_txz(13),nd2_txz(16) { inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); dvzx=dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+ dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j); dvxz=dzh2(1,k)*v2x(k-2,i,j)+dzh2(2,k)*v2x(k-1,i,j)+ dzh2(3,k)*v2x(k, i,j)+dzh2(4,k)*v2x(k+1,i,j); cusxz=(dvzx+dvxz)*sm; qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; t2xz(k,i,j)=t2xz(k,i,j)+cusxz-qxz-qt2xz(k,i,j); } } // } // } return; } __global__ void stress_xz_PmlZ_IIC(int nxb2, int nyb2, int mw2_pml1, int nxbtm, int nzbtm, int nztop, int *nd2_txz, int *idmat2M, float ca, float *drti2M, float *damp2_zM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dxi2M, float *dzh2M, float *t2xzM, float *qt2xzM, float *t2xz_pzM, float *qt2xz_pzM, float *v2xM, float *v2zM) //Compute the stress-xz at region of PML-z-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kb,kodd,jkq,inod,irw //real:: taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,kb,kodd,jkq,inod,irw; float taoxz,cusxz,qxz,damp2,damp1,sm,dmws,et,et1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_txz[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_txz[6]; if (j > nd2_txz[5] || i > nd2_txz[11]) { return; } // for (j = nd2_txz[0]; j <= nd2_txz[5]; j++) // //do j=nd2_txz(1),nd2_txz(6) // { kodd = 2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_txz[6]; i <= nd2_txz[11]; i++) // //do i=nd2_txz(7),nd2_txz(12) // { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd kb=0; for (k = nd2_txz[16]; k <= nd2_txz[17]; 
k++) //do k=nd2_txz(17),nd2_txz(18) { kb=kb+1; damp2=1./(1.+damp2_z(i,j)*drti2(kb,1)); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i+1,j))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoxz=t2xz(k,i,j)-t2xz_pz(kb,i,j); if(i > nd2_txz[7] && i<nd2_txz[10]) { //if(i>nd2_txz(8) .and. i<nd2_txz(11)) then cusxz=(dxi2(1,i)*v2z(k,i-1,j)+dxi2(2,i)*v2z(k,i, j)+ dxi2(3,i)*v2z(k,i+1,j)+dxi2(4,i)*v2z(k,i+2,j))*sm; qxz=qt2xz(k,i,j); qt2xz(k,i,j)=qxz*et+dmws*cusxz*et1; taoxz=taoxz+cusxz-qxz-qt2xz(k,i,j); } cusxz=sm*dzh2(2,k)/ca*(v2x(k-1,i,j)-v2x(k,i,j)); qxz=qt2xz_pz(kb,i,j); qt2xz_pz(kb,i,j)=qxz*et+dmws*cusxz*et1; t2xz_pz(kb,i,j)=damp1*t2xz_pz(kb,i,j)+ damp2*(cusxz-qxz-qt2xz_pz(kb,i,j)); t2xz(k,i,j)=taoxz+t2xz_pz(kb,i,j); } // } // } return; } //call stress_yz_PmlX_II __global__ void stress_yz_PmlX_IIC(int nxb2, int nyb2, int nxbtm, int nzbtm, int nztop, int lbx0, int lbx1, int *nd2_tyz, int *idmat2M, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi2M, float *dzh2M, float *t2yzM, float *qt2yzM, float *v2yM, float *v2zM) //Compute the stress-yz at region of PML-x-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kodd,jkq,inod,irw //real:: cusyz,qyz,sm,dmws,et,et1 { int i,j,k,lb,kodd,jkq,inod,irw; float cusyz,qyz,sm,dmws,et,et1; //if(lbx[0] > lbx[1]) return; //if( lbx(1)>lbx(2) ) return j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[2]; lb = blockIdx.y * blockDim.y + threadIdx.y + lbx0; if (j > nd2_tyz[3] || lb > lbx1) { return; } // for (j=nd2_tyz[2]; j <= nd2_tyz[3]; j++) // //do j=nd2_tyz(3),nd2_tyz(4) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (lb = lbx[0]; lb <= lbx[1]; lb++) // //do lb=lbx(1),lbx(2) // { for (i = nd2_tyz[6+4*lb]; i <= nd2_tyz[7+4*lb]; i++) //do i=nd2_tyz(7+4*lb),nd2_tyz(8+4*lb) { jkq=((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_tyz[12]; k <= nd2_tyz[15]; k++) //do k=nd2_tyz(13),nd2_tyz(16) { inod=idmat2(k,i,j); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j )+ dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2)+ dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+ dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))/ (.5/cmu(inod)+.5/cmu(idmat2(k-1,i,j+1))); qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; t2yz(k,i,j)=t2yz(k,i,j)+cusyz-qyz-qt2yz(k,i,j); } } // } // } return; } //call stress_yz_PmlY_II __global__ void stress_yz_PmlY_IIC(int nxb2, int nyb2, int mw2_pml1, int nxbtm, int nzbtm, int nztop, int lby0, int lby1, int *nd2_tyz, int *idmat2M, float ca, float *drth2M, float *damp2_yM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi2M, float *dzh2M, float *t2yzM, float *qt2yzM, float *t2yz_pyM, float *qt2yz_pyM, float *v2yM, float *v2zM) //Compute the stress-yz at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,jb,kb,kodd,jkq,inod,irw //real:: taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,jb,kb,kodd,jkq,inod,irw; float taoyz,cusyz,qyz,rth,damp2,damp1,sm,dmws,et,et1; //if(lby[0] > lby[1]) return; //if( lby(1)>lby(2) ) return i = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[6]; lb = blockIdx.y * blockDim.y + threadIdx.y + lby0; if (i > nd2_tyz[11] || lb > lby1) { return; } jb = 0; for (k = lby0; k < lb; k++) { for (j = nd2_tyz[4*k]; j <= nd2_tyz[1+4*k]; j++) { 
jb++; } } // for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++) // //do i=nd2_tyz(7),nd2_tyz(12) // { // jb=0; // for (lb = lby[0]; lb <= lby[1]; lb++) // //do lb=lby(1),lby(2) // { kb=0; for (j = nd2_tyz[4*lb]; j <= nd2_tyz[1+4*lb]; j++) //do j=nd2_tyz(1+4*lb),nd2_tyz(2+4*lb) { kb=kb+1; jb=jb+1; rth=drth2(kb,lb); kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 jkq = ((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd for (k = nd2_tyz[12]; k <= nd2_tyz[17]; k++) //do k=nd2_tyz(13),nd2_tyz(18) { damp2=1./(1.+damp2_y(k,i,lb)*rth); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2) et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoyz=t2yz(k,i,j)-t2yz_py(k,i,jb); if(k<nd2_tyz[16]) { //if(k<nd2_tyz(17)) { cusyz=(dzh2(1,k)*v2y(k-2,i,j)+dzh2(2,k)*v2y(k-1,i,j)+ dzh2(3,k)*v2y(k, i,j)+dzh2(4,k)*v2y(k+1,i,j))*sm; qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j); } cusyz=sm*dyi2(2,j)/ca*(v2z(k,i,j)-v2z(k,i,j+1)); qyz=qt2yz_py(k,i,jb); qt2yz_py(k,i,jb)=qyz*et+dmws*cusyz*et1; t2yz_py(k,i,jb)=damp1*t2yz_py(k,i,jb)+ damp2*(cusyz-qyz-qt2yz_py(k,i,jb)); t2yz(k,i,j)=taoyz+t2yz_py(k,i,jb); } } // } // } return; } //call stress_yz_PmlZ_II __global__ void stress_yz_PmlZ_IIC(int nxb2, int nyb2, int mw2_pml1, int nxbtm, int nzbtm, int nztop, int *nd2_tyz, int *idmat2M, float ca, float *drti2M, float *damp2_zM, float *cmuM, float *epdtM, float *qwsM, float *qwt1M, float *qwt2M, float *dyi2M, float *dzh2M, float *t2yzM, float *qt2yzM, float *t2yz_pzM, float *qt2yz_pzM, float *v2yM, float *v2zM) //Compute the stress-yz at region of PML-y-II //use grid_node_comm //use wave_field_comm //implicit NONE //integer:: i,j,k,lb,kb,kodd,jkq,inod,irw //real:: taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1 { int i,j,k,lb,kb,kodd,jkq,inod,irw; float taoyz,cusyz,qyz,damp2,damp1,sm,dmws,et,et1; j = blockIdx.x * blockDim.x + threadIdx.x + nd2_tyz[0]; i = blockIdx.y * blockDim.y + threadIdx.y + nd2_tyz[6]; if (j > nd2_tyz[5] || i > nd2_tyz[11]) { return; } // for (j = nd2_tyz[0]; j <= nd2_tyz[5]; j++) // //do j=nd2_tyz(1),nd2_tyz(6) // { kodd=2*((j+nyb2)&1)+1; //kodd=2*mod(j+nyb2,2)+1 // for (i = nd2_tyz[6]; i <= nd2_tyz[11]; i++) // //do i=nd2_tyz(7),nd2_tyz(12) // { jkq = ((i+nxb2)&1)+kodd; //jkq=mod(i+nxb2,2)+kodd kb=0; for (k = nd2_tyz[16]; k <= nd2_tyz[17]; k++) //do k=nd2_tyz(17),nd2_tyz(18) { kb=kb+1; damp2=1./(1.+damp2_z(i,j)*drti2(kb,1)); damp1=damp2*2.-1.; inod=idmat2(k,i,j); sm=2./(1./cmu(inod)+1./cmu(idmat2(k-1,i,j+1))); irw=jkq+4*((k+nztop)&1); //irw=jkq+4*mod(k+nztop,2); et=epdt(irw); et1=1.0-et; dmws=qws(inod)*(qws(inod)*qwt1(irw)+qwt2(irw)); taoyz=t2yz(k,i,j)-t2yz_pz(kb,i,j); if (j > nd2_tyz[1] && j<nd2_tyz[4]) { //if(j>nd2_tyz(2) .and. 
j<nd2_tyz(5)) then cusyz=(dyi2(1,j)*v2z(k,i,j-1)+dyi2(2,j)*v2z(k,i,j)+ dyi2(3,j)*v2z(k,i,j+1)+dyi2(4,j)*v2z(k,i,j+2))*sm; qyz=qt2yz(k,i,j); qt2yz(k,i,j)=qyz*et+dmws*cusyz*et1; taoyz=taoyz+cusyz-qyz-qt2yz(k,i,j); } cusyz=sm*dzh2(2,k)/ca*(v2y(k-1,i,j)-v2y(k,i,j)); qyz=qt2yz_pz(kb,i,j); qt2yz_pz(kb,i,j)=qyz*et+dmws*cusyz*et1; t2yz_pz(kb,i,j)=damp1*t2yz_pz(kb,i,j)+ damp2*(cusyz-qyz-qt2yz_pz(kb,i,j)); t2yz(k,i,j)=taoyz+t2yz_pz(kb,i,j); } // } // } return; } #ifdef __cplusplus extern "C" { #endif void compute_stressC(int *nxb1, int *nyb1, int *nx1p1, int *ny1p1, int *nxtop, int *nytop, int *nztop, int *mw1_pml, int *mw1_pml1, int *nmat, int *nll, int *lbx, int *lby, int *nd1_txy, int *nd1_txz, int *nd1_tyy, int *nd1_tyz, int *idmat1M, float *ca, float *drti1M, float *drth1M, float *damp1_xM, float *damp1_yM, float *clamdaM, float *cmuM, float *epdtM, float *qwpM, float *qwsM, float *qwt1M, float *qwt2M, float *dxh1M, float *dyh1M, float *dzh1M, float *dxi1M, float *dyi1M, float *dzi1M, float *t1xxM, float *t1xyM, float *t1xzM, float *t1yyM, float *t1yzM, float *t1zzM, float *qt1xxM, float *qt1xyM, float *qt1xzM, float *qt1yyM, float *qt1yzM, float *qt1zzM, float *t1xx_pxM, float *t1xy_pxM, float *t1xz_pxM, float *t1yy_pxM, float *qt1xx_pxM, float *qt1xy_pxM, float *qt1xz_pxM, float *qt1yy_pxM, float *t1xx_pyM, float *t1xy_pyM, float *t1yy_pyM, float *t1yz_pyM, float *qt1xx_pyM, float *qt1xy_pyM, float *qt1yy_pyM, float *qt1yz_pyM, void **v1xMp, void **v1yMp, void **v1zMp, int *nxb2, int *nyb2, int *nxbtm, int *nybtm, int *nzbtm, int *mw2_pml, int *mw2_pml1, int *nd2_txy, int *nd2_txz, int *nd2_tyy, int *nd2_tyz, int *idmat2M, float *drti2M, float *drth2M, float *damp2_xM, float *damp2_yM, float *damp2_zM, float *t2xxM, float *t2xyM, float *t2xzM, float *t2yyM, float *t2yzM, float *t2zzM, float *qt2xxM, float *qt2xyM, float *qt2xzM, float *qt2yyM, float *qt2yzM, float *qt2zzM, float *dxh2M, float *dyh2M, float *dzh2M, float *dxi2M, float *dyi2M, float *dzi2M, float *t2xx_pxM, float *t2xy_pxM, float *t2xz_pxM, float *t2yy_pxM, float *t2xx_pyM, float *t2xy_pyM, float *t2yy_pyM, float *t2yz_pyM, float *t2xx_pzM, float *t2xz_pzM, float *t2yz_pzM, float *t2zz_pzM, float *qt2xx_pxM, float *qt2xy_pxM, float *qt2xz_pxM, float *qt2yy_pxM, float *qt2xx_pyM, float *qt2xy_pyM, float *qt2yy_pyM, float *qt2yz_pyM, float *qt2xx_pzM, float *qt2xz_pzM, float *qt2yz_pzM, float *qt2zz_pzM, void **v2xMp, void **v2yMp, void **v2zMp, int *myid) { printf("[CUDA] stress computation:\n"); float *v1xM, *v1yM, *v1zM, *v2xM, *v2yM, *v2zM; int blockSizeX = 8; int blockSizeY = 8; dim3 dimBlock(blockSizeX, blockSizeY); v1xM = (float *) *v1xMp; v1yM = (float *) *v1yMp; v1zM = (float *) *v1zMp; v2xM = (float *) *v2xMp; v2yM = (float *) *v2yMp; v2zM = (float *) *v2zMp; gettimeofday(&t1, NULL); cpy_h2d_stressInputsC(v1xM, v1yM, v1zM, v2xM, v2yM, v2zM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); cpy_h2d_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeH2DS += tmpTime; gettimeofday(&t1, NULL); int gridSizeX1 = (nd1_tyy[3] - nd1_tyy[2])/blockSizeX + 1; int gridSizeY1 = (nd1_tyy[9] - nd1_tyy[8])/blockSizeY + 1; dim3 dimGrid1(gridSizeX1, gridSizeY1); stress_norm_xy_IC<<<dimGrid1, dimBlock>>>(*nxb1, *nyb1, *nxtop, *nztop, nd1_tyyD, idmat1D, *ca, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh1D, dyh1D, dxi1D, dyi1D, dzi1D, t1xxD, 
t1xyD, t1yyD, t1zzD, qt1xxD, qt1xyD, qt1yyD, qt1zzD, v1xD, v1yD, v1zD); int gridSizeX2 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1; int gridSizeY2 = (nd1_tyz[9] - nd1_tyz[8])/blockSizeY + 1; dim3 dimGrid2(gridSizeX2, gridSizeY2); stress_xz_yz_IC<<<dimGrid2, dimBlock>>>(*nxb1, *nyb1, *nxtop, *nytop, *nztop, nd1_tyzD, idmat1D, *ca, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dyi1D, dzh1D, v1xD, v1yD, v1zD, t1xzD, t1yzD, qt1xzD, qt1yzD); int gridSizeX3Temp1 = ((*ny1p1) + 1)/blockSizeX + 1; int gridSizeX3Temp2 = ((*nytop) - 1)/blockSizeX + 1; int gridSizeY3Temp1 = ((*nxtop) - 1)/blockSizeY + 1; int gridSizeY3Temp2 = ((*nx1p1) + 1)/blockSizeY + 1; int gridSizeX3 = (gridSizeX3Temp1 > gridSizeX3Temp2) ? gridSizeX3Temp1 : gridSizeX3Temp2; int gridSizeY3 = (gridSizeY3Temp1 > gridSizeY3Temp2) ? gridSizeY3Temp1 : gridSizeY3Temp2; dim3 dimGrid3(gridSizeX3, gridSizeY3); stress_resetVars<<<dimGrid3, dimBlock>>>(*ny1p1, *nx1p1, *nxtop, *nytop, *nztop, t1xzD, t1yzD); if (lbx[1] >= lbx[0]) { int gridSizeX4 = (nd1_tyy[5] - nd1_tyy[0])/blockSizeX + 1; int gridSizeY4 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid4(gridSizeX4, gridSizeY4); stress_norm_PmlX_IC<<<dimGrid4, dimBlock>>>(*nxb1, *nyb1, *nxtop, *nytop, *nztop, *mw1_pml, *mw1_pml1, lbx[0], lbx[1], nd1_tyyD, idmat1D, *ca, drti1D, damp1_xD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dzi1D, dxh1D, dyh1D, v1xD, v1yD, v1zD, t1xxD, t1yyD, t1zzD, t1xx_pxD, t1yy_pxD, qt1xxD, qt1yyD, qt1zzD, qt1xx_pxD, qt1yy_pxD); } if (lby[1] >= lby[0]) { int gridSizeX5 = (nd1_tyy[11] - nd1_tyy[6])/blockSizeX + 1; int gridSizeY5 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid5(gridSizeX5, gridSizeY5); stress_norm_PmlY_IC<<<dimGrid5, dimBlock>>>(*nxb1, *nyb1, *mw1_pml1, *nxtop, *nztop, lby[0], lby[1], nd1_tyyD, idmat1D, *ca, drti1D, damp1_yD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh1D, dyh1D, dzi1D, t1xxD, t1yyD, t1zzD, qt1xxD, qt1yyD, qt1zzD, t1xx_pyD, t1yy_pyD, qt1xx_pyD, qt1yy_pyD, v1xD, v1yD, v1zD); } if (lbx[1] >= lbx[0]) { int gridSizeX6 = (nd1_txy[5] - nd1_txy[0])/blockSizeX + 1; int gridSizeY6 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid6(gridSizeX6, gridSizeY6); stress_xy_PmlX_IC<<<dimGrid6, dimBlock>>>(*nxb1, *nyb1, *mw1_pml, *mw1_pml1, *nxtop, *nytop, *nztop, lbx[0], lbx[1], nd1_txyD, idmat1D, *ca, drth1D, damp1_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dyi1D, t1xyD, qt1xyD, t1xy_pxD, qt1xy_pxD, v1xD, v1yD); } if (lby[1] >= lby[0]) { int gridSizeX7 = (nd1_txy[11] - nd1_txy[6])/blockSizeX + 1; int gridSizeY7 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid7(gridSizeX7, gridSizeY7); stress_xy_PmlY_IC<<<dimGrid7, dimBlock>>>(*nxb1, *nyb1, *mw1_pml1, *nxtop, *nztop, lby[0], lby[1], nd1_txyD, idmat1D, *ca, drth1D, damp1_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dyi1D, t1xyD, qt1xyD, t1xy_pyD, qt1xy_pyD, v1xD, v1yD); } if (lbx[1] >= lbx[0]) { int gridSizeX8 = (nd1_txz[5] - nd1_txz[0])/blockSizeX + 1; int gridSizeY8 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid8(gridSizeX8, gridSizeY8); stress_xz_PmlX_IC<<<dimGrid8, dimBlock>>>(*nxb1, *nyb1, *nxtop, *nytop, *nztop, *mw1_pml, *mw1_pml1, lbx[0], lbx[1], nd1_txzD, idmat1D, *ca, drth1D, damp1_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi1D, dzh1D, t1xzD, qt1xzD, t1xz_pxD, qt1xz_pxD, v1xD, v1zD); } if (lby[1] >= lby[0]) { int gridSizeX9 = (nd1_txz[9] - nd1_txz[8])/blockSizeX + 1; int gridSizeY9 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid9(gridSizeX9, gridSizeY9); stress_xz_PmlY_IC<<<dimGrid9, dimBlock>>>(*nxb1, *nyb1, *nxtop, *nztop, lby[0], lby[1], nd1_txzD, idmat1D, *ca, cmuD, epdtD, qwsD, qwt1D, 
qwt2D, dxi1D, dzh1D, t1xzD, qt1xzD, v1xD, v1zD); } if (lbx[1] >= lbx[0]) { int gridSizeX10 = (nd1_tyz[3] - nd1_tyz[2])/blockSizeX + 1; int gridSizeY10 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid10(gridSizeX10, gridSizeY10); stress_yz_PmlX_IC<<<dimGrid10, dimBlock>>>(*nxb1, *nyb1, *nztop, *nxtop, lbx[0], lbx[1], nd1_tyzD, idmat1D, *ca, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi1D, dzh1D, t1yzD, qt1yzD, v1yD, v1zD); } if (lby[1] >= lby[0]) { int gridSizeX11 = (nd1_tyz[11] - nd1_tyz[6])/blockSizeX + 1; int gridSizeY11 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid11(gridSizeX11, gridSizeY11); stress_yz_PmlY_IC<<<dimGrid11,dimBlock>>>(*nxb1, *nyb1, *mw1_pml1, *nxtop, *nztop, lby[0], lby[1], nd1_tyzD, idmat1D, *ca, drth1D, damp1_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi1D, dzh1D, t1yzD, qt1yzD, t1yz_pyD, qt1yz_pyD, v1yD, v1zD); } int gridSizeX12 = (nd2_tyy[3] - nd2_tyy[2])/blockSizeX + 1; int gridSizeY12 = (nd2_tyy[9] - nd2_tyy[8])/blockSizeY + 1; dim3 dimGrid12(gridSizeX12, gridSizeY12); stress_norm_xy_II<<<dimGrid12, dimBlock>>>(*nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, nd2_tyyD, idmat2D, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, t2xxD, t2xyD, t2yyD, t2zzD, qt2xxD, qt2xyD, qt2yyD, qt2zzD, dxh2D, dyh2D, dxi2D, dyi2D, dzi2D, v2xD, v2yD, v2zD); int gridSizeX13 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1; int gridSizeY13 = (nd2_tyz[9] - nd2_tyz[8])/blockSizeY + 1; dim3 dimGrid13(gridSizeX13, gridSizeY13); stress_xz_yz_IIC<<<dimGrid13, dimBlock>>>(*nxb2, *nyb2, *nztop, *nxbtm, *nzbtm, nd2_tyzD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, dzh2D, t2xzD, t2yzD, qt2xzD, qt2yzD, v2xD, v2yD, v2zD); if (lbx[1] >= lbx[0]) { int gridSizeX14 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1; int gridSizeY14 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid14(gridSizeX14, gridSizeY14); stress_norm_PmlX_IIC<<<dimGrid14, dimBlock>>>(*nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nztop, *nxbtm, *nybtm, *nzbtm, lbx[0], lbx[1], nd2_tyyD, idmat2D, *ca, drti2D, damp2_xD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh2D, dyh2D, dzi2D, t2xxD, t2yyD, t2zzD, qt2xxD, qt2yyD, qt2zzD, t2xx_pxD, t2yy_pxD, qt2xx_pxD, qt2yy_pxD, v2xD, v2yD, v2zD); } if (lby[1] >= lby[0]) { int gridSizeX15 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeX + 1; int gridSizeY15 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid15(gridSizeX15, gridSizeY15); stress_norm_PmlY_II<<<dimGrid15, dimBlock>>>(*nxb2, *nyb2, *nztop, *nxbtm, *nzbtm, *mw2_pml1, lby[0], lby[1], nd2_tyyD, idmat2D, *ca, drti2D, damp2_yD, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh2D, dyh2D, dzi2D, t2xxD, t2yyD, t2zzD, qt2xxD, qt2yyD, qt2zzD, t2xx_pyD, t2yy_pyD, qt2xx_pyD, qt2yy_pyD, v2xD, v2yD, v2zD); } int gridSizeX16 = (nd2_tyy[5] - nd2_tyy[0])/blockSizeX + 1; int gridSizeY16 = (nd2_tyy[11] - nd2_tyy[6])/blockSizeY + 1; dim3 dimGrid16(gridSizeX16, gridSizeY16); stress_norm_PmlZ_IIC<<<dimGrid16, dimBlock>>>(*nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nztop, *nxbtm, *nzbtm, nd2_tyyD, idmat2D, *ca, damp2_zD, drth2D, clamdaD, cmuD, epdtD, qwpD, qwsD, qwt1D, qwt2D, dxh2D, dyh2D, dzi2D, t2xxD, t2yyD, t2zzD, qt2xxD, qt2yyD, qt2zzD, t2xx_pzD, t2zz_pzD, qt2xx_pzD, qt2zz_pzD, v2xD, v2yD, v2zD); if (lbx[1] >= lbx[0]) { int gridSizeX17 = (nd2_txy[5] - nd2_txy[0])/blockSizeX + 1; int gridSizeY17 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid17(gridSizeX17, gridSizeY17); stress_xy_PmlX_IIC<<<dimGrid17, dimBlock>>>(*nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nxbtm, *nybtm, *nzbtm, *nztop, lbx[0], lbx[1], nd2_txyD, idmat2D, *ca, drth2D, damp2_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, t2xyD, qt2xyD, 
t2xy_pxD, qt2xy_pxD, v2xD, v2yD); } if (lby[1] >= lby[0]) { int gridSizeX18 = (nd2_txy[11] - nd2_txy[6])/blockSizeX + 1; int gridSizeY18 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid18(gridSizeX18, gridSizeY18); stress_xy_PmlY_IIC<<<dimGrid18, dimBlock>>>(*nxb2, *nyb2, *mw2_pml1, *nztop, *nxbtm, *nzbtm, lby[0], lby[1], nd2_txyD, idmat2D, *ca, drth2D, damp2_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, t2xyD, qt2xyD, t2xy_pyD, qt2xy_pyD, v2xD, v2yD); } int gridSizeX19 = (nd2_txy[3] - nd2_txy[2])/blockSizeX + 1; int gridSizeY19 = (nd2_txy[9] - nd2_txy[8])/blockSizeY + 1; dim3 dimGrid19(gridSizeX19, gridSizeY19); stress_xy_PmlZ_II<<<dimGrid19, dimBlock>>>(*nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, nd2_txyD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dyi2D, t2xyD, qt2xyD, v2xD, v2yD); if (lbx[1] >= lbx[0]) { int gridSizeX20 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1; int gridSizeY20 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid20(gridSizeX20, gridSizeY20); stress_xz_PmlX_IIC<<<dimGrid20, dimBlock>>>(*nxb2, *nyb2, *mw2_pml, *mw2_pml1, *nxbtm, *nybtm, *nzbtm, *nztop, lbx[0], lbx[1], nd2_txzD, idmat2D, *ca, drth2D, damp2_xD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dzh2D, t2xzD, qt2xzD, t2xz_pxD, qt2xz_pxD, v2xD, v2zD); } if (lby[1] >= lby[0]) { int gridSizeX21 = (nd2_txz[9] - nd2_txz[8])/blockSizeX + 1; int gridSizeY21 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid21(gridSizeX21, gridSizeY21); stress_xz_PmlY_IIC<<<dimGrid21, dimBlock>>>(*nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, lby[0], lby[1], nd2_txzD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dzh2D, v2xD, v2zD, t2xzD, qt2xzD); } int gridSizeX22 = (nd2_txz[5] - nd2_txz[0])/blockSizeX + 1; int gridSizeY22 = (nd2_txz[11] - nd2_txz[6])/blockSizeY + 1; dim3 dimGrid22(gridSizeX22, gridSizeY22); stress_xz_PmlZ_IIC<<<dimGrid22, dimBlock>>>(*nxb2, *nyb2, *mw2_pml1, *nxbtm, *nzbtm, *nztop, nd2_txzD, idmat2D, *ca, drti2D, damp2_zD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dxi2D, dzh2D, t2xzD, qt2xzD, t2xz_pzD, qt2xz_pzD, v2xD, v2zD); if (lbx[1] >= lbx[0]) { int gridSizeX23 = (nd2_tyz[3] - nd2_tyz[2])/blockSizeX + 1; int gridSizeY23 = (lbx[1] - lbx[0])/blockSizeY + 1; dim3 dimGrid23(gridSizeX23, gridSizeY23); stress_yz_PmlX_IIC<<<dimGrid23, dimBlock>>>(*nxb2, *nyb2, *nxbtm, *nzbtm, *nztop, lbx[0], lbx[1], nd2_tyzD, idmat2D, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi2D, dzh2D, t2yzD, qt2yzD, v2yD, v2zD); } if (lby[1] >= lby[0]) { int gridSizeX24 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeX + 1; int gridSizeY24 = (lby[1] - lby[0])/blockSizeY + 1; dim3 dimGrid24(gridSizeX24, gridSizeY24); stress_yz_PmlY_IIC<<<dimGrid24, dimBlock>>>(*nxb2, *nyb2, *mw2_pml1, *nxbtm, *nzbtm, *nztop, lby[0], lby[1], nd2_tyzD, idmat2D, *ca, drth2D, damp2_yD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi2D, dzh2D, t2yzD, qt2yzD, t2yz_pyD, qt2yz_pyD, v2yD, v2zD); } int gridSizeX25 = (nd2_tyz[5] - nd2_tyz[0])/blockSizeX + 1; int gridSizeY25 = (nd2_tyz[11] - nd2_tyz[6])/blockSizeY + 1; dim3 dimGrid25(gridSizeX25, gridSizeY25); stress_yz_PmlZ_IIC<<<dimGrid25, dimBlock>>>(*nxb2, *nyb2, *mw2_pml1, *nxbtm, *nzbtm, *nztop, nd2_tyzD, idmat2D, *ca, drti2D, damp2_zD, cmuD, epdtD, qwsD, qwt1D, qwt2D, dyi2D, dzh2D, t2yzD, qt2yzD, t2yz_pzD, qt2yz_pzD, v2yD, v2zD); cudaThreadSynchronize(); gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeCompS += tmpTime; gettimeofday(&t1, NULL); cpy_d2h_stressOutputsC(t1xxM, t1xyM, t1xzM, t1yyM, t1yzM, t1zzM, t2xxM, t2xyM, t2xzM, t2yyM, t2yzM, t2zzM, nxtop, nytop, nztop, nxbtm, nybtm, nzbtm); 
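// Note (added comment): the gettimeofday pairs around this region time the H2D
// copies, the kernel batch, and the D2H copy separately (totalTimeH2DS,
// totalTimeCompS, totalTimeD2HS). The cudaThreadSynchronize() call before the
// compute timestamp is what makes the kernel timing meaningful; that API is
// deprecated, and in a HIP build it would need to be hipDeviceSynchronize()
// (cudaDeviceSynchronize() in plain CUDA).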
gettimeofday(&t2, NULL); tmpTime = 1000.0 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec) / 1000.0; totalTimeD2HS += tmpTime; /*int size = (*nztop) * (*nxtop + 3) * (*nytop); write_output(t1xxM, size, "OUTPUT_ARRAYS/t1xxM.txt"); size = (*nztop) * (*nxtop + 3) * (*nytop + 3); write_output(t1xyM, size, "OUTPUT_ARRAYS/t1xyM.txt"); size = (*nztop + 1) * (*nxtop + 3) * (*nytop); write_output(t1xzM, size, "OUTPUT_ARRAYS/t1xzM.txt"); size = (*nztop) * (*nxtop) * (*nytop + 3); write_output(t1yyM, size, "OUTPUT_ARRAYS/t1yyM.txt"); size = (*nztop + 1) * (*nxtop) * (*nytop + 3); write_output(t1yzM, size, "OUTPUT_ARRAYS/t1yzM.txt"); size = (*nztop) * (*nxtop) * (*nytop); write_output(t1zzM, size, "OUTPUT_ARRAYS/t1zzM.txt"); size = (*nzbtm) * (*nxbtm + 3) * (*nybtm); write_output(t2xxM, size, "OUTPUT_ARRAYS/t2xxM.txt"); size = (*nzbtm) * (*nxbtm + 3) * (*nybtm + 3); write_output(t2xyM, size, "OUTPUT_ARRAYS/t2xyM.txt"); size = (*nzbtm + 1) * (*nxbtm + 3) * (*nybtm); write_output(t2xzM, size, "OUTPUT_ARRAYS/t2xzM.txt"); size = (*nzbtm) * (*nxbtm) * (*nybtm + 3); write_output(t2yyM, size, "OUTPUT_ARRAYS/t2yyM.txt"); size = (*nzbtm + 1) * (*nxbtm) * (*nybtm + 3); write_output(t2yzM, size, "OUTPUT_ARRAYS/t2yzM.txt"); size = (*nzbtm + 1) * (*nxbtm) * (*nybtm); write_output(t2zzM, size, "OUTPUT_ARRAYS/t2zzM.txt"); */ /*************** correctness *******************/ /* FILE *fp; // cudaRes = cudaMalloc((void **)&v1xD, sizeof(float) * y(*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1x"); // cudaRes = cudaMalloc((void **)&v1yD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1y"); // cudaRes = cudaMalloc((void **)&v1zD, sizeof(float) * (*nztop + 2) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, v1z"); const char* filename = "v1x.txt"; const char* filename1 = "v1y.txt"; const char* filename2 = "v1z.txt"; int i; if((fp = fopen(filename, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ ) { fprintf(fp, "%f ", v1xM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename1, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ ) { fprintf(fp, "%f ", v1yM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename2, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop + 2) * (*nxtop + 3) * (*nytop + 3); i++ ) { fprintf(fp, "%f ", v1zM[i]); } fprintf(fp, "\n"); fclose(fp); // cudaRes = cudaMalloc((void **)&t1xxD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xx"); // cudaRes = cudaMalloc((void **)&t1xyD, sizeof(float) * (*nztop) * (*nxtop + 3) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xy"); // cudaRes = cudaMalloc((void **)&t1xzD, sizeof(float) * (*nztop + 1) * (*nxtop + 3) * (*nytop)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1xz"); // cudaRes = cudaMalloc((void **)&t1yyD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yy"); // cudaRes = cudaMalloc((void **)&t1yzD, sizeof(float) * (*nztop + 1) * (*nxtop) * (*nytop + 3)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1yz"); // cudaRes = cudaMalloc((void **)&t1zzD, sizeof(float) * (*nztop) * (*nxtop) * (*nytop)); // CHECK_ERROR(cudaRes, "Allocate Device Memory1, t1zz"); const char* filename3 = 
"x_t1xx.txt"; const char* filename4 = "x_t1xy.txt"; const char* filename5 = "x_t1xz.txt"; if((fp = fopen(filename3, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop); i++ ) { fprintf(fp, "%f ", t1xxM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename4, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop) * (*nxtop + 3) * (*nytop+3); i++ ) { fprintf(fp, "%f ", t1xyM[i]); } fprintf(fp, "\n"); fclose(fp); if((fp = fopen(filename5, "w+")) == NULL) fprintf(stderr, "File write error!\n"); for(i = 0; i< (*nztop+1) * (*nxtop + 3) * (*nytop); i++ ) { fprintf(fp, "%f ", t1xzM[i]); } fprintf(fp, "\n"); fclose(fp); */ return; } #ifdef __cplusplus } #endif
4dec729cbc3d67bc25cc1aa779cea3669c744f95.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <device_launch_parameters.h> #include "CudaLaunchHelpers.cuh" #include "CDerivativeSolver.cuh" #include "CMortonTreeTraversal.cuh" using namespace wing2d::simulation::cuda; static __device__ float2 SpringDamper(const float2& normal, const float2& vel1, const float2& vel2, float springLen) { constexpr float stiffness = 10000.0f; constexpr float damp = 50.0f; auto v = dot(vel1 - vel2, normal); return normal * (springLen * stiffness + v * damp) * -1.0f; } static __device__ float2 SpringDamper2(const float2& normal, const float2& vel1, const float2& vel2, float springLen) { constexpr float stiffness = 50000.0f; constexpr float damp = 50.0f; auto v = dot(vel1 - vel2, normal); return normal * (springLen * stiffness + v * damp) * -1.0f; } static __global__ void ParticleToWallKernel(CDerivativeSolver::SIntermediateSimState state, SLineSegmentsSOA walls) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= state.particles) return; const auto pos = state.pos[threadId]; const auto vel = state.vel[threadId]; auto force = make_float2(0.0f); for (size_t i = 0; i < walls.lineSegments; ++i) { const auto d = walls.DistanceToLine(i, pos) - state.particleRad; if (d < 0.0f) { force += SpringDamper(walls.normal[i], vel, make_float2(0.0f), d); } } state.force[threadId] += force; } static __global__ void AddGravityKernel(float2* forces, unsigned n) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= n) return; forces[threadId].x += 0.5f; } static __global__ void BuildAirfoilBoxesKernel(SLineSegmentsSOA airfoil, SBoundingBoxesAoS boxes) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= airfoil.lineSegments) return; const auto f = airfoil.first[threadId]; const auto s = airfoil.second[threadId]; const auto minCorner = fminf(f, s); const auto maxCorner = fmaxf(f, s); boxes.boxes[threadId] = { minCorner, maxCorner }; } static __global__ void ReorderAirfoilKernel(const SLineSegmentsSOA oldAirfoil, SLineSegmentsSOA newAirfoil, const TIndex* __restrict__ oldIndices) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= oldAirfoil.lineSegments) return; auto oldIdx = oldIndices[threadId]; newAirfoil.first[threadId] = oldAirfoil.first[oldIdx]; newAirfoil.second[threadId] = oldAirfoil.second[oldIdx]; newAirfoil.ray[threadId] = oldAirfoil.ray[oldIdx]; newAirfoil.normal[threadId] = oldAirfoil.normal[oldIdx]; newAirfoil.length[threadId] = oldAirfoil.length[oldIdx]; } static __global__ void BuildParticlesBoundingBoxesKernel(const float2* __restrict__ particlePos, float particleRad, SBoundingBoxesAoS boundingBoxes) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= boundingBoxes.count) return; const auto pos = particlePos[threadId]; auto minCorner = make_float2(pos.x - particleRad, pos.y - particleRad); auto maxCorner = make_float2(pos.x + particleRad, pos.y + particleRad); boundingBoxes.boxes[threadId] = { minCorner, maxCorner }; } static __global__ void ReorderParticlesKernel(const float2* __restrict__ originalStateVector, const TIndex* __restrict__ oldIndices, CDerivativeSolver::SIntermediateSimState simState) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= simState.particles) return; auto oldPos = originalStateVector; auto oldVel = oldPos + simState.particles; auto oldIdx = oldIndices[threadId]; simState.pos[threadId] = oldPos[oldIdx]; 
simState.vel[threadId] = oldVel[oldIdx]; } static __global__ void ResetParticlesStateKernel(CDerivativeSolver::SIntermediateSimState simState) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= simState.particles) return; simState.force[threadId] = make_float2(0.0f); simState.pressure[threadId] = 0.0f; } static __global__ void InverseForcesOrderKernel(const CDerivativeSolver::SIntermediateSimState simState, const TIndex* __restrict__ oldIndices, float2* __restrict__ unorderedForces) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= simState.particles) return; auto oldIdx = oldIndices[threadId]; unorderedForces[oldIdx] = simState.force[threadId]; } // // // struct SParticleParticleCollisionSolver { struct SDeviceSideSolver { CDerivativeSolver::SIntermediateSimState& simState; TIndex curIdx; float2 pos1; float2 vel1; float2 totalForce; float totalPressure; __device__ SDeviceSideSolver(CDerivativeSolver::SIntermediateSimState& state) : simState(state) { } __device__ void OnPreTraversal(TIndex curLeafIdx) { curIdx = curLeafIdx; pos1 = simState.pos[curLeafIdx]; vel1 = simState.vel[curLeafIdx]; totalForce = make_float2(0.0f); totalPressure = 0.0f; } __device__ void OnCollisionDetected(TIndex anotherLeafIdx) { const auto pos2 = simState.pos[anotherLeafIdx]; const auto deltaPos = pos2 - pos1; const auto distanceSq = dot(deltaPos, deltaPos); if (distanceSq > simState.diameterSq || distanceSq < 1e-8f) return; const auto vel2 = simState.vel[anotherLeafIdx]; auto dist = sqrtf(distanceSq); auto dir = deltaPos / dist; auto springLen = simState.diameter - dist; auto force = SpringDamper(dir, vel1, vel2, springLen); auto pressure = length(force); totalForce += force; totalPressure += pressure; atomicAdd(&simState.force[anotherLeafIdx].x, -force.x); atomicAdd(&simState.force[anotherLeafIdx].y, -force.y); atomicAdd(&simState.pressure[anotherLeafIdx], pressure); } __device__ void OnPostTraversal() { atomicAdd(&simState.force[curIdx].x, totalForce.x); atomicAdd(&simState.force[curIdx].y, totalForce.y); atomicAdd(&simState.pressure[curIdx], totalPressure); } }; CDerivativeSolver::SIntermediateSimState simState; SParticleParticleCollisionSolver(const CDerivativeSolver::SIntermediateSimState& state) : simState(state) { } __device__ SDeviceSideSolver Create() { return SDeviceSideSolver(simState); } }; struct SParticleWingCollisionSolver { struct SDeviceSideSolver { CDerivativeSolver::SIntermediateSimState& simState; const SLineSegmentsSOA& airfoil; TIndex particleIdx; float2 particlePos; struct SSegment { float2 first; float2 ray; float2 normal; float length = 0.0f; float distanceTo = INFINITY; } closest; __device__ SDeviceSideSolver(CDerivativeSolver::SIntermediateSimState& state, const SLineSegmentsSOA& airfoil_) : simState(state), airfoil(airfoil_) { } __device__ void OnPreTraversal(TIndex particleIdx_) { particlePos = simState.pos[particleIdx_]; particleIdx = particleIdx_; } __device__ void OnCollisionDetected(TIndex wingSegmentIdx) { SSegment cur = { airfoil.first[wingSegmentIdx], airfoil.ray[wingSegmentIdx], airfoil.normal[wingSegmentIdx], airfoil.length[wingSegmentIdx], INFINITY }; const auto projection = dot(cur.ray, particlePos - cur.first); float2 closestPoint; if (projection < 0.0f) { closestPoint = cur.first; } else if (projection >= 0.0f && projection <= cur.length) { closestPoint = cur.first + cur.ray * projection; } else { closestPoint = cur.first + cur.ray * cur.length; } cur.distanceTo = length(particlePos - closestPoint); if 
(cur.distanceTo < closest.distanceTo) { closest = cur; } else if (cur.distanceTo == closest.distanceTo) { auto delta1 = particlePos - closest.first; auto delta2 = particlePos - cur.first; auto dot1 = dot(delta1, closest.normal); auto dot2 = dot(delta2, cur.normal); if (dot2 > dot1) closest = cur; } } __device__ void OnPostTraversal() { if (closest.distanceTo == INFINITY) return; const auto height = dot(closest.normal, particlePos - closest.first); if (height <= simState.particleRad) { const auto penetration = height - simState.particleRad; auto force = SpringDamper2(closest.normal, simState.vel[particleIdx], make_float2(0.0f), penetration); auto pressure = length(force); simState.force[particleIdx] += force; simState.pressure[particleIdx] += pressure; } } }; CDerivativeSolver::SIntermediateSimState simState; const SLineSegmentsSOA airfoil; SParticleWingCollisionSolver(const CDerivativeSolver::SIntermediateSimState& state, const SLineSegmentsSOA& airfoil_) : simState(state), airfoil(airfoil_) { } __device__ SDeviceSideSolver Create() { return SDeviceSideSolver(simState, airfoil); } }; // // // CDerivativeSolver::CDerivativeSolver(size_t particles, float radius, const Segments_t& airfoil, const Segments_t& walls) : m_airfoilStorage(airfoil.size()), m_wallsStorage(walls), m_particlesBoxes(particles), m_particlesExtendedBoxes(particles), m_particles(particles, radius) { auto tempAirfoilStorage = CLineSegmentsStorage(airfoil); auto tempAirfoilBoxes = thrust::device_vector<SBoundingBox>(airfoil.size()); dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(airfoil.size(), kBlockSize)); hipLaunchKernelGGL(( BuildAirfoilBoxesKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, tempAirfoilStorage.get(), SBoundingBoxesAoS::Create(tempAirfoilBoxes)); CudaCheckError(); m_airfoilTree.Build(tempAirfoilBoxes); hipLaunchKernelGGL(( ReorderAirfoilKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, tempAirfoilStorage.get(), m_airfoilStorage.get(), m_airfoilTree.GetSortedIndices().data().get()); CudaCheckError(); } void CDerivativeSolver::Derive(const OdeState_t& curState, OdeState_t& outDerivative) { BuildParticlesTree(curState); ReorderParticles(curState); ResetParticlesState(); ResolveParticleParticleCollisions(); ResolveParticleWingCollisions(); ParticleToWall(); ApplyGravity(); BuildDerivative(curState, outDerivative); } CDerivativeSolver::SIntermediateSimState CDerivativeSolver::SParticlesState::GetSimState() { return { count, radius, radius * 2.0f, radius * radius * 4.0f, reorderedPositions.data().get(), reorderedVelocities.data().get(), forces.data().get(), pressures.data().get(), }; } void CDerivativeSolver::BuildParticlesTree(const OdeState_t& curState) { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); hipLaunchKernelGGL(( BuildParticlesBoundingBoxesKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, curState.data().get(), m_particles.radius, SBoundingBoxesAoS::Create(m_particlesBoxes)); CudaCheckError(); m_particlesTree.Build(m_particlesBoxes); } void CDerivativeSolver::ReorderParticles(const OdeState_t& curState) { auto oldIndices = m_particlesTree.GetSortedIndices().data().get(); dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); hipLaunchKernelGGL(( ReorderParticlesKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, curState.data().get(), oldIndices, m_particles.GetSimState()); hipLaunchKernelGGL(( BuildParticlesBoundingBoxesKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, m_particles.reorderedPositions.data().get(), m_particles.radius * 4.0f, 
SBoundingBoxesAoS::Create(m_particlesExtendedBoxes)); CudaCheckError(); } void CDerivativeSolver::ResetParticlesState() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); hipLaunchKernelGGL(( ResetParticlesStateKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, m_particles.GetSimState()); } void CDerivativeSolver::ResolveParticleParticleCollisions() { m_particlesTree.TraverseReflexive<SParticleParticleCollisionSolver, 24>(SParticleParticleCollisionSolver(m_particles.GetSimState())); CudaCheckError(); } void CDerivativeSolver::ResolveParticleWingCollisions() { m_airfoilTree.Traverse<SParticleWingCollisionSolver, 24>( m_particlesExtendedBoxes, SParticleWingCollisionSolver(m_particles.GetSimState(), m_airfoilStorage.get())); CudaCheckError(); } void CDerivativeSolver::ParticleToWall() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); hipLaunchKernelGGL(( ParticleToWallKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, m_particles.GetSimState(), m_wallsStorage.get()); CudaCheckError(); } void CDerivativeSolver::ApplyGravity() { auto elements = unsigned(m_particles.count); dim3 blockDim(kBlockSize); dim3 gridDim((elements - 1) / blockDim.x + 1); hipLaunchKernelGGL(( AddGravityKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, m_particles.forces.data().get(), elements); CudaCheckError(); } void CDerivativeSolver::BuildDerivative(const OdeState_t& curState, OdeState_t& outDerivative) { const float2* d_velocities = curState.data().get() + m_particles.count; //TODO: reorder forces back const float2* d_forces = m_particles.forces.data().get(); float2* d_derivative = outDerivative.data().get(); const size_t dataBlockSize = m_particles.count * sizeof(float2); CudaSafeCall(hipMemcpyAsync(d_derivative, d_velocities, dataBlockSize, hipMemcpyKind::hipMemcpyDeviceToDevice)); dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); hipLaunchKernelGGL(( InverseForcesOrderKernel) , dim3(gridDim), dim3(blockDim) , 0, 0, m_particles.GetSimState(), m_particlesTree.GetSortedIndices().data().get(), d_derivative + m_particles.count ); CudaCheckError(); } const thrust::device_vector<float>& CDerivativeSolver::GetPressures() const { return m_particles.pressures; } const thrust::device_vector<TIndex>& CDerivativeSolver::GetParticlesIndices() const { return m_particlesTree.GetSortedIndices(); }
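// --- Editor's note (not part of the original file pair) ----------------------
// A minimal, host-only sketch of the spring-damper contact force applied in
// SParticleParticleCollisionSolver::OnCollisionDetected above. The stiffness
// and damping constants mirror SpringDamper() in this file pair; the Vec2
// helpers and main() are illustrative additions, not part of the solver.
#include <cmath>
#include <cstdio>

struct Vec2 { float x, y; };

static Vec2  operator-(Vec2 a, Vec2 b) { return {a.x - b.x, a.y - b.y}; }
static Vec2  operator*(Vec2 a, float s) { return {a.x * s, a.y * s}; }
static float dot(Vec2 a, Vec2 b) { return a.x * b.x + a.y * b.y; }

// Same form as SpringDamper() in the solver:
//   F = -normal * (stiffness * overlap + damp * relative_normal_velocity)
static Vec2 SpringDamperRef(Vec2 normal, Vec2 vel1, Vec2 vel2, float springLen)
{
    const float stiffness = 10000.0f;
    const float damp      = 50.0f;
    const float v = dot(vel1 - vel2, normal);   // closing speed along the contact normal
    return normal * ((springLen * stiffness + v * damp) * -1.0f);
}

int main()
{
    // Particle 1 approaches particle 2 along +x at 1 m/s with an overlap of 0.02.
    Vec2 normal = {1.0f, 0.0f};                 // dir = (pos2 - pos1) / dist in the solver
    Vec2 vel1   = {1.0f, 0.0f};
    Vec2 vel2   = {0.0f, 0.0f};
    Vec2 f = SpringDamperRef(normal, vel1, vel2, /*springLen=*/0.02f);
    std::printf("force on particle 1: (%f, %f)\n", f.x, f.y);  // expect (-250, 0): pushed away from particle 2
    return 0;
}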
4dec729cbc3d67bc25cc1aa779cea3669c744f95.cu
#include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "CudaLaunchHelpers.cuh" #include "CDerivativeSolver.cuh" #include "CMortonTreeTraversal.cuh" using namespace wing2d::simulation::cuda; static __device__ float2 SpringDamper(const float2& normal, const float2& vel1, const float2& vel2, float springLen) { constexpr float stiffness = 10000.0f; constexpr float damp = 50.0f; auto v = dot(vel1 - vel2, normal); return normal * (springLen * stiffness + v * damp) * -1.0f; } static __device__ float2 SpringDamper2(const float2& normal, const float2& vel1, const float2& vel2, float springLen) { constexpr float stiffness = 50000.0f; constexpr float damp = 50.0f; auto v = dot(vel1 - vel2, normal); return normal * (springLen * stiffness + v * damp) * -1.0f; } static __global__ void ParticleToWallKernel(CDerivativeSolver::SIntermediateSimState state, SLineSegmentsSOA walls) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= state.particles) return; const auto pos = state.pos[threadId]; const auto vel = state.vel[threadId]; auto force = make_float2(0.0f); for (size_t i = 0; i < walls.lineSegments; ++i) { const auto d = walls.DistanceToLine(i, pos) - state.particleRad; if (d < 0.0f) { force += SpringDamper(walls.normal[i], vel, make_float2(0.0f), d); } } state.force[threadId] += force; } static __global__ void AddGravityKernel(float2* forces, unsigned n) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= n) return; forces[threadId].x += 0.5f; } static __global__ void BuildAirfoilBoxesKernel(SLineSegmentsSOA airfoil, SBoundingBoxesAoS boxes) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= airfoil.lineSegments) return; const auto f = airfoil.first[threadId]; const auto s = airfoil.second[threadId]; const auto minCorner = fminf(f, s); const auto maxCorner = fmaxf(f, s); boxes.boxes[threadId] = { minCorner, maxCorner }; } static __global__ void ReorderAirfoilKernel(const SLineSegmentsSOA oldAirfoil, SLineSegmentsSOA newAirfoil, const TIndex* __restrict__ oldIndices) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= oldAirfoil.lineSegments) return; auto oldIdx = oldIndices[threadId]; newAirfoil.first[threadId] = oldAirfoil.first[oldIdx]; newAirfoil.second[threadId] = oldAirfoil.second[oldIdx]; newAirfoil.ray[threadId] = oldAirfoil.ray[oldIdx]; newAirfoil.normal[threadId] = oldAirfoil.normal[oldIdx]; newAirfoil.length[threadId] = oldAirfoil.length[oldIdx]; } static __global__ void BuildParticlesBoundingBoxesKernel(const float2* __restrict__ particlePos, float particleRad, SBoundingBoxesAoS boundingBoxes) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= boundingBoxes.count) return; const auto pos = particlePos[threadId]; auto minCorner = make_float2(pos.x - particleRad, pos.y - particleRad); auto maxCorner = make_float2(pos.x + particleRad, pos.y + particleRad); boundingBoxes.boxes[threadId] = { minCorner, maxCorner }; } static __global__ void ReorderParticlesKernel(const float2* __restrict__ originalStateVector, const TIndex* __restrict__ oldIndices, CDerivativeSolver::SIntermediateSimState simState) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= simState.particles) return; auto oldPos = originalStateVector; auto oldVel = oldPos + simState.particles; auto oldIdx = oldIndices[threadId]; simState.pos[threadId] = oldPos[oldIdx]; simState.vel[threadId] = oldVel[oldIdx]; } static __global__ void 
ResetParticlesStateKernel(CDerivativeSolver::SIntermediateSimState simState) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= simState.particles) return; simState.force[threadId] = make_float2(0.0f); simState.pressure[threadId] = 0.0f; } static __global__ void InverseForcesOrderKernel(const CDerivativeSolver::SIntermediateSimState simState, const TIndex* __restrict__ oldIndices, float2* __restrict__ unorderedForces) { const auto threadId = blockIdx.x * blockDim.x + threadIdx.x; if (threadId >= simState.particles) return; auto oldIdx = oldIndices[threadId]; unorderedForces[oldIdx] = simState.force[threadId]; } // // // struct SParticleParticleCollisionSolver { struct SDeviceSideSolver { CDerivativeSolver::SIntermediateSimState& simState; TIndex curIdx; float2 pos1; float2 vel1; float2 totalForce; float totalPressure; __device__ SDeviceSideSolver(CDerivativeSolver::SIntermediateSimState& state) : simState(state) { } __device__ void OnPreTraversal(TIndex curLeafIdx) { curIdx = curLeafIdx; pos1 = simState.pos[curLeafIdx]; vel1 = simState.vel[curLeafIdx]; totalForce = make_float2(0.0f); totalPressure = 0.0f; } __device__ void OnCollisionDetected(TIndex anotherLeafIdx) { const auto pos2 = simState.pos[anotherLeafIdx]; const auto deltaPos = pos2 - pos1; const auto distanceSq = dot(deltaPos, deltaPos); if (distanceSq > simState.diameterSq || distanceSq < 1e-8f) return; const auto vel2 = simState.vel[anotherLeafIdx]; auto dist = sqrtf(distanceSq); auto dir = deltaPos / dist; auto springLen = simState.diameter - dist; auto force = SpringDamper(dir, vel1, vel2, springLen); auto pressure = length(force); totalForce += force; totalPressure += pressure; atomicAdd(&simState.force[anotherLeafIdx].x, -force.x); atomicAdd(&simState.force[anotherLeafIdx].y, -force.y); atomicAdd(&simState.pressure[anotherLeafIdx], pressure); } __device__ void OnPostTraversal() { atomicAdd(&simState.force[curIdx].x, totalForce.x); atomicAdd(&simState.force[curIdx].y, totalForce.y); atomicAdd(&simState.pressure[curIdx], totalPressure); } }; CDerivativeSolver::SIntermediateSimState simState; SParticleParticleCollisionSolver(const CDerivativeSolver::SIntermediateSimState& state) : simState(state) { } __device__ SDeviceSideSolver Create() { return SDeviceSideSolver(simState); } }; struct SParticleWingCollisionSolver { struct SDeviceSideSolver { CDerivativeSolver::SIntermediateSimState& simState; const SLineSegmentsSOA& airfoil; TIndex particleIdx; float2 particlePos; struct SSegment { float2 first; float2 ray; float2 normal; float length = 0.0f; float distanceTo = INFINITY; } closest; __device__ SDeviceSideSolver(CDerivativeSolver::SIntermediateSimState& state, const SLineSegmentsSOA& airfoil_) : simState(state), airfoil(airfoil_) { } __device__ void OnPreTraversal(TIndex particleIdx_) { particlePos = simState.pos[particleIdx_]; particleIdx = particleIdx_; } __device__ void OnCollisionDetected(TIndex wingSegmentIdx) { SSegment cur = { airfoil.first[wingSegmentIdx], airfoil.ray[wingSegmentIdx], airfoil.normal[wingSegmentIdx], airfoil.length[wingSegmentIdx], INFINITY }; const auto projection = dot(cur.ray, particlePos - cur.first); float2 closestPoint; if (projection < 0.0f) { closestPoint = cur.first; } else if (projection >= 0.0f && projection <= cur.length) { closestPoint = cur.first + cur.ray * projection; } else { closestPoint = cur.first + cur.ray * cur.length; } cur.distanceTo = length(particlePos - closestPoint); if (cur.distanceTo < closest.distanceTo) { closest = cur; } else if (cur.distanceTo 
== closest.distanceTo) { auto delta1 = particlePos - closest.first; auto delta2 = particlePos - cur.first; auto dot1 = dot(delta1, closest.normal); auto dot2 = dot(delta2, cur.normal); if (dot2 > dot1) closest = cur; } } __device__ void OnPostTraversal() { if (closest.distanceTo == INFINITY) return; const auto height = dot(closest.normal, particlePos - closest.first); if (height <= simState.particleRad) { const auto penetration = height - simState.particleRad; auto force = SpringDamper2(closest.normal, simState.vel[particleIdx], make_float2(0.0f), penetration); auto pressure = length(force); simState.force[particleIdx] += force; simState.pressure[particleIdx] += pressure; } } }; CDerivativeSolver::SIntermediateSimState simState; const SLineSegmentsSOA airfoil; SParticleWingCollisionSolver(const CDerivativeSolver::SIntermediateSimState& state, const SLineSegmentsSOA& airfoil_) : simState(state), airfoil(airfoil_) { } __device__ SDeviceSideSolver Create() { return SDeviceSideSolver(simState, airfoil); } }; // // // CDerivativeSolver::CDerivativeSolver(size_t particles, float radius, const Segments_t& airfoil, const Segments_t& walls) : m_airfoilStorage(airfoil.size()), m_wallsStorage(walls), m_particlesBoxes(particles), m_particlesExtendedBoxes(particles), m_particles(particles, radius) { auto tempAirfoilStorage = CLineSegmentsStorage(airfoil); auto tempAirfoilBoxes = thrust::device_vector<SBoundingBox>(airfoil.size()); dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(airfoil.size(), kBlockSize)); BuildAirfoilBoxesKernel <<<gridDim, blockDim >>> (tempAirfoilStorage.get(), SBoundingBoxesAoS::Create(tempAirfoilBoxes)); CudaCheckError(); m_airfoilTree.Build(tempAirfoilBoxes); ReorderAirfoilKernel <<<gridDim, blockDim >>> (tempAirfoilStorage.get(), m_airfoilStorage.get(), m_airfoilTree.GetSortedIndices().data().get()); CudaCheckError(); } void CDerivativeSolver::Derive(const OdeState_t& curState, OdeState_t& outDerivative) { BuildParticlesTree(curState); ReorderParticles(curState); ResetParticlesState(); ResolveParticleParticleCollisions(); ResolveParticleWingCollisions(); ParticleToWall(); ApplyGravity(); BuildDerivative(curState, outDerivative); } CDerivativeSolver::SIntermediateSimState CDerivativeSolver::SParticlesState::GetSimState() { return { count, radius, radius * 2.0f, radius * radius * 4.0f, reorderedPositions.data().get(), reorderedVelocities.data().get(), forces.data().get(), pressures.data().get(), }; } void CDerivativeSolver::BuildParticlesTree(const OdeState_t& curState) { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); BuildParticlesBoundingBoxesKernel <<<gridDim, blockDim >>> (curState.data().get(), m_particles.radius, SBoundingBoxesAoS::Create(m_particlesBoxes)); CudaCheckError(); m_particlesTree.Build(m_particlesBoxes); } void CDerivativeSolver::ReorderParticles(const OdeState_t& curState) { auto oldIndices = m_particlesTree.GetSortedIndices().data().get(); dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); ReorderParticlesKernel <<<gridDim, blockDim >>> (curState.data().get(), oldIndices, m_particles.GetSimState()); BuildParticlesBoundingBoxesKernel <<<gridDim, blockDim >>> (m_particles.reorderedPositions.data().get(), m_particles.radius * 4.0f, SBoundingBoxesAoS::Create(m_particlesExtendedBoxes)); CudaCheckError(); } void CDerivativeSolver::ResetParticlesState() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); ResetParticlesStateKernel <<<gridDim, blockDim >>> 
(m_particles.GetSimState()); } void CDerivativeSolver::ResolveParticleParticleCollisions() { m_particlesTree.TraverseReflexive<SParticleParticleCollisionSolver, 24>(SParticleParticleCollisionSolver(m_particles.GetSimState())); CudaCheckError(); } void CDerivativeSolver::ResolveParticleWingCollisions() { m_airfoilTree.Traverse<SParticleWingCollisionSolver, 24>( m_particlesExtendedBoxes, SParticleWingCollisionSolver(m_particles.GetSimState(), m_airfoilStorage.get())); CudaCheckError(); } void CDerivativeSolver::ParticleToWall() { dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); ParticleToWallKernel <<<gridDim, blockDim >>> (m_particles.GetSimState(), m_wallsStorage.get()); CudaCheckError(); } void CDerivativeSolver::ApplyGravity() { auto elements = unsigned(m_particles.count); dim3 blockDim(kBlockSize); dim3 gridDim((elements - 1) / blockDim.x + 1); AddGravityKernel <<<gridDim, blockDim >>> (m_particles.forces.data().get(), elements); CudaCheckError(); } void CDerivativeSolver::BuildDerivative(const OdeState_t& curState, OdeState_t& outDerivative) { const float2* d_velocities = curState.data().get() + m_particles.count; //TODO: reorder forces back const float2* d_forces = m_particles.forces.data().get(); float2* d_derivative = outDerivative.data().get(); const size_t dataBlockSize = m_particles.count * sizeof(float2); CudaSafeCall(cudaMemcpyAsync(d_derivative, d_velocities, dataBlockSize, cudaMemcpyKind::cudaMemcpyDeviceToDevice)); dim3 blockDim(kBlockSize); dim3 gridDim(GridSize(m_particles.count, kBlockSize)); InverseForcesOrderKernel <<<gridDim, blockDim >>> ( m_particles.GetSimState(), m_particlesTree.GetSortedIndices().data().get(), d_derivative + m_particles.count ); CudaCheckError(); } const thrust::device_vector<float>& CDerivativeSolver::GetPressures() const { return m_particles.pressures; } const thrust::device_vector<TIndex>& CDerivativeSolver::GetParticlesIndices() const { return m_particlesTree.GetSortedIndices(); }
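// --- Editor's note (not part of the original file pair) ----------------------
// The systematic difference between the .cu and .hip versions above is the
// kernel-launch syntax: hipify rewrites `Kernel<<<grid, block>>>(args...)` as
// `hipLaunchKernelGGL(Kernel, dim3(grid), dim3(block), sharedMemBytes, stream,
// args...)`, filling in 0 for the shared-memory size and 0 (the default stream)
// when the CUDA launch did not specify them. A minimal CUDA sketch of the same
// pattern; the kernel and sizes are illustrative only.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void FillKernel(float* data, float value, unsigned n)
{
    const unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] = value;
}

int main()
{
    const unsigned n = 1024;
    const unsigned kBlockSize = 256;

    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));

    dim3 blockDim(kBlockSize);
    dim3 gridDim((n + kBlockSize - 1) / kBlockSize);

    // CUDA form, as written in the .cu file:
    FillKernel<<<gridDim, blockDim>>>(d_data, 1.0f, n);
    // Equivalent HIP form emitted by hipify, as seen in the .hip file:
    //   hipLaunchKernelGGL(FillKernel, dim3(gridDim), dim3(blockDim), 0, 0, d_data, 1.0f, n);

    cudaDeviceSynchronize();
    float h_first = 0.0f;
    cudaMemcpy(&h_first, d_data, sizeof(float), cudaMemcpyDeviceToHost);
    std::printf("d_data[0] = %f\n", h_first);   // expect 1.000000

    cudaFree(d_data);
    return 0;
}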
935d6f1be0ce2123f91dd94235e569bad80db2a8.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/Exceptions.h> #include "multi_tensor_apply.cuh" #include "compat.h" #include <assert.h> #include <hip/hip_runtime.h> #define BLOCK_SIZE 512 #define ILP 4 /** * Perform fused SGD on multiple buffers * N: number of tensors * tl[0] : gradients * tl[1] : weights * tl[2] : momentum buffers * tl[3] : fp16 weights (if appropriate) * wd : weight_decay (scalar) * momentum : momentum (scalar) * dampening : momentum dampening (scalar) * lr : learning rate (scalar) * nesterov : enable nesterov (bool) * first run : necessary for proper momentum handling & init * wd_after_momentum : apply weight decay _after_ momentum instead of before **/ template<int N, typename T_grad, typename T_weight> struct SGDFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<N>& tl, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { // Early exit if we don't need to do anything if (*noop_gmem) return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T_grad* grad_in = (T_grad*)tl.addresses[0][tensor_loc]; grad_in += chunk_idx*chunk_size; T_weight* weight_in = (T_weight*)tl.addresses[1][tensor_loc]; weight_in += chunk_idx*chunk_size; T_weight* mom_in = (T_weight*)tl.addresses[2][tensor_loc]; mom_in += chunk_idx*chunk_size; at::Half *model_weights_out = nullptr; if(N == 4) { model_weights_out = (at::Half*)tl.addresses[3][tensor_loc]; model_weights_out += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; // Non-divergent exit condition for the __syncthreads float incoming_grads[ILP]; float incoming_weights[ILP]; float incoming_moms[ILP]; for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { incoming_grads[ii] = 0; incoming_weights[ii] = 0; incoming_moms[ii] = 0; int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { incoming_grads[ii] = static_cast<float>(grad_in[i])*scale; incoming_weights[ii] = static_cast<float>(weight_in[i]); incoming_moms[ii] = static_cast<float>(mom_in[i]); } } // note for clarification to future michael: // From a pure memory dependency perspective, there's likely no point unrolling // the write loop, since writes just fire off once their LDGs arrive. // Put another way, the STGs are dependent on the LDGs, but not on each other. // There is still compute ILP benefit from unrolling the loop though. 
#pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { // apply weight decay before momentum if necessary if(wd != 0.f && !wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; if(momentum != 0.f) { if(!first_run) incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii]; else // initialize momentums to current incoming grads incoming_moms[ii] = incoming_grads[ii]; if(nesterov) incoming_grads[ii] += momentum * incoming_moms[ii]; else incoming_grads[ii] = incoming_moms[ii]; } // Apply WD after momentum if desired if(wd != 0.f && wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; // adjust the weight and write out weight_in[i] += (-lr * incoming_grads[ii]); // if necessary, write out an fp16 copy of the weights if(N == 4) model_weights_out[i] = static_cast<at::Half>(weight_in[i]); // also write out the new momentum if(momentum != 0.f) mom_in[i] = incoming_moms[ii]; } } } } }; void multi_tensor_sgd_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { auto num_tensors = tensor_lists.size(); auto grad_type = tensor_lists[0][0].scalar_type(); auto weight_type = tensor_lists[1][0].scalar_type(); if(num_tensors == 4) for(int i = 0; i < tensor_lists[3].size(); i++) TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half, "Additional output tensors should always be fp16."); TORCH_CHECK(noop_flag.device() == tensor_lists[0][0].device(), "expected noop flag to be on the same device as tensors"); // We have 3 possibilities to handle here, in terms of // grad_type, param_type, momentum_type, requires_fp16_copy // 1. fp16, fp16, fp16, No // 2. fp32, fp32, fp32, No // 3. fp16, fp32, fp32, Yes // 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case // 5. bfp16, bfp16, bfp16, No // 6. bfp16, fp32, fp32, Yes // It's easier to hardcode these possibilities than to use // switches etc. to handle the cross-product of cases where // we don't want the majority of them. // Case 1. fp16, fp16, fp16, No if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Half && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, at::Half, at::Half>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 2. fp16, fp32, fp32, No // else if (grad_type == at::ScalarType::Half && // weight_type == at::ScalarType::Float && // num_tensors == 3) { // multi_tensor_apply<3>( // BLOCK_SIZE, // chunk_size, // noop_flag, // tensor_lists, // SGDFunctor<3, at::Half, float>(), // wd, // momentum, // dampening, // lr, // nesterov, // first_run, // wd_after_momentum); // } // Case 2. fp32, fp32, fp32, No else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 3. fp16, fp32, fp32, Yes else if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, at::Half, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 4. 
fp32, fp32, fp32, Yes else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 5. bfp16, bfp16, bfp16, No else if(grad_type == at::ScalarType::BFloat16 && weight_type == at::ScalarType::BFloat16 && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, at::BFloat16, at::BFloat16>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 6. bfp16, fp32, fp32, Yes else if(grad_type == at::ScalarType::BFloat16 && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, at::BFloat16, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } else { AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ", "gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors); } AT_CUDA_CHECK(hipGetLastError()); }
935d6f1be0ce2123f91dd94235e569bad80db2a8.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/Exceptions.h> #include "multi_tensor_apply.cuh" #include "compat.h" #include <assert.h> #include <cuda_runtime.h> #define BLOCK_SIZE 512 #define ILP 4 /** * Perform fused SGD on multiple buffers * N: number of tensors * tl[0] : gradients * tl[1] : weights * tl[2] : momentum buffers * tl[3] : fp16 weights (if appropriate) * wd : weight_decay (scalar) * momentum : momentum (scalar) * dampening : momentum dampening (scalar) * lr : learning rate (scalar) * nesterov : enable nesterov (bool) * first run : necessary for proper momentum handling & init * wd_after_momentum : apply weight decay _after_ momentum instead of before **/ template<int N, typename T_grad, typename T_weight> struct SGDFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<N>& tl, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { // Early exit if we don't need to do anything if (*noop_gmem) return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T_grad* grad_in = (T_grad*)tl.addresses[0][tensor_loc]; grad_in += chunk_idx*chunk_size; T_weight* weight_in = (T_weight*)tl.addresses[1][tensor_loc]; weight_in += chunk_idx*chunk_size; T_weight* mom_in = (T_weight*)tl.addresses[2][tensor_loc]; mom_in += chunk_idx*chunk_size; at::Half *model_weights_out = nullptr; if(N == 4) { model_weights_out = (at::Half*)tl.addresses[3][tensor_loc]; model_weights_out += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; // Non-divergent exit condition for the __syncthreads float incoming_grads[ILP]; float incoming_weights[ILP]; float incoming_moms[ILP]; for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { incoming_grads[ii] = 0; incoming_weights[ii] = 0; incoming_moms[ii] = 0; int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { incoming_grads[ii] = static_cast<float>(grad_in[i])*scale; incoming_weights[ii] = static_cast<float>(weight_in[i]); incoming_moms[ii] = static_cast<float>(mom_in[i]); } } // note for clarification to future michael: // From a pure memory dependency perspective, there's likely no point unrolling // the write loop, since writes just fire off once their LDGs arrive. // Put another way, the STGs are dependent on the LDGs, but not on each other. // There is still compute ILP benefit from unrolling the loop though. 
#pragma unroll for(int ii = 0; ii < ILP; ii++) { int i = i_start + threadIdx.x + ii*blockDim.x; if(i < n && i < chunk_size) { // apply weight decay before momentum if necessary if(wd != 0.f && !wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; if(momentum != 0.f) { if(!first_run) incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii]; else // initialize momentums to current incoming grads incoming_moms[ii] = incoming_grads[ii]; if(nesterov) incoming_grads[ii] += momentum * incoming_moms[ii]; else incoming_grads[ii] = incoming_moms[ii]; } // Apply WD after momentum if desired if(wd != 0.f && wd_after_momentum) incoming_grads[ii] += wd * incoming_weights[ii]; // adjust the weight and write out weight_in[i] += (-lr * incoming_grads[ii]); // if necessary, write out an fp16 copy of the weights if(N == 4) model_weights_out[i] = static_cast<at::Half>(weight_in[i]); // also write out the new momentum if(momentum != 0.f) mom_in[i] = incoming_moms[ii]; } } } } }; void multi_tensor_sgd_cuda( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, float wd, float momentum, float dampening, float lr, bool nesterov, bool first_run, bool wd_after_momentum, float scale) { auto num_tensors = tensor_lists.size(); auto grad_type = tensor_lists[0][0].scalar_type(); auto weight_type = tensor_lists[1][0].scalar_type(); if(num_tensors == 4) for(int i = 0; i < tensor_lists[3].size(); i++) TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half, "Additional output tensors should always be fp16."); TORCH_CHECK(noop_flag.device() == tensor_lists[0][0].device(), "expected noop flag to be on the same device as tensors"); // We have 3 possibilities to handle here, in terms of // grad_type, param_type, momentum_type, requires_fp16_copy // 1. fp16, fp16, fp16, No // 2. fp32, fp32, fp32, No // 3. fp16, fp32, fp32, Yes // 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case // 5. bfp16, bfp16, bfp16, No // 6. bfp16, fp32, fp32, Yes // It's easier to hardcode these possibilities than to use // switches etc. to handle the cross-product of cases where // we don't want the majority of them. // Case 1. fp16, fp16, fp16, No if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Half && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, at::Half, at::Half>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 2. fp16, fp32, fp32, No // else if (grad_type == at::ScalarType::Half && // weight_type == at::ScalarType::Float && // num_tensors == 3) { // multi_tensor_apply<3>( // BLOCK_SIZE, // chunk_size, // noop_flag, // tensor_lists, // SGDFunctor<3, at::Half, float>(), // wd, // momentum, // dampening, // lr, // nesterov, // first_run, // wd_after_momentum); // } // Case 2. fp32, fp32, fp32, No else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 3. fp16, fp32, fp32, Yes else if(grad_type == at::ScalarType::Half && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, at::Half, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 4. 
fp32, fp32, fp32, Yes else if(grad_type == at::ScalarType::Float && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, float, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 5. bfp16, bfp16, bfp16, No else if(grad_type == at::ScalarType::BFloat16 && weight_type == at::ScalarType::BFloat16 && num_tensors == 3) { multi_tensor_apply<3>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<3, at::BFloat16, at::BFloat16>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } // Case 6. bfp16, fp32, fp32, Yes else if(grad_type == at::ScalarType::BFloat16 && weight_type == at::ScalarType::Float && num_tensors == 4) { multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, SGDFunctor<4, at::BFloat16, float>(), wd, momentum, dampening, lr, nesterov, first_run, wd_after_momentum, scale); } else { AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ", "gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors); } AT_CUDA_CHECK(cudaGetLastError()); }
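// --- Editor's note (not part of the original file pair) ----------------------
// A host-side, single-element sketch of the update SGDFunctor applies to every
// tensor element above: optional weight decay before or after momentum, the
// dampened momentum buffer, optional Nesterov, then the learning-rate step.
// In the functor the gradient is additionally multiplied by `scale` when it is
// loaded; that factor is assumed to be folded into `grad` here. Names are
// illustrative, but the branch structure follows the functor body.
#include <cstdio>

struct SgdState { float weight; float momentum_buf; };

void sgd_step_ref(SgdState& s, float grad, float wd, float momentum, float dampening,
                  float lr, bool nesterov, bool first_run, bool wd_after_momentum)
{
    if (wd != 0.f && !wd_after_momentum)
        grad += wd * s.weight;                   // L2 weight decay folded into the gradient

    if (momentum != 0.f)
    {
        if (!first_run)
            s.momentum_buf = s.momentum_buf * momentum + (1.f - dampening) * grad;
        else
            s.momentum_buf = grad;               // first run: buffer initialised to the gradient

        grad = nesterov ? grad + momentum * s.momentum_buf
                        : s.momentum_buf;
    }

    if (wd != 0.f && wd_after_momentum)
        grad += wd * s.weight;

    s.weight += -lr * grad;                      // the parameter update itself
}

int main()
{
    SgdState s{1.0f, 0.0f};
    sgd_step_ref(s, /*grad=*/0.5f, /*wd=*/0.f, /*momentum=*/0.9f, /*dampening=*/0.f,
                 /*lr=*/0.1f, /*nesterov=*/false, /*first_run=*/true, /*wd_after_momentum=*/false);
    std::printf("weight after one step: %f\n", s.weight);   // 1.0 - 0.1 * 0.5 = 0.95
    return 0;
}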
3530ffaa4caef925b1fa82e20f79f4fe379af79a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * jacobi1D.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Will Killian <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <unistd.h> #include <time.h> #include <sys/time.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include <math.h> #define POLYBENCH_TIME 1 #include "jacobi1D.cuh" #include <polybench.h> #include <polybenchUtilFuncts.h> //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define RUN_ON_CPU void init_array(int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n)) { int i; for (i = 0; i < n; i++) { A[i] = ((DATA_TYPE) 4 * i + 10) / N; B[i] = ((DATA_TYPE) 7 * i + 11) / N; } } void runJacobi1DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n)) { for (int t = 0; t < _PB_TSTEPS; t++) { for (int i = 2; i < _PB_N - 1; i++) { B[i] = 0.33333 * (A[i-1] + A[i] + A[i + 1]); } for (int j = 2; j < _PB_N - 1; j++) { A[j] = B[j]; } } } __global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ((i > 1) && (i < (_PB_N-1))) { B[i] = 0.33333f * (A[i-1] + A[i] + A[i + 1]); } } __global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B) { int j = blockIdx.x * blockDim.x + threadIdx.x; if ((j > 1) && (j < (_PB_N-1))) { A[j] = B[j]; } } void compareResults(int n, DATA_TYPE POLYBENCH_1D(a,N,n), DATA_TYPE POLYBENCH_1D(a_outputFromGpu,N,n), DATA_TYPE POLYBENCH_1D(b,N,n), DATA_TYPE POLYBENCH_1D(b_outputFromGpu,N,n)) { int i, fail; fail = 0; // Compare a and c for (i=0; i < n; i++) { if (percentDiff(a[i], a_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } for (i=0; i < n; i++) { if (percentDiff(b[i], b_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void runJacobi1DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n), DATA_TYPE POLYBENCH_1D(A_outputFromGpu,N,n), DATA_TYPE POLYBENCH_1D(B_outputFromGpu,N,n)) { DATA_TYPE* Agpu; DATA_TYPE* Bgpu; hipMalloc(&Agpu, N * sizeof(DATA_TYPE)); hipMalloc(&Bgpu, N * sizeof(DATA_TYPE)); hipMemcpy(Agpu, A, N * sizeof(DATA_TYPE), hipMemcpyHostToDevice); hipMemcpy(Bgpu, B, N * sizeof(DATA_TYPE), hipMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); /* Start timer. */ polybench_start_instruments; for (int t = 0; t < _PB_TSTEPS ; t++) { hipLaunchKernelGGL(( runJacobiCUDA_kernel1) , dim3(grid), dim3(block) , 0, 0, n, Agpu, Bgpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( runJacobiCUDA_kernel2) , dim3(grid), dim3(block), 0, 0, n, Agpu, Bgpu); hipDeviceSynchronize(); } /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; hipMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost); hipMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost); hipFree(Agpu); hipFree(Bgpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. 
*/ static void print_array(int n, DATA_TYPE POLYBENCH_1D(A,N,n)) { int i; for (i = 0; i < n; i++) { fprintf(stderr, DATA_PRINTF_MODIFIER, A[i]); if (i % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int tsteps = TSTEPS; POLYBENCH_1D_ARRAY_DECL(a,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(b,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,n); init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); runJacobi1DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; runJacobi1DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(a); POLYBENCH_FREE_ARRAY(a_outputFromGpu); POLYBENCH_FREE_ARRAY(b); POLYBENCH_FREE_ARRAY(b_outputFromGpu); return 0; } #include <polybench.c>
3530ffaa4caef925b1fa82e20f79f4fe379af79a.cu
/** * jacobi1D.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Will Killian <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <unistd.h> #include <time.h> #include <sys/time.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include <math.h> #define POLYBENCH_TIME 1 #include "jacobi1D.cuh" #include <polybench.h> #include <polybenchUtilFuncts.h> //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define RUN_ON_CPU void init_array(int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n)) { int i; for (i = 0; i < n; i++) { A[i] = ((DATA_TYPE) 4 * i + 10) / N; B[i] = ((DATA_TYPE) 7 * i + 11) / N; } } void runJacobi1DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n)) { for (int t = 0; t < _PB_TSTEPS; t++) { for (int i = 2; i < _PB_N - 1; i++) { B[i] = 0.33333 * (A[i-1] + A[i] + A[i + 1]); } for (int j = 2; j < _PB_N - 1; j++) { A[j] = B[j]; } } } __global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ((i > 1) && (i < (_PB_N-1))) { B[i] = 0.33333f * (A[i-1] + A[i] + A[i + 1]); } } __global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B) { int j = blockIdx.x * blockDim.x + threadIdx.x; if ((j > 1) && (j < (_PB_N-1))) { A[j] = B[j]; } } void compareResults(int n, DATA_TYPE POLYBENCH_1D(a,N,n), DATA_TYPE POLYBENCH_1D(a_outputFromGpu,N,n), DATA_TYPE POLYBENCH_1D(b,N,n), DATA_TYPE POLYBENCH_1D(b_outputFromGpu,N,n)) { int i, fail; fail = 0; // Compare a and c for (i=0; i < n; i++) { if (percentDiff(a[i], a_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } for (i=0; i < n; i++) { if (percentDiff(b[i], b_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } // Print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void runJacobi1DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_1D(A,N,n), DATA_TYPE POLYBENCH_1D(B,N,n), DATA_TYPE POLYBENCH_1D(A_outputFromGpu,N,n), DATA_TYPE POLYBENCH_1D(B_outputFromGpu,N,n)) { DATA_TYPE* Agpu; DATA_TYPE* Bgpu; cudaMalloc(&Agpu, N * sizeof(DATA_TYPE)); cudaMalloc(&Bgpu, N * sizeof(DATA_TYPE)); cudaMemcpy(Agpu, A, N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); cudaMemcpy(Bgpu, B, N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); /* Start timer. */ polybench_start_instruments; for (int t = 0; t < _PB_TSTEPS ; t++) { runJacobiCUDA_kernel1 <<< grid, block >>> (n, Agpu, Bgpu); cudaThreadSynchronize(); runJacobiCUDA_kernel2 <<< grid, block>>> (n, Agpu, Bgpu); cudaThreadSynchronize(); } /* Stop and print timer. */ printf("GPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; cudaMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost); cudaMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost); cudaFree(Agpu); cudaFree(Bgpu); } /* DCE code. Must scan the entire live-out data. Can be used also to check the correctness of the output. 
*/ static void print_array(int n, DATA_TYPE POLYBENCH_1D(A,N,n)) { int i; for (i = 0; i < n; i++) { fprintf(stderr, DATA_PRINTF_MODIFIER, A[i]); if (i % 20 == 0) fprintf(stderr, "\n"); } fprintf(stderr, "\n"); } int main(int argc, char** argv) { /* Retrieve problem size. */ int n = N; int tsteps = TSTEPS; POLYBENCH_1D_ARRAY_DECL(a,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(b,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,n); POLYBENCH_1D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,n); init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); runJacobi1DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu)); #ifdef RUN_ON_CPU /* Start timer. */ polybench_start_instruments; runJacobi1DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b)); /* Stop and print timer. */ printf("CPU Time in seconds:\n"); polybench_stop_instruments; polybench_print_instruments; compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu)); #else //prevent dead code elimination polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu))); #endif //RUN_ON_CPU POLYBENCH_FREE_ARRAY(a); POLYBENCH_FREE_ARRAY(a_outputFromGpu); POLYBENCH_FREE_ARRAY(b); POLYBENCH_FREE_ARRAY(b_outputFromGpu); return 0; } #include <polybench.c>
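// --- Editor's note (not part of the original file pair) ----------------------
// runJacobi1DCUDA sizes its grid as (unsigned)ceil((float)N / (float)block.x),
// while the derivative-solver file earlier in this collection uses the integer
// form (elements - 1) / block + 1. The two agree for ordinary sizes, but the
// float path can round down once N is large enough to lose precision in float.
// A small host-only comparison; the block size and test values are illustrative.
#include <cmath>
#include <cstdio>

static unsigned grid_size_float(unsigned n, unsigned block)
{
    return (unsigned)std::ceil((float)n / (float)block);   // as in runJacobi1DCUDA
}

static unsigned grid_size_int(unsigned n, unsigned block)
{
    return (n + block - 1) / block;                        // exact ceiling division
}

int main()
{
    const unsigned block = 256;
    const unsigned sizes[] = { 1024u, 1025u, 16777217u };  // 2^24 + 1 is not representable as float
    for (unsigned n : sizes)
        std::printf("n=%u: float-based=%u integer=%u\n",
                    n, grid_size_float(n, block), grid_size_int(n, block));
    return 0;
}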
536a51162ce691b9e4bcc9887fd88e1558c07112.hip
// !!! This is a file automatically generated by hipify!!! /** * An example program utilizing most/all calls from the CUDA * Runtime API module: * * Event Management * */ #include "cuda/api_wrappers.h" #include <hip/hip_runtime_api.h> #include <iostream> #include <cstdlib> #include <cstring> #include <cassert> [[noreturn]] void die(const std::string& message) { std::cerr << message << "\n"; exit(EXIT_FAILURE); } template <typename T, size_t N> struct poor_mans_array { T data[N]; __host__ __device__ operator T*() { return data; } __host__ __device__ operator const T*() const { return data; } __host__ __device__ T& operator [](off_t offset) { return data[offset]; } __host__ __device__ const T& operator [](off_t offset) const { return data[offset]; } }; template <size_t N> poor_mans_array<char, N> message(const char* message_str) { poor_mans_array<char, N> a; assert(std::strlen(message_str) < N); std::strcpy(a.data, message_str); return a; } template <size_t N> poor_mans_array<char, N> message(const std::string& message_str) { return message<N>(message_str.c_str()); } template <size_t N, unsigned Index> __global__ void print_message(poor_mans_array<char, N> message) { if (threadIdx.x == 0 && blockIdx.x == 0) { printf("Kernel no. %u says: %s\n", Index, (const char*) message); } } __global__ void increment(char* data, size_t length) { size_t global_index = threadIdx.x + blockIdx.x * blockDim.x; if (global_index < length) data[global_index]++; } inline void report_occurrence( const std::string& prefix_message, const cuda::event_t& e1, const cuda::event_t& e2) { std::cout << prefix_message << ": " << "Event 1 has " << (e1.has_occurred() ? "" : "not ") << "occurred; " << "event 2 has " << (e2.has_occurred() ? "" : "not ") << "occurred.\n"; } int main(int argc, char **argv) { if (cuda::device::count() == 0) { die("No CUDA devices on this system"); } static constexpr size_t N = 40; // Being very cavalier about our command-line arguments here... cuda::device::id_t device_id = (argc > 1) ? 
std::stoi(argv[1]) : cuda::device::default_device_id; cuda::device::current::set(device_id); auto device = cuda::device::current::get(); std::cout << "Working with CUDA device " << device.name() << " (having ID " << device.id() << ")\n"; // Everything else - Enqueueing kernels, events, callbacks // and memory attachments, recording and waiting on events //-------------------------------------------------------------- auto stream = cuda::device::current::get().create_stream( cuda::stream::no_implicit_synchronization_with_default_stream); auto default_stream = cuda::device::current::get().default_stream(); { auto event = cuda::event::create(device); } auto event_1 = cuda::event::create( device.id(), cuda::event::sync_by_blocking, cuda::event::do_record_timings, cuda::event::not_interprocess); auto event_2 = cuda::event::create( device, cuda::event::sync_by_blocking, cuda::event::do_record_timings, cuda::event::not_interprocess); auto event_3 = device.create_event( cuda::event::sync_by_blocking, cuda::event::do_record_timings, cuda::event::not_interprocess); constexpr size_t buffer_size = 12345678; auto buffer = cuda::memory::managed::make_unique<char[]>( buffer_size, cuda::memory::managed::initial_visibility_t::to_all_devices); auto threads_per_block = cuda::device_function_t(increment).attributes().maxThreadsPerBlock; auto num_blocks = (buffer_size + threads_per_block - 1) / threads_per_block; auto launch_config = cuda::make_launch_config(num_blocks, threads_per_block); stream.enqueue.kernel_launch(print_message<N,1>, { 1, 1 }, message<N>("I am launched before the first event")); stream.enqueue.event(event_1); stream.enqueue.callback( [&event_1, &event_2](cuda::stream::id_t stream_id, cuda::status_t status) { report_occurrence("In first callback (enqueued after first event but before first kernel)", event_1, event_2); } ); stream.enqueue.kernel_launch(increment, launch_config, buffer.get(), buffer_size); stream.enqueue.callback( [&event_1, &event_2](cuda::stream::id_t stream_id, cuda::status_t status) { report_occurrence("In second callback (enqueued after the first kernel but before the second event)", event_1, event_2); } ); stream.enqueue.event(event_2); stream.enqueue.kernel_launch(print_message<N,3>, { 1, 1 }, message<N>("I am launched after the second event")); stream.enqueue.event(event_3); stream.enqueue.kernel_launch(print_message<N,4>, { 1, 1 }, message<N>("I am launched after the third event")); try { cuda::event::milliseconds_elapsed_between(event_1, event_2); std::cerr << "Attempting to obtain the elapsed time between two events on a" "stream which does not auto-sync with the default stream and has not been " "synchronized should fail - but it didn't\n"; exit(EXIT_FAILURE); } catch(cuda::runtime_error& e) { assert(e.code() == cuda::status::not_ready); } event_2.synchronize(); report_occurrence("After synchronizing on event_2, but before synchronizing on the stream", event_1, event_2); std::cout << cuda::event::milliseconds_elapsed_between(event_1, event_2) << " msec have elapsed, " << "executing the second kernel (\"increment\") on a buffer of " << buffer_size << " chars and triggering two callbacks.\n"; // ... and this should make the third kernel execute stream.synchronize(); std::cout << "\nSUCCESS\n"; return EXIT_SUCCESS; }
536a51162ce691b9e4bcc9887fd88e1558c07112.cu
/** * An example program utilizing most/all calls from the CUDA * Runtime API module: * * Event Management * */ #include "cuda/api_wrappers.h" #include <cuda_runtime_api.h> #include <iostream> #include <cstdlib> #include <cstring> #include <cassert> [[noreturn]] void die(const std::string& message) { std::cerr << message << "\n"; exit(EXIT_FAILURE); } template <typename T, size_t N> struct poor_mans_array { T data[N]; __host__ __device__ operator T*() { return data; } __host__ __device__ operator const T*() const { return data; } __host__ __device__ T& operator [](off_t offset) { return data[offset]; } __host__ __device__ const T& operator [](off_t offset) const { return data[offset]; } }; template <size_t N> poor_mans_array<char, N> message(const char* message_str) { poor_mans_array<char, N> a; assert(std::strlen(message_str) < N); std::strcpy(a.data, message_str); return a; } template <size_t N> poor_mans_array<char, N> message(const std::string& message_str) { return message<N>(message_str.c_str()); } template <size_t N, unsigned Index> __global__ void print_message(poor_mans_array<char, N> message) { if (threadIdx.x == 0 && blockIdx.x == 0) { printf("Kernel no. %u says: %s\n", Index, (const char*) message); } } __global__ void increment(char* data, size_t length) { size_t global_index = threadIdx.x + blockIdx.x * blockDim.x; if (global_index < length) data[global_index]++; } inline void report_occurrence( const std::string& prefix_message, const cuda::event_t& e1, const cuda::event_t& e2) { std::cout << prefix_message << ": " << "Event 1 has " << (e1.has_occurred() ? "" : "not ") << "occurred; " << "event 2 has " << (e2.has_occurred() ? "" : "not ") << "occurred.\n"; } int main(int argc, char **argv) { if (cuda::device::count() == 0) { die("No CUDA devices on this system"); } static constexpr size_t N = 40; // Being very cavalier about our command-line arguments here... cuda::device::id_t device_id = (argc > 1) ? 
std::stoi(argv[1]) : cuda::device::default_device_id; cuda::device::current::set(device_id); auto device = cuda::device::current::get(); std::cout << "Working with CUDA device " << device.name() << " (having ID " << device.id() << ")\n"; // Everything else - Enqueueing kernels, events, callbacks // and memory attachments, recording and waiting on events //-------------------------------------------------------------- auto stream = cuda::device::current::get().create_stream( cuda::stream::no_implicit_synchronization_with_default_stream); auto default_stream = cuda::device::current::get().default_stream(); { auto event = cuda::event::create(device); } auto event_1 = cuda::event::create( device.id(), cuda::event::sync_by_blocking, cuda::event::do_record_timings, cuda::event::not_interprocess); auto event_2 = cuda::event::create( device, cuda::event::sync_by_blocking, cuda::event::do_record_timings, cuda::event::not_interprocess); auto event_3 = device.create_event( cuda::event::sync_by_blocking, cuda::event::do_record_timings, cuda::event::not_interprocess); constexpr size_t buffer_size = 12345678; auto buffer = cuda::memory::managed::make_unique<char[]>( buffer_size, cuda::memory::managed::initial_visibility_t::to_all_devices); auto threads_per_block = cuda::device_function_t(increment).attributes().maxThreadsPerBlock; auto num_blocks = (buffer_size + threads_per_block - 1) / threads_per_block; auto launch_config = cuda::make_launch_config(num_blocks, threads_per_block); stream.enqueue.kernel_launch(print_message<N,1>, { 1, 1 }, message<N>("I am launched before the first event")); stream.enqueue.event(event_1); stream.enqueue.callback( [&event_1, &event_2](cuda::stream::id_t stream_id, cuda::status_t status) { report_occurrence("In first callback (enqueued after first event but before first kernel)", event_1, event_2); } ); stream.enqueue.kernel_launch(increment, launch_config, buffer.get(), buffer_size); stream.enqueue.callback( [&event_1, &event_2](cuda::stream::id_t stream_id, cuda::status_t status) { report_occurrence("In second callback (enqueued after the first kernel but before the second event)", event_1, event_2); } ); stream.enqueue.event(event_2); stream.enqueue.kernel_launch(print_message<N,3>, { 1, 1 }, message<N>("I am launched after the second event")); stream.enqueue.event(event_3); stream.enqueue.kernel_launch(print_message<N,4>, { 1, 1 }, message<N>("I am launched after the third event")); try { cuda::event::milliseconds_elapsed_between(event_1, event_2); std::cerr << "Attempting to obtain the elapsed time between two events on a" "stream which does not auto-sync with the default stream and has not been " "synchronized should fail - but it didn't\n"; exit(EXIT_FAILURE); } catch(cuda::runtime_error& e) { assert(e.code() == cuda::status::not_ready); } event_2.synchronize(); report_occurrence("After synchronizing on event_2, but before synchronizing on the stream", event_1, event_2); std::cout << cuda::event::milliseconds_elapsed_between(event_1, event_2) << " msec have elapsed, " << "executing the second kernel (\"increment\") on a buffer of " << buffer_size << " chars and triggering two callbacks.\n"; // ... and this should make the third kernel execute stream.synchronize(); std::cout << "\nSUCCESS\n"; return EXIT_SUCCESS; }
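// --- Editor's note (not part of the original file pair) ----------------------
// The example above drives event timing through the cuda-api-wrappers classes
// (cuda::event_t, stream.enqueue.event, cuda::event::milliseconds_elapsed_between).
// For reference, a minimal sketch of the same measurement with the raw runtime
// API those wrappers sit on; the kernel and buffer size are illustrative only.
#include <cstdio>
#include <cuda_runtime_api.h>

__global__ void increment_ref(char* data, size_t length)
{
    size_t i = threadIdx.x + blockIdx.x * (size_t)blockDim.x;
    if (i < length)
        data[i]++;
}

int main()
{
    const size_t buffer_size = 12345678;
    char* d_buffer = nullptr;
    cudaMalloc(&d_buffer, buffer_size);
    cudaMemset(d_buffer, 0, buffer_size);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);            // timing-capable events (cudaEventDisableTiming not set)
    cudaEventCreate(&stop);

    const unsigned threads = 256;
    const unsigned blocks  = (unsigned)((buffer_size + threads - 1) / threads);

    cudaEventRecord(start);             // recorded on the default stream
    increment_ref<<<blocks, threads>>>(d_buffer, buffer_size);
    cudaEventRecord(stop);

    cudaEventSynchronize(stop);         // wait until the kernel and the stop event have completed
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    std::printf("increment over %zu bytes took %f ms\n", buffer_size, ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_buffer);
    return 0;
}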
57d3f9be1768fc8898a67cdcb6788f58b126b049.hip
// !!! This is a file automatically generated by hipify!!! /* getgpuinfo.cu -- Prints information of the installed CUDA GPU-card(s). */ /* A. Goude 2011-04-12 */ #include<stdio.h> #include "hip/hip_runtime.h" #ifndef C_CODE /*if in C file mode, redefine all mex functions to c functions*/ #include "mex.h" #endif #ifdef C_CODE /*if in C file mode, redefine all mex functions to c functions*/ #define mxFree free #define mxMalloc malloc #define mxCalloc calloc #define mexPrintf printf #define mxLogical int #endif /*------------------------------------------------------------------------*/ void cudasafe(hipError_t error,char* message) /* Function-call wrapper. */ { if (error != hipSuccess) { mexPrintf("ERROR: %s : %i\n",message,error); exit(-1); } } /*------------------------------------------------------------------------*/ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { hipDeviceProp_t info; int count,i; cudasafe(hipGetDeviceCount(&count),"hipGetDeviceCount"); for(i = 0; i < count; i++) { cudasafe(hipGetDeviceProperties(&info,i),"hipGetDeviceProperties"); mexPrintf("\nDevice #%d\n",i); mexPrintf("---------\n"); mexPrintf("Device name:\t\t%s\n",info.name); mexPrintf("totalGlobalMem:\t\t%d bytes\n",info.totalGlobalMem); mexPrintf("sharedMemPerBlock:\t%d bytes\n",info.sharedMemPerBlock); mexPrintf("regsPerBlock:\t\t%d\n",info.regsPerBlock); mexPrintf("warpSize:\t\t%d threads\n",info.warpSize); mexPrintf("memPitch:\t\t%d bytes\n",info.memPitch); mexPrintf("maxThreadsPerBlock:\t%d\n",info.maxThreadsPerBlock); mexPrintf("maxThreadsDim:\t\t[%d %d %d]\n", info.maxThreadsDim[0], info.maxThreadsDim[1], info.maxThreadsDim[2]); mexPrintf("maxGridSize:\t\t[%d %d %d]\n", info.maxGridSize[0], info.maxGridSize[1], info.maxGridSize[2]); mexPrintf("totalConstMem:\t\t%d bytes\n\n",info.totalConstMem); mexPrintf("Compute Capability:\t%d.%d\n",info.major,info.minor); mexPrintf("clockRate:\t\t%d kHz\n",info.clockRate); mexPrintf("textureAlignment:\t%d\n",info.textureAlignment); mexPrintf("deviceOverlap:\t\t%d\n",info.deviceOverlap); mexPrintf("multiProcessorCount:\t%d\n",info.multiProcessorCount); if (info.kernelExecTimeoutEnabled) mexPrintf("kernelExecTimeout:\tEnabled\n"); else mexPrintf("kernelExecTimeout:\tDisabled\n"); if (info.integrated) mexPrintf("integrated:\t\tmotherboard GPU\n"); else mexPrintf("integrated:\t\tcomponent\n"); mexPrintf("canMapHostMemory:\t%d\n",info.canMapHostMemory); switch (info.computeMode) { case hipComputeModeDefault: mexPrintf("computeMode:\t\tcudaComputeModeDefault\n"); break; case hipComputeModeExclusive: mexPrintf("computeMode:\t\tcudaComputeModeExclusive\n"); break; case hipComputeModeProhibited: mexPrintf("computeMode:\t\tcudaComputeModeProhibited\n"); break; default: mexPrintf("computeMode:\t\tUNKNOWN\n"); break; } mexPrintf("maxTexture1D:\t\t%d\n",info.maxTexture1D); mexPrintf("maxTexture2D:\t\t[%d %d]\n\n", info.maxTexture2D[0], info.maxTexture2D[1]); mexPrintf("maxTexture3D:\t\t[%d %d %d]\n", info.maxTexture3D[0], info.maxTexture3D[1], info.maxTexture3D[2]); /* mexPrintf("maxTexture2DArray:\t[%d %d %d]\n", info.maxTexture2DArray[0], info.maxTexture2DArray[1], info.maxTexture2DArray[2]); */ mexPrintf("concurrentKernels:\t%d\n",info.concurrentKernels); mexPrintf("ECCEnabled:\t\t%d\n",info.ECCEnabled); mexPrintf("pciBusID:\t\t%d\n",info.pciBusID); mexPrintf("pciDeviceID:\t\t%d\n",info.pciDeviceID); mexPrintf("tccDriver:\t\t%d\n",info.tccDriver); } } /*------------------------------------------------------------------------*/
57d3f9be1768fc8898a67cdcb6788f58b126b049.cu
/* getgpuinfo.cu -- Prints information of the installed CUDA GPU-card(s). */ /* A. Goude 2011-04-12 */ #include<stdio.h> #include "cuda.h" #ifndef C_CODE /*if in C file mode, redefine all mex functions to c functions*/ #include "mex.h" #endif #ifdef C_CODE /*if in C file mode, redefine all mex functions to c functions*/ #define mxFree free #define mxMalloc malloc #define mxCalloc calloc #define mexPrintf printf #define mxLogical int #endif /*------------------------------------------------------------------------*/ void cudasafe(cudaError_t error,char* message) /* Function-call wrapper. */ { if (error != cudaSuccess) { mexPrintf("ERROR: %s : %i\n",message,error); exit(-1); } } /*------------------------------------------------------------------------*/ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { cudaDeviceProp info; int count,i; cudasafe(cudaGetDeviceCount(&count),"cudaGetDeviceCount"); for(i = 0; i < count; i++) { cudasafe(cudaGetDeviceProperties(&info,i),"cudaGetDeviceProperties"); mexPrintf("\nDevice #%d\n",i); mexPrintf("---------\n"); mexPrintf("Device name:\t\t%s\n",info.name); mexPrintf("totalGlobalMem:\t\t%d bytes\n",info.totalGlobalMem); mexPrintf("sharedMemPerBlock:\t%d bytes\n",info.sharedMemPerBlock); mexPrintf("regsPerBlock:\t\t%d\n",info.regsPerBlock); mexPrintf("warpSize:\t\t%d threads\n",info.warpSize); mexPrintf("memPitch:\t\t%d bytes\n",info.memPitch); mexPrintf("maxThreadsPerBlock:\t%d\n",info.maxThreadsPerBlock); mexPrintf("maxThreadsDim:\t\t[%d %d %d]\n", info.maxThreadsDim[0], info.maxThreadsDim[1], info.maxThreadsDim[2]); mexPrintf("maxGridSize:\t\t[%d %d %d]\n", info.maxGridSize[0], info.maxGridSize[1], info.maxGridSize[2]); mexPrintf("totalConstMem:\t\t%d bytes\n\n",info.totalConstMem); mexPrintf("Compute Capability:\t%d.%d\n",info.major,info.minor); mexPrintf("clockRate:\t\t%d kHz\n",info.clockRate); mexPrintf("textureAlignment:\t%d\n",info.textureAlignment); mexPrintf("deviceOverlap:\t\t%d\n",info.deviceOverlap); mexPrintf("multiProcessorCount:\t%d\n",info.multiProcessorCount); if (info.kernelExecTimeoutEnabled) mexPrintf("kernelExecTimeout:\tEnabled\n"); else mexPrintf("kernelExecTimeout:\tDisabled\n"); if (info.integrated) mexPrintf("integrated:\t\tmotherboard GPU\n"); else mexPrintf("integrated:\t\tcomponent\n"); mexPrintf("canMapHostMemory:\t%d\n",info.canMapHostMemory); switch (info.computeMode) { case cudaComputeModeDefault: mexPrintf("computeMode:\t\tcudaComputeModeDefault\n"); break; case cudaComputeModeExclusive: mexPrintf("computeMode:\t\tcudaComputeModeExclusive\n"); break; case cudaComputeModeProhibited: mexPrintf("computeMode:\t\tcudaComputeModeProhibited\n"); break; default: mexPrintf("computeMode:\t\tUNKNOWN\n"); break; } mexPrintf("maxTexture1D:\t\t%d\n",info.maxTexture1D); mexPrintf("maxTexture2D:\t\t[%d %d]\n\n", info.maxTexture2D[0], info.maxTexture2D[1]); mexPrintf("maxTexture3D:\t\t[%d %d %d]\n", info.maxTexture3D[0], info.maxTexture3D[1], info.maxTexture3D[2]); /* mexPrintf("maxTexture2DArray:\t[%d %d %d]\n", info.maxTexture2DArray[0], info.maxTexture2DArray[1], info.maxTexture2DArray[2]); */ mexPrintf("concurrentKernels:\t%d\n",info.concurrentKernels); mexPrintf("ECCEnabled:\t\t%d\n",info.ECCEnabled); mexPrintf("pciBusID:\t\t%d\n",info.pciBusID); mexPrintf("pciDeviceID:\t\t%d\n",info.pciDeviceID); mexPrintf("tccDriver:\t\t%d\n",info.tccDriver); } } /*------------------------------------------------------------------------*/
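// --- Editor's note (not part of the original file pair) ----------------------
// getgpuinfo.cu reports through MATLAB's mexPrintf. A minimal standalone sketch
// of the same enumeration (cudaGetDeviceCount / cudaGetDeviceProperties) with
// plain printf; the subset of fields printed here is illustrative. Note that the
// original prints the size_t fields (totalGlobalMem, sharedMemPerBlock) with %d,
// which truncates on 64-bit builds; %zu is used below.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

static void check(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        std::fprintf(stderr, "ERROR: %s : %s\n", what, cudaGetErrorString(err));
        std::exit(EXIT_FAILURE);
    }
}

int main()
{
    int count = 0;
    check(cudaGetDeviceCount(&count), "cudaGetDeviceCount");

    for (int i = 0; i < count; i++) {
        cudaDeviceProp info;
        check(cudaGetDeviceProperties(&info, i), "cudaGetDeviceProperties");
        std::printf("Device #%d: %s\n", i, info.name);
        std::printf("  totalGlobalMem:      %zu bytes\n", info.totalGlobalMem);
        std::printf("  sharedMemPerBlock:   %zu bytes\n", info.sharedMemPerBlock);
        std::printf("  Compute Capability:  %d.%d\n", info.major, info.minor);
        std::printf("  multiProcessorCount: %d\n", info.multiProcessorCount);
    }
    return 0;
}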
c0b9e14c3bc768e9c9efaf8369a9cf271287f9ba.hip
// !!! This is a file automatically generated by hipify!!! /* */ #include <iostream> #include <hip/hip_runtime.h> #include <vector> #include <hip/hip_runtime.h> #include "../include/mycudaheader.h" using namespace std; __device__ double laplacian_GPU( double *array, size_t ind, size_t N ) { double value = 4.0 * array[ind]; // east element if ( (ind + 1) % N != 0 ) value += -1.0 * array[ind + 1]; // north element if ( ind + N < N*N ) // TODO: N*N --> dim value += -1.0 * array[ind + N]; // west element if ( ind % N != 0 ) value += -1.0 * array[ind - 1]; // south element if ( ind >= N ) value += -1.0 * array[ind - N]; return value; } // __global__ // void calcLambdaUpper(double* lambda_u, double *p, double beta, double *laplacian, double eta) // { // getMax(float *array, float *max, int *mutex, unsigned int n) // } __global__ void calcLambdaLower(double *array, double *min, int *mutex, double beta, double *laplacian, double eta, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[256]; *min = 1.0e9; double temp = 1.0e9; while(index + offset < n){ temp = fminf(temp, ( array[index + offset] + ( beta * laplacian[index] ) - eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fminf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *min = fminf(*min, cache[0]); atomicExch(mutex, 0); //unlock } } __global__ void calcLambdaUpper(double *array, double *max, int *mutex, double beta, double *laplacian, double eta, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[256]; *max = -1.0e9; double temp = -1.0e9; while(index + offset < n){ temp = fmaxf(temp, ( array[index + offset] + ( beta * laplacian[index] ) + eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *max = fmaxf(*max, cache[0]); atomicExch(mutex, 0); //unlock } } double laplacian(double *array, size_t ind, size_t N) { double value = 4.0 * array[ind]; // east element if ( (ind + 1) % N != 0 ) value += -1.0 * array[ind + 1]; // north element if ( ind + N < N*N ) // TODO: N*N --> dim value += -1.0 * array[ind + N]; // west element if ( ind % N != 0 ) value += -1.0 * array[ind - 1]; // south element if ( ind >= N ) value += -1.0 * array[ind - N]; return value; } // TODO: change kai to something else __global__ void getKaiTrial( double *kai, double *p, double *lambda_trial, double del_t, double eta, double beta, double* kai_trial, size_t numElements ) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; __shared__ double del_kai[256]; del_kai[id] = ( del_t / eta ) * ( p[id] - *lambda_trial + beta*( laplacian_GPU( kai, id, numElements ) ) ); if ( del_kai[id] + kai[id] > 1 ) kai_trial[id] = 1; else if ( del_kai[id] + kai[id] < 1e-9 ) kai_trial[id] = 1e-9; else kai_trial[id] = del_kai[id] + kai[id]; } __global__ void sumOfVector_GPU(double* sum, double* x, size_t n) { int id = threadIdx.x + blockDim.x*blockIdx.x; int stride = blockDim.x*gridDim.x; 
__shared__ double cache[1024]; cache[id] = 0; double temp = 0.0; while(id < n) { temp += x[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reset id id = threadIdx.x + blockDim.x*blockIdx.x; // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __global__ void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial) { if ( *rho_trial > rho ) *lambda_l = *lambda_trial; else *lambda_u = *lambda_trial; *lambda_trial = 0.5 * ( *lambda_l + *lambda_u ); } __global__ void checkKaiConvergence(bool *foo, double *rho_trial, double rho) { if ( *rho_trial - rho < 1e-7 ) *foo = false; } int main() { size_t N = 2; double rho = 0.4; double lambda_trial = 0; double lambda_min; double lambda_max; double del_t = 1; double eta = 12; double beta = 0.5; size_t numElements = 4; vector<double> elements = { 0.4, 0.4, 0.4, 0.4 }; vector<double> laplace_array(4); double* h_elements = &elements[0]; vector<double> p = { 1.0, 2.03, 3.09, 9.05 }; double* h_p = &p[0]; // for ( int i = 0 ; i < numElements ; i++ ) // { // laplace_array[i] = laplacian( h_elements, i, N ); // cout << laplace_array[i] << endl; // } // CUDA double *d_elements; double *d_kai_trial; double *d_laplacian; double *d_dummy; double *d_rho_trial; double *d_rho; double *d_max; double *d_p; int *d_mutex; double *d_lambda_l; double *d_lambda_u; double *d_lambda_tr; hipMalloc( (void**)&d_laplacian, sizeof(double) * 4 ); hipMalloc( (void**)&d_elements, sizeof(double) * 4 ); hipMalloc( (void**)&d_kai_trial, sizeof(double) * 4 ); hipMalloc( (void**)&d_p, sizeof(double) * 4 ); hipMalloc( (void**)&d_rho_trial, sizeof(double) ); hipMalloc( (void**)&d_rho, sizeof(double) ); hipMalloc( (void**)&d_dummy, sizeof(double) * 4 ); hipMalloc( (void**)&d_mutex, sizeof(int) ); hipMalloc( (void**)&d_max, sizeof(double) ); hipMalloc( (void**)&d_lambda_l, sizeof(double) ); hipMalloc( (void**)&d_lambda_u, sizeof(double) ); hipMalloc( (void**)&d_lambda_tr, sizeof(double) ); hipMemset( d_mutex, 0, sizeof(int) ); hipMemset( d_max, 0, sizeof(double) ); hipMemset( d_rho, 0, sizeof(double) ); hipMemset( d_rho_trial, 0, sizeof(double) ); hipMemset( d_lambda_tr, 0, sizeof(double) ); hipMemset( d_lambda_u, 0, sizeof(double) ); hipMemset( d_lambda_l, 0, sizeof(double) ); hipMemcpy(d_elements, &elements[0], sizeof(double) * 4, hipMemcpyHostToDevice); hipMemcpy(d_laplacian, &laplace_array[0], sizeof(double) * 4, hipMemcpyHostToDevice); hipMemcpy(d_p, &p[0], sizeof(double) * 4, hipMemcpyHostToDevice); // can run these concurrently hipLaunchKernelGGL(( calcLambdaUpper), dim3(1), dim3(4) , 0, 0, d_p, d_lambda_u, d_mutex, 1.0, d_laplacian, 12, 4); hipLaunchKernelGGL(( calcLambdaLower), dim3(1), dim3(4) , 0, 0, d_p, d_lambda_l, d_mutex, 1.0, d_laplacian, 12, 4); bool foo = true; bool *d_foo; hipMalloc( (void**)&d_foo, sizeof(bool) ); hipMemset( d_foo, 1, sizeof(bool) ); while ( foo ) { hipLaunchKernelGGL(( getKaiTrial), dim3(1),dim3(4), 0, 0, d_elements, d_p, d_lambda_tr, del_t, eta, beta, d_kai_trial, 4); // calcRhoTrial = sum hipLaunchKernelGGL(( sumOfVector_GPU), dim3(1),dim3(4), 0, 0, d_rho_trial, d_elements, 4); // determine lambda_trial hipLaunchKernelGGL(( calcLambdaTrial), dim3(1),dim3(1) , 0, 0, d_rho_trial, rho, d_lambda_l, d_lambda_u, d_lambda_tr ); hipLaunchKernelGGL(( checkKaiConvergence), dim3(1),dim3(1), 0, 0, d_foo, 
d_rho_trial, rho); hipDeviceSynchronize(); hipMemcpy(&foo, d_foo, sizeof(bool), hipMemcpyDeviceToHost); } hipLaunchKernelGGL(( print_GPU), dim3(1),dim3(1), 0, 0, d_lambda_tr); hipDeviceSynchronize(); }
c0b9e14c3bc768e9c9efaf8369a9cf271287f9ba.cu
/* */ #include <iostream> #include <cuda.h> #include <vector> #include <cuda_runtime.h> #include "../include/mycudaheader.h" using namespace std; __device__ double laplacian_GPU( double *array, size_t ind, size_t N ) { double value = 4.0 * array[ind]; // east element if ( (ind + 1) % N != 0 ) value += -1.0 * array[ind + 1]; // north element if ( ind + N < N*N ) // TODO: N*N --> dim value += -1.0 * array[ind + N]; // west element if ( ind % N != 0 ) value += -1.0 * array[ind - 1]; // south element if ( ind >= N ) value += -1.0 * array[ind - N]; return value; } // __global__ // void calcLambdaUpper(double* lambda_u, double *p, double beta, double *laplacian, double eta) // { // getMax(float *array, float *max, int *mutex, unsigned int n) // } __global__ void calcLambdaLower(double *array, double *min, int *mutex, double beta, double *laplacian, double eta, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[256]; *min = 1.0e9; double temp = 1.0e9; while(index + offset < n){ temp = fminf(temp, ( array[index + offset] + ( beta * laplacian[index] ) - eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fminf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *min = fminf(*min, cache[0]); atomicExch(mutex, 0); //unlock } } __global__ void calcLambdaUpper(double *array, double *max, int *mutex, double beta, double *laplacian, double eta, unsigned int n) { unsigned int index = threadIdx.x + blockIdx.x*blockDim.x; unsigned int stride = gridDim.x*blockDim.x; unsigned int offset = 0; __shared__ double cache[256]; *max = -1.0e9; double temp = -1.0e9; while(index + offset < n){ temp = fmaxf(temp, ( array[index + offset] + ( beta * laplacian[index] ) + eta ) ); offset += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]); } __syncthreads(); i /= 2; } if(threadIdx.x == 0){ while(atomicCAS(mutex,0,1) != 0); //lock *max = fmaxf(*max, cache[0]); atomicExch(mutex, 0); //unlock } } double laplacian(double *array, size_t ind, size_t N) { double value = 4.0 * array[ind]; // east element if ( (ind + 1) % N != 0 ) value += -1.0 * array[ind + 1]; // north element if ( ind + N < N*N ) // TODO: N*N --> dim value += -1.0 * array[ind + N]; // west element if ( ind % N != 0 ) value += -1.0 * array[ind - 1]; // south element if ( ind >= N ) value += -1.0 * array[ind - N]; return value; } // TODO: change kai to something else __global__ void getKaiTrial( double *kai, double *p, double *lambda_trial, double del_t, double eta, double beta, double* kai_trial, size_t numElements ) { unsigned int id = threadIdx.x + blockIdx.x*blockDim.x; __shared__ double del_kai[256]; del_kai[id] = ( del_t / eta ) * ( p[id] - *lambda_trial + beta*( laplacian_GPU( kai, id, numElements ) ) ); if ( del_kai[id] + kai[id] > 1 ) kai_trial[id] = 1; else if ( del_kai[id] + kai[id] < 1e-9 ) kai_trial[id] = 1e-9; else kai_trial[id] = del_kai[id] + kai[id]; } __global__ void sumOfVector_GPU(double* sum, double* x, size_t n) { int id = threadIdx.x + blockDim.x*blockIdx.x; int stride = blockDim.x*gridDim.x; __shared__ double cache[1024]; cache[id] = 0; double temp = 0.0; while(id < n) 
{ temp += x[id]; id += stride; } cache[threadIdx.x] = temp; __syncthreads(); // reduction unsigned int i = blockDim.x/2; while(i != 0){ if(threadIdx.x < i){ cache[threadIdx.x] += cache[threadIdx.x + i]; } __syncthreads(); i /= 2; } // reset id id = threadIdx.x + blockDim.x*blockIdx.x; // reduce sum from all blocks' cache if(threadIdx.x == 0) atomicAdd_double(sum, cache[0]); } __global__ void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial) { if ( *rho_trial > rho ) *lambda_l = *lambda_trial; else *lambda_u = *lambda_trial; *lambda_trial = 0.5 * ( *lambda_l + *lambda_u ); } __global__ void checkKaiConvergence(bool *foo, double *rho_trial, double rho) { if ( *rho_trial - rho < 1e-7 ) *foo = false; } int main() { size_t N = 2; double rho = 0.4; double lambda_trial = 0; double lambda_min; double lambda_max; double del_t = 1; double eta = 12; double beta = 0.5; size_t numElements = 4; vector<double> elements = { 0.4, 0.4, 0.4, 0.4 }; vector<double> laplace_array(4); double* h_elements = &elements[0]; vector<double> p = { 1.0, 2.03, 3.09, 9.05 }; double* h_p = &p[0]; // for ( int i = 0 ; i < numElements ; i++ ) // { // laplace_array[i] = laplacian( h_elements, i, N ); // cout << laplace_array[i] << endl; // } // CUDA double *d_elements; double *d_kai_trial; double *d_laplacian; double *d_dummy; double *d_rho_trial; double *d_rho; double *d_max; double *d_p; int *d_mutex; double *d_lambda_l; double *d_lambda_u; double *d_lambda_tr; cudaMalloc( (void**)&d_laplacian, sizeof(double) * 4 ); cudaMalloc( (void**)&d_elements, sizeof(double) * 4 ); cudaMalloc( (void**)&d_kai_trial, sizeof(double) * 4 ); cudaMalloc( (void**)&d_p, sizeof(double) * 4 ); cudaMalloc( (void**)&d_rho_trial, sizeof(double) ); cudaMalloc( (void**)&d_rho, sizeof(double) ); cudaMalloc( (void**)&d_dummy, sizeof(double) * 4 ); cudaMalloc( (void**)&d_mutex, sizeof(int) ); cudaMalloc( (void**)&d_max, sizeof(double) ); cudaMalloc( (void**)&d_lambda_l, sizeof(double) ); cudaMalloc( (void**)&d_lambda_u, sizeof(double) ); cudaMalloc( (void**)&d_lambda_tr, sizeof(double) ); cudaMemset( d_mutex, 0, sizeof(int) ); cudaMemset( d_max, 0, sizeof(double) ); cudaMemset( d_rho, 0, sizeof(double) ); cudaMemset( d_rho_trial, 0, sizeof(double) ); cudaMemset( d_lambda_tr, 0, sizeof(double) ); cudaMemset( d_lambda_u, 0, sizeof(double) ); cudaMemset( d_lambda_l, 0, sizeof(double) ); cudaMemcpy(d_elements, &elements[0], sizeof(double) * 4, cudaMemcpyHostToDevice); cudaMemcpy(d_laplacian, &laplace_array[0], sizeof(double) * 4, cudaMemcpyHostToDevice); cudaMemcpy(d_p, &p[0], sizeof(double) * 4, cudaMemcpyHostToDevice); // can run these concurrently calcLambdaUpper<<< 1, 4 >>>(d_p, d_lambda_u, d_mutex, 1.0, d_laplacian, 12, 4); calcLambdaLower<<< 1, 4 >>>(d_p, d_lambda_l, d_mutex, 1.0, d_laplacian, 12, 4); bool foo = true; bool *d_foo; cudaMalloc( (void**)&d_foo, sizeof(bool) ); cudaMemset( d_foo, 1, sizeof(bool) ); while ( foo ) { getKaiTrial<<<1,4>>>(d_elements, d_p, d_lambda_tr, del_t, eta, beta, d_kai_trial, 4); // calcRhoTrial = sum sumOfVector_GPU<<<1,4>>>(d_rho_trial, d_elements, 4); // determine lambda_trial calcLambdaTrial<<< 1,1 >>> ( d_rho_trial, rho, d_lambda_l, d_lambda_u, d_lambda_tr ); checkKaiConvergence<<<1,1>>>(d_foo, d_rho_trial, rho); cudaDeviceSynchronize(); cudaMemcpy(&foo, d_foo, sizeof(bool), cudaMemcpyDeviceToHost); } print_GPU<<<1,1>>>(d_lambda_tr); cudaDeviceSynchronize(); }
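The second pair shows the launch-syntax rewrite: every triple-chevron launch such as calcLambdaUpper<<< 1, 4 >>>(...) becomes hipLaunchKernelGGL(calcLambdaUpper, dim3(1), dim3(4), 0, 0, ...), with the dynamic shared-memory size and stream made explicit, while the memory API calls are renamed one-for-one (cudaMalloc / cudaMemcpy / cudaMemset to hipMalloc / hipMemcpy / hipMemset). A self-contained sketch of that pattern follows; scaleKernel is a hypothetical stand-in for illustration, not taken from the files above.

#include <cstdio>
#include <hip/hip_runtime.h>

__global__ void scaleKernel(double *x, double a, size_t n)  /* hypothetical kernel */
{
    size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;   /* element-wise scale */
}

int main(void) {
    const size_t n = 4;
    double h[4] = {1.0, 2.0, 3.0, 4.0};
    double *d = NULL;
    hipMalloc((void**)&d, n * sizeof(double));
    hipMemcpy(d, h, n * sizeof(double), hipMemcpyHostToDevice);
    /* CUDA form:  scaleKernel<<<1, 4>>>(d, 2.0, n);
       HIP form (what hipify emits), shared-mem bytes and stream explicit: */
    hipLaunchKernelGGL(scaleKernel, dim3(1), dim3(4), 0, 0, d, 2.0, n);
    hipMemcpy(h, d, n * sizeof(double), hipMemcpyDeviceToHost);
    hipFree(d);
    printf("%f %f %f %f\n", h[0], h[1], h[2], h[3]);
    return 0;
}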
9d80c4ce810e47f231a6aa6f20d60ad210e8bc0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Part of the following code in this file refs to // https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_convolution.cu // // Copyright (c) 2017 Microsoft // Licensed under The Apache-2.0 License [see LICENSE for details] // \file deformable_psroi_pooling.cu // \brief // \author Yi Li, Guodong Zhang, Jifeng Dai #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/deformable_conv_filter.cu.h" #include "paddle/fluid/operators/deformable_conv_func.h" #include "paddle/fluid/operators/deformable_conv_v1_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; static constexpr int kNumCUDAThread = 512; static constexpr int kNumMaximumNumBlock = 4096; static inline int NumBlock(const int N) { return ::min((N + kNumCUDAThread - 1) / kNumCUDAThread, kNumMaximumNumBlock); } template <typename T> __global__ void DeformableCol2imCUDAKernel( const int nthreads, const T* data_col, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, T* grad_im) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t thread = index; thread < nthreads; thread += offset) { const int j = (thread / width_col / height_col / batch_size) % kernel_w; const int i = (thread / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = thread / width_col / height_col / batch_size / kernel_w / kernel_h; const int deformable_group_index = c / channel_per_deformable_group; int w_out = thread % width_col; int h_out = (thread / width_col) % height_col; int b = (thread / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; const T cur_inv_h_data = h_in + i * dilation_h + offset_h; const T cur_inv_w_data = 
w_in + j * dilation_w + offset_w; const T cur_top_grad = data_col[thread]; const int cur_h = static_cast<int>(cur_inv_h_data); const int cur_w = static_cast<int>(cur_inv_w_data); for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; T weight = DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); platform::CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename T> inline void DeformableCol2im(const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> pad, const std::vector<int> stride, const std::vector<int> dilation, const int deformable_group, T* grad_im) { int channel_per_deformable_group = im_shape[0] / deformable_group; int num_kernels = col_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; hipLaunchKernelGGL(( DeformableCol2imCUDAKernel<T>), dim3(blocks), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(), num_kernels, data_col, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im); } template <typename T> __global__ void DeformableCol2imCoordCUDAKernel( const int nthreads, const T* data_col, const T* data_im, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, T* grad_offset) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { T val = 0, mval = 0; const int w = i % width_col; const int h = (i / width_col) % height_col; const int c = (i / width_col / height_col) % offset_channels; const int b = (i / width_col / height_col) / offset_channels; const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const T* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const T* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = offset_c / 2; col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / 
height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T inv_h = h_in + i * dilation_h + offset_h; T inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const T weight = DmcnGetCoordinateWeight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[i] = val; } } template <typename T> inline void DeformableCol2imCoord( const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* grad_offset) { int num_kernels = 2 * kernel_shape[2] * kernel_shape[3] * col_shape[1] * col_shape[2] * col_shape[3] * deformable_groups; int channel_per_deformable_group = col_shape[0] / deformable_groups; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; hipLaunchKernelGGL(( DeformableCol2imCoordCUDAKernel<T>), dim3(blocks), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(), num_kernels, data_col, data_im, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], 2 * kernel_shape[2] * kernel_shape[3] * deformable_groups, deformable_groups, col_shape[2], col_shape[3], grad_offset); } template <typename T> __global__ void DeformableIm2colCUDAKernel( const int nthreads, const T* data_im, const T* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, T* data_col) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { const int w_col = i % width_col; const int h_col = (i / width_col) % height_col; const int b_col = (i / width_col) / height_col % batch_size; const int c_im = (i / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; T* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; const T* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const T* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * 
kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T val = static_cast<T>(0); const T h_im = h_in + i * dilation_h + offset_h; const T w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename T> inline void DeformableIm2col(const platform::CUDADeviceContext& ctx, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> filter_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* data_col) { int channel_per_deformable_group = im_shape[0] / deformable_groups; int num_kernels = im_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; // get outputs of im2col with offset by bilinear interpolation hipLaunchKernelGGL(( DeformableIm2colCUDAKernel<T>), dim3(blocks), dim3(threads), 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream(), num_kernels, data_im, data_offset, im_shape[1], im_shape[2], filter_shape[2], filter_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], im_shape[0], deformable_groups, col_shape[2], col_shape[3], data_col); } template <typename T> class DeformableConvV1CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* input = ctx.Input<Tensor>("Input"); const Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); Tensor* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int groups = ctx.Attr<int>("groups"); const int deformable_groups = ctx.Attr<int>("deformable_groups"); const int im2col_step = ctx.Attr<int>("im2col_step"); const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); const std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); const std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); const int batch_size = static_cast<int>(input->dims()[0]); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims())); // col_shape_vec: {c_i * k_h * k_w, im2col_step, o_h, o_w} std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; 
framework::DDim output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); int64_t M = output_shape_vec[1] / groups; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = input->dims()[1] * filter_shape_vec[2] * filter_shape_vec[3] / groups; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize( framework::make_ddim({groups, M, K})); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer) .Resize(framework::make_ddim({groups, K, N})); Tensor output_4d; output_4d.ShareDataWith(output_buffer) .Resize(framework::make_ddim({batch_size / im2col_step, groups, M, N})); output_4d.mutable_data<T>(ctx.GetPlace()); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = framework::vectorize(input_shape); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); col_buffer.mutable_data<T>(ctx.GetPlace()); T* col_buffer_ptr = col_buffer.data<T>(); for (int i = 0; i < batch_size / im2col_step; ++i) { DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); Tensor output_3d = output_4d.Slice(i, i + 1).Resize( framework::slice_ddim(output_4d.dims(), 1, output_4d.dims().size())); // get the product of pixel and weight for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor output_3d_slice = output_3d.Slice(g, g + 1).Resize(framework::slice_ddim( output_3d.dims(), 1, output_3d.dims().size())); blas.MatMul(weight_3d_slice, false, col_buffer_3d_slice, false, T(1.0), &output_3d_slice, T(0.0)); } } output->ShareDataWith(output_buffer) .Resize(framework::make_ddim(output_shape_vec)); } }; template <typename T> class DeformableConvV1GradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); Tensor* offset_grad = ctx.Output<Tensor>(framework::GradVarName("Offset")); const Tensor* input = ctx.Input<Tensor>("Input"); Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); if (!input_grad && !filter_grad && !offset_grad) return; int groups = ctx.Attr<int>("groups"); int deformable_groups = ctx.Attr<int>("deformable_groups"); int im2col_step = ctx.Attr<int>("im2col_step"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int batch_size = 
static_cast<int>(input->dims()[0]); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = framework::vectorize(input_shape); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec( framework::vectorize(output_grad->dims())); std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; framework::DDim output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); output_buffer.ShareDataWith(*output_grad); int64_t M = input_shape_vec[0] / groups * filter_shape_vec[2] * filter_shape_vec[3]; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = output_shape_vec[1] / groups; framework::DDim weight_3d_shape = {groups, K, M}; framework::DDim out_grad_4d_shape = {batch_size / im2col_step, groups, K, N}; framework::DDim col_buffer_3d_shape = {groups, M, N}; framework::DDim filter_grad_shape = {groups, K, M}; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize(weight_3d_shape); Tensor out_grad_4d; out_grad_4d.ShareDataWith(output_buffer).Resize(out_grad_4d_shape); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer).Resize(col_buffer_3d_shape); math::SetConstant<CUDADeviceContext, T> set_zero; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); col_buffer.mutable_data<T>(ctx.GetPlace()); col_buffer_3d.mutable_data<T>(ctx.GetPlace()); out_grad_4d.mutable_data<T>(ctx.GetPlace()); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; if (filter_grad) { filter_grad->mutable_data<T>(ctx.GetPlace()); filter_grad->Resize(filter_grad_shape); set_zero(dev_ctx, filter_grad, static_cast<T>(0)); } if (input_grad) { input_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, input_grad, static_cast<T>(0)); } if (offset_grad) { offset_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, offset_grad, static_cast<T>(0)); } for (int i = 0; i < batch_size / im2col_step; ++i) { Tensor out_grad_3d = out_grad_4d.Slice(i, i + 1).Resize(framework::slice_ddim( out_grad_4d.dims(), 1, out_grad_4d.dims().size())); for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); blas.MatMul(weight_3d_slice, true, out_grad_3d_slice, false, T(1.0), &col_buffer_3d_slice, T(0.0)); } col_buffer.Resize(col_shape); T* col_buffer_ptr = col_buffer.data<T>(); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); if (offset_grad) { T* 
offset_grad_ptr = offset_grad->data<T>(); // get grad of offset DeformableCol2imCoord( dev_ctx, col_buffer_ptr, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, offset_grad_ptr + i * im2col_step * input_offset_dim); } if (input_grad) { T* input_grad_ptr = input_grad->data<T>(); // get grad of input DeformableCol2im(dev_ctx, col_buffer_ptr, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, input_grad_ptr + i * im2col_step * input_dim); input_grad->Resize(input->dims()); } DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); col_buffer_3d.Resize(col_buffer_3d_shape); if (filter_grad) { Tensor dweight_3d; dweight_3d = ctx.AllocateTmpTensor<T, CUDADeviceContext>( filter_grad_shape, dev_ctx); for (int g = 0; g < groups; ++g) { Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor dweight_3d_slice = dweight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( dweight_3d.dims(), 1, dweight_3d.dims().size())); blas.MatMul(out_grad_3d_slice, false, col_buffer_3d_slice, true, T(1.0), &dweight_3d_slice, T(0.0)); } hipLaunchKernelGGL(( FilterGradAddupCUDAKernel<T>), dim3(NumBlock(dweight_3d.numel())), dim3(kNumCUDAThread), 0, dev_ctx.stream(), dweight_3d.numel(), groups, K, M, dweight_3d.data<T>(), filter_grad->data<T>()); } } if (filter_grad) { filter_grad->Resize(filter.dims()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(deformable_conv_v1, ops::DeformableConvV1CUDAKernel<float>, ops::DeformableConvV1CUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(deformable_conv_v1_grad, ops::DeformableConvV1GradCUDAKernel<float>, ops::DeformableConvV1GradCUDAKernel<double>);
9d80c4ce810e47f231a6aa6f20d60ad210e8bc0b.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // // Part of the following code in this file refs to // https://github.com/msracver/Deformable-ConvNets/blob/master/faster_rcnn/operator_cxx/deformable_convolution.cu // // Copyright (c) 2017 Microsoft // Licensed under The Apache-2.0 License [see LICENSE for details] // \file deformable_psroi_pooling.cu // \brief // \author Yi Li, Guodong Zhang, Jifeng Dai #pragma once #include <algorithm> #include <vector> #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/deformable_conv_filter.cu.h" #include "paddle/fluid/operators/deformable_conv_func.h" #include "paddle/fluid/operators/deformable_conv_v1_op.h" #include "paddle/fluid/operators/math/blas.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; static constexpr int kNumCUDAThread = 512; static constexpr int kNumMaximumNumBlock = 4096; static inline int NumBlock(const int N) { return std::min((N + kNumCUDAThread - 1) / kNumCUDAThread, kNumMaximumNumBlock); } template <typename T> __global__ void DeformableCol2imCUDAKernel( const int nthreads, const T* data_col, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int deformable_group, const int height_col, const int width_col, T* grad_im) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t thread = index; thread < nthreads; thread += offset) { const int j = (thread / width_col / height_col / batch_size) % kernel_w; const int i = (thread / width_col / height_col / batch_size / kernel_w) % kernel_h; const int c = thread / width_col / height_col / batch_size / kernel_w / kernel_h; const int deformable_group_index = c / channel_per_deformable_group; int w_out = thread % width_col; int h_out = (thread / width_col) % height_col; int b = (thread / width_col / height_col) % batch_size; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int data_offset_h_ptr = ((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; const T cur_inv_h_data = h_in + i * dilation_h + offset_h; const T cur_inv_w_data = w_in + j * dilation_w + offset_w; const T cur_top_grad = data_col[thread]; const int 
cur_h = static_cast<int>(cur_inv_h_data); const int cur_w = static_cast<int>(cur_inv_w_data); for (int dy = -2; dy <= 2; dy++) { for (int dx = -2; dx <= 2; dx++) { if (cur_h + dy >= 0 && cur_h + dy < height && cur_w + dx >= 0 && cur_w + dx < width && abs(cur_inv_h_data - (cur_h + dy)) < 1 && abs(cur_inv_w_data - (cur_w + dx)) < 1) { int cur_bottom_grad_pos = ((b * channels + c) * height + cur_h + dy) * width + cur_w + dx; T weight = DmcnGetGradientWeight(cur_inv_h_data, cur_inv_w_data, cur_h + dy, cur_w + dx, height, width); platform::CudaAtomicAdd(grad_im + cur_bottom_grad_pos, weight * cur_top_grad); } } } } } template <typename T> inline void DeformableCol2im(const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> pad, const std::vector<int> stride, const std::vector<int> dilation, const int deformable_group, T* grad_im) { int channel_per_deformable_group = im_shape[0] / deformable_group; int num_kernels = col_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; DeformableCol2imCUDAKernel<T><<< blocks, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>( num_kernels, data_col, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], pad[0], pad[1], stride[0], stride[1], dilation[0], dilation[1], channel_per_deformable_group, col_shape[1], deformable_group, col_shape[2], col_shape[3], grad_im); } template <typename T> __global__ void DeformableCol2imCoordCUDAKernel( const int nthreads, const T* data_col, const T* data_im, const T* data_offset, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int offset_channels, const int deformable_group, const int height_col, const int width_col, T* grad_offset) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { T val = 0, mval = 0; const int w = i % width_col; const int h = (i / width_col) % height_col; const int c = (i / width_col / height_col) % offset_channels; const int b = (i / width_col / height_col) / offset_channels; const int deformable_group_index = c / (2 * kernel_h * kernel_w); const int col_step = kernel_h * kernel_w; int cnt = 0; const T* data_col_ptr = data_col + deformable_group_index * channel_per_deformable_group * batch_size * width_col * height_col; const T* data_im_ptr = data_im + (b * deformable_group + deformable_group_index) * channel_per_deformable_group / kernel_h / kernel_w * height * width; const T* data_offset_ptr = data_offset + (b * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; const int offset_c = c - deformable_group_index * 2 * kernel_h * kernel_w; for (int col_c = offset_c / 2; col_c < channel_per_deformable_group; col_c += col_step) { const int col_pos = (((col_c * batch_size + b) * height_col) + h) * width_col + w; const int bp_dir = offset_c % 2; int j = (col_pos / width_col / height_col / batch_size) % kernel_w; int i = (col_pos / width_col / height_col / batch_size / kernel_w) % kernel_h; int w_out = col_pos % width_col; int h_out = (col_pos / width_col) % 
height_col; int w_in = w_out * stride_w - pad_w; int h_in = h_out * stride_h - pad_h; const int data_offset_h_ptr = (((2 * (i * kernel_w + j)) * height_col + h_out) * width_col + w_out); const int data_offset_w_ptr = (((2 * (i * kernel_w + j) + 1) * height_col + h_out) * width_col + w_out); const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T inv_h = h_in + i * dilation_h + offset_h; T inv_w = w_in + j * dilation_w + offset_w; if (inv_h <= -1 || inv_w <= -1 || inv_h >= height || inv_w >= width) { inv_h = inv_w = -2; } else { mval += data_col_ptr[col_pos] * DmcnIm2colBilinear(data_im_ptr + cnt * height * width, width, height, width, inv_h, inv_w); } const T weight = DmcnGetCoordinateWeight( inv_h, inv_w, height, width, data_im_ptr + cnt * height * width, width, bp_dir); val += weight * data_col_ptr[col_pos]; cnt += 1; } grad_offset[i] = val; } } template <typename T> inline void DeformableCol2imCoord( const platform::CUDADeviceContext& ctx, const T* data_col, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> kernel_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* grad_offset) { int num_kernels = 2 * kernel_shape[2] * kernel_shape[3] * col_shape[1] * col_shape[2] * col_shape[3] * deformable_groups; int channel_per_deformable_group = col_shape[0] / deformable_groups; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; DeformableCol2imCoordCUDAKernel<T><<< blocks, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>( num_kernels, data_col, data_im, data_offset, im_shape[0], im_shape[1], im_shape[2], kernel_shape[2], kernel_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], 2 * kernel_shape[2] * kernel_shape[3] * deformable_groups, deformable_groups, col_shape[2], col_shape[3], grad_offset); } template <typename T> __global__ void DeformableIm2colCUDAKernel( const int nthreads, const T* data_im, const T* data_offset, const int height, const int width, const int kernel_h, const int kernel_w, const int pad_h, const int pad_w, const int stride_h, const int stride_w, const int dilation_h, const int dilation_w, const int channel_per_deformable_group, const int batch_size, const int num_channels, const int deformable_group, const int height_col, const int width_col, T* data_col) { int index = blockIdx.x * blockDim.x + threadIdx.x; int offset = blockDim.x * gridDim.x; for (size_t i = index; i < nthreads; i += offset) { const int w_col = i % width_col; const int h_col = (i / width_col) % height_col; const int b_col = (i / width_col) / height_col % batch_size; const int c_im = (i / width_col / height_col) / batch_size; const int c_col = c_im * kernel_h * kernel_w; const int deformable_group_index = c_im / channel_per_deformable_group; const int h_in = h_col * stride_h - pad_h; const int w_in = w_col * stride_w - pad_w; T* data_col_ptr = data_col + ((c_col * batch_size + b_col) * height_col + h_col) * width_col + w_col; const T* data_im_ptr = data_im + (b_col * num_channels + c_im) * height * width; const T* data_offset_ptr = data_offset + (b_col * deformable_group + deformable_group_index) * 2 * kernel_h * kernel_w * height_col * width_col; for (int i = 0; i < kernel_h; ++i) { for (int j = 0; j < kernel_w; ++j) { const int data_offset_h_ptr = ((2 
* (i * kernel_w + j)) * height_col + h_col) * width_col + w_col; const int data_offset_w_ptr = ((2 * (i * kernel_w + j) + 1) * height_col + h_col) * width_col + w_col; const T offset_h = data_offset_ptr[data_offset_h_ptr]; const T offset_w = data_offset_ptr[data_offset_w_ptr]; T val = static_cast<T>(0); const T h_im = h_in + i * dilation_h + offset_h; const T w_im = w_in + j * dilation_w + offset_w; if (h_im > -1 && w_im > -1 && h_im < height && w_im < width) { val = DmcnIm2colBilinear(data_im_ptr, width, height, width, h_im, w_im); } *data_col_ptr = val; data_col_ptr += batch_size * height_col * width_col; } } } } template <typename T> inline void DeformableIm2col(const platform::CUDADeviceContext& ctx, const T* data_im, const T* data_offset, const std::vector<int64_t> im_shape, const std::vector<int64_t> col_shape, const std::vector<int64_t> filter_shape, const std::vector<int> paddings, const std::vector<int> strides, const std::vector<int> dilations, const int deformable_groups, T* data_col) { int channel_per_deformable_group = im_shape[0] / deformable_groups; int num_kernels = im_shape[0] * col_shape[1] * col_shape[2] * col_shape[3]; int blocks = NumBlock(num_kernels); int threads = kNumCUDAThread; // get outputs of im2col with offset by bilinear interpolation DeformableIm2colCUDAKernel<T><<< blocks, threads, 0, reinterpret_cast<const platform::CUDADeviceContext&>(ctx).stream()>>>( num_kernels, data_im, data_offset, im_shape[1], im_shape[2], filter_shape[2], filter_shape[3], paddings[0], paddings[1], strides[0], strides[1], dilations[0], dilations[1], channel_per_deformable_group, col_shape[1], im_shape[0], deformable_groups, col_shape[2], col_shape[3], data_col); } template <typename T> class DeformableConvV1CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* input = ctx.Input<Tensor>("Input"); const Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); Tensor* output = ctx.Output<Tensor>("Output"); output->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int groups = ctx.Attr<int>("groups"); const int deformable_groups = ctx.Attr<int>("deformable_groups"); const int im2col_step = ctx.Attr<int>("im2col_step"); const std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); const std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); const std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); const int batch_size = static_cast<int>(input->dims()[0]); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec(framework::vectorize(output->dims())); // col_shape_vec: {c_i * k_h * k_w, im2col_step, o_h, o_w} std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; framework::DDim output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); 
output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); int64_t M = output_shape_vec[1] / groups; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = input->dims()[1] * filter_shape_vec[2] * filter_shape_vec[3] / groups; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize( framework::make_ddim({groups, M, K})); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer) .Resize(framework::make_ddim({groups, K, N})); Tensor output_4d; output_4d.ShareDataWith(output_buffer) .Resize(framework::make_ddim({batch_size / im2col_step, groups, M, N})); output_4d.mutable_data<T>(ctx.GetPlace()); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = framework::vectorize(input_shape); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); col_buffer.mutable_data<T>(ctx.GetPlace()); T* col_buffer_ptr = col_buffer.data<T>(); for (int i = 0; i < batch_size / im2col_step; ++i) { DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); Tensor output_3d = output_4d.Slice(i, i + 1).Resize( framework::slice_ddim(output_4d.dims(), 1, output_4d.dims().size())); // get the product of pixel and weight for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor output_3d_slice = output_3d.Slice(g, g + 1).Resize(framework::slice_ddim( output_3d.dims(), 1, output_3d.dims().size())); blas.MatMul(weight_3d_slice, false, col_buffer_3d_slice, false, T(1.0), &output_3d_slice, T(0.0)); } } output->ShareDataWith(output_buffer) .Resize(framework::make_ddim(output_shape_vec)); } }; template <typename T> class DeformableConvV1GradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* output_grad = ctx.Input<Tensor>(framework::GradVarName("Output")); Tensor* input_grad = ctx.Output<Tensor>(framework::GradVarName("Input")); Tensor* filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter")); Tensor* offset_grad = ctx.Output<Tensor>(framework::GradVarName("Offset")); const Tensor* input = ctx.Input<Tensor>("Input"); Tensor offset = *ctx.Input<Tensor>("Offset"); Tensor filter = *ctx.Input<Tensor>("Filter"); if (!input_grad && !filter_grad && !offset_grad) return; int groups = ctx.Attr<int>("groups"); int deformable_groups = ctx.Attr<int>("deformable_groups"); int im2col_step = ctx.Attr<int>("im2col_step"); std::vector<int> strides = ctx.Attr<std::vector<int>>("strides"); std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings"); std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations"); auto& dev_ctx = ctx.template device_context<CUDADeviceContext>(); const int batch_size = static_cast<int>(input->dims()[0]); framework::DDim input_shape = framework::slice_ddim(input->dims(), 1, input->dims().size()); std::vector<int64_t> input_shape_vec = 
framework::vectorize(input_shape); std::vector<int64_t> filter_shape_vec(framework::vectorize(filter.dims())); std::vector<int64_t> output_shape_vec( framework::vectorize(output_grad->dims())); std::vector<int64_t> col_buffer_shape_vec(filter_shape_vec.size()); col_buffer_shape_vec[0] = input->dims()[1] * filter.dims()[2] * filter.dims()[3]; col_buffer_shape_vec[1] = im2col_step; for (size_t j = 0; j < filter_shape_vec.size() - 2; ++j) { col_buffer_shape_vec[j + 2] = output_shape_vec[j + 2]; } framework::DDim col_shape(framework::make_ddim(col_buffer_shape_vec)); std::vector<int64_t> output_buffer_shape_vec(1); output_buffer_shape_vec[0] = batch_size * output_shape_vec[1] * output_shape_vec[2] * output_shape_vec[3]; framework::DDim output_shape(framework::make_ddim(output_buffer_shape_vec)); Tensor col_buffer; Tensor output_buffer; col_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(col_shape, dev_ctx); output_buffer = ctx.AllocateTmpTensor<T, CUDADeviceContext>(output_shape, dev_ctx); output_buffer.ShareDataWith(*output_grad); int64_t M = input_shape_vec[0] / groups * filter_shape_vec[2] * filter_shape_vec[3]; int64_t N = im2col_step * output_shape_vec[2] * output_shape_vec[3]; int64_t K = output_shape_vec[1] / groups; framework::DDim weight_3d_shape = {groups, K, M}; framework::DDim out_grad_4d_shape = {batch_size / im2col_step, groups, K, N}; framework::DDim col_buffer_3d_shape = {groups, M, N}; framework::DDim filter_grad_shape = {groups, K, M}; Tensor weight_3d; weight_3d.ShareDataWith(filter).Resize(weight_3d_shape); Tensor out_grad_4d; out_grad_4d.ShareDataWith(output_buffer).Resize(out_grad_4d_shape); Tensor col_buffer_3d; col_buffer_3d.ShareDataWith(col_buffer).Resize(col_buffer_3d_shape); math::SetConstant<CUDADeviceContext, T> set_zero; auto blas = math::GetBlas<CUDADeviceContext, T>(dev_ctx); col_buffer.mutable_data<T>(ctx.GetPlace()); col_buffer_3d.mutable_data<T>(ctx.GetPlace()); out_grad_4d.mutable_data<T>(ctx.GetPlace()); int input_dim = input->numel() / input->dims()[0]; int input_offset_dim = offset.numel() / offset.dims()[0]; if (filter_grad) { filter_grad->mutable_data<T>(ctx.GetPlace()); filter_grad->Resize(filter_grad_shape); set_zero(dev_ctx, filter_grad, static_cast<T>(0)); } if (input_grad) { input_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, input_grad, static_cast<T>(0)); } if (offset_grad) { offset_grad->mutable_data<T>(ctx.GetPlace()); set_zero(dev_ctx, offset_grad, static_cast<T>(0)); } for (int i = 0; i < batch_size / im2col_step; ++i) { Tensor out_grad_3d = out_grad_4d.Slice(i, i + 1).Resize(framework::slice_ddim( out_grad_4d.dims(), 1, out_grad_4d.dims().size())); for (int g = 0; g < groups; ++g) { Tensor weight_3d_slice = weight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( weight_3d.dims(), 1, weight_3d.dims().size())); Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); blas.MatMul(weight_3d_slice, true, out_grad_3d_slice, false, T(1.0), &col_buffer_3d_slice, T(0.0)); } col_buffer.Resize(col_shape); T* col_buffer_ptr = col_buffer.data<T>(); const T* input_ptr = input->data<T>(); const T* offset_ptr = offset.data<T>(); if (offset_grad) { T* offset_grad_ptr = offset_grad->data<T>(); // get grad of offset DeformableCol2imCoord( dev_ctx, col_buffer_ptr, input_ptr + i * im2col_step * input_dim, offset_ptr + i * 
im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, offset_grad_ptr + i * im2col_step * input_offset_dim); } if (input_grad) { T* input_grad_ptr = input_grad->data<T>(); // get grad of input DeformableCol2im(dev_ctx, col_buffer_ptr, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, input_grad_ptr + i * im2col_step * input_dim); input_grad->Resize(input->dims()); } DeformableIm2col(dev_ctx, input_ptr + i * im2col_step * input_dim, offset_ptr + i * im2col_step * input_offset_dim, input_shape_vec, col_buffer_shape_vec, filter_shape_vec, paddings, strides, dilations, deformable_groups, col_buffer_ptr); col_buffer_3d.Resize(col_buffer_3d_shape); if (filter_grad) { Tensor dweight_3d; dweight_3d = ctx.AllocateTmpTensor<T, CUDADeviceContext>( filter_grad_shape, dev_ctx); for (int g = 0; g < groups; ++g) { Tensor out_grad_3d_slice = out_grad_3d.Slice(g, g + 1).Resize(framework::slice_ddim( out_grad_3d.dims(), 1, out_grad_3d.dims().size())); Tensor col_buffer_3d_slice = col_buffer_3d.Slice(g, g + 1).Resize(framework::slice_ddim( col_buffer_3d.dims(), 1, col_buffer_3d.dims().size())); Tensor dweight_3d_slice = dweight_3d.Slice(g, g + 1).Resize(framework::slice_ddim( dweight_3d.dims(), 1, dweight_3d.dims().size())); blas.MatMul(out_grad_3d_slice, false, col_buffer_3d_slice, true, T(1.0), &dweight_3d_slice, T(0.0)); } FilterGradAddupCUDAKernel<T><<<NumBlock(dweight_3d.numel()), kNumCUDAThread, 0, dev_ctx.stream()>>>( dweight_3d.numel(), groups, K, M, dweight_3d.data<T>(), filter_grad->data<T>()); } } if (filter_grad) { filter_grad->Resize(filter.dims()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(deformable_conv_v1, ops::DeformableConvV1CUDAKernel<float>, ops::DeformableConvV1CUDAKernel<double>); REGISTER_OP_CUDA_KERNEL(deformable_conv_v1_grad, ops::DeformableConvV1GradCUDAKernel<float>, ops::DeformableConvV1GradCUDAKernel<double>);
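For reference, a minimal host-side sketch of the per-group GEMM shapes the forward pass above assembles before calling blas.MatMul; the helper name and the example channel/spatial arguments are assumptions for illustration, not part of the operator.

#include <cstdint>
#include <cstdio>

struct GemmShape { int64_t M, N, K; };

// input: (batch, in_c, in_h, in_w), filter: (out_c, in_c/groups, kh, kw),
// output: (batch, out_c, out_h, out_w)
GemmShape DeformableConvForwardGemmShape(int64_t in_c, int64_t out_c,
                                         int64_t kh, int64_t kw,
                                         int64_t out_h, int64_t out_w,
                                         int64_t groups, int64_t im2col_step) {
  GemmShape s;
  s.M = out_c / groups;               // rows of each weight_3d slice
  s.K = in_c * kh * kw / groups;      // im2col rows per group
  s.N = im2col_step * out_h * out_w;  // im2col columns per batch step
  return s;
}

int main() {
  GemmShape s = DeformableConvForwardGemmShape(64, 128, 3, 3, 56, 56,
                                               /*groups=*/2, /*im2col_step=*/1);
  std::printf("per-group GEMM: M=%lld N=%lld K=%lld\n",
              (long long)s.M, (long long)s.N, (long long)s.K);
  return 0;
}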
4e70f4c6e922285b50fbadb136d1d97206f5da27.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include <conv_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int); void printDiff(float*, float*, int, int); extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); // set seed for rand() srand(2006); // allocate host memory for matrices A and B unsigned int size_A = WIDTH_A * HEIGHT_A; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = (WIDTH_B<WIDTH_PADDING?WIDTH_PADDING:WIDTH_B) * HEIGHT_B; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*) malloc(mem_size_B); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); for (int i=0; i<WIDTH_A; i++) { for (int j=0; j<HEIGHT_A; j++) { if (i<WIDTH_B||j<HEIGHT_B) { h_A[j*WIDTH_A+i] = 0.0f; } } } // allocate device memory float* d_A; CUDA_SAFE_CALL(hipMalloc((void**) &d_A, mem_size_A)); float* d_B; CUDA_SAFE_CALL(hipMalloc((void**) &d_B, mem_size_B)); // copy host memory to device CUDA_SAFE_CALL(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) ); CUDA_SAFE_CALL(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) ); // allocate device memory for result unsigned int size_C = WIDTH_C * HEIGHT_C; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // create and start timer unsigned int timer = 0; // compute reference solution float* reference = (float*) malloc(mem_size_C); computeGold(reference, h_A, h_B, WIDTH_INPUT, HEIGHT_INPUT, HEIGHT_B, WIDTH_B); CUTBoolean res; { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(16, 16); dim3 grid(WIDTH_INPUT / threads.x, HEIGHT_INPUT / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); hipDeviceSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel hipLaunchKernelGGL(( conv_naive), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B); // stop and destroy timer hipDeviceSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) ); printf("conv_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(hipFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); 
printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(32, 1); dim3 grid(WIDTH_INPUT / threads.x, HEIGHT_INPUT / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); hipDeviceSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel hipLaunchKernelGGL(( conv_coalesced), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B); // stop and destroy timer hipDeviceSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) ); printf("conv_coalesced Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(hipFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(hipMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(256, 1); dim3 grid(WIDTH_INPUT / threads.x, HEIGHT_INPUT / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); hipDeviceSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel hipLaunchKernelGGL(( conv_opt), dim3(grid), dim3(threads) , 0, 0, d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B); // stop and destroy timer hipDeviceSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) ); printf("conv_shared_x_256 Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(hipFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); // clean up memory free(h_A); free(h_B); free(h_C); free(reference); CUDA_SAFE_CALL(hipFree(d_A)); CUDA_SAFE_CALL(hipFree(d_B)); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height) { int i,j,k; int error_count=0; for (j=0; j<height; j++) { for (i=0; i<width; i++) { k = j*width+i; if (data1[k] != data2[k]) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]); error_count++; } } } printf(" nTotal Errors = %d n", error_count); }
4e70f4c6e922285b50fbadb136d1d97206f5da27.cu
// includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cutil.h> // includes, kernels #include <conv_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declaration, forward void runTest(int argc, char** argv); void randomInit(float*, int); void printDiff(float*, float*, int, int); extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int, unsigned int); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { runTest(argc, argv); CUT_EXIT(argc, argv); } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// void runTest(int argc, char** argv) { CUT_DEVICE_INIT(argc, argv); // set seed for rand() srand(2006); // allocate host memory for matrices A and B unsigned int size_A = WIDTH_A * HEIGHT_A; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*) malloc(mem_size_A); unsigned int size_B = (WIDTH_B<WIDTH_PADDING?WIDTH_PADDING:WIDTH_B) * HEIGHT_B; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*) malloc(mem_size_B); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); for (int i=0; i<WIDTH_A; i++) { for (int j=0; j<HEIGHT_A; j++) { if (i<WIDTH_B||j<HEIGHT_B) { h_A[j*WIDTH_A+i] = 0.0f; } } } // allocate device memory float* d_A; CUDA_SAFE_CALL(cudaMalloc((void**) &d_A, mem_size_A)); float* d_B; CUDA_SAFE_CALL(cudaMalloc((void**) &d_B, mem_size_B)); // copy host memory to device CUDA_SAFE_CALL(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) ); CUDA_SAFE_CALL(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) ); // allocate device memory for result unsigned int size_C = WIDTH_C * HEIGHT_C; unsigned int mem_size_C = sizeof(float) * size_C; // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); // create and start timer unsigned int timer = 0; // compute reference solution float* reference = (float*) malloc(mem_size_C); computeGold(reference, h_A, h_B, WIDTH_INPUT, HEIGHT_INPUT, HEIGHT_B, WIDTH_B); CUTBoolean res; { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(16, 16); dim3 grid(WIDTH_INPUT / threads.x, HEIGHT_INPUT / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); cudaThreadSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel conv_naive<<< grid, threads >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B); // stop and destroy timer cudaThreadSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); printf("conv_naive Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(cudaFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? 
"PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(32, 1); dim3 grid(WIDTH_INPUT / threads.x, HEIGHT_INPUT / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); cudaThreadSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel conv_coalesced<<< grid, threads >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B); // stop and destroy timer cudaThreadSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); printf("conv_coalesced Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(cudaFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); { free(h_C); h_C = (float*) malloc(mem_size_C); float* d_C; CUDA_SAFE_CALL(cudaMalloc((void**) &d_C, mem_size_C)); // setup execution parameters dim3 threads(256, 1); dim3 grid(WIDTH_INPUT / threads.x, HEIGHT_INPUT / threads.y); CUT_SAFE_CALL(cutCreateTimer(&timer)); cudaThreadSynchronize(); CUT_SAFE_CALL(cutStartTimer(timer)); // execute the kernel conv_opt<<< grid, threads >>>(d_A, d_B, d_C, WIDTH_A, HEIGHT_A, HEIGHT_B, WIDTH_B); // stop and destroy timer cudaThreadSynchronize(); CUT_SAFE_CALL(cutStopTimer(timer)); // check if kernel execution generated and error CUT_CHECK_ERROR("Kernel execution failed"); // copy result from device to host CUDA_SAFE_CALL(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); printf("conv_shared_x_256 Processing time: %f (ms), %f Gflops \n", cutGetTimerValue(timer), 2000.0*HEIGHT_B*WIDTH_B*HEIGHT_INPUT*WIDTH_INPUT/cutGetTimerValue(timer)/1024/1024/1024); CUT_SAFE_CALL(cutDeleteTimer(timer)); CUDA_SAFE_CALL(cudaFree(d_C)); } // check result res = cutCompareL2fe(reference, h_C, size_C, 1e-6f); printf("Test %s \n", (1 == res) ? "PASSED" : "FAILED"); // clean up memory free(h_A); free(h_B); free(h_C); free(reference); CUDA_SAFE_CALL(cudaFree(d_A)); CUDA_SAFE_CALL(cudaFree(d_B)); } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void printDiff(float *data1, float *data2, int width, int height) { int i,j,k; int error_count=0; for (j=0; j<height; j++) { for (i=0; i<width; i++) { k = j*width+i; if (data1[k] != data2[k]) { printf("diff(%d,%d) CPU=%4.4f, GPU=%4.4f n", i,j, data1[k], data2[k]); error_count++; } } } printf(" nTotal Errors = %d n", error_count); }
5e65a52bc34a0ad2134b482c63812697414e92ce.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHAtomics.cuh> #include <THH/THHDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignRotatedForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_center_w = offset_bottom_rois[1] * spatial_scale; T roi_center_h = offset_bottom_rois[2] * spatial_scale; T roi_width = offset_bottom_rois[3] * spatial_scale; T roi_height = offset_bottom_rois[4] * spatial_scale; T theta = offset_bottom_rois[5] * M_PI / 180.0; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 roi_width = max(roi_width, (T)1.); roi_height = max(roi_height, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. 
T roi_start_h = -roi_height / 2.0; T roi_start_w = -roi_width / 2.0; T cosTheta = cos(theta); T sinTheta = sin(theta); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // Rotate by theta around the center and translate T x = xx * cosTheta + yy * sinTheta + roi_center_w; T y = yy * cosTheta - xx * sinTheta + roi_center_h; T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignRotatedBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_center_w = offset_bottom_rois[1] * spatial_scale; T roi_center_h = offset_bottom_rois[2] * spatial_scale; T roi_width = offset_bottom_rois[3] * spatial_scale; T roi_height = offset_bottom_rois[4] * spatial_scale; T theta = offset_bottom_rois[5] * M_PI / 180.0; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 roi_width = max(roi_width, (T)1.); roi_height = max(roi_height, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = 
static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. T roi_start_h = -roi_height / 2.0; T roi_start_w = -roi_width / 2.0; T cosTheta = cos(theta); T sinTheta = sin(theta); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // Rotate by theta around the center and translate T x = xx * cosTheta + yy * sinTheta + roi_center_w; T y = yy * cosTheta - xx * sinTheta + roi_center_h; T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignRotatedBackward at::Tensor ROIAlignRotated_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(hipGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignRotated_forward", [&] { hipLaunchKernelGGL(( RoIAlignRotatedForward<scalar_t>), dim3(grid), dim3(block), 0, stream, output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save 
memory at::Tensor ROIAlignRotated_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); dim3 grid(::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(hipGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlignRotated_backward", [&] { hipLaunchKernelGGL(( RoIAlignRotatedBackwardFeature<scalar_t>), dim3(grid), dim3(block), 0, stream, grad.numel(), grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(hipGetLastError()); return grad_input; }
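A host-side sketch of the per-sample coordinate transform used by RoIAlignRotatedForward above: sample offsets (xx, yy) are expressed relative to the ROI center, rotated by theta, then translated. The helper name is hypothetical; the expressions mirror the kernel.

#include <cmath>
#include <cstdio>

void RotatedSamplePoint(float roi_center_w, float roi_center_h,
                        float xx, float yy, float theta_degrees,
                        float* x, float* y) {
  const float kPi = 3.14159265358979323846f;
  const float theta = theta_degrees * kPi / 180.0f;
  const float cosTheta = std::cos(theta);
  const float sinTheta = std::sin(theta);
  *x = xx * cosTheta + yy * sinTheta + roi_center_w;  // same expressions as the kernel
  *y = yy * cosTheta - xx * sinTheta + roi_center_h;
}

int main() {
  float x, y;
  RotatedSamplePoint(50.0f, 40.0f, /*xx=*/-8.0f, /*yy=*/3.0f, /*theta_degrees=*/30.0f, &x, &y);
  std::printf("sample maps to (%f, %f)\n", x, y);
  return 0;
}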
5e65a52bc34a0ad2134b482c63812697414e92ce.cu
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCAtomics.cuh> #include <THC/THCDeviceUtils.cuh> // TODO make it in a common file #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) template <typename T> __device__ T bilinear_interpolate(const T* bottom_data, const int height, const int width, T y, T x, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int) y; int x_low = (int) x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // do bilinear interpolation T v1 = bottom_data[y_low * width + x_low]; T v2 = bottom_data[y_low * width + x_high]; T v3 = bottom_data[y_high * width + x_low]; T v4 = bottom_data[y_high * width + x_high]; T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); return val; } template <typename T> __global__ void RoIAlignRotatedForward(const int nthreads, const T* bottom_data, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, const T* bottom_rois, T* top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_center_w = offset_bottom_rois[1] * spatial_scale; T roi_center_h = offset_bottom_rois[2] * spatial_scale; T roi_width = offset_bottom_rois[3] * spatial_scale; T roi_height = offset_bottom_rois[4] * spatial_scale; T theta = offset_bottom_rois[5] * M_PI / 180.0; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 roi_width = max(roi_width, (T)1.); roi_height = max(roi_height, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); const T* offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. 
T roi_start_h = -roi_height / 2.0; T roi_start_w = -roi_width / 2.0; T cosTheta = cos(theta); T sinTheta = sin(theta); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // Rotate by theta around the center and translate T x = xx * cosTheta + yy * sinTheta + roi_center_w; T y = yy * cosTheta - xx * sinTheta + roi_center_h; T val = bilinear_interpolate(offset_bottom_data, height, width, y, x, index); output_val += val; } } output_val /= count; top_data[index] = output_val; } } template <typename T> __device__ void bilinear_interpolate_gradient( const int height, const int width, T y, T x, T & w1, T & w2, T & w3, T & w4, int & x_low, int & x_high, int & y_low, int & y_high, const int index /* index for debug only*/) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { //empty w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int) y; x_low = (int) x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (T) y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (T) x_low; } else { x_high = x_low + 1; } T ly = y - y_low; T lx = x - x_low; T hy = 1. - ly, hx = 1. - lx; // reference in forward // T v1 = bottom_data[y_low * width + x_low]; // T v2 = bottom_data[y_low * width + x_high]; // T v3 = bottom_data[y_high * width + x_low]; // T v4 = bottom_data[y_high * width + x_high]; // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename T> __global__ void RoIAlignRotatedBackwardFeature(const int nthreads, const T* top_diff, const int num_rois, const T spatial_scale, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int sampling_ratio, T* bottom_diff, const T* bottom_rois) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the pooled output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const T* offset_bottom_rois = bottom_rois + n * 6; int roi_batch_ind = offset_bottom_rois[0]; // Do not using rounding; this implementation detail is critical T roi_center_w = offset_bottom_rois[1] * spatial_scale; T roi_center_h = offset_bottom_rois[2] * spatial_scale; T roi_width = offset_bottom_rois[3] * spatial_scale; T roi_height = offset_bottom_rois[4] * spatial_scale; T theta = offset_bottom_rois[5] * M_PI / 180.0; // T roi_start_w = round(offset_bottom_rois[1] * spatial_scale); // T roi_start_h = round(offset_bottom_rois[2] * spatial_scale); // T roi_end_w = round(offset_bottom_rois[3] * spatial_scale); // T roi_end_h = round(offset_bottom_rois[4] * spatial_scale); // Force malformed ROIs to be 1x1 roi_width = max(roi_width, (T)1.); roi_height = max(roi_height, (T)1.); T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); T bin_size_w = 
static_cast<T>(roi_width) / static_cast<T>(pooled_width); T* offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T* offset_top_diff = top_diff + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y). // Appropriate translation needs to be applied after. T roi_start_h = -roi_height / 2.0; T roi_start_w = -roi_width / 2.0; T cosTheta = cos(theta); T sinTheta = sin(theta); // We do average (integral) pooling inside a bin const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 for (int iy = 0; iy < roi_bin_grid_h; iy ++) // e.g., iy = 0, 1 { const T yy = roi_start_h + ph * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix ++) { const T xx = roi_start_w + pw * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // Rotate by theta around the center and translate T x = xx * cosTheta + yy * sinTheta + roi_center_w; T y = yy * cosTheta - xx * sinTheta + roi_center_h; T w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient(height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high, index); T g1 = top_diff_this_bin * w1 / count; T g2 = top_diff_this_bin * w2 / count; T g3 = top_diff_this_bin * w3 / count; T g4 = top_diff_this_bin * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1)); atomicAdd(offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2)); atomicAdd(offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3)); atomicAdd(offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4)); } // if } // ix } // iy } // CUDA_1D_KERNEL_LOOP } // RoIAlignRotatedBackward at::Tensor ROIAlignRotated_forward_cuda(const at::Tensor& input, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int sampling_ratio) { AT_ASSERTM(input.type().is_cuda(), "input must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto channels = input.size(1); auto height = input.size(2); auto width = input.size(3); auto output = at::empty({num_rois, channels, pooled_height, pooled_width}, input.options()); auto output_size = num_rois * pooled_height * pooled_width * channels; cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)output_size, 512L), 4096L)); dim3 block(512); if (output.numel() == 0) { THCudaCheck(cudaGetLastError()); return output; } AT_DISPATCH_FLOATING_TYPES(input.type(), "ROIAlignRotated_forward", [&] { RoIAlignRotatedForward<scalar_t><<<grid, block, 0, stream>>>( output_size, input.contiguous().data<scalar_t>(), spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, rois.contiguous().data<scalar_t>(), output.data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return output; } // TODO remove the dependency on input and use instead its sizes -> save memory at::Tensor 
ROIAlignRotated_backward_cuda(const at::Tensor& grad, const at::Tensor& rois, const float spatial_scale, const int pooled_height, const int pooled_width, const int batch_size, const int channels, const int height, const int width, const int sampling_ratio) { AT_ASSERTM(grad.type().is_cuda(), "grad must be a CUDA tensor"); AT_ASSERTM(rois.type().is_cuda(), "rois must be a CUDA tensor"); auto num_rois = rois.size(0); auto grad_input = at::zeros({batch_size, channels, height, width}, grad.options()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); dim3 grid(std::min(THCCeilDiv((long)grad.numel(), 512L), 4096L)); dim3 block(512); // handle possibly empty gradients if (grad.numel() == 0) { THCudaCheck(cudaGetLastError()); return grad_input; } AT_DISPATCH_FLOATING_TYPES(grad.type(), "ROIAlignRotated_backward", [&] { RoIAlignRotatedBackwardFeature<scalar_t><<<grid, block, 0, stream>>>( grad.numel(), grad.contiguous().data<scalar_t>(), num_rois, spatial_scale, channels, height, width, pooled_height, pooled_width, sampling_ratio, grad_input.data<scalar_t>(), rois.contiguous().data<scalar_t>()); }); THCudaCheck(cudaGetLastError()); return grad_input; }
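An illustrative call site for the forward entry point (tensor shapes and values are assumptions): each ROI row holds (batch_index, center_x, center_y, width, height, angle_in_degrees), which is exactly what the kernel reads from bottom_rois + n * 6.

#include <ATen/ATen.h>

at::Tensor ROIAlignRotated_forward_cuda(const at::Tensor& input, const at::Tensor& rois,
                                        const float spatial_scale, const int pooled_height,
                                        const int pooled_width, const int sampling_ratio);

at::Tensor run_roi_align_rotated_example() {
  // (N, C, H, W) feature map on the GPU
  at::Tensor input = at::randn({2, 256, 50, 68}, at::TensorOptions().device(at::kCUDA));
  // one ROI on image 0: center (200, 150), an 80x40 box rotated by 30 degrees
  float roi_data[6] = {0.0f, 200.0f, 150.0f, 80.0f, 40.0f, 30.0f};
  at::Tensor rois = at::from_blob(roi_data, {1, 6}).clone().to(at::kCUDA);
  // 1/16 feature stride, 7x7 output bins, 2 samples per bin axis
  return ROIAlignRotated_forward_cuda(input, rois, 1.0f / 16.0f, 7, 7, 2);
}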
4ea5ede87bfb1326f122752d78a9493c6ea9fcc7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" /* #ifdef LOCKFREE __device__ inline void lockfree_barrier(volatile uint *counter, int *partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx==0) { // if(bx>0) while(counter[bx-1] != 1) {} if(bx>0) while(counter[bx] != 1) {} } __syncthreads(); if(tx==0) { predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __threadfence(); if(tx==0) { // counter[bx] = 1; counter[bx+1] = 1; } // __syncthreads(); } #endif //*/ ///* #ifdef LOCKFREE __device__ inline void lockfree_barrier(volatile unsigned *counter, int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx==0) { // while(1) { // unsigned flag = counter[bx]; // if(bx==0 || flag!=0) break; // } // end while if(bx>0) while(counter[bx] != 1) {} predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __threadfence(); // __syncthreads(); if(tx==0) { counter[bx+1] = 1; } } #endif //*/ #ifdef ATOMIC __device__ inline void atomic_barrier(unsigned *counter, int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx==0) { unsigned flag = 0; while(1) { flag = atomicAdd(counter+bx, flag); if(bx==0 || flag!=0) { break; } } // end while predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __threadfence(); if(tx==0) { atomicAdd(counter+bx+1, 1); } } #endif #ifdef FREE __device__ inline void free_barrier(int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { __syncthreads(); if(tx==0) { // predicate[0] += predicate[0]; predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __syncthreads(); } #endif #ifdef UNSAFE __device__ inline void unsafe_barrier(volatile int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { __syncthreads(); if(tx==0) { int cc=0; while(1) { cc = partial_sums[bx]; if(bx==0 || cc!=0) { break; } } // printf("bx=%d, cc=%d, current_value=%d\n", bx, cc, current_value); predicate[0] = cc; partial_sums[bx+1] = predicate[0] + current_value; // printf("partial_sums[%d]=%d\n", bx+1, partial_sums[bx+1]); } __syncthreads(); } #endif #ifdef FORWARD //double the size of partial_sums __device__ inline void forward_barrier(volatile int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx<2) { int cc; while(1) { cc = partial_sums[2*bx+tx]; // cc 0: flag 1: value if(tx==1) predicate[0]=cc; //check value if( bx==0|| predicate[0]!=0) { //assume initialized as 0 break; } //if value == initialized //check flag if(tx==0) predicate[0]=cc; if(predicate[0]==1) break; //repeat polling } // end while //got value if(tx==1) predicate[0]=cc; if(tx==0) { //if value is equal to initialized if((predicate[0]+current_value)==0) { //write flag partial_sums[2*bx+2]=1; } else { //write value partial_sums[2*bx+3]=predicate[0]+current_value; } } } } #endif #ifdef FORWARD_ATOMIC //double the size of partial_sums __device__ inline void forward_atomic_barrier(int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx<2) { int cc=0; while(1) { cc = atomicExch(partial_sums+2*bx+tx,cc); // cc 0: flag 1: value if(tx==1) predicate[0]=cc; //check value if( bx==0|| predicate[0]!=0) { //assume initialized as 0 break; } //if value == initialized //check flag if(tx==0) predicate[0]=cc; if(predicate[0]==1) break; //repeat polling } //got value if(tx==1) predicate[0]=cc; if(tx==0) { //if value is equal to initialized 
if((predicate[0]+current_value)==0) { //write flag // partial_sums[2*bx+2]=1; atomicAdd(partial_sums+2*bx+2,1); } else { //write value // partial_sums[2*bx+3]=predicate[0]+warp_sums[15]; atomicAdd(partial_sums+2*bx+3,predicate[0]+current_value); } } } } #endif
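A usage sketch (not from common.h) of how the LOCKFREE barrier above can chain block totals into a running sum. The kernel is an assumption for illustration: it expects 256 threads per block, zero-initialized counter/partial_sums buffers with one extra slot each, and it inherits the barrier's reliance on lower-indexed blocks making forward progress.

#ifdef LOCKFREE
__global__ void chained_block_sum(const int* in, int* out, int n,
                                  volatile unsigned* counter, int* partial_sums) {
  __shared__ int tile[256];         // launch with blockDim.x == 256 (power of two)
  __shared__ int block_prefix[1];   // exclusive sum of all earlier block totals
  const unsigned bx = blockIdx.x;
  const unsigned tx = threadIdx.x;
  const int i = bx * blockDim.x + tx;

  // reduce this block's tile in shared memory
  tile[tx] = (i < n) ? in[i] : 0;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {
    if (tx < (unsigned)s) tile[tx] += tile[tx + s];
    __syncthreads();
  }

  // publish this block's total into partial_sums[bx+1] and read the prefix for bx
  lockfree_barrier(counter, partial_sums, block_prefix, tile[0], bx, tx);
  __syncthreads();

  // each thread writes the inclusive running sum of block totals up to its block
  // (the per-element scan step is omitted to keep the sketch short)
  if (i < n) out[i] = block_prefix[0] + tile[0];
}
#endif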
4ea5ede87bfb1326f122752d78a9493c6ea9fcc7.cu
#include "common.h" /* #ifdef LOCKFREE __device__ inline void lockfree_barrier(volatile uint *counter, int *partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx==0) { // if(bx>0) while(counter[bx-1] != 1) {} if(bx>0) while(counter[bx] != 1) {} } __syncthreads(); if(tx==0) { predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __threadfence(); if(tx==0) { // counter[bx] = 1; counter[bx+1] = 1; } // __syncthreads(); } #endif //*/ ///* #ifdef LOCKFREE __device__ inline void lockfree_barrier(volatile unsigned *counter, int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx==0) { // while(1) { // unsigned flag = counter[bx]; // if(bx==0 || flag!=0) break; // } // end while if(bx>0) while(counter[bx] != 1) {} predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __threadfence(); // __syncthreads(); if(tx==0) { counter[bx+1] = 1; } } #endif //*/ #ifdef ATOMIC __device__ inline void atomic_barrier(unsigned *counter, int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx==0) { unsigned flag = 0; while(1) { flag = atomicAdd(counter+bx, flag); if(bx==0 || flag!=0) { break; } } // end while predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __threadfence(); if(tx==0) { atomicAdd(counter+bx+1, 1); } } #endif #ifdef FREE __device__ inline void free_barrier(int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { __syncthreads(); if(tx==0) { // predicate[0] += predicate[0]; predicate[0] = partial_sums[bx]; partial_sums[bx+1] = predicate[0] + current_value; } __syncthreads(); } #endif #ifdef UNSAFE __device__ inline void unsafe_barrier(volatile int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { __syncthreads(); if(tx==0) { int cc=0; while(1) { cc = partial_sums[bx]; if(bx==0 || cc!=0) { break; } } // printf("bx=%d, cc=%d, current_value=%d\n", bx, cc, current_value); predicate[0] = cc; partial_sums[bx+1] = predicate[0] + current_value; // printf("partial_sums[%d]=%d\n", bx+1, partial_sums[bx+1]); } __syncthreads(); } #endif #ifdef FORWARD //double the size of partial_sums __device__ inline void forward_barrier(volatile int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx<2) { int cc; while(1) { cc = partial_sums[2*bx+tx]; // cc 0: flag 1: value if(tx==1) predicate[0]=cc; //check value if( bx==0|| predicate[0]!=0) { //assume initialized as 0 break; } //if value == initialized //check flag if(tx==0) predicate[0]=cc; if(predicate[0]==1) break; //repeat polling } // end while //got value if(tx==1) predicate[0]=cc; if(tx==0) { //if value is equal to initialized if((predicate[0]+current_value)==0) { //write flag partial_sums[2*bx+2]=1; } else { //write value partial_sums[2*bx+3]=predicate[0]+current_value; } } } } #endif #ifdef FORWARD_ATOMIC //double the size of partial_sums __device__ inline void forward_atomic_barrier(int* partial_sums, int *predicate, int current_value, uint bx, uint tx) { if(tx<2) { int cc=0; while(1) { cc = atomicExch(partial_sums+2*bx+tx,cc); // cc 0: flag 1: value if(tx==1) predicate[0]=cc; //check value if( bx==0|| predicate[0]!=0) { //assume initialized as 0 break; } //if value == initialized //check flag if(tx==0) predicate[0]=cc; if(predicate[0]==1) break; //repeat polling } //got value if(tx==1) predicate[0]=cc; if(tx==0) { //if value is equal to initialized if((predicate[0]+current_value)==0) { //write flag // partial_sums[2*bx+2]=1; 
atomicAdd(partial_sums+2*bx+2,1); } else { //write value // partial_sums[2*bx+3]=predicate[0]+warp_sums[15]; atomicAdd(partial_sums+2*bx+3,predicate[0]+current_value); } } } } #endif
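Host-side setup sketch (assumed, not in the file) for the bookkeeping buffers these barriers poll: block bx publishes into slot bx+1, so each buffer needs one extra entry; the FORWARD variants store an interleaved (flag, value) pair per block; and every scheme assumes the buffers start zeroed.

#include <cuda_runtime.h>

struct ScanBarrierBuffers {
  unsigned* counter;   // LOCKFREE / ATOMIC: one flag per block, plus one
  int* partial_sums;   // running block sums, plus one
  int* forward_pairs;  // FORWARD variants: (flag, value) pair per block, plus one pair
};

cudaError_t AllocScanBarrierBuffers(int num_blocks, ScanBarrierBuffers* b) {
  cudaError_t err;
  if ((err = cudaMalloc(&b->counter, (num_blocks + 1) * sizeof(unsigned))) != cudaSuccess) return err;
  if ((err = cudaMalloc(&b->partial_sums, (num_blocks + 1) * sizeof(int))) != cudaSuccess) return err;
  if ((err = cudaMalloc(&b->forward_pairs, 2 * (num_blocks + 1) * sizeof(int))) != cudaSuccess) return err;
  // all schemes rely on the buffers starting at zero
  if ((err = cudaMemset(b->counter, 0, (num_blocks + 1) * sizeof(unsigned))) != cudaSuccess) return err;
  if ((err = cudaMemset(b->partial_sums, 0, (num_blocks + 1) * sizeof(int))) != cudaSuccess) return err;
  return cudaMemset(b->forward_pairs, 0, 2 * (num_blocks + 1) * sizeof(int));
}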
93b9da86f5ff3c352b0902e7608f23a72e240580.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph.hpp> #include <rmm/thrust_rmm_allocator.h> #include <utilities/cuda_utils.cuh> #include <utilities/graph_utils.cuh> //#define TIMING #ifdef TIMING #include <utilities/high_res_timer.hpp> #endif #include <converters/COOtoCSR.cuh> namespace cugraph { namespace detail { namespace { // anonym. constexpr int BLOCK_SIZE_1D = 64; } template <typename vertex_t, typename edge_t, typename weight_t> __global__ // void compute_vertex_sums(vertex_t n_vertex, edge_t const *offsets, weight_t const *weights, weight_t *output) { int src = blockDim.x * blockIdx.x + threadIdx.x; if ((src < n_vertex)) { weight_t sum{0.0}; for (int i = offsets[src]; i < offsets[src + 1]; ++i) { sum += weights[i]; } output[src] = sum; } } template <typename vertex_t, typename edge_t, typename weight_t> weight_t modularity(weight_t total_edge_weight, weight_t resolution, GraphCSRView<vertex_t, edge_t, weight_t> const &graph, vertex_t const *d_cluster, hipStream_t stream) { vertex_t n_verts = graph.number_of_vertices; rmm::device_vector<weight_t> inc(n_verts, weight_t{0.0}); rmm::device_vector<weight_t> deg(n_verts, weight_t{0.0}); edge_t const *d_offsets = graph.offsets; vertex_t const *d_indices = graph.indices; weight_t const *d_weights = graph.edge_data; weight_t *d_inc = inc.data().get(); weight_t *d_deg = deg.data().get(); // FIXME: Already have weighted degree computed in main loop, // could pass that in rather than computing d_deg... 
which // would save an atomicAdd (synchronization) // thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(graph.number_of_vertices), [d_inc, d_deg, d_offsets, d_indices, d_weights, d_cluster] __device__(vertex_t v) { vertex_t community = d_cluster[v]; weight_t increase{0.0}; weight_t degree{0.0}; for (edge_t loc = d_offsets[v]; loc < d_offsets[v + 1]; ++loc) { vertex_t neighbor = d_indices[loc]; degree += d_weights[loc]; if (d_cluster[neighbor] == community) { increase += d_weights[loc]; } } if (degree > weight_t{0.0}) atomicAdd(d_deg + community, degree); if (increase > weight_t{0.0}) atomicAdd(d_inc + community, increase); }); weight_t Q = thrust::transform_reduce( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(graph.number_of_vertices), [d_deg, d_inc, total_edge_weight, resolution] __device__(vertex_t community) { #ifdef DEBUG printf(" d_inc[%d] = %g, d_deg = %g, return = %g\n", community, d_inc[community], d_deg[community], ((d_inc[community] / total_edge_weight) - resolution * pow(d_deg[community] / total_edge_weight, 2))); #endif return ((d_inc[community] / total_edge_weight) - resolution * (d_deg[community] * d_deg[community]) / (total_edge_weight * total_edge_weight)); }, weight_t{0.0}, thrust::plus<weight_t>()); return Q; } template <typename vertex_t, typename edge_t, typename weight_t> void generate_superverticies_graph(cugraph::GraphCSRView<vertex_t, edge_t, weight_t> &current_graph, rmm::device_vector<vertex_t> &src_indices_v, vertex_t new_number_of_vertices, rmm::device_vector<vertex_t> &cluster_v, hipStream_t stream) { rmm::device_vector<vertex_t> new_src_v(current_graph.number_of_edges); rmm::device_vector<vertex_t> new_dst_v(current_graph.number_of_edges); rmm::device_vector<weight_t> new_weight_v(current_graph.number_of_edges); vertex_t *d_old_src = src_indices_v.data().get(); vertex_t *d_old_dst = current_graph.indices; weight_t *d_old_weight = current_graph.edge_data; vertex_t *d_new_src = new_src_v.data().get(); vertex_t *d_new_dst = new_dst_v.data().get(); vertex_t *d_clusters = cluster_v.data().get(); weight_t *d_new_weight = new_weight_v.data().get(); // // Renumber the COO // thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(current_graph.number_of_edges), [d_old_src, d_old_dst, d_new_src, d_new_dst, d_clusters, d_new_weight, d_old_weight] __device__( edge_t e) { d_new_src[e] = d_clusters[d_old_src[e]]; d_new_dst[e] = d_clusters[d_old_dst[e]]; d_new_weight[e] = d_old_weight[e]; }); thrust::stable_sort_by_key( rmm::exec_policy(stream)->on(stream), d_new_dst, d_new_dst + current_graph.number_of_edges, thrust::make_zip_iterator(thrust::make_tuple(d_new_src, d_new_weight))); thrust::stable_sort_by_key( rmm::exec_policy(stream)->on(stream), d_new_src, d_new_src + current_graph.number_of_edges, thrust::make_zip_iterator(thrust::make_tuple(d_new_dst, d_new_weight))); // // Now we reduce by key to combine the weights of duplicate // edges. 
// auto start = thrust::make_zip_iterator(thrust::make_tuple(d_new_src, d_new_dst)); auto new_start = thrust::make_zip_iterator(thrust::make_tuple(d_old_src, d_old_dst)); auto new_end = thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), start, start + current_graph.number_of_edges, d_new_weight, new_start, d_old_weight, thrust::equal_to<thrust::tuple<vertex_t, vertex_t>>(), thrust::plus<weight_t>()); current_graph.number_of_edges = thrust::distance(new_start, new_end.first); current_graph.number_of_vertices = new_number_of_vertices; detail::fill_offset(d_old_src, current_graph.offsets, new_number_of_vertices, current_graph.number_of_edges, stream); CHECK_CUDA(stream); src_indices_v.resize(current_graph.number_of_edges); } template <typename vertex_t, typename edge_t, typename weight_t> void compute_vertex_sums(GraphCSRView<vertex_t, edge_t, weight_t> const &graph, rmm::device_vector<weight_t> &sums, hipStream_t stream) { dim3 block_size_1d = dim3((graph.number_of_vertices + BLOCK_SIZE_1D * 4 - 1) / BLOCK_SIZE_1D * 4, 1, 1); dim3 grid_size_1d = dim3(BLOCK_SIZE_1D * 4, 1, 1); hipLaunchKernelGGL(( compute_vertex_sums<vertex_t, edge_t, weight_t>), dim3(block_size_1d), dim3(grid_size_1d), 0, 0, graph.number_of_vertices, graph.offsets, graph.edge_data, sums.data().get()); } template <typename vertex_t> vertex_t renumber_clusters(vertex_t graph_num_vertices, rmm::device_vector<vertex_t> &cluster, rmm::device_vector<vertex_t> &temp_array, rmm::device_vector<vertex_t> &cluster_inverse, vertex_t *cluster_vec, hipStream_t stream) { // // Now we're going to renumber the clusters from 0 to (k-1), where k is the number of // clusters in this level of the dendogram. // thrust::copy(cluster.begin(), cluster.end(), temp_array.begin()); thrust::sort(temp_array.begin(), temp_array.end()); auto tmp_end = thrust::unique(temp_array.begin(), temp_array.end()); vertex_t old_num_clusters = cluster.size(); vertex_t new_num_clusters = thrust::distance(temp_array.begin(), tmp_end); cluster.resize(new_num_clusters); temp_array.resize(new_num_clusters); thrust::fill(cluster_inverse.begin(), cluster_inverse.end(), vertex_t{-1}); vertex_t *d_tmp_array = temp_array.data().get(); vertex_t *d_cluster_inverse = cluster_inverse.data().get(); vertex_t *d_cluster = cluster.data().get(); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(new_num_clusters), [d_tmp_array, d_cluster_inverse] __device__(vertex_t i) { d_cluster_inverse[d_tmp_array[i]] = i; }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(old_num_clusters), [d_cluster, d_cluster_inverse] __device__(vertex_t i) { d_cluster[i] = d_cluster_inverse[d_cluster[i]]; }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(graph_num_vertices), [cluster_vec, d_cluster] __device__(vertex_t i) { cluster_vec[i] = d_cluster[cluster_vec[i]]; }); return new_num_clusters; } template <typename vertex_t, typename edge_t, typename weight_t> void compute_delta_modularity(weight_t total_edge_weight, weight_t resolution, GraphCSRView<vertex_t, edge_t, weight_t> const &graph, rmm::device_vector<vertex_t> const &src_indices_v, rmm::device_vector<weight_t> const &vertex_weights_v, rmm::device_vector<weight_t> const &cluster_weights_v, rmm::device_vector<vertex_t> const &cluster_v, rmm::device_vector<vertex_t> 
&cluster_hash_v, rmm::device_vector<weight_t> &delta_Q_v, rmm::device_vector<weight_t> &tmp_size_V_v, hipStream_t stream) { vertex_t const *d_src_indices = src_indices_v.data().get(); vertex_t const *d_dst_indices = graph.indices; edge_t const *d_offsets = graph.offsets; weight_t const *d_weights = graph.edge_data; vertex_t const *d_cluster = cluster_v.data().get(); weight_t const *d_vertex_weights = vertex_weights_v.data().get(); weight_t const *d_cluster_weights = cluster_weights_v.data().get(); vertex_t *d_cluster_hash = cluster_hash_v.data().get(); weight_t *d_delta_Q = delta_Q_v.data().get(); weight_t *d_old_cluster_sum = tmp_size_V_v.data().get(); weight_t *d_new_cluster_sum = d_delta_Q; thrust::fill(cluster_hash_v.begin(), cluster_hash_v.end(), vertex_t{-1}); thrust::fill(delta_Q_v.begin(), delta_Q_v.end(), weight_t{0.0}); thrust::fill(tmp_size_V_v.begin(), tmp_size_V_v.end(), weight_t{0.0}); // // For each source vertex, we're going to build a hash // table to the destination cluster ids. We can use // the offsets ranges to define the bounds of the hash // table. // thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [d_src_indices, d_dst_indices, d_cluster, d_offsets, d_cluster_hash, d_new_cluster_sum, d_weights, d_old_cluster_sum] __device__(edge_t loc) { vertex_t src = d_src_indices[loc]; vertex_t dst = d_dst_indices[loc]; if (src != dst) { vertex_t old_cluster = d_cluster[src]; vertex_t new_cluster = d_cluster[dst]; edge_t hash_base = d_offsets[src]; edge_t n_edges = d_offsets[src + 1] - hash_base; int h = (new_cluster % n_edges); edge_t offset = hash_base + h; while (d_cluster_hash[offset] != new_cluster) { if (d_cluster_hash[offset] == -1) { atomicCAS(d_cluster_hash + offset, -1, new_cluster); } else { h = (h + 1) % n_edges; offset = hash_base + h; } } atomicAdd(d_new_cluster_sum + offset, d_weights[loc]); if (old_cluster == new_cluster) atomicAdd(d_old_cluster_sum + src, d_weights[loc]); } }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [total_edge_weight, resolution, d_cluster_hash, d_src_indices, d_cluster, d_vertex_weights, d_delta_Q, d_new_cluster_sum, d_old_cluster_sum, d_cluster_weights] __device__(edge_t loc) { vertex_t new_cluster = d_cluster_hash[loc]; if (new_cluster >= 0) { vertex_t src = d_src_indices[loc]; vertex_t old_cluster = d_cluster[src]; weight_t k_k = d_vertex_weights[src]; weight_t a_old = d_cluster_weights[old_cluster]; weight_t a_new = d_cluster_weights[new_cluster]; // NOTE: d_delta_Q and d_new_cluster_sum are aliases // for same device array to save memory d_delta_Q[loc] = 2 * (((d_new_cluster_sum[loc] - d_old_cluster_sum[src]) / total_edge_weight) - resolution * (a_new * k_k - a_old * k_k + k_k * k_k) / (total_edge_weight * total_edge_weight)); #ifdef DEBUG printf( "src = %d, new cluster = %d, d_delta_Q[%d] = %g, new_cluster_sum = %g, " "old_cluster_sum = %g, a_new = %g, a_old = %g, k_k = %g\n", src, new_cluster, loc, d_delta_Q[loc], d_new_cluster_sum[loc], d_old_cluster_sum[src], a_new, a_old, k_k); #endif } else { d_delta_Q[loc] = weight_t{0.0}; } }); } template <typename vertex_t, typename edge_t, typename weight_t> weight_t update_clustering_by_delta_modularity( weight_t total_edge_weight, weight_t resolution, GraphCSRView<vertex_t, edge_t, weight_t> const &graph, rmm::device_vector<vertex_t> const &src_indices, 
rmm::device_vector<weight_t> const &vertex_weights, rmm::device_vector<weight_t> &cluster_weights, rmm::device_vector<vertex_t> &cluster, hipStream_t stream) { rmm::device_vector<vertex_t> next_cluster(cluster); rmm::device_vector<weight_t> delta_Q(graph.number_of_edges); rmm::device_vector<vertex_t> cluster_hash(graph.number_of_edges); rmm::device_vector<weight_t> old_cluster_sum(graph.number_of_vertices); vertex_t *d_cluster_hash = cluster_hash.data().get(); vertex_t *d_cluster = cluster.data().get(); weight_t const *d_vertex_weights = vertex_weights.data().get(); weight_t *d_cluster_weights = cluster_weights.data().get(); weight_t *d_delta_Q = delta_Q.data().get(); weight_t new_Q = modularity<vertex_t, edge_t, weight_t>( total_edge_weight, resolution, graph, cluster.data().get(), stream); weight_t cur_Q = new_Q - 1; // To avoid the potential of having two vertices swap clusters // we will only allow vertices to move up (true) or down (false) // during each iteration of the loop bool up_down = true; while (new_Q > (cur_Q + 0.0001)) { cur_Q = new_Q; compute_delta_modularity(total_edge_weight, resolution, graph, src_indices, vertex_weights, cluster_weights, cluster, cluster_hash, delta_Q, old_cluster_sum, stream); rmm::device_vector<vertex_t> temp_vertices(graph.number_of_vertices); rmm::device_vector<vertex_t> temp_cluster(graph.number_of_vertices, vertex_t{-1}); rmm::device_vector<weight_t> temp_delta_Q(graph.number_of_vertices, weight_t{0.0}); auto cluster_reduce_iterator = thrust::make_zip_iterator(thrust::make_tuple(d_cluster_hash, d_delta_Q)); auto output_edge_iterator2 = thrust::make_zip_iterator( thrust::make_tuple(temp_cluster.data().get(), temp_delta_Q.data().get())); auto cluster_reduce_end = thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), src_indices.begin(), src_indices.end(), cluster_reduce_iterator, temp_vertices.data().get(), output_edge_iterator2, thrust::equal_to<vertex_t>(), [] __device__(auto pair1, auto pair2) { if (thrust::get<1>(pair1) > thrust::get<1>(pair2)) return pair1; else return pair2; }); vertex_t final_size = thrust::distance(temp_vertices.data().get(), cluster_reduce_end.first); vertex_t *d_temp_vertices = temp_vertices.data().get(); vertex_t *d_temp_clusters = temp_cluster.data().get(); vertex_t *d_next_cluster = next_cluster.data().get(); weight_t *d_temp_delta_Q = temp_delta_Q.data().get(); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(final_size), [d_temp_delta_Q, up_down, d_next_cluster, d_temp_vertices, d_vertex_weights, d_temp_clusters, d_cluster_weights] __device__(vertex_t id) { if ((d_temp_clusters[id] >= 0) && (d_temp_delta_Q[id] > weight_t{0.0})) { vertex_t new_cluster = d_temp_clusters[id]; vertex_t old_cluster = d_next_cluster[d_temp_vertices[id]]; if ((new_cluster > old_cluster) == up_down) { #ifdef DEBUG printf( "%s moving vertex %d from cluster %d to cluster %d - deltaQ = %g\n", (up_down ? 
"up" : "down"), d_temp_vertices[id], d_next_cluster[d_temp_vertices[id]], d_temp_clusters[id], d_temp_delta_Q[id]); #endif weight_t src_weight = d_vertex_weights[d_temp_vertices[id]]; d_next_cluster[d_temp_vertices[id]] = d_temp_clusters[id]; atomicAdd(d_cluster_weights + new_cluster, src_weight); atomicAdd(d_cluster_weights + old_cluster, -src_weight); } } }); up_down = !up_down; new_Q = modularity<vertex_t, edge_t, weight_t>( total_edge_weight, resolution, graph, next_cluster.data().get(), stream); if (new_Q > cur_Q) { thrust::copy(next_cluster.begin(), next_cluster.end(), cluster.begin()); } } return cur_Q; } template <typename vertex_t, typename edge_t, typename weight_t> void louvain(GraphCSRView<vertex_t, edge_t, weight_t> const &graph, weight_t *final_modularity, int *num_level, vertex_t *cluster_vec, int max_level, weight_t resolution, hipStream_t stream) { #ifdef TIMING HighResTimer hr_timer; #endif *num_level = 0; // // Vectors to create a copy of the graph // rmm::device_vector<edge_t> offsets_v(graph.offsets, graph.offsets + graph.number_of_vertices + 1); rmm::device_vector<vertex_t> indices_v(graph.indices, graph.indices + graph.number_of_edges); rmm::device_vector<weight_t> weights_v(graph.edge_data, graph.edge_data + graph.number_of_edges); rmm::device_vector<vertex_t> src_indices_v(graph.number_of_edges); // // Weights and clustering across iterations of algorithm // rmm::device_vector<weight_t> vertex_weights_v(graph.number_of_vertices); rmm::device_vector<weight_t> cluster_weights_v(graph.number_of_vertices); rmm::device_vector<vertex_t> cluster_v(graph.number_of_vertices); // // Temporaries used within kernels. Each iteration uses less // of this memory // rmm::device_vector<vertex_t> tmp_arr_v(graph.number_of_vertices); rmm::device_vector<vertex_t> cluster_inverse_v(graph.number_of_vertices); weight_t total_edge_weight = thrust::reduce(rmm::exec_policy(stream)->on(stream), weights_v.begin(), weights_v.end()); weight_t best_modularity = -1; // // Initialize every cluster to reference each vertex to itself // thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end()); thrust::copy(cluster_v.begin(), cluster_v.end(), cluster_vec); // // Our copy of the graph. Each iteration of the outer loop will // shrink this copy of the graph. // GraphCSRView<vertex_t, edge_t, weight_t> current_graph(offsets_v.data().get(), indices_v.data().get(), weights_v.data().get(), graph.number_of_vertices, graph.number_of_edges); current_graph.get_source_indices(src_indices_v.data().get()); while (*num_level < max_level) { // // Sum the weights of all edges departing a vertex. This is // loop invariant, so we'll compute it here. 
// // Cluster weights are equivalent to vertex weights with this initial // graph // #ifdef TIMING hr_timer.start("init"); #endif cugraph::detail::compute_vertex_sums(current_graph, vertex_weights_v, stream); thrust::copy(vertex_weights_v.begin(), vertex_weights_v.end(), cluster_weights_v.begin()); #ifdef TIMING hr_timer.stop(); hr_timer.start("update_clustering"); #endif weight_t new_Q = update_clustering_by_delta_modularity(total_edge_weight, resolution, current_graph, src_indices_v, vertex_weights_v, cluster_weights_v, cluster_v, stream); #ifdef TIMING hr_timer.stop(); #endif if (new_Q <= best_modularity) { break; } best_modularity = new_Q; #ifdef TIMING hr_timer.start("shrinking graph"); #endif // renumber the clusters to the range 0..(num_clusters-1) vertex_t num_clusters = renumber_clusters( graph.number_of_vertices, cluster_v, tmp_arr_v, cluster_inverse_v, cluster_vec, stream); cluster_weights_v.resize(num_clusters); // shrink our graph to represent the graph of supervertices generate_superverticies_graph(current_graph, src_indices_v, num_clusters, cluster_v, stream); // assign each new vertex to its own cluster thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end()); #ifdef TIMING hr_timer.stop(); #endif (*num_level)++; } #ifdef TIMING hr_timer.display(std::cout); #endif *final_modularity = best_modularity; } template void louvain(GraphCSRView<int32_t, int32_t, float> const &, float *, int *, int32_t *, int, float, hipStream_t); template void louvain(GraphCSRView<int32_t, int32_t, double> const &, double *, int *, int32_t *, int, double, hipStream_t); } // namespace detail } // namespace cugraph
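// Illustrative aside: the per-edge gain evaluated inside compute_delta_modularity above
// boils down to the scalar expression below. This is a minimal host-side sketch for
// reference only; the names (m, k_v, a_old, a_new, sum_old, sum_new) are hypothetical
// stand-ins for total_edge_weight, d_vertex_weights[src], d_cluster_weights[...] and the
// old/new cluster edge sums, not identifiers from the file above.
#include <cstdio>

// Gain from moving vertex v (weighted degree k_v) out of a cluster with total weight
// a_old (edge weight sum_old between v and that cluster) into a cluster with total
// weight a_new (edge weight sum_new between v and that cluster); m is the total edge
// weight of the graph and resolution the usual resolution parameter.
double delta_modularity(double m, double resolution, double k_v,
                        double a_old, double a_new,
                        double sum_old, double sum_new) {
  return 2.0 * ((sum_new - sum_old) / m -
                resolution * (a_new * k_v - a_old * k_v + k_v * k_v) / (m * m));
}

int main() {
  // Toy numbers only; a positive result means the move would raise modularity.
  std::printf("%g\n", delta_modularity(20.0, 1.0, 3.0, 7.0, 5.0, 1.0, 2.0));
  return 0;
}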
93b9da86f5ff3c352b0902e7608f23a72e240580.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <graph.hpp> #include <rmm/thrust_rmm_allocator.h> #include <utilities/cuda_utils.cuh> #include <utilities/graph_utils.cuh> //#define TIMING #ifdef TIMING #include <utilities/high_res_timer.hpp> #endif #include <converters/COOtoCSR.cuh> namespace cugraph { namespace detail { namespace { // anonym. constexpr int BLOCK_SIZE_1D = 64; } template <typename vertex_t, typename edge_t, typename weight_t> __global__ // void compute_vertex_sums(vertex_t n_vertex, edge_t const *offsets, weight_t const *weights, weight_t *output) { int src = blockDim.x * blockIdx.x + threadIdx.x; if ((src < n_vertex)) { weight_t sum{0.0}; for (int i = offsets[src]; i < offsets[src + 1]; ++i) { sum += weights[i]; } output[src] = sum; } } template <typename vertex_t, typename edge_t, typename weight_t> weight_t modularity(weight_t total_edge_weight, weight_t resolution, GraphCSRView<vertex_t, edge_t, weight_t> const &graph, vertex_t const *d_cluster, cudaStream_t stream) { vertex_t n_verts = graph.number_of_vertices; rmm::device_vector<weight_t> inc(n_verts, weight_t{0.0}); rmm::device_vector<weight_t> deg(n_verts, weight_t{0.0}); edge_t const *d_offsets = graph.offsets; vertex_t const *d_indices = graph.indices; weight_t const *d_weights = graph.edge_data; weight_t *d_inc = inc.data().get(); weight_t *d_deg = deg.data().get(); // FIXME: Already have weighted degree computed in main loop, // could pass that in rather than computing d_deg... 
which // would save an atomicAdd (synchronization) // thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(graph.number_of_vertices), [d_inc, d_deg, d_offsets, d_indices, d_weights, d_cluster] __device__(vertex_t v) { vertex_t community = d_cluster[v]; weight_t increase{0.0}; weight_t degree{0.0}; for (edge_t loc = d_offsets[v]; loc < d_offsets[v + 1]; ++loc) { vertex_t neighbor = d_indices[loc]; degree += d_weights[loc]; if (d_cluster[neighbor] == community) { increase += d_weights[loc]; } } if (degree > weight_t{0.0}) atomicAdd(d_deg + community, degree); if (increase > weight_t{0.0}) atomicAdd(d_inc + community, increase); }); weight_t Q = thrust::transform_reduce( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(graph.number_of_vertices), [d_deg, d_inc, total_edge_weight, resolution] __device__(vertex_t community) { #ifdef DEBUG printf(" d_inc[%d] = %g, d_deg = %g, return = %g\n", community, d_inc[community], d_deg[community], ((d_inc[community] / total_edge_weight) - resolution * pow(d_deg[community] / total_edge_weight, 2))); #endif return ((d_inc[community] / total_edge_weight) - resolution * (d_deg[community] * d_deg[community]) / (total_edge_weight * total_edge_weight)); }, weight_t{0.0}, thrust::plus<weight_t>()); return Q; } template <typename vertex_t, typename edge_t, typename weight_t> void generate_superverticies_graph(cugraph::GraphCSRView<vertex_t, edge_t, weight_t> &current_graph, rmm::device_vector<vertex_t> &src_indices_v, vertex_t new_number_of_vertices, rmm::device_vector<vertex_t> &cluster_v, cudaStream_t stream) { rmm::device_vector<vertex_t> new_src_v(current_graph.number_of_edges); rmm::device_vector<vertex_t> new_dst_v(current_graph.number_of_edges); rmm::device_vector<weight_t> new_weight_v(current_graph.number_of_edges); vertex_t *d_old_src = src_indices_v.data().get(); vertex_t *d_old_dst = current_graph.indices; weight_t *d_old_weight = current_graph.edge_data; vertex_t *d_new_src = new_src_v.data().get(); vertex_t *d_new_dst = new_dst_v.data().get(); vertex_t *d_clusters = cluster_v.data().get(); weight_t *d_new_weight = new_weight_v.data().get(); // // Renumber the COO // thrust::for_each( rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(current_graph.number_of_edges), [d_old_src, d_old_dst, d_new_src, d_new_dst, d_clusters, d_new_weight, d_old_weight] __device__( edge_t e) { d_new_src[e] = d_clusters[d_old_src[e]]; d_new_dst[e] = d_clusters[d_old_dst[e]]; d_new_weight[e] = d_old_weight[e]; }); thrust::stable_sort_by_key( rmm::exec_policy(stream)->on(stream), d_new_dst, d_new_dst + current_graph.number_of_edges, thrust::make_zip_iterator(thrust::make_tuple(d_new_src, d_new_weight))); thrust::stable_sort_by_key( rmm::exec_policy(stream)->on(stream), d_new_src, d_new_src + current_graph.number_of_edges, thrust::make_zip_iterator(thrust::make_tuple(d_new_dst, d_new_weight))); // // Now we reduce by key to combine the weights of duplicate // edges. 
// auto start = thrust::make_zip_iterator(thrust::make_tuple(d_new_src, d_new_dst)); auto new_start = thrust::make_zip_iterator(thrust::make_tuple(d_old_src, d_old_dst)); auto new_end = thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), start, start + current_graph.number_of_edges, d_new_weight, new_start, d_old_weight, thrust::equal_to<thrust::tuple<vertex_t, vertex_t>>(), thrust::plus<weight_t>()); current_graph.number_of_edges = thrust::distance(new_start, new_end.first); current_graph.number_of_vertices = new_number_of_vertices; detail::fill_offset(d_old_src, current_graph.offsets, new_number_of_vertices, current_graph.number_of_edges, stream); CHECK_CUDA(stream); src_indices_v.resize(current_graph.number_of_edges); } template <typename vertex_t, typename edge_t, typename weight_t> void compute_vertex_sums(GraphCSRView<vertex_t, edge_t, weight_t> const &graph, rmm::device_vector<weight_t> &sums, cudaStream_t stream) { dim3 block_size_1d = dim3((graph.number_of_vertices + BLOCK_SIZE_1D * 4 - 1) / BLOCK_SIZE_1D * 4, 1, 1); dim3 grid_size_1d = dim3(BLOCK_SIZE_1D * 4, 1, 1); compute_vertex_sums<vertex_t, edge_t, weight_t><<<block_size_1d, grid_size_1d>>>( graph.number_of_vertices, graph.offsets, graph.edge_data, sums.data().get()); } template <typename vertex_t> vertex_t renumber_clusters(vertex_t graph_num_vertices, rmm::device_vector<vertex_t> &cluster, rmm::device_vector<vertex_t> &temp_array, rmm::device_vector<vertex_t> &cluster_inverse, vertex_t *cluster_vec, cudaStream_t stream) { // // Now we're going to renumber the clusters from 0 to (k-1), where k is the number of // clusters in this level of the dendogram. // thrust::copy(cluster.begin(), cluster.end(), temp_array.begin()); thrust::sort(temp_array.begin(), temp_array.end()); auto tmp_end = thrust::unique(temp_array.begin(), temp_array.end()); vertex_t old_num_clusters = cluster.size(); vertex_t new_num_clusters = thrust::distance(temp_array.begin(), tmp_end); cluster.resize(new_num_clusters); temp_array.resize(new_num_clusters); thrust::fill(cluster_inverse.begin(), cluster_inverse.end(), vertex_t{-1}); vertex_t *d_tmp_array = temp_array.data().get(); vertex_t *d_cluster_inverse = cluster_inverse.data().get(); vertex_t *d_cluster = cluster.data().get(); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(new_num_clusters), [d_tmp_array, d_cluster_inverse] __device__(vertex_t i) { d_cluster_inverse[d_tmp_array[i]] = i; }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(old_num_clusters), [d_cluster, d_cluster_inverse] __device__(vertex_t i) { d_cluster[i] = d_cluster_inverse[d_cluster[i]]; }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(graph_num_vertices), [cluster_vec, d_cluster] __device__(vertex_t i) { cluster_vec[i] = d_cluster[cluster_vec[i]]; }); return new_num_clusters; } template <typename vertex_t, typename edge_t, typename weight_t> void compute_delta_modularity(weight_t total_edge_weight, weight_t resolution, GraphCSRView<vertex_t, edge_t, weight_t> const &graph, rmm::device_vector<vertex_t> const &src_indices_v, rmm::device_vector<weight_t> const &vertex_weights_v, rmm::device_vector<weight_t> const &cluster_weights_v, rmm::device_vector<vertex_t> const &cluster_v, rmm::device_vector<vertex_t> &cluster_hash_v, 
rmm::device_vector<weight_t> &delta_Q_v, rmm::device_vector<weight_t> &tmp_size_V_v, cudaStream_t stream) { vertex_t const *d_src_indices = src_indices_v.data().get(); vertex_t const *d_dst_indices = graph.indices; edge_t const *d_offsets = graph.offsets; weight_t const *d_weights = graph.edge_data; vertex_t const *d_cluster = cluster_v.data().get(); weight_t const *d_vertex_weights = vertex_weights_v.data().get(); weight_t const *d_cluster_weights = cluster_weights_v.data().get(); vertex_t *d_cluster_hash = cluster_hash_v.data().get(); weight_t *d_delta_Q = delta_Q_v.data().get(); weight_t *d_old_cluster_sum = tmp_size_V_v.data().get(); weight_t *d_new_cluster_sum = d_delta_Q; thrust::fill(cluster_hash_v.begin(), cluster_hash_v.end(), vertex_t{-1}); thrust::fill(delta_Q_v.begin(), delta_Q_v.end(), weight_t{0.0}); thrust::fill(tmp_size_V_v.begin(), tmp_size_V_v.end(), weight_t{0.0}); // // For each source vertex, we're going to build a hash // table to the destination cluster ids. We can use // the offsets ranges to define the bounds of the hash // table. // thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [d_src_indices, d_dst_indices, d_cluster, d_offsets, d_cluster_hash, d_new_cluster_sum, d_weights, d_old_cluster_sum] __device__(edge_t loc) { vertex_t src = d_src_indices[loc]; vertex_t dst = d_dst_indices[loc]; if (src != dst) { vertex_t old_cluster = d_cluster[src]; vertex_t new_cluster = d_cluster[dst]; edge_t hash_base = d_offsets[src]; edge_t n_edges = d_offsets[src + 1] - hash_base; int h = (new_cluster % n_edges); edge_t offset = hash_base + h; while (d_cluster_hash[offset] != new_cluster) { if (d_cluster_hash[offset] == -1) { atomicCAS(d_cluster_hash + offset, -1, new_cluster); } else { h = (h + 1) % n_edges; offset = hash_base + h; } } atomicAdd(d_new_cluster_sum + offset, d_weights[loc]); if (old_cluster == new_cluster) atomicAdd(d_old_cluster_sum + src, d_weights[loc]); } }); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<edge_t>(0), thrust::make_counting_iterator<edge_t>(graph.number_of_edges), [total_edge_weight, resolution, d_cluster_hash, d_src_indices, d_cluster, d_vertex_weights, d_delta_Q, d_new_cluster_sum, d_old_cluster_sum, d_cluster_weights] __device__(edge_t loc) { vertex_t new_cluster = d_cluster_hash[loc]; if (new_cluster >= 0) { vertex_t src = d_src_indices[loc]; vertex_t old_cluster = d_cluster[src]; weight_t k_k = d_vertex_weights[src]; weight_t a_old = d_cluster_weights[old_cluster]; weight_t a_new = d_cluster_weights[new_cluster]; // NOTE: d_delta_Q and d_new_cluster_sum are aliases // for same device array to save memory d_delta_Q[loc] = 2 * (((d_new_cluster_sum[loc] - d_old_cluster_sum[src]) / total_edge_weight) - resolution * (a_new * k_k - a_old * k_k + k_k * k_k) / (total_edge_weight * total_edge_weight)); #ifdef DEBUG printf( "src = %d, new cluster = %d, d_delta_Q[%d] = %g, new_cluster_sum = %g, " "old_cluster_sum = %g, a_new = %g, a_old = %g, k_k = %g\n", src, new_cluster, loc, d_delta_Q[loc], d_new_cluster_sum[loc], d_old_cluster_sum[src], a_new, a_old, k_k); #endif } else { d_delta_Q[loc] = weight_t{0.0}; } }); } template <typename vertex_t, typename edge_t, typename weight_t> weight_t update_clustering_by_delta_modularity( weight_t total_edge_weight, weight_t resolution, GraphCSRView<vertex_t, edge_t, weight_t> const &graph, rmm::device_vector<vertex_t> const &src_indices, 
rmm::device_vector<weight_t> const &vertex_weights, rmm::device_vector<weight_t> &cluster_weights, rmm::device_vector<vertex_t> &cluster, cudaStream_t stream) { rmm::device_vector<vertex_t> next_cluster(cluster); rmm::device_vector<weight_t> delta_Q(graph.number_of_edges); rmm::device_vector<vertex_t> cluster_hash(graph.number_of_edges); rmm::device_vector<weight_t> old_cluster_sum(graph.number_of_vertices); vertex_t *d_cluster_hash = cluster_hash.data().get(); vertex_t *d_cluster = cluster.data().get(); weight_t const *d_vertex_weights = vertex_weights.data().get(); weight_t *d_cluster_weights = cluster_weights.data().get(); weight_t *d_delta_Q = delta_Q.data().get(); weight_t new_Q = modularity<vertex_t, edge_t, weight_t>( total_edge_weight, resolution, graph, cluster.data().get(), stream); weight_t cur_Q = new_Q - 1; // To avoid the potential of having two vertices swap clusters // we will only allow vertices to move up (true) or down (false) // during each iteration of the loop bool up_down = true; while (new_Q > (cur_Q + 0.0001)) { cur_Q = new_Q; compute_delta_modularity(total_edge_weight, resolution, graph, src_indices, vertex_weights, cluster_weights, cluster, cluster_hash, delta_Q, old_cluster_sum, stream); rmm::device_vector<vertex_t> temp_vertices(graph.number_of_vertices); rmm::device_vector<vertex_t> temp_cluster(graph.number_of_vertices, vertex_t{-1}); rmm::device_vector<weight_t> temp_delta_Q(graph.number_of_vertices, weight_t{0.0}); auto cluster_reduce_iterator = thrust::make_zip_iterator(thrust::make_tuple(d_cluster_hash, d_delta_Q)); auto output_edge_iterator2 = thrust::make_zip_iterator( thrust::make_tuple(temp_cluster.data().get(), temp_delta_Q.data().get())); auto cluster_reduce_end = thrust::reduce_by_key(rmm::exec_policy(stream)->on(stream), src_indices.begin(), src_indices.end(), cluster_reduce_iterator, temp_vertices.data().get(), output_edge_iterator2, thrust::equal_to<vertex_t>(), [] __device__(auto pair1, auto pair2) { if (thrust::get<1>(pair1) > thrust::get<1>(pair2)) return pair1; else return pair2; }); vertex_t final_size = thrust::distance(temp_vertices.data().get(), cluster_reduce_end.first); vertex_t *d_temp_vertices = temp_vertices.data().get(); vertex_t *d_temp_clusters = temp_cluster.data().get(); vertex_t *d_next_cluster = next_cluster.data().get(); weight_t *d_temp_delta_Q = temp_delta_Q.data().get(); thrust::for_each(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator<vertex_t>(0), thrust::make_counting_iterator<vertex_t>(final_size), [d_temp_delta_Q, up_down, d_next_cluster, d_temp_vertices, d_vertex_weights, d_temp_clusters, d_cluster_weights] __device__(vertex_t id) { if ((d_temp_clusters[id] >= 0) && (d_temp_delta_Q[id] > weight_t{0.0})) { vertex_t new_cluster = d_temp_clusters[id]; vertex_t old_cluster = d_next_cluster[d_temp_vertices[id]]; if ((new_cluster > old_cluster) == up_down) { #ifdef DEBUG printf( "%s moving vertex %d from cluster %d to cluster %d - deltaQ = %g\n", (up_down ? 
"up" : "down"), d_temp_vertices[id], d_next_cluster[d_temp_vertices[id]], d_temp_clusters[id], d_temp_delta_Q[id]); #endif weight_t src_weight = d_vertex_weights[d_temp_vertices[id]]; d_next_cluster[d_temp_vertices[id]] = d_temp_clusters[id]; atomicAdd(d_cluster_weights + new_cluster, src_weight); atomicAdd(d_cluster_weights + old_cluster, -src_weight); } } }); up_down = !up_down; new_Q = modularity<vertex_t, edge_t, weight_t>( total_edge_weight, resolution, graph, next_cluster.data().get(), stream); if (new_Q > cur_Q) { thrust::copy(next_cluster.begin(), next_cluster.end(), cluster.begin()); } } return cur_Q; } template <typename vertex_t, typename edge_t, typename weight_t> void louvain(GraphCSRView<vertex_t, edge_t, weight_t> const &graph, weight_t *final_modularity, int *num_level, vertex_t *cluster_vec, int max_level, weight_t resolution, cudaStream_t stream) { #ifdef TIMING HighResTimer hr_timer; #endif *num_level = 0; // // Vectors to create a copy of the graph // rmm::device_vector<edge_t> offsets_v(graph.offsets, graph.offsets + graph.number_of_vertices + 1); rmm::device_vector<vertex_t> indices_v(graph.indices, graph.indices + graph.number_of_edges); rmm::device_vector<weight_t> weights_v(graph.edge_data, graph.edge_data + graph.number_of_edges); rmm::device_vector<vertex_t> src_indices_v(graph.number_of_edges); // // Weights and clustering across iterations of algorithm // rmm::device_vector<weight_t> vertex_weights_v(graph.number_of_vertices); rmm::device_vector<weight_t> cluster_weights_v(graph.number_of_vertices); rmm::device_vector<vertex_t> cluster_v(graph.number_of_vertices); // // Temporaries used within kernels. Each iteration uses less // of this memory // rmm::device_vector<vertex_t> tmp_arr_v(graph.number_of_vertices); rmm::device_vector<vertex_t> cluster_inverse_v(graph.number_of_vertices); weight_t total_edge_weight = thrust::reduce(rmm::exec_policy(stream)->on(stream), weights_v.begin(), weights_v.end()); weight_t best_modularity = -1; // // Initialize every cluster to reference each vertex to itself // thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end()); thrust::copy(cluster_v.begin(), cluster_v.end(), cluster_vec); // // Our copy of the graph. Each iteration of the outer loop will // shrink this copy of the graph. // GraphCSRView<vertex_t, edge_t, weight_t> current_graph(offsets_v.data().get(), indices_v.data().get(), weights_v.data().get(), graph.number_of_vertices, graph.number_of_edges); current_graph.get_source_indices(src_indices_v.data().get()); while (*num_level < max_level) { // // Sum the weights of all edges departing a vertex. This is // loop invariant, so we'll compute it here. 
// // Cluster weights are equivalent to vertex weights with this initial // graph // #ifdef TIMING hr_timer.start("init"); #endif cugraph::detail::compute_vertex_sums(current_graph, vertex_weights_v, stream); thrust::copy(vertex_weights_v.begin(), vertex_weights_v.end(), cluster_weights_v.begin()); #ifdef TIMING hr_timer.stop(); hr_timer.start("update_clustering"); #endif weight_t new_Q = update_clustering_by_delta_modularity(total_edge_weight, resolution, current_graph, src_indices_v, vertex_weights_v, cluster_weights_v, cluster_v, stream); #ifdef TIMING hr_timer.stop(); #endif if (new_Q <= best_modularity) { break; } best_modularity = new_Q; #ifdef TIMING hr_timer.start("shrinking graph"); #endif // renumber the clusters to the range 0..(num_clusters-1) vertex_t num_clusters = renumber_clusters( graph.number_of_vertices, cluster_v, tmp_arr_v, cluster_inverse_v, cluster_vec, stream); cluster_weights_v.resize(num_clusters); // shrink our graph to represent the graph of supervertices generate_superverticies_graph(current_graph, src_indices_v, num_clusters, cluster_v, stream); // assign each new vertex to its own cluster thrust::sequence(rmm::exec_policy(stream)->on(stream), cluster_v.begin(), cluster_v.end()); #ifdef TIMING hr_timer.stop(); #endif (*num_level)++; } #ifdef TIMING hr_timer.display(std::cout); #endif *final_modularity = best_modularity; } template void louvain(GraphCSRView<int32_t, int32_t, float> const &, float *, int *, int32_t *, int, float, cudaStream_t); template void louvain(GraphCSRView<int32_t, int32_t, double> const &, double *, int *, int32_t *, int, double, cudaStream_t); } // namespace detail } // namespace cugraph
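// Illustrative aside: generate_superverticies_graph above collapses parallel edges by
// sorting the renumbered COO on (src, dst) and summing weights with thrust::reduce_by_key
// over a zip iterator. Below is a self-contained, host-only sketch of that collapse on
// toy data (assumes a CUDA toolkit with Thrust; none of these names come from the file above).
#include <thrust/host_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/tuple.h>
#include <vector>
#include <cstdio>

int main() {
  // Toy COO already sorted by (src, dst); the edge (0,1) appears twice and its
  // weights should be summed into a single edge.
  std::vector<int>    s{0, 0, 0, 1};
  std::vector<int>    d{1, 1, 2, 2};
  std::vector<double> v{1.0, 2.0, 0.5, 4.0};
  thrust::host_vector<int>    src(s.begin(), s.end()), dst(d.begin(), d.end());
  thrust::host_vector<double> w(v.begin(), v.end());

  thrust::host_vector<int>    out_src(src.size()), out_dst(dst.size());
  thrust::host_vector<double> out_w(w.size());

  auto keys_in  = thrust::make_zip_iterator(thrust::make_tuple(src.begin(), dst.begin()));
  auto keys_out = thrust::make_zip_iterator(thrust::make_tuple(out_src.begin(), out_dst.begin()));

  auto ends = thrust::reduce_by_key(keys_in, keys_in + src.size(), w.begin(),
                                    keys_out, out_w.begin(),
                                    thrust::equal_to<thrust::tuple<int, int>>(),
                                    thrust::plus<double>());

  int n_edges = static_cast<int>(ends.second - out_w.begin());
  for (int e = 0; e < n_edges; ++e)   // prints 0 -> 1 : 3, 0 -> 2 : 0.5, 1 -> 2 : 4
    std::printf("%d -> %d : %g\n", (int)out_src[e], (int)out_dst[e], (double)out_w[e]);
  return 0;
}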
8059cf3a24929d0d886dc3592ef193047585228e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #define PRECISION 0.00001 #define TAM_BLOCO 8 #define uN 5.0 #define uS 5.0 #define uW 0.0 #define uE 10.0 #define GPU_ZERO 0 #define GPU_ONE 1 //CPU variables double h_h1, h_h2; double h_denominador1, h_denominador2; double *h0_m, *d0_m, *h1_m, *d1_m; double h_parcial1, h_parcial2; int h_dimensaoX, h_dimensaoY, laps = 0, i; //GPU variables __constant__ double omega = 1.5; __constant__ double d_h1, d_h2; __constant__ double d_denominador1, d_denominador2; __constant__ int d_dimensaoX, d_dimensaoY; __constant__ double d_parcial1, d_parcial2; FILE *arquivo; clock_t start, end; double tempo; //CPU functions //Function that prints the matrix to the output file void printMat(){ int i, j; for(i = 0; i < h_dimensaoX; i++){ for(j = 0; j < h_dimensaoY; j++){ if(j < h_dimensaoY/2){ fprintf(arquivo, "%lf", h0_m[i * h_dimensaoY + j]); if(j != h_dimensaoY - 1) fprintf(arquivo, " "); }else{ fprintf(arquivo, "%lf", h1_m[i * h_dimensaoY + j]); if(j != h_dimensaoY - 1) fprintf(arquivo, " "); } } if(i != h_dimensaoX - 1) fprintf(arquivo, "\n"); } } //Function that initializes the matrix with the boundary values specified by the problem void setupM(){ int i,j; for(i = 0; i < h_dimensaoX; i++){ for(j = 0; j < h_dimensaoY; j++){ if(i == 0){ h1_m[i * h_dimensaoY + j] = uN; }else if(i == (h_dimensaoX - 1)){ h1_m[i * h_dimensaoY + j] = uS; }else if(j == 0){ h1_m[i * h_dimensaoY + j] = uW; }else if(j == h_dimensaoY - 1){ h1_m[i * h_dimensaoY + j] = uE; } } } } //GPU functions //Functions "a" and "b" specified by the problem __device__ double a(int i, int j){ double x = i * d_h1; double y = j * d_h2; return 500 * x * (1 - x) * (0.5 - y); } __device__ double b(int i, int j){ double x = i * d_h1; double y = j * d_h2; return 500 * y * (1 - y) * (x - 0.5); } //Functions "n", "s", "w", "e" specified by the problem __device__ double n(int i, int j){ return (d_parcial2 - (d_h2 * b(i,j))/d_denominador2); } __device__ double s(int i, int j){ return (d_parcial2 + (d_h2 * b(i,j))/d_denominador2); } __device__ double e(int i, int j){ return (d_parcial1 - (d_h1 * a(i,j))/d_denominador1); } __device__ double w(int i, int j){ return (d_parcial1 + (d_h1 * a(i,j))/d_denominador1); } //Function that takes the weighted average of the values neighbouring the point being updated __device__ double somaDosPontosVizinhos(int i, int j, double *m){ double temp = 0; temp += w(i,j) * m[(i - 1) * d_dimensaoY + j]; temp += e(i,j) * m[(i + 1) * d_dimensaoY + j]; temp += s(i,j) * m[i * d_dimensaoY + (j - 1)]; temp += n(i,j) * m[i * d_dimensaoY + (j + 1)]; return temp; } //Main kernels of the program. Each one works on a set of points of the matrix, //taking a weighted average between the current value of the point being analysed and //its four adjacent points.
How much weight each value gets is determined by the omega //of the function, which in this case is fixed __global__ void vermelhos(double *m, int device){ int tidx = blockIdx.x * blockDim.x + threadIdx.x; int tidy = blockIdx.y * blockDim.y + threadIdx.y; if(device == 0){ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < (d_dimensaoY - 1)/2){ if((tidx + tidy) % 2 == 0){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } }else{ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy >= (d_dimensaoY - 1)/2 && tidy < d_dimensaoY - 1){ if((tidx + tidy) % 2 == 0){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } } } __global__ void azuis(double *m, int device){ int tidx = blockIdx.x * blockDim.x + threadIdx.x; int tidy = blockIdx.y * blockDim.y + threadIdx.y; if(device == 0){ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < (d_dimensaoY - 1)/2){ if((tidx + tidy) % 2 == 1){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } }else{ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy >= (d_dimensaoY - 1)/2 && tidy < d_dimensaoY - 1){ if((tidx + tidy) % 2 == 1){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } } } int main(int argc, char** argv){ //Initial checks to guarantee that the program runs with the //correct initial conditions if(argc != 4){ printf("Incorrect number of parameters:\n"); printf("Enter the dimensions and the number of iterations\n"); printf("\tUse the format: %s <Dimension X> <Dimension Y> <Iterations>\n", argv[0]); exit(-1); } //Initializing all the values needed to transfer to the GPU and to perform //the program's computations h_dimensaoX = atoi(argv[1]); h_dimensaoY = atoi(argv[2]); laps = atoi(argv[3]); h_h1 = 1.0/(h_dimensaoX + 1); h_h2 = 1.0/(h_dimensaoY + 1); h_dimensaoX += 2; h_dimensaoY += 2; h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2))); h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2))); h_parcial1 = 2/h_denominador1; h_parcial2 = 2/h_denominador2; //Allocating the matrix on the CPU and initializing it h0_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double)); h1_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double)); setupM(); //Selecting GPU 0 to transfer data hipSetDevice(GPU_ZERO); //Allocating the matrix on GPU 0 hipMalloc(&d0_m, h_dimensaoX * h_dimensaoY * sizeof(double)); //Transferring the required information to GPU 0 hipMemcpy(d0_m, h1_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyHostToDevice); hipMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, hipMemcpyHostToDevice); //Selecting GPU 1 to transfer data hipSetDevice(GPU_ONE); //Allocating the matrix on GPU 1 hipMalloc(&d1_m, h_dimensaoX * h_dimensaoY * sizeof(double));
//Transferring the required information to GPU 1 hipMemcpy(d1_m, h1_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyHostToDevice); hipMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, hipMemcpyHostToDevice); //Starting the timer start = clock(); //Computing the number of blocks and threads that will be launched dim3 nthreads(TAM_BLOCO,TAM_BLOCO); dim3 nblocos((h_dimensaoX + nthreads.x - 1)/nthreads.x, (h_dimensaoY + nthreads.y - 1)/nthreads.y); //Performing the computations for(i = 0; i < laps; i++){ hipSetDevice(GPU_ZERO); hipLaunchKernelGGL(( vermelhos), dim3(nblocos), dim3(nthreads), 0, 0, d0_m, GPU_ZERO); hipLaunchKernelGGL(( azuis), dim3(nblocos), dim3(nthreads), 0, 0, d0_m, GPU_ZERO); //gpuErrchk( hipPeekAtLastError() ); hipSetDevice(GPU_ONE); hipLaunchKernelGGL(( vermelhos), dim3(nblocos), dim3(nthreads), 0, 0, d1_m, GPU_ONE); hipLaunchKernelGGL(( azuis), dim3(nblocos), dim3(nthreads), 0, 0, d1_m, GPU_ONE); } hipSetDevice(GPU_ZERO); //Bringing the matrix back to the CPU hipMemcpy(h0_m, d0_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyDeviceToHost); //Reset the GPU to release all resources hipDeviceReset(); hipSetDevice(GPU_ONE); //Bringing the matrix back to the CPU hipMemcpy(h1_m, d1_m, h_dimensaoX * h_dimensaoY * sizeof(double), hipMemcpyDeviceToHost); //Reset the GPU to release all resources hipDeviceReset(); //Printing the matrix to the file and closing it arquivo = fopen("sample.txt", "w"); printMat(); fclose(arquivo); //Finish measuring how long the program took end = clock(); tempo = ((double) (end - start))/CLOCKS_PER_SEC; printf("Total time: %lfs...\n", tempo); return 0; }
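// Illustrative aside: the vermelhos/azuis kernels above implement a red-black successive
// over-relaxation (SOR) sweep, u <- (1-omega)*u + omega*(weighted average of the four
// neighbours). Below is a minimal single-point sketch of that update rule, using the plain
// 5-point Laplace average instead of the n/s/w/e convection coefficients built from a(x,y)
// and b(x,y) above; all names here are illustrative only.
#include <cstdio>

// One SOR update of an interior point: with omega == 1 this is Gauss-Seidel,
// 1 < omega < 2 over-relaxes towards the neighbour average.
double sor_point(double u, double up, double down, double left, double right, double omega) {
  double neighbour_avg = 0.25 * (up + down + left + right);
  return (1.0 - omega) * u + omega * neighbour_avg;
}

int main() {
  double u = 0.0;
  for (int k = 0; k < 50; ++k)          // repeated updates with frozen neighbours
    u = sor_point(u, 5.0, 5.0, 0.0, 10.0, 1.5);
  std::printf("fixed point: %g\n", u);  // converges to 5, the neighbour average
  return 0;
}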
8059cf3a24929d0d886dc3592ef193047585228e.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #define PRECISION 0.00001 #define TAM_BLOCO 8 #define uN 5.0 #define uS 5.0 #define uW 0.0 #define uE 10.0 #define GPU_ZERO 0 #define GPU_ONE 1 //Variáveis CPU double h_h1, h_h2; double h_denominador1, h_denominador2; double *h0_m, *d0_m, *h1_m, *d1_m; double h_parcial1, h_parcial2; int h_dimensaoX, h_dimensaoY, laps = 0, i; //Variáveis GPU __constant__ double omega = 1.5; __constant__ double d_h1, d_h2; __constant__ double d_denominador1, d_denominador2; __constant__ int d_dimensaoX, d_dimensaoY; __constant__ double d_parcial1, d_parcial2; FILE *arquivo; clock_t start, end; double tempo; //Funções da CPU //Funcao que imprime a matriz no arquivo de saida void printMat(){ int i, j; for(i = 0; i < h_dimensaoX; i++){ for(j = 0; j < h_dimensaoY; j++){ if(j < h_dimensaoY/2){ fprintf(arquivo, "%lf", h0_m[i * h_dimensaoY + j]); if(j != h_dimensaoY - 1) fprintf(arquivo, " "); }else{ fprintf(arquivo, "%lf", h1_m[i * h_dimensaoY + j]); if(j != h_dimensaoY - 1) fprintf(arquivo, " "); } } if(i != h_dimensaoX - 1) fprintf(arquivo, "\n"); } } //Funcao que inicializa a matriz com os valores de contorno especificados pelo problema void setupM(){ int i,j; for(i = 0; i < h_dimensaoX; i++){ for(j = 0; j < h_dimensaoY; j++){ if(i == 0){ h1_m[i * h_dimensaoY + j] = uN; }else if(i == (h_dimensaoX - 1)){ h1_m[i * h_dimensaoY + j] = uS; }else if(j == 0){ h1_m[i * h_dimensaoY + j] = uW; }else if(j == h_dimensaoY - 1){ h1_m[i * h_dimensaoY + j] = uE; } } } } //Funções da GPU //Funcoes "a" e "b" especificada pelo problema __device__ double a(int i, int j){ double x = i * d_h1; double y = j * d_h2; return 500 * x * (1 - x) * (0.5 - y); } __device__ double b(int i, int j){ double x = i * d_h1; double y = j * d_h2; return 500 * y * (1 - y) * (x - 0.5); } //Funcoes "n", "s", "w", "e" especificadas pelo problema __device__ double n(int i, int j){ return (d_parcial2 - (d_h2 * b(i,j))/d_denominador2); } __device__ double s(int i, int j){ return (d_parcial2 + (d_h2 * b(i,j))/d_denominador2); } __device__ double e(int i, int j){ return (d_parcial1 - (d_h1 * a(i,j))/d_denominador1); } __device__ double w(int i, int j){ return (d_parcial1 + (d_h1 * a(i,j))/d_denominador1); } //Funcao que faz a media ponderada dos valores vizinhos ao ponto que está sendo atualizado __device__ double somaDosPontosVizinhos(int i, int j, double *m){ double temp = 0; temp += w(i,j) * m[(i - 1) * d_dimensaoY + j]; temp += e(i,j) * m[(i + 1) * d_dimensaoY + j]; temp += s(i,j) * m[i * d_dimensaoY + (j - 1)]; temp += n(i,j) * m[i * d_dimensaoY + (j + 1)]; return temp; } //Kernels principais do programa. Cada um trabalho em um conjunto de pontos da matriz //fazendo uma media ponderada entre o valor atual do ponto que está sendo analisado e //seus quatro pontos adjacentes. 
O quanto cada valor vai pesar é determinado pelo ômega //da funcao que, nesse caso, é fixo __global__ void vermelhos(double *m, int device){ int tidx = blockIdx.x * blockDim.x + threadIdx.x; int tidy = blockIdx.y * blockDim.y + threadIdx.y; if(device == 0){ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < (d_dimensaoY - 1)/2){ if((tidx + tidy) % 2 == 0){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } }else{ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy >= (d_dimensaoY - 1)/2 && tidy < d_dimensaoY - 1){ if((tidx + tidy) % 2 == 0){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } } } __global__ void azuis(double *m, int device){ int tidx = blockIdx.x * blockDim.x + threadIdx.x; int tidy = blockIdx.y * blockDim.y + threadIdx.y; if(device == 0){ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy != 0 && tidy < (d_dimensaoY - 1)/2){ if((tidx + tidy) % 2 == 1){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } }else{ if(tidx != 0 && tidx < d_dimensaoX - 1 && tidy >= (d_dimensaoY - 1)/2 && tidy < d_dimensaoY - 1){ if((tidx + tidy) % 2 == 1){ m[tidx * d_dimensaoY + tidy] *= (1 - omega); m[tidx * d_dimensaoY + tidy] += omega * somaDosPontosVizinhos(tidx, tidy, m); } } } } int main(int argc, char** argv){ //Especificacoes iniciais para garantir que o programa será rodado com as //condicoes iniciais corretas if(argc != 4){ printf("Número incorreto de parâmetros:\n"); printf("Insira as dimensoes e a quantidade de iterações\n"); printf("\tUtilize o formato: %s <Dimensao X> <Dimensao Y> <Iterações>\n", argv[0]); exit(-1); } //Inicializando todos os valores necessários para transferir para a GPU e para realizar //os calculos do programa h_dimensaoX = atoi(argv[1]); h_dimensaoY = atoi(argv[2]); laps = atoi(argv[3]); h_h1 = 1.0/(h_dimensaoX + 1); h_h2 = 1.0/(h_dimensaoY + 1); h_dimensaoX += 2; h_dimensaoY += 2; h_denominador1 = 4*(1 + (pow(h_h1,2)/pow(h_h2,2))); h_denominador2 = 4*(1 + (pow(h_h2,2)/pow(h_h1,2))); h_parcial1 = 2/h_denominador1; h_parcial2 = 2/h_denominador2; //Alocando a matriz na CPU e inicializando h0_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double)); h1_m = (double *) calloc(h_dimensaoX * h_dimensaoY, sizeof(double)); setupM(); //Escolhendo a GPU 0 para transferir dados cudaSetDevice(GPU_ZERO); //Alocando a matriz na GPU 0 cudaMalloc(&d0_m, h_dimensaoX * h_dimensaoY * sizeof(double)); //Transferindo as informações necessárias para a GPU 0 cudaMemcpy(d0_m, h1_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, cudaMemcpyHostToDevice); //Escolhendo a GPU 1 para transferir dados cudaSetDevice(GPU_ONE); //Alocando a matriz na GPU 1 cudaMalloc(&d1_m, h_dimensaoX 
* h_dimensaoY * sizeof(double)); //Transferindo as informações necessárias para a GPU 1 cudaMemcpy(d1_m, h1_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_denominador1, &h_denominador1, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_denominador2, &h_denominador2, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_dimensaoX, &h_dimensaoX, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_dimensaoY, &h_dimensaoY, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_h1, &h_h1, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_h2, &h_h2, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_parcial1, &h_parcial1, sizeof(double), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_parcial2, &h_parcial2, sizeof(double), 0, cudaMemcpyHostToDevice); //Iniciando a contagem do tempo start = clock(); //Calculando a quantidade de blocos e threads que serao lancados dim3 nthreads(TAM_BLOCO,TAM_BLOCO); dim3 nblocos((h_dimensaoX + nthreads.x - 1)/nthreads.x, (h_dimensaoY + nthreads.y - 1)/nthreads.y); //Fazendo os cálculos for(i = 0; i < laps; i++){ cudaSetDevice(GPU_ZERO); vermelhos<<<nblocos, nthreads>>>(d0_m, GPU_ZERO); azuis<<<nblocos, nthreads>>>(d0_m, GPU_ZERO); //gpuErrchk( cudaPeekAtLastError() ); cudaSetDevice(GPU_ONE); vermelhos<<<nblocos, nthreads>>>(d1_m, GPU_ONE); azuis<<<nblocos, nthreads>>>(d1_m, GPU_ONE); } cudaSetDevice(GPU_ZERO); //Trazendo a matriz de volta para a CPU cudaMemcpy(h0_m, d0_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyDeviceToHost); //Reseta a GPU para liberar todos os recursos cudaDeviceReset(); cudaSetDevice(GPU_ONE); //Trazendo a matriz de volta para a CPU cudaMemcpy(h1_m, d1_m, h_dimensaoX * h_dimensaoY * sizeof(double), cudaMemcpyDeviceToHost); //Reseta a GPU para liberar todos os recursos cudaDeviceReset(); //Imprimindo a matriz no arquivo e fechando-o arquivo = fopen("sample.txt", "w"); printMat(); fclose(arquivo); //Termina de calcular o tempo que demorou o programa end = clock(); tempo = ((double) (end - start))/CLOCKS_PER_SEC; printf("Tempo total: %lfs...\n", tempo); return 0; }
95f16e470a144d9c2bf91175b982904f19c22ac2.hip
// !!! This is a file automatically generated by hipify!!! /* * CLF.cu * * Created on: Nov 8, 2017 * Author: zhanghui */ #include "CLF.cuh" #include "./../ConstraintParser/ConstraintParameter.cuh" #include "./../model/Coodinate.cuh" #include "./../model/Interval.cuh" #include "./../model/Priority.cuh" #include "./../model/FullCoveredInfo.cuh" #include "./../model/Classification.cuh" #include "./../model/Limit.h" #include "./../solver/ATG.h" #include "./../solver/PCATG.h" #include "./../solver/ConstantValue.h" #include "./../ErrorHandle/ErrorHandle.cuh" #include "ExcuteConstraint.cuh" #include "HardwareStrategy.cuh" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "device_functions.hpp" #include "math_functions.h" #include <stdio.h> #include <iostream> #include <climits> using namespace std; /* * case0: equality and inequality operators, the whole interval * Handling of the > and >= comparison operators: * slope is 0: abs(y1-y2)<=Min && y1 >=0 && y2>=0 whole interval * slope exists: abs(y1-y2)>Min && y1>=0 && y2>=0 whole interval * case1: abs(y1-y2)<=Min && y1 <0 && y2<0 no solution * case2: abs(y1-y2)>Min && z>x1 && z<x2 && y1>0 (x1,z) * case3: abs(y1-y2)>Min && z>x1 && z<x2 && y2>0 (z,x2) * case4: abs(y1-y2)>Min && z<x1 && y1 <0 && y2<0 no solution (xbefore,d) * case5: abs(y1-y2)>Min && z>x2 && y1 <0 && y2<0 no solution (d,xafter) * Below are the predicates for each of these classifications. * */ //whole-interval case: equality, inequality, or the segment entirely above the axis __device__ bool case0(Coodinate* a,Coodinate* b,const int cmpType) { bool a1 = (cmpType==ConstantValue::Equal); bool a2 = (cmpType==ConstantValue::NotEqual); bool a3 = (cmpType==ConstantValue::GreatOrGreatEqual) && (a->y >=0) && (b->y >=0); return a1 || a2 || a3; } //slope does not exist (flat segment below the axis), so there is no solution __device__ bool case1(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)<=Limit::FloatMin) && ((a->y <=0) || (a->y <=-0.0)) && ((b->y <=0) || (b->y <=-0.0)); } //zero crossing falls inside the interval, the left part is the solution __device__ bool case2(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y > 0) && (b->y < 0); } //zero crossing falls inside the interval, the right part is the solution __device__ bool case3(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y < 0) && (b->y > 0); } //no solution, the zero crossing falls to the left of the interval __device__ bool case4(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y > b->y);; } //no solution, the zero crossing falls to the right of the interval __device__ bool case5(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y < b->y); }
95f16e470a144d9c2bf91175b982904f19c22ac2.cu
/* * CLF.cu * * Created on: Nov 8, 2017 * Author: zhanghui */ #include "CLF.cuh" #include "./../ConstraintParser/ConstraintParameter.cuh" #include "./../model/Coodinate.cuh" #include "./../model/Interval.cuh" #include "./../model/Priority.cuh" #include "./../model/FullCoveredInfo.cuh" #include "./../model/Classification.cuh" #include "./../model/Limit.h" #include "./../solver/ATG.h" #include "./../solver/PCATG.h" #include "./../solver/ConstantValue.h" #include "./../ErrorHandle/ErrorHandle.cuh" #include "ExcuteConstraint.cuh" #include "HardwareStrategy.cuh" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.hpp" #include "math_functions.h" #include <stdio.h> #include <iostream> #include <climits> using namespace std; /* * case:0 * 等号和不等号 全区间 * 判断是否是> 和 >=比较运算符号的处理 * 斜率为0: abs(y1-y2)<=Min && y1 >=0 && y2>=0 全区间 * 斜率存在: abs(y1-y2)>Min && y1>=0 && y2>=0 全区间 * case1: abs(y1-y2)<=Min && y1 <0 && y2<0 无解 * case2: abs(y1-y2)>Min && z>x1 && z<x2 && y1>0 (x1,z) * case3: abs(y1-y2)>Min && z>x1 && z<x2 && y2>0 (z,x2) * case4: abs(y1-y2)>Min && z<x1 && y1 <0 && y2<0 无解(xbefore,d) * case5: abs(y1-y2)>Min && z>x2 && y1 <0 && y2<0 无解(d,xafter) * 下面是所有的分类的变量的设置 * */ //全区间case:等式、不等式、全在上方 __device__ bool case0(Coodinate* a,Coodinate* b,const int cmpType) { bool a1 = (cmpType==ConstantValue::Equal); bool a2 = (cmpType==ConstantValue::NotEqual); bool a3 = (cmpType==ConstantValue::GreatOrGreatEqual) && (a->y >=0) && (b->y >=0); return a1 || a2 || a3; } //斜率不存在,导致无解 __device__ bool case1(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)<=Limit::FloatMin) && ((a->y <=0) || (a->y <=-0.0)) && ((b->y <=0) || (b->y <=-0.0)); } //零点落在中间,左边部分解 __device__ bool case2(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y > 0) && (b->y < 0); } //零点落在中间,右边部分解 __device__ bool case3(Coodinate* a,Coodinate* b,const int cmpType) { return (cmpType==ConstantValue::GreatOrGreatEqual) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y < 0) && (b->y > 0); } //无解,零点落在左边 __device__ bool case4(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y > b->y);; } //无解,零点落在右边 __device__ bool case5(Coodinate* a,Coodinate* b,const int cmpType) { return ( cmpType==ConstantValue::GreatOrGreatEqual || cmpType==ConstantValue::Equal ) && (abs(a->y - b->y)>Limit::FloatMin) && (a->y <=0) && (b->y <=0) && (a->y < b->y); }
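// Illustrative aside: case0..case5 above classify the straight segment between two sampled
// points (x1,y1)-(x2,y2) for a ">"/">=" constraint according to where its zero crossing z
// falls. The host-side sketch below mirrors that classification; the zero-crossing formula
// and all names are assumptions for illustration, not code from the solver above.
#include <cmath>
#include <cstdio>

// Report which part of [x1,x2] satisfies y >= 0 for the linear interpolant through
// (x1,y1) and (x2,y2): the whole interval, a left/right sub-interval, or nothing.
void classify(double x1, double y1, double x2, double y2) {
  const double eps = 1e-12;                              // plays the role of Limit::FloatMin
  if (y1 >= 0 && y2 >= 0) { std::printf("whole interval [%g,%g]\n", x1, x2); return; }
  if (std::fabs(y1 - y2) <= eps) { std::printf("no solution (flat, below axis)\n"); return; }
  double z = x1 + (x2 - x1) * (0.0 - y1) / (y2 - y1);    // zero crossing of the segment
  if (y1 > 0 && y2 < 0)      std::printf("left part [%g,%g]\n", x1, z);    // like case2
  else if (y1 < 0 && y2 > 0) std::printf("right part [%g,%g]\n", z, x2);   // like case3
  else                       std::printf("no solution\n");                 // like case4/case5
}

int main() {
  classify(0.0, 2.0, 1.0, 3.0);    // whole interval
  classify(0.0, 2.0, 1.0, -2.0);   // left half feasible
  classify(0.0, -2.0, 1.0, 2.0);   // right half feasible
  classify(0.0, -1.0, 1.0, -2.0);  // nothing feasible
  return 0;
}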
e297723a185b8d583da18364dc9c9d4bb0d22d4a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #define STB_IMAGE_IMPLEMENTATION #include "../../stb/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "../../stb/stb_image_write.h" #include <omp.h> #include <sys/time.h> #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 #define THREAD_COUNT 16 #define WINDOW 6 #define NUM_ITER 64 #define PI 3.14159265358979323846 __global__ void conv2(float *A, float *B,uint8_t *C, int height, int width, int window){ int row = threadIdx.x + blockIdx.x*blockDim.x; int col = threadIdx.y + blockIdx.y*blockDim.y; int i = row + col*width; float sum = 0; if(i<height*width){ for(int p = -window; p <= window; ++p){ //loop over window for(int q = -window; q <= window; ++q){ int pixel = i + p*width + q; if((pixel < 0) | (pixel >= height*width)) //if outside of vertical bound, continue continue; int temp = i % width; if((temp + q < 0) | (temp + q >= width)) //if outside of horiz bound, continue continue; sum += A[pixel]*B[(p + window)*(2*window + 1)+(q + window)]; } } C[i] = (uint8_t)((int)sum); } } int main(int argc, char** argv) { //Initiialize variables for image int width, height, bpp; //initialize timing variables for openmp timing struct timeval start, end; //load in image uint8_t* img = stbi_load("../../images/cat.jpg", &width, &height, &bpp, 3); if(img == NULL) { printf("Error in loading the image\n"); exit(1); } //convert to grayscale and free image float* grayimg = new float[width*height]; for(int i = 0; i < width*height; ++i) grayimg[i] = (.3*(int)img[3*i] + .59*(int)img[3*i+1]+.11*(int)img[3*i+2]); stbi_image_free(img); //initialize device copy of grayscale image, allocate it, and copy to device float *d_gray; hipMalloc((void**)&d_gray,height*width*sizeof(float)); hipMemcpy(d_gray,grayimg,height*width*sizeof(float),hipMemcpyHostToDevice); //initialize image to store result, initialize variable for device, and allocate space in device uint8_t* finalimg = new uint8_t[width*height]; uint8_t* d_finalimg; hipMalloc((void**)&d_finalimg,width*height*sizeof(uint8_t)); //Initialize and define scale-space kernel int winsize = (2*WINDOW+1)*(2*WINDOW+1); float* scalekernel = new float[winsize]; float t; //initialize device copy of scale-space kernel, allocate space, and copy to device float *d_scalekernel; hipMalloc((void**)&d_scalekernel,winsize*sizeof(float)); //start timer call omp, start loop gettimeofday(&start,NULL); #pragma omp parallel for num_threads(THREAD_COUNT) for(int iter = 1; iter <= NUM_ITER; ++iter){ t = iter*1.0; for (int i = -WINDOW; i <= WINDOW; ++i){ for(int j = -WINDOW; j <= WINDOW; ++j){ scalekernel[j+WINDOW + (i+WINDOW)*(2*WINDOW+1)] = exp(-(i*i+j*j)/(2*t))/(2*PI*t); } } hipMemcpy(d_scalekernel,scalekernel,winsize*sizeof(float),hipMemcpyHostToDevice); //initialize blocksize and gridsize dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y); dim3 dimGrid(ceil(width/(float)dimBlock.x),ceil(height/(float)dimBlock.y)); // Loop over image pixels by calling kernel hipLaunchKernelGGL(( conv2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_gray, d_scalekernel, d_finalimg, height, width, WINDOW); //copy result to host hipMemcpy(finalimg,d_finalimg,height*width*sizeof(uint8_t),hipMemcpyDeviceToHost); //save image! 
char filename[64]; sprintf (filename, "../../images/Ompcudascalespace/ompcuda%i.jpg", iter); stbi_write_jpg(filename, width, height, 1, finalimg, 100); } //stop timer and print time taken gettimeofday(&end,NULL); std::cout << (double)((end.tv_sec-start.tv_sec)*1000000 + end.tv_usec-start.tv_usec)/1000000.0 << '\n'; //free device variables hipFree(d_gray); hipFree(d_scalekernel); hipFree(d_finalimg); //free host variables delete [] grayimg; delete [] scalekernel; delete [] finalimg; return 0; }
e297723a185b8d583da18364dc9c9d4bb0d22d4a.cu
#include <iostream> #define STB_IMAGE_IMPLEMENTATION #include "../../stb/stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "../../stb/stb_image_write.h" #include <omp.h> #include <sys/time.h> #define BLOCK_SIZE_X 16 #define BLOCK_SIZE_Y 16 #define THREAD_COUNT 16 #define WINDOW 6 #define NUM_ITER 64 #define PI 3.14159265358979323846 __global__ void conv2(float *A, float *B,uint8_t *C, int height, int width, int window){ int row = threadIdx.x + blockIdx.x*blockDim.x; int col = threadIdx.y + blockIdx.y*blockDim.y; int i = row + col*width; float sum = 0; if(i<height*width){ for(int p = -window; p <= window; ++p){ //loop over window for(int q = -window; q <= window; ++q){ int pixel = i + p*width + q; if((pixel < 0) | (pixel >= height*width)) //if outside of vertical bound, continue continue; int temp = i % width; if((temp + q < 0) | (temp + q >= width)) //if outside of horiz bound, continue continue; sum += A[pixel]*B[(p + window)*(2*window + 1)+(q + window)]; } } C[i] = (uint8_t)((int)sum); } } int main(int argc, char** argv) { //Initiialize variables for image int width, height, bpp; //initialize timing variables for openmp timing struct timeval start, end; //load in image uint8_t* img = stbi_load("../../images/cat.jpg", &width, &height, &bpp, 3); if(img == NULL) { printf("Error in loading the image\n"); exit(1); } //convert to grayscale and free image float* grayimg = new float[width*height]; for(int i = 0; i < width*height; ++i) grayimg[i] = (.3*(int)img[3*i] + .59*(int)img[3*i+1]+.11*(int)img[3*i+2]); stbi_image_free(img); //initialize device copy of grayscale image, allocate it, and copy to device float *d_gray; cudaMalloc((void**)&d_gray,height*width*sizeof(float)); cudaMemcpy(d_gray,grayimg,height*width*sizeof(float),cudaMemcpyHostToDevice); //initialize image to store result, initialize variable for device, and allocate space in device uint8_t* finalimg = new uint8_t[width*height]; uint8_t* d_finalimg; cudaMalloc((void**)&d_finalimg,width*height*sizeof(uint8_t)); //Initialize and define scale-space kernel int winsize = (2*WINDOW+1)*(2*WINDOW+1); float* scalekernel = new float[winsize]; float t; //initialize device copy of scale-space kernel, allocate space, and copy to device float *d_scalekernel; cudaMalloc((void**)&d_scalekernel,winsize*sizeof(float)); //start timer call omp, start loop gettimeofday(&start,NULL); #pragma omp parallel for num_threads(THREAD_COUNT) for(int iter = 1; iter <= NUM_ITER; ++iter){ t = iter*1.0; for (int i = -WINDOW; i <= WINDOW; ++i){ for(int j = -WINDOW; j <= WINDOW; ++j){ scalekernel[j+WINDOW + (i+WINDOW)*(2*WINDOW+1)] = exp(-(i*i+j*j)/(2*t))/(2*PI*t); } } cudaMemcpy(d_scalekernel,scalekernel,winsize*sizeof(float),cudaMemcpyHostToDevice); //initialize blocksize and gridsize dim3 dimBlock(BLOCK_SIZE_X,BLOCK_SIZE_Y); dim3 dimGrid(ceil(width/(float)dimBlock.x),ceil(height/(float)dimBlock.y)); // Loop over image pixels by calling kernel conv2<<<dimGrid,dimBlock>>>(d_gray, d_scalekernel, d_finalimg, height, width, WINDOW); //copy result to host cudaMemcpy(finalimg,d_finalimg,height*width*sizeof(uint8_t),cudaMemcpyDeviceToHost); //save image! 
char filename[64]; sprintf (filename, "../../images/Ompcudascalespace/ompcuda%i.jpg", iter); stbi_write_jpg(filename, width, height, 1, finalimg, 100); } //stop timer and print time taken gettimeofday(&end,NULL); std::cout << (double)((end.tv_sec-start.tv_sec)*1000000 + end.tv_usec-start.tv_usec)/1000000.0 << '\n'; //free device variables cudaFree(d_gray); cudaFree(d_scalekernel); cudaFree(d_finalimg); //free host variables delete [] grayimg; delete [] scalekernel; delete [] finalimg; return 0; }
a9f89a1f28479ff9b2483f7869b246541f6345e7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THH/generic/THHTensorMathMagma.hip" #else #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef USE_MAGMA static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k) { int64_t size[1] = { k }; int64_t stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); size_t len = k * sizeof(scalar_t); THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice)); } static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n) { int64_t size[2] = { m, n }; int64_t stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); size_t len = m * n * sizeof(scalar_t); THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice)); } static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self) { THAssert(self->dim() == 2); size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(hipMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, hipMemcpyDeviceToHost)); THCTensor_(free)(state, temp); THCTensor_(free)(state, selfc); } #endif // USE_MAGMA static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src) { THAssert(src->dim() == 2); if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0)) { THCTensor_(retain)(state, self); return self; } if (self == src) self = THCTensor_(new)(state); else THCTensor_(retain)(state, self); int64_t size[2] = { src->size(0), src->size(1) }; int64_t stride[2] = { 1, src->size(0) }; THCTensor_(resizeNd)(state, self, 2, size, stride); THCTensor_(copy)(state, self, src); return self; } void THCTensor_(trtrs)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_, const char *uplo, const char *trans, const char *diag) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square"); THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible"); magma_side_t sz = MagmaLeft; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; magma_trans_t ts = trans[0] == 'N' ? MagmaNoTrans : MagmaTrans; magma_diag_t dg = diag[0] == 'U' ? 
MagmaUnit : MagmaNonUnit; scalar_t alpha = 1; int64_t n = a_->size(0); int64_t nrhs = b_->size(1); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); scalar_t *a_data = THCTensor_(data)(state, a); scalar_t *b_data = THCTensor_(data)(state, b); #if defined(THC_REAL_IS_FLOAT) magma_strsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #else magma_dtrsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #endif THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(trtrs)); #endif } void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional"); AT_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size " "at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows"); THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have " "m >= n. The case for m < n is not implemented yet."); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); scalar_t *a_data = THCTensor_(data)(state, a); scalar_t *b_data = THCTensor_(data)(state, b); int64_t m = a->size(0); int64_t n = a->size(1); int64_t nrhs = b->size(1); scalar_t wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #endif magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gels)); #endif } void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int64_t n = THTensor_sizeLegacyNoScalars(a, 0); int64_t lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? 
MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); scalar_t *input_data = THCTensor_(data)(state, input); if (n > 0) { // eigen values and workspace scalar_t *w = th_magma_malloc_pinned<scalar_t>(n); scalar_t *wA = th_magma_malloc_pinned<scalar_t>(lda * n); // compute optimal size of work array int info; scalar_t lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif scalar_t *work = th_magma_malloc_pinned<scalar_t>((size_t)lwork); int *iwork = th_magma_malloc_pinned<int>(liwork); // compute eigenvalues and, optionally, eigenvectors #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #endif // copy eigen values from w to re_ if (info == 0) THCTensor_(copyArray1d)(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); } if (jobzs[0] == 'N') { // If eigenvector is not needed, fill the result with zeros. THCTensor_(zero)(state, rv_); THCTensor_(free)(state, input); } else { THCTensor_(freeCopyTo)(state, input, rv_); } #else THError(NoMagma(syev)); #endif } void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec; int64_t n = a_->size(0); scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); scalar_t *wr = th_magma_malloc_pinned<scalar_t>(n); scalar_t *wi = th_magma_malloc_pinned<scalar_t>(n); scalar_t *vr_data = NULL; int64_t ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_malloc_pinned<scalar_t>(n * n); ldvr = n; } scalar_t *work_data = nullptr; if (n > 0) { int info; scalar_t wkopt; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #endif int lwork = (int) wkopt; work_data = th_magma_malloc_pinned<scalar_t>(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA geev : Failed to converge. 
%d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); } { THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); if (n > 0) { THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(scalar_t), hipMemcpyHostToDevice)); THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(scalar_t), hipMemcpyHostToDevice)); } THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCTensor_(copyArray2d)(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } void THCTensor_(gesdd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *some, const char* compute_uv) { #ifdef USE_MAGMA THCTensor *ra_ = THCTensor_(new)(state); THCTensor_(gesdd2)(state, ru_, rs_, rv_, ra_, a, some, compute_uv); THCTensor_(free)(state, ra_); #else THError(NoMagma(gesdd)); #endif } void THCTensor_(gesdd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *some, const char* compute_uv) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); char jobus = compute_uv[0] == 'N' ? 'N' : some[0]; magma_vec_t jobz = jobus == 'A' ? MagmaAllVec : jobus == 'S' ? MagmaSomeVec : jobus == 'O' ? MagmaOverwriteVec : MagmaNoVec; int iunused[1]; int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = m < n ? m : n; int64_t j = (jobz == MagmaAllVec) ? m : k; int64_t jv = (jobz == MagmaAllVec) ? 
n : k; scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(m * n); THCTensor_(copyTensor2d)(state, a_data, a); scalar_t *rs_data = th_magma_malloc_pinned<scalar_t>(k); scalar_t *ru_data = NULL; scalar_t *rv_data = NULL; if (jobz != MagmaNoVec) { ru_data = th_magma_malloc_pinned<scalar_t>(m * j); rv_data = th_magma_malloc_pinned<scalar_t>(n * n); } scalar_t wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #endif int lwork = (int) wkopt; scalar_t *work_data = th_magma_malloc_pinned<scalar_t>(lwork); int *iwork = th_magma_malloc_pinned<int>(8 * k); #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #endif if (info > 0) THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info); else if (info < 0) THError("MAGMA gesdd : Argument %d : illegal value", -info); THCTensor_(copyArray1d)(state, rs_, rs_data, k); THCTensor_(copyArray2d)(state, ra_, a_data, m, n); if (jobz != MagmaNoVec) { THCTensor_(copyArray2d)(state, rv_, rv_data, n, n); THCTensor_(transpose)(state, rv_, NULL, 0, 1); if (jobz != MagmaAllVec) THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv); THCTensor_(copyArray2d)(state, ru_, ru_data, m, j); magma_free_pinned(rv_data); magma_free_pinned(ru_data); } else { THCTensor_(resize2d)(state, rv_, n, n); THCTensor_(zero)(state, rv_); THCTensor_(resize2d)(state, ru_, m, m); THCTensor_(zero)(state, ru_); } magma_free_pinned(work_data); magma_free_pinned(iwork); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesdd2)); #endif } void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) { THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); #ifdef USE_MAGMA int info; int64_t n = a->size(0); int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); scalar_t *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned<int>(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); scalar_t *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); #else magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info); #endif if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #else magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCTensor_(free)(state, work); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, input, ra_); #else int64_t n = a->size(0); // input THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, THTensor_getSizePtr(input), THTensor_getStridePtr(input)); scalar_t *matrices1[1] = { THCTensor_(data)(state, input) }; scalar_t *matrices2[1] = { THCTensor_(data)(state, 
ra_) }; // Copy pointers to device. auto d_matrices1 = static_cast<scalar_t**>(THCudaMalloc(state, sizeof(scalar_t*))); auto d_matrices2 = static_cast<scalar_t**>(THCudaMalloc(state, sizeof(scalar_t*))); THCudaCheck(hipMemcpyAsync(d_matrices1, matrices1, sizeof(scalar_t*), hipMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(hipMemcpyAsync(d_matrices2, matrices2, sizeof(scalar_t*), hipMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; auto info_gpu = static_cast<int*>(THCudaMalloc(state, sizeof(int))); auto ipiv_gpu = static_cast<int*>(THCudaMalloc(state, n * sizeof(int))); // Run LU #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #else THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #endif THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetri(state, n, (const scalar_t**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else THCudaBlas_Dgetri(state, n, (const scalar_t**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(hipMemcpy(&info, info_gpu, sizeof(int), hipMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaFree(state, ipiv_gpu); THCudaFree(state, info_gpu); THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCTensor_(free)(state, input); #endif } __global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } __global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r < c) { input[idx] = input[r*n + c]; } } } void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); magma_uplo_t ul = uplo[0] == 'U' ? 
MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); scalar_t *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotri_gpu(ul, n, input_data, n, &info); #else magma_dpotri_gpu(ul, n, input_data, n, &info); #endif if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); hipStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(::min(DIVUP(len, 128), 65535)); dim3 threads(128); if (uplo[0] == 'U') { hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len); } else { hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len); } THCTensor_(freeCopyTo)(state, input, ra_); #else THError(NoMagma(potri)); #endif } void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k); scalar_t *a_data = THCTensor_(data)(state, a); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(copyArray1d)(state, rtau_, rtau_data, k); magma_free_pinned(rtau_data); #else THError(NoMagma(geqrf)); #endif } void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif scalar_t *a_data = THCTensor_(data)(state, a); scalar_t *tau_data = th_magma_malloc_pinned<scalar_t>(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); scalar_t *work_data = THCTensor_(data)(state, work); int info; // We need to call two different versions of ?geqrf: // ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give // R properly. Note that the MAGMA documentation for this method is wrong. 
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 // ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(narrow)(state, a, a, 0, 0, k); THCTensor_(triu)(state, rr_, a, 0); THCTensor_(free)(state, a); a = THCTensor_(newColumnMajor)(state, rq_, a_); a_data = THCTensor_(data)(state, a); #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #else magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #endif if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); scalar_t *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #else magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #endif if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCTensor_(free)(state, a); THCTensor_(free)(state, work); magma_free_pinned(tau_data); THCTensor_(narrow)(state, q, q, 1, 0, k); THCTensor_(freeCopyTo)(state, q, rq_); #else THError(NoMagma(qr)); #endif } #endif #endif
a9f89a1f28479ff9b2483f7869b246541f6345e7.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "THC/generic/THCTensorMathMagma.cu" #else #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) #ifdef USE_MAGMA static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k) { int64_t size[1] = { k }; int64_t stride[1] = { 1 }; THCTensor_(resizeNd)(state, self, 1, size, stride); size_t len = k * sizeof(scalar_t); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n) { int64_t size[2] = { m, n }; int64_t stride[2] = { 1, m }; THCTensor_(resizeNd)(state, self, 2, size, stride); size_t len = m * n * sizeof(scalar_t); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice)); } static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self) { THAssert(self->dim() == 2); size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t); THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1); THCTensor *selfc = THCTensor_(newContiguous)(state, temp); THCudaCheck(cudaMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, cudaMemcpyDeviceToHost)); THCTensor_(free)(state, temp); THCTensor_(free)(state, selfc); } #endif // USE_MAGMA static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src) { THAssert(src->dim() == 2); if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0)) { THCTensor_(retain)(state, self); return self; } if (self == src) self = THCTensor_(new)(state); else THCTensor_(retain)(state, self); int64_t size[2] = { src->size(0), src->size(1) }; int64_t stride[2] = { 1, src->size(0) }; THCTensor_(resizeNd)(state, self, 2, size, stride); THCTensor_(copy)(state, self, src); return self; } void THCTensor_(trtrs)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_, const char *uplo, const char *trans, const char *diag) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 2, "b should be (non-empty) 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 1, "A should be square"); THArgCheck(b_->size(0) == a_->size(0), 2, "A,b size incompatible"); magma_side_t sz = MagmaLeft; magma_uplo_t ul = uplo[0] == 'U' ? MagmaUpper : MagmaLower; magma_trans_t ts = trans[0] == 'N' ? MagmaNoTrans : MagmaTrans; magma_diag_t dg = diag[0] == 'U' ? 
MagmaUnit : MagmaNonUnit; scalar_t alpha = 1; int64_t n = a_->size(0); int64_t nrhs = b_->size(1); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); scalar_t *a_data = THCTensor_(data)(state, a); scalar_t *b_data = THCTensor_(data)(state, b); #if defined(THC_REAL_IS_FLOAT) magma_strsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #else magma_dtrsm(sz, ul, ts, dg, n, nrhs, alpha, a_data, n, b_data, n); #endif THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(trtrs)); #endif } void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional"); THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional"); AT_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size " "at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows"); THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have " "m >= n. The case for m < n is not implemented yet."); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_); scalar_t *a_data = THCTensor_(data)(state, a); scalar_t *b_data = THCTensor_(data)(state, b); int64_t m = a->size(0); int64_t n = a->size(1); int64_t nrhs = b->size(1); scalar_t wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info); #endif scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt); #if defined(THC_REAL_IS_FLOAT) magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #else magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info); #endif magma_free_pinned(hwork); if (info != 0) THError("MAGMA gels : Argument %d : illegal value", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(freeCopyTo)(state, b, rb_); #else THError(NoMagma(gels)); #endif } void THCTensor_(syev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a, const char *jobzs, const char *uplos) { #ifdef USE_MAGMA int64_t n = THTensor_sizeLegacyNoScalars(a, 0); int64_t lda = n; magma_uplo_t uplo = uplos[0] == 'U' ? MagmaUpper : MagmaLower; magma_vec_t jobz = jobzs[0] == 'N' ? 
MagmaNoVec : MagmaVec; THCTensor *input = THCTensor_(newColumnMajor)(state, rv_, a); scalar_t *input_data = THCTensor_(data)(state, input); if (n > 0) { // eigen values and workspace scalar_t *w = th_magma_malloc_pinned<scalar_t>(n); scalar_t *wA = th_magma_malloc_pinned<scalar_t>(lda * n); // compute optimal size of work array int info; scalar_t lwork; int liwork; #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, &lwork, -1, &liwork, -1, &info); #endif scalar_t *work = th_magma_malloc_pinned<scalar_t>((size_t)lwork); int *iwork = th_magma_malloc_pinned<int>(liwork); // compute eigenvalues and, optionally, eigenvectors #if defined(THC_REAL_IS_FLOAT) magma_ssyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #else magma_dsyevd_gpu(jobz, uplo, n, input_data, lda, w, wA, n, work, (int) lwork, iwork, liwork, &info); #endif // copy eigen values from w to re_ if (info == 0) THCTensor_(copyArray1d)(state, re_, w, n); magma_free_pinned(iwork); magma_free_pinned(work); magma_free_pinned(wA); magma_free_pinned(w); // check error value if (info > 0) THError("MAGMA syev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA syev : Argument %d : illegal value", -info); } if (jobzs[0] == 'N') { // If eigenvector is not needed, fill the result with zeros. THCTensor_(zero)(state, rv_); THCTensor_(free)(state, input); } else { THCTensor_(freeCopyTo)(state, input, rv_); } #else THError(NoMagma(syev)); #endif } void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, const char *jobvrs) { #ifdef USE_MAGMA THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional"); THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square"); magma_vec_t jobvr = jobvrs[0] == 'N' ? MagmaNoVec : MagmaVec; int64_t n = a_->size(0); scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(n * n); THCTensor_(copyTensor2d)(state, a_data, a_); scalar_t *wr = th_magma_malloc_pinned<scalar_t>(n); scalar_t *wi = th_magma_malloc_pinned<scalar_t>(n); scalar_t *vr_data = NULL; int64_t ldvr = 1; if (jobvr == MagmaVec) { vr_data = th_magma_malloc_pinned<scalar_t>(n * n); ldvr = n; } scalar_t *work_data = nullptr; if (n > 0) { int info; scalar_t wkopt; #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info); #endif int lwork = (int) wkopt; work_data = th_magma_malloc_pinned<scalar_t>(lwork); #if defined(THC_REAL_IS_FLOAT) magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #else magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA geev : Failed to converge. 
%d off-diagonal elements of an didn't converge to zero", info); else if (info < 0) THError("MAGMA geev : Argument %d : illegal value", -info); } { THCTensor_(resize2d)(state, re_, 2, n); THCTensor *re = THCTensor_(newContiguous)(state, re_); if (n > 0) { THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(scalar_t), cudaMemcpyHostToDevice)); THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(scalar_t), cudaMemcpyHostToDevice)); } THCTensor_(freeCopyTo)(state, re, re_); THCTensor_(transpose)(state, re_, NULL, 0, 1); } if (jobvr == MagmaVec) THCTensor_(copyArray2d)(state, rv_, vr_data, n, n); magma_free_pinned(work_data); magma_free_pinned(vr_data); magma_free_pinned(wi); magma_free_pinned(wr); magma_free_pinned(a_data); #else THError(NoMagma(geev)); #endif } void THCTensor_(gesdd)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *a, const char *some, const char* compute_uv) { #ifdef USE_MAGMA THCTensor *ra_ = THCTensor_(new)(state); THCTensor_(gesdd2)(state, ru_, rs_, rv_, ra_, a, some, compute_uv); THCTensor_(free)(state, ra_); #else THError(NoMagma(gesdd)); #endif } void THCTensor_(gesdd2)(THCState *state, THCTensor *ru_, THCTensor *rs_, THCTensor *rv_, THCTensor *ra_, THCTensor *a, const char *some, const char* compute_uv) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); char jobus = compute_uv[0] == 'N' ? 'N' : some[0]; magma_vec_t jobz = jobus == 'A' ? MagmaAllVec : jobus == 'S' ? MagmaSomeVec : jobus == 'O' ? MagmaOverwriteVec : MagmaNoVec; int iunused[1]; int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = m < n ? m : n; int64_t j = (jobz == MagmaAllVec) ? m : k; int64_t jv = (jobz == MagmaAllVec) ? 
n : k; scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(m * n); THCTensor_(copyTensor2d)(state, a_data, a); scalar_t *rs_data = th_magma_malloc_pinned<scalar_t>(k); scalar_t *ru_data = NULL; scalar_t *rv_data = NULL; if (jobz != MagmaNoVec) { ru_data = th_magma_malloc_pinned<scalar_t>(m * j); rv_data = th_magma_malloc_pinned<scalar_t>(n * n); } scalar_t wkopt; int info; #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, &wkopt, -1, iunused, &info); #endif int lwork = (int) wkopt; scalar_t *work_data = th_magma_malloc_pinned<scalar_t>(lwork); int *iwork = th_magma_malloc_pinned<int>(8 * k); #if defined(THC_REAL_IS_FLOAT) magma_sgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #else magma_dgesdd(jobz, m, n, a_data, m, rs_data, ru_data, m, rv_data, n, work_data, lwork, iwork, &info); #endif if (info > 0) THError("MAGMA gesdd : the updating process of SBDSDC did not converge (error: %d)", info); else if (info < 0) THError("MAGMA gesdd : Argument %d : illegal value", -info); THCTensor_(copyArray1d)(state, rs_, rs_data, k); THCTensor_(copyArray2d)(state, ra_, a_data, m, n); if (jobz != MagmaNoVec) { THCTensor_(copyArray2d)(state, rv_, rv_data, n, n); THCTensor_(transpose)(state, rv_, NULL, 0, 1); if (jobz != MagmaAllVec) THCTensor_(narrow)(state, rv_, rv_, 1, 0, jv); THCTensor_(copyArray2d)(state, ru_, ru_data, m, j); magma_free_pinned(rv_data); magma_free_pinned(ru_data); } else { THCTensor_(resize2d)(state, rv_, n, n); THCTensor_(zero)(state, rv_); THCTensor_(resize2d)(state, ru_, m, m); THCTensor_(zero)(state, ru_); } magma_free_pinned(work_data); magma_free_pinned(iwork); magma_free_pinned(rs_data); magma_free_pinned(a_data); #else THError(NoMagma(gesdd2)); #endif } void THCTensor_(getri)(THCState *state, THCTensor *ra_, THCTensor *a) { THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); #ifdef USE_MAGMA int info; int64_t n = a->size(0); int lwork = n * magma_get_sgetri_nb(n); THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); scalar_t *input_data = THCTensor_(data)(state, input); int *ipiv = th_magma_malloc_pinned<int>(n); THCTensor *work = THCTensor_(newWithSize1d)(state, lwork); scalar_t *work_data = THCTensor_(data)(state, work); // Run LU #if defined(THC_REAL_IS_FLOAT) magma_sgetrf_gpu(n, n, input_data, n, ipiv, &info); #else magma_dgetrf_gpu(n, n, input_data, n, ipiv, &info); #endif if (info > 0) THError("MAGMA getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) magma_sgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #else magma_dgetri_gpu(n, input_data, n, ipiv, work_data, lwork, &info); #endif if (info > 0) THError("MAGMA getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("MAGMA getri : Argument %d : illegal value", -info); THCTensor_(free)(state, work); magma_free_pinned(ipiv); THCTensor_(freeCopyTo)(state, input, ra_); #else int64_t n = a->size(0); // input THCTensor *input = THCTensor_(newColumnMajor)(state, a, a); THCTensor_(resizeNd)(state, ra_, 2, THTensor_getSizePtr(input), THTensor_getStridePtr(input)); scalar_t *matrices1[1] = { THCTensor_(data)(state, input) }; scalar_t *matrices2[1] = { THCTensor_(data)(state, 
ra_) }; // Copy pointers to device. auto d_matrices1 = static_cast<scalar_t**>(THCudaMalloc(state, sizeof(scalar_t*))); auto d_matrices2 = static_cast<scalar_t**>(THCudaMalloc(state, sizeof(scalar_t*))); THCudaCheck(cudaMemcpyAsync(d_matrices1, matrices1, sizeof(scalar_t*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); THCudaCheck(cudaMemcpyAsync(d_matrices2, matrices2, sizeof(scalar_t*), cudaMemcpyHostToDevice, THCState_getCurrentStream(state))); int info; auto info_gpu = static_cast<int*>(THCudaMalloc(state, sizeof(int))); auto ipiv_gpu = static_cast<int*>(THCudaMalloc(state, n * sizeof(int))); // Run LU #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #else THCudaBlas_Dgetrf(state, n, d_matrices1, n, ipiv_gpu, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getrf : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getrf : Argument %d : illegal value", -info); // Inverse #if defined(THC_REAL_IS_FLOAT) THCudaBlas_Sgetri(state, n, (const scalar_t**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #else THCudaBlas_Dgetri(state, n, (const scalar_t**)d_matrices1, n, ipiv_gpu, d_matrices2, n, info_gpu, 1); #endif THCudaCheck(cudaMemcpy(&info, info_gpu, sizeof(int), cudaMemcpyDeviceToHost)); if (info > 0) THError("CUBLAS getri : U(%d,%d) is 0, U is singular", info, info); else if (info < 0) THError("CUBLAS getri : Argument %d : illegal value", -info); THCudaFree(state, ipiv_gpu); THCudaFree(state, info_gpu); THCudaFree(state, d_matrices1); THCudaFree(state, d_matrices2); THCTensor_(free)(state, input); #endif } __global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r > c) { input[idx] = input[r*n + c]; } } } __global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len) { for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) { const int r = idx % n; const int c = idx / n; if (r < c) { input[idx] = input[r*n + c]; } } } void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, const char *uplo) { #ifdef USE_MAGMA THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional"); THArgCheck(a->size(0) == a->size(1), 2, "A should be square"); int64_t n = a->size(0); magma_uplo_t ul = uplo[0] == 'U' ? 
MagmaUpper : MagmaLower; THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a); scalar_t *input_data = THCTensor_(data)(state, input); int info; #if defined(THC_REAL_IS_FLOAT) magma_spotri_gpu(ul, n, input_data, n, &info); #else magma_dpotri_gpu(ul, n, input_data, n, &info); #endif if (info > 0) THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info); else if (info < 0) THError("MAGMA potri : Argument %d : illegal value", -info); cudaStream_t stream = THCState_getCurrentStream(state); const int len = n*n; dim3 blocks(std::min(DIVUP(len, 128), 65535)); dim3 threads(128); if (uplo[0] == 'U') { THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } else { THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len); } THCTensor_(freeCopyTo)(state, input, ra_); #else THError(NoMagma(potri)); #endif } void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k); scalar_t *a_data = THCTensor_(data)(state, a); int info; #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(freeCopyTo)(state, a, ra_); THCTensor_(copyArray1d)(state, rtau_, rtau_data, k); magma_free_pinned(rtau_data); #else THError(NoMagma(geqrf)); #endif } void THCTensor_(qr)(THCState *state, THCTensor *rq_, THCTensor *rr_, THCTensor *a_) { #ifdef USE_MAGMA THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional"); THCTensor *a = THCTensor_(newColumnMajor)(state, rr_, a_); int64_t m = a->size(0); int64_t n = a->size(1); int64_t k = (m < n ? m : n); #if defined(THC_REAL_IS_FLOAT) int64_t nb = magma_get_sgeqrf_nb(m, n); #else int64_t nb = magma_get_dgeqrf_nb(m, n); #endif scalar_t *a_data = THCTensor_(data)(state, a); scalar_t *tau_data = th_magma_malloc_pinned<scalar_t>(k); THCTensor *work = THCTensor_(newWithSize1d)(state, (2*k + magma_roundup(n, 32))*nb); scalar_t *work_data = THCTensor_(data)(state, work); int info; // We need to call two different versions of ?geqrf: // ?geqrf_gpu allows fast computation of Q via ?orqrf_gpu, but doesn't give // R properly. Note that the MAGMA documentation for this method is wrong. 
// http://icl.cs.utk.edu/magma/forum/viewtopic.php?f=2&t=1015&p=2800&hilit=geqrf_gpu#p2800 // ?geqrf2_gpu gives correct R, but doesn't allow computation of Q via ?orqrf_gpu #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #else magma_dgeqrf2_gpu(m, n, a_data, m, tau_data, &info); #endif if (info != 0) THError("MAGMA geqrf2 : Argument %d : illegal value.", -info); THCTensor_(narrow)(state, a, a, 0, 0, k); THCTensor_(triu)(state, rr_, a, 0); THCTensor_(free)(state, a); a = THCTensor_(newColumnMajor)(state, rq_, a_); a_data = THCTensor_(data)(state, a); #if defined(THC_REAL_IS_FLOAT) magma_sgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #else magma_dgeqrf_gpu(m, n, a_data, m, tau_data, work_data, &info); #endif if (info != 0) THError("MAGMA geqrf : Argument %d : illegal value.", -info); THCTensor *q = THCTensor_(newColumnMajor)(state, rq_, a); scalar_t *q_data = THCTensor_(data)(state, q); #if defined(THC_REAL_IS_FLOAT) magma_sorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #else magma_dorgqr_gpu(m, k, k, q_data, m, tau_data, work_data, nb, &info); #endif if (info != 0) THError("MAGMA orgqr : Argument %d : illegal value.", -info); THCTensor_(free)(state, a); THCTensor_(free)(state, work); magma_free_pinned(tau_data); THCTensor_(narrow)(state, q, q, 1, 0, k); THCTensor_(freeCopyTo)(state, q, rq_); #else THError(NoMagma(qr)); #endif } #endif #endif
5bace05262b69da7fdc32090c19b8a1b37c0c5a7.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> const int TILE_WIDTH = 4; __global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P, int Width) { __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < Width / TILE_WIDTH; ++m) { Mds[ty][tx] = d_M[Row * Width + m * TILE_WIDTH + tx]; Nds[ty][tx] = d_N[(m * TILE_WIDTH + ty) * Width + Col]; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { Pvalue += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } d_P[Row * Width + Col] = Pvalue; } int main() { int Width = 12; float *h_M, *h_N, *h_P; int size = sizeof(float) * Width * Width; h_M = (float *) malloc(size); h_N = (float *) malloc(size); h_P = (float *) malloc(size); int i, j; srand(time(0)); for (i = 0; i < Width; ++i) { for (j = 0; j < Width; ++j) { h_M[i * Width + j] = rand() % 1000; h_N[i * Width + j] = rand() % 1000; } } hipError_t err = hipSuccess; float *d_M, *d_N, *d_P; err = hipMalloc((void **) &d_M, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector M (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void **) &d_N, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector N (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMalloc((void **) &d_P, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector V (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_M, h_M, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector M from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_N, h_N, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector N from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } dim3 grid(Width / TILE_WIDTH, Width / TILE_WIDTH, 1); dim3 block(TILE_WIDTH, TILE_WIDTH, 1); printf("Launching MatrixMulKernel kernel with grid dimensions: (%d, %d, %d) and block dimensions: (%d, %d, %d).\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); hipLaunchKernelGGL(( MatrixMulKernel), dim3(grid), dim3(block), 0, 0, d_M, d_N, d_P, Width); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch kernel MatrixMulKernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(h_P, d_P, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector P from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float sum = 0; int k; for (i = 0; i < Width; ++i) { for (j = 0; j < Width; ++j) { sum = 0; for (k = 0; k < Width; ++k) { sum += h_M[i * Width + k] * h_N[k * Width + j]; } if (fabs(sum - h_P[i * Width + j]) > 1e-5) { fprintf(stderr, "%f %f\n", sum, h_P[i * Width + j]); fprintf(stderr, "Kernel MatMulKernel does not multiply the matrices properly.\n"); exit(EXIT_FAILURE); } } } printf("TEST PASSED.\n"); }
5bace05262b69da7fdc32090c19b8a1b37c0c5a7.cu
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> const int TILE_WIDTH = 4; __global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P, int Width) { __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for (int m = 0; m < Width / TILE_WIDTH; ++m) { Mds[ty][tx] = d_M[Row * Width + m * TILE_WIDTH + tx]; Nds[ty][tx] = d_N[(m * TILE_WIDTH + ty) * Width + Col]; __syncthreads(); for (int k = 0; k < TILE_WIDTH; ++k) { Pvalue += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } d_P[Row * Width + Col] = Pvalue; } int main() { int Width = 12; float *h_M, *h_N, *h_P; int size = sizeof(float) * Width * Width; h_M = (float *) malloc(size); h_N = (float *) malloc(size); h_P = (float *) malloc(size); int i, j; srand(time(0)); for (i = 0; i < Width; ++i) { for (j = 0; j < Width; ++j) { h_M[i * Width + j] = rand() % 1000; h_N[i * Width + j] = rand() % 1000; } } cudaError_t err = cudaSuccess; float *d_M, *d_N, *d_P; err = cudaMalloc((void **) &d_M, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector M (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMalloc((void **) &d_N, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector N (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMalloc((void **) &d_P, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector V (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector M from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector N from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } dim3 grid(Width / TILE_WIDTH, Width / TILE_WIDTH, 1); dim3 block(TILE_WIDTH, TILE_WIDTH, 1); printf("Launching MatrixMulKernel kernel with grid dimensions: (%d, %d, %d) and block dimensions: (%d, %d, %d).\n", grid.x, grid.y, grid.z, block.x, block.y, block.z); MatrixMulKernel<<<grid, block>>>(d_M, d_N, d_P, Width); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Failed to launch kernel MatrixMulKernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector P from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float sum = 0; int k; for (i = 0; i < Width; ++i) { for (j = 0; j < Width; ++j) { sum = 0; for (k = 0; k < Width; ++k) { sum += h_M[i * Width + k] * h_N[k * Width + j]; } if (fabs(sum - h_P[i * Width + j]) > 1e-5) { fprintf(stderr, "%f %f\n", sum, h_P[i * Width + j]); fprintf(stderr, "Kernel MatMulKernel does not multiply the matrices properly.\n"); exit(EXIT_FAILURE); } } } printf("TEST PASSED.\n"); }
5cc9a3eaa17c101ace44784a99e60a6224dfa151.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 Zheyong Fan, Ville Vierimaa, and Ari Harju This file is part of GPUQT. GPUQT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GPUQT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GPUQT. If not, see <http://www.gnu.org/licenses/>. */ #include "vector.h" #include <string.h> // memcpy #define BLOCK_SIZE 256 #ifndef CPU_ONLY __global__ void gpu_set_zero(int number_of_elements, real* __restrict__ g_state_real, real* __restrict__ g_state_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_elements) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #else void cpu_set_zero(int number_of_elements, real* g_state_real, real* g_state_imag) { for (int n = 0; n < number_of_elements; ++n) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #endif #ifndef CPU_ONLY void Vector::initialize_gpu(int n) { this->n = n; array_size = n * sizeof(real); CHECK(hipMalloc((void**)&real_part, array_size)); CHECK(hipMalloc((void**)&imag_part, array_size)); } #else void Vector::initialize_cpu(int n) { this->n = n; array_size = n * sizeof(real); real_part = new real[n]; imag_part = new real[n]; } #endif Vector::Vector(int n) { #ifndef CPU_ONLY initialize_gpu(n); hipLaunchKernelGGL(gpu_set_zero, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, real_part, imag_part); CHECK(hipGetLastError()); #else initialize_cpu(n); cpu_set_zero(n, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_copy_state( const int N, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #else void cpu_copy_state(int N, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int n = 0; n < N; ++n) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #endif Vector::Vector(Vector& original) { // Just teach myself: one can access private members of another instance // of the class from within the class #ifndef CPU_ONLY initialize_gpu(original.n); hipLaunchKernelGGL(gpu_copy_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, original.real_part, original.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else initialize_cpu(original.n); cpu_copy_state(n, original.real_part, original.imag_part, real_part, imag_part); #endif } Vector::~Vector() { #ifndef CPU_ONLY CHECK(hipFree(real_part)); CHECK(hipFree(imag_part)); #else delete[] real_part; delete[] imag_part; #endif } #ifndef CPU_ONLY __global__ void gpu_add_state( const int n, const real*__restrict__ in_real, const real*__restrict__ in_imag, real*__restrict__ out_real, real*__restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #else void cpu_add_state(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #endif void 
Vector::add(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_add_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_add_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_copy_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_copy_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_sz( const int n, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #else void cpu_apply_sz(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #endif void Vector::apply_sz(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_sz, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_apply_sz(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy_from_host(real* other_real, real* other_imag) { #ifndef CPU_ONLY CHECK(hipMemcpy(real_part, other_real, array_size, hipMemcpyHostToDevice)); CHECK(hipMemcpy(imag_part, other_imag, array_size, hipMemcpyHostToDevice)); #else memcpy(real_part, other_real, array_size); memcpy(imag_part, other_imag, array_size); #endif } void Vector::copy_to_host(real* target_real, real* target_imag) { #ifndef CPU_ONLY CHECK(hipMemcpy(target_real, real_part, array_size, hipMemcpyDeviceToHost)); CHECK(hipMemcpy(target_imag, imag_part, array_size, hipMemcpyDeviceToHost)); #else memcpy(target_real, real_part, array_size); memcpy(target_imag, imag_part, array_size); #endif } void Vector::swap(Vector& other) { real* tmp_real = real_part; real* tmp_imag = imag_part; real_part = other.real_part, imag_part = other.imag_part; other.real_part = tmp_real; other.imag_part = tmp_imag; } #ifndef CPU_ONLY __device__ void warp_reduce(volatile real* s, int t) { s[t] += s[t + 32]; s[t] += s[t + 16]; s[t] += s[t + 8]; s[t] += s[t + 4]; s[t] += s[t + 2]; s[t] += s[t + 1]; } #endif #ifndef CPU_ONLY __global__ void gpu_find_inner_product_1( const int number_of_atoms, const real* __restrict__ g_final_state_real, const real* __restrict__ g_final_state_imag, const real* __restrict__ g_random_state_real, const real* __restrict__ g_random_state_imag, real* __restrict__ g_inner_product_real, real* __restrict__ g_inner_product_imag, const int g_offset) { int tid = threadIdx.x; int n = blockIdx.x * blockDim.x + tid; int m; real a, b, c, d; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; if (n < number_of_atoms) { a = g_final_state_real[n]; b = g_final_state_imag[n]; c = g_random_state_real[n]; d = g_random_state_imag[n]; s_data_real[tid] = (a * c + b * d); s_data_imag[tid] = (b * c - a * d); } __syncthreads(); /* if (tid < 256) { m = tid + 256; s_data_real[tid] += 
s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_real[blockIdx.x + g_offset] = s_data_real[0]; g_inner_product_imag[blockIdx.x + g_offset] = s_data_imag[0]; } } #else void cpu_find_inner_product_1( int grid_size, int number_of_atoms, real* g_final_state_real, real* g_final_state_imag, real* g_random_state_real, real* g_random_state_imag, real* g_inner_product_real, real* g_inner_product_imag, int g_offset) { for (int m = 0; m < grid_size; ++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < BLOCK_SIZE; ++k) { int n = m * BLOCK_SIZE + k; if (n < number_of_atoms) { real a = g_final_state_real[n]; real b = g_final_state_imag[n]; real c = g_random_state_real[n]; real d = g_random_state_imag[n]; s_data_real += (a * c + b * d); s_data_imag += (b * c - a * d); } } g_inner_product_real[m + g_offset] = s_data_real; g_inner_product_imag[m + g_offset] = s_data_imag; } } #endif void Vector::inner_product_1(int number_of_atoms, Vector& other, Vector& target, int offset) { int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_find_inner_product_1, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); CHECK(hipGetLastError()); #else cpu_find_inner_product_1( grid_size, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); #endif } #ifndef CPU_ONLY __global__ void gpu_find_inner_product_2( const int number_of_atoms, const real* __restrict__ g_inner_product_1_real, const real* __restrict__ g_inner_product_1_imag, real* __restrict__ g_inner_product_2_real, real* __restrict__ g_inner_product_2_imag) { //<<<para.number_of_energy_points, BLOCK_SIZE)>>> int tid = threadIdx.x; int patch, n, m; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; int number_of_blocks = (number_of_atoms - 1) / BLOCK_SIZE + 1; int number_of_patches = (number_of_blocks - 1) / BLOCK_SIZE + 1; for (patch = 0; patch < number_of_patches; ++patch) { n = tid + patch * BLOCK_SIZE; if (n < number_of_blocks) { m = blockIdx.x * number_of_blocks + n; s_data_real[tid] += g_inner_product_1_real[m]; s_data_imag[tid] += g_inner_product_1_imag[m]; } } __syncthreads(); /* if (tid < 256) { m = tid + 256; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_2_real[blockIdx.x] = s_data_real[0]; g_inner_product_2_imag[blockIdx.x] = s_data_imag[0]; } } #else void cpu_find_inner_product_2( int number_of_moments, int grid_size, real* g_inner_product_1_real, real* g_inner_product_1_imag, real* g_inner_product_2_real, real* g_inner_product_2_imag) { for (int m = 0; m < number_of_moments; 
++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < grid_size; ++k) { int n = m * grid_size + k; s_data_real += g_inner_product_1_real[n]; s_data_imag += g_inner_product_1_imag[n]; } g_inner_product_2_real[m] = s_data_real; g_inner_product_2_imag[m] = s_data_imag; } } #endif void Vector::inner_product_2(int number_of_atoms, int number_of_moments, Vector& target) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_find_inner_product_2, dim3(number_of_moments), dim3(BLOCK_SIZE), 0, 0, number_of_atoms, real_part, imag_part, target.real_part, target.imag_part); CHECK(hipGetLastError()); #else int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; cpu_find_inner_product_2( number_of_moments, grid_size, real_part, imag_part, target.real_part, target.imag_part); #endif }
5cc9a3eaa17c101ace44784a99e60a6224dfa151.cu
/* Copyright 2017 Zheyong Fan, Ville Vierimaa, and Ari Harju This file is part of GPUQT. GPUQT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GPUQT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GPUQT. If not, see <http://www.gnu.org/licenses/>. */ #include "vector.h" #include <string.h> // memcpy #define BLOCK_SIZE 256 #ifndef CPU_ONLY __global__ void gpu_set_zero(int number_of_elements, real* __restrict__ g_state_real, real* __restrict__ g_state_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_elements) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #else void cpu_set_zero(int number_of_elements, real* g_state_real, real* g_state_imag) { for (int n = 0; n < number_of_elements; ++n) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #endif #ifndef CPU_ONLY void Vector::initialize_gpu(int n) { this->n = n; array_size = n * sizeof(real); CHECK(hipMalloc((void**)&real_part, array_size)); CHECK(hipMalloc((void**)&imag_part, array_size)); } #else void Vector::initialize_cpu(int n) { this->n = n; array_size = n * sizeof(real); real_part = new real[n]; imag_part = new real[n]; } #endif Vector::Vector(int n) { #ifndef CPU_ONLY initialize_gpu(n); hipLaunchKernelGGL(gpu_set_zero, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, real_part, imag_part); CHECK(hipGetLastError()); #else initialize_cpu(n); cpu_set_zero(n, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_copy_state( const int N, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #else void cpu_copy_state(int N, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int n = 0; n < N; ++n) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #endif Vector::Vector(Vector& original) { // Just teach myself: one can access private members of another instance // of the class from within the class #ifndef CPU_ONLY initialize_gpu(original.n); hipLaunchKernelGGL(gpu_copy_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, original.real_part, original.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else initialize_cpu(original.n); cpu_copy_state(n, original.real_part, original.imag_part, real_part, imag_part); #endif } Vector::~Vector() { #ifndef CPU_ONLY CHECK(hipFree(real_part)); CHECK(hipFree(imag_part)); #else delete[] real_part; delete[] imag_part; #endif } #ifndef CPU_ONLY __global__ void gpu_add_state( const int n, const real*__restrict__ in_real, const real*__restrict__ in_imag, real*__restrict__ out_real, real*__restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #else void cpu_add_state(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #endif void Vector::add(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_add_state, dim3((n - 1) 
/ BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_add_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_copy_state, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_copy_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_sz( const int n, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #else void cpu_apply_sz(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #endif void Vector::apply_sz(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_apply_sz, dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_apply_sz(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy_from_host(real* other_real, real* other_imag) { #ifndef CPU_ONLY CHECK(hipMemcpy(real_part, other_real, array_size, hipMemcpyHostToDevice)); CHECK(hipMemcpy(imag_part, other_imag, array_size, hipMemcpyHostToDevice)); #else memcpy(real_part, other_real, array_size); memcpy(imag_part, other_imag, array_size); #endif } void Vector::copy_to_host(real* target_real, real* target_imag) { #ifndef CPU_ONLY CHECK(hipMemcpy(target_real, real_part, array_size, hipMemcpyDeviceToHost)); CHECK(hipMemcpy(target_imag, imag_part, array_size, hipMemcpyDeviceToHost)); #else memcpy(target_real, real_part, array_size); memcpy(target_imag, imag_part, array_size); #endif } void Vector::swap(Vector& other) { real* tmp_real = real_part; real* tmp_imag = imag_part; real_part = other.real_part, imag_part = other.imag_part; other.real_part = tmp_real; other.imag_part = tmp_imag; } #ifndef CPU_ONLY __device__ void warp_reduce(volatile real* s, int t) { s[t] += s[t + 32]; s[t] += s[t + 16]; s[t] += s[t + 8]; s[t] += s[t + 4]; s[t] += s[t + 2]; s[t] += s[t + 1]; } #endif #ifndef CPU_ONLY __global__ void gpu_find_inner_product_1( const int number_of_atoms, const real* __restrict__ g_final_state_real, const real* __restrict__ g_final_state_imag, const real* __restrict__ g_random_state_real, const real* __restrict__ g_random_state_imag, real* __restrict__ g_inner_product_real, real* __restrict__ g_inner_product_imag, const int g_offset) { int tid = threadIdx.x; int n = blockIdx.x * blockDim.x + tid; int m; real a, b, c, d; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; if (n < number_of_atoms) { a = g_final_state_real[n]; b = g_final_state_imag[n]; c = g_random_state_real[n]; d = g_random_state_imag[n]; s_data_real[tid] = (a * c + b * d); s_data_imag[tid] = (b * c - a * d); } __syncthreads(); /* if (tid < 256) { m = tid + 256; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = 
tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_real[blockIdx.x + g_offset] = s_data_real[0]; g_inner_product_imag[blockIdx.x + g_offset] = s_data_imag[0]; } } #else void cpu_find_inner_product_1( int grid_size, int number_of_atoms, real* g_final_state_real, real* g_final_state_imag, real* g_random_state_real, real* g_random_state_imag, real* g_inner_product_real, real* g_inner_product_imag, int g_offset) { for (int m = 0; m < grid_size; ++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < BLOCK_SIZE; ++k) { int n = m * BLOCK_SIZE + k; if (n < number_of_atoms) { real a = g_final_state_real[n]; real b = g_final_state_imag[n]; real c = g_random_state_real[n]; real d = g_random_state_imag[n]; s_data_real += (a * c + b * d); s_data_imag += (b * c - a * d); } } g_inner_product_real[m + g_offset] = s_data_real; g_inner_product_imag[m + g_offset] = s_data_imag; } } #endif void Vector::inner_product_1(int number_of_atoms, Vector& other, Vector& target, int offset) { int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_find_inner_product_1, dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); CHECK(hipGetLastError()); #else cpu_find_inner_product_1( grid_size, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); #endif } #ifndef CPU_ONLY __global__ void gpu_find_inner_product_2( const int number_of_atoms, const real* __restrict__ g_inner_product_1_real, const real* __restrict__ g_inner_product_1_imag, real* __restrict__ g_inner_product_2_real, real* __restrict__ g_inner_product_2_imag) { //<<<para.number_of_energy_points, BLOCK_SIZE)>>> int tid = threadIdx.x; int patch, n, m; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; int number_of_blocks = (number_of_atoms - 1) / BLOCK_SIZE + 1; int number_of_patches = (number_of_blocks - 1) / BLOCK_SIZE + 1; for (patch = 0; patch < number_of_patches; ++patch) { n = tid + patch * BLOCK_SIZE; if (n < number_of_blocks) { m = blockIdx.x * number_of_blocks + n; s_data_real[tid] += g_inner_product_1_real[m]; s_data_imag[tid] += g_inner_product_1_imag[m]; } } __syncthreads(); /* if (tid < 256) { m = tid + 256; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_2_real[blockIdx.x] = s_data_real[0]; g_inner_product_2_imag[blockIdx.x] = s_data_imag[0]; } } #else void cpu_find_inner_product_2( int number_of_moments, int grid_size, real* g_inner_product_1_real, real* g_inner_product_1_imag, real* g_inner_product_2_real, real* g_inner_product_2_imag) { for (int m = 0; m < number_of_moments; ++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < grid_size; ++k) { int n 
= m * grid_size + k; s_data_real += g_inner_product_1_real[n]; s_data_imag += g_inner_product_1_imag[n]; } g_inner_product_2_real[m] = s_data_real; g_inner_product_2_imag[m] = s_data_imag; } } #endif void Vector::inner_product_2(int number_of_atoms, int number_of_moments, Vector& target) { #ifndef CPU_ONLY hipLaunchKernelGGL(gpu_find_inner_product_2, dim3(number_of_moments), dim3(BLOCK_SIZE), 0, 0, number_of_atoms, real_part, imag_part, target.real_part, target.imag_part); CHECK(hipGetLastError()); #else int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; cpu_find_inner_product_2( number_of_moments, grid_size, real_part, imag_part, target.real_part, target.imag_part); #endif }
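The gpu_find_inner_product_1/gpu_find_inner_product_2 kernels in the vector pair above compute a complex inner product in two reduction stages: per-block partial sums, then a sum over blocks. From the kernel's (a*c + b*d) and (b*c - a*d) terms this is sum_n f_n * conj(r_n), with f the "final" state and r the "random" state. As a readability aid only, a minimal single-threaded C++ sketch of the same quantity; the flat arrays and double precision are assumptions, since the real code operates on Vector objects storing the project's `real` type.

// CPU reference for the two-stage inner product computed by
// gpu_find_inner_product_1/2 above. Names are illustrative only.
#include <cstddef>

// Accumulates real and imaginary parts of sum_n f_n * conj(r_n),
// matching (a*c + b*d) and (b*c - a*d) in the kernels.
void inner_product_reference(std::size_t n,
                             const double* f_re, const double* f_im,
                             const double* r_re, const double* r_im,
                             double* out_re, double* out_im)
{
    double acc_re = 0.0, acc_im = 0.0;
    for (std::size_t i = 0; i < n; ++i) {
        acc_re += f_re[i] * r_re[i] + f_im[i] * r_im[i];
        acc_im += f_im[i] * r_re[i] - f_re[i] * r_im[i];
    }
    *out_re = acc_re;
    *out_im = acc_im;
}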
494c8dcb92ecc82239b3ae773489d1028f24ed86.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const int *A, const int *B, int *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls hipError_t err = hipSuccess; // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(int); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A int *h_A = (int *)malloc(size); // Allocate the host input vector B int *h_B = (int *)malloc(size); // Allocate the host output vector C int *h_C = (int *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(int)RAND_MAX; h_B[i] = rand()/(int)RAND_MAX; } // Allocate the device input vector A int *d_A = NULL; err = hipMalloc((void **)&d_A, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B int *d_B = NULL; err = hipMalloc((void **)&d_B, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C int *d_C = NULL; err = hipMalloc((void **)&d_C, size); if (err != hipSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); hipLaunchKernelGGL(( vectorAdd), dim3(blocksPerGrid), 
dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, numElements); err = hipGetLastError(); if (err != hipSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if (err != hipSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = hipFree(d_A); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_B); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_C); if (err != hipSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling hipDeviceReset causes all profile data to be // flushed before the application exits err = hipDeviceReset(); if (err != hipSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
494c8dcb92ecc82239b3ae773489d1028f24ed86.cu
/** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Vector addition: C = A + B. * * This sample is a very basic sample that implements element by element * vector addition. It is the same as the sample illustrating Chapter 2 * of the programming guide with some additions like error checking. */ #include <stdio.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> /** * CUDA Kernel Device code * * Computes the vector addition of A and B into C. The 3 vectors have the same * number of elements numElements. */ __global__ void vectorAdd(const int *A, const int *B, int *C, int numElements) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < numElements) { C[i] = A[i] + B[i]; } } /** * Host main routine */ int main(void) { // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; // Print the vector length to be used, and compute its size int numElements = 50000; size_t size = numElements * sizeof(int); printf("[Vector addition of %d elements]\n", numElements); // Allocate the host input vector A int *h_A = (int *)malloc(size); // Allocate the host input vector B int *h_B = (int *)malloc(size); // Allocate the host output vector C int *h_C = (int *)malloc(size); // Verify that allocations succeeded if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize the host input vectors for (int i = 0; i < numElements; ++i) { h_A[i] = rand()/(int)RAND_MAX; h_B[i] = rand()/(int)RAND_MAX; } // Allocate the device input vector A int *d_A = NULL; err = cudaMalloc((void **)&d_A, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device input vector B int *d_B = NULL; err = cudaMalloc((void **)&d_B, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Allocate the device output vector C int *d_C = NULL; err = cudaMalloc((void **)&d_C, size); if (err != cudaSuccess) { fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the host input vectors A and B in host memory to the device input vectors in // device memory printf("Copy input data from the host memory to the CUDA device\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Launch the Vector Add CUDA Kernel int threadsPerBlock = 256; int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock; printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock); vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements); err = cudaGetLastError(); if (err != 
cudaSuccess) { fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copy the device result vector in device memory to the host result vector // in host memory. printf("Copy output data from the CUDA device to the host memory\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if (err != cudaSuccess) { fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Verify that the result vector is correct for (int i = 0; i < numElements; ++i) { if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) { fprintf(stderr, "Result verification failed at element %d!\n", i); exit(EXIT_FAILURE); } } printf("Test PASSED\n"); // Free device global memory err = cudaFree(d_A); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_B); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_C); if (err != cudaSuccess) { fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Free host memory free(h_A); free(h_B); free(h_C); // Reset the device and exit // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. Calling cudaDeviceReset causes all profile data to be // flushed before the application exits err = cudaDeviceReset(); if (err != cudaSuccess) { fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Done\n"); return 0; }
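The vectorAdd sample above repeats the same "if (err != cudaSuccess) { fprintf(...); exit(...); }" block after every runtime call. A small sketch of a helper macro that factors that pattern out; CHECK_CUDA is an illustrative name and is not part of the original sample.

// Illustrative error-checking helper condensing the repeated pattern in the
// sample above. CHECK_CUDA is a made-up name, not part of the original code.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err_ = (call);                                         \
        if (err_ != cudaSuccess) {                                         \
            fprintf(stderr, "%s:%d: %s failed: %s\n", __FILE__, __LINE__,  \
                    #call, cudaGetErrorString(err_));                      \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// Usage example: CHECK_CUDA(cudaMalloc((void **)&d_A, size));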
875324ecc96aeaa4ce550cca0420bb28fbf2eddf.hip
// !!! This is a file automatically generated by hipify!!! #include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <iostream> #include "device_launch_parameters.h" #include "hip/hip_runtime.h" using namespace std; int main(int argc, char ** argv) { int deviceCount; hipGetDeviceCount(&deviceCount); for (int dev = 0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { // No CUDA-capable device detected return -1; } else if (deviceCount == 1) { // Only one device supports CUDA cout << "There is a single device" << endl; } else { // Print the number of devices cout << "The number of devices is: " << deviceCount << "\n" << endl; } } // Print the device name cout << "The device name is: " << deviceProp.name << "\n" << endl; // Print the major and minor version numbers cout << "The major and minor version numbers are: " << deviceProp.major << " " << deviceProp.minor << "\n" << endl; // Print the global memory size cout << "The global memory size is: " << deviceProp.totalGlobalMem << "\n" << endl; // Print the constant memory size cout << "The constant memory size is: " << deviceProp.totalConstMem << "\n" << endl; // Print the shared memory size per block cout << "The shared memory size per block is: " << deviceProp.sharedMemPerBlock << "\n" << endl; // Print the maximum number of threads in each dimension cout << "The maximum number of threads per dimension is: " << deviceProp.maxThreadsDim[0] << " " << deviceProp.maxThreadsDim[1] << " " << deviceProp.maxThreadsDim[2] << "\n" << endl; cout << "Maximum number of threads per block: " << deviceProp.maxThreadsPerBlock << "\n" << endl; // Print the maximum grid size in each dimension cout << "Maximum grid size per dimension: " << deviceProp.maxGridSize[0] << " " << deviceProp.maxGridSize[1] << " " << deviceProp.maxGridSize[2] << "\n" << endl; // Print the warp size cout << "The warp size is: " << deviceProp.warpSize << "\n" << endl; } system("pause"); return 0; }
875324ecc96aeaa4ce550cca0420bb28fbf2eddf.cu
#include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <iostream> #include "device_launch_parameters.h" #include "cuda_runtime.h" using namespace std; int main(int argc, char ** argv) { int deviceCount; cudaGetDeviceCount(&deviceCount); for (int dev = 0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); if (dev == 0) { if (deviceProp.major == 9999 && deviceProp.minor == 9999) { // No CUDA-capable device detected return -1; } else if (deviceCount == 1) { // Only one device supports CUDA cout << "There is a single device" << endl; } else { // Print the number of devices cout << "The number of devices is: " << deviceCount << "\n" << endl; } } // Print the device name cout << "The device name is: " << deviceProp.name << "\n" << endl; // Print the major and minor version numbers cout << "The major and minor version numbers are: " << deviceProp.major << " " << deviceProp.minor << "\n" << endl; // Print the global memory size cout << "The global memory size is: " << deviceProp.totalGlobalMem << "\n" << endl; // Print the constant memory size cout << "The constant memory size is: " << deviceProp.totalConstMem << "\n" << endl; // Print the shared memory size per block cout << "The shared memory size per block is: " << deviceProp.sharedMemPerBlock << "\n" << endl; // Print the maximum number of threads in each dimension cout << "The maximum number of threads per dimension is: " << deviceProp.maxThreadsDim[0] << " " << deviceProp.maxThreadsDim[1] << " " << deviceProp.maxThreadsDim[2] << "\n" << endl; cout << "Maximum number of threads per block: " << deviceProp.maxThreadsPerBlock << "\n" << endl; // Print the maximum grid size in each dimension cout << "Maximum grid size per dimension: " << deviceProp.maxGridSize[0] << " " << deviceProp.maxGridSize[1] << " " << deviceProp.maxGridSize[2] << "\n" << endl; // Print the warp size cout << "The warp size is: " << deviceProp.warpSize << "\n" << endl; } system("pause"); return 0; }
3087810ff41f7faf77ad7273142588f1f8455001.hip
// !!! This is a file automatically generated by hipify!!! /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file quantized_fully_connected.cu * \brief * \author Ziheng Jiang, Jun Wu */ #include "./quantization_utils.h" #include "../mxnet_op.h" #include "../nn/fully_connected-inl.h" namespace mxnet { namespace op { #if TORCH_HIP_VERSION >= 8000 // value + bias_value * (range1 / limit_range1) * (limit_range2 / range2) struct QuantizedBiasAddKernel { MSHADOW_XINLINE static void Map(int i, size_t k, int32_t* out, const int8_t* bias, const float* min_out, const float* max_out, const float* min_bias, const float* max_bias) { typedef int32_t T1; typedef int8_t T2; using mshadow::red::limits::MaxValue; using mshadow::red::limits::MinValue; float float_for_one_out_quant = MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<T1>()); float float_for_one_bias_quant = MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<T2>()); out[i] = (out[i] * float_for_one_out_quant + bias[i % k] * float_for_one_bias_quant) / float_for_one_out_quant; } }; #endif // TORCH_HIP_VERSION >= 8000 template <typename SrcType, typename DstType, typename CmpType> void QuantizedFullyConnectedForwardGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { #if TORCH_HIP_VERSION >= 8000 const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed); using namespace mshadow; using namespace mxnet_op; size_t num_inputs = param.no_bias ? 
2 : 3; CHECK_EQ(inputs.size(), num_inputs * 3); CHECK_EQ(outputs.size(), 3U); Stream<gpu>* s = ctx.get_stream<gpu>(); CHECK_EQ(s->blas_handle_ownership_, Stream<gpu>::OwnHandle); const TBlob& data = inputs[0]; const TBlob& weight = inputs[1]; const TBlob& out = outputs[0]; mxnet::TShape dshape = data.shape_; mxnet::TShape wshape = weight.shape_; mxnet::TShape oshape = out.shape_; // (m, n) * (k, n).T = (m, k) // A * B.T = C if (dshape.ndim() != 2) { CHECK(param.flatten) << "Currently, QuantizedFullyConnected Op only supports flatten=true " << "when ishape.ndim()!=2 for GPU."; } // row_C = col_C(T) = cublas(col_B * col_A(T)) = cublas(row_B(T), row_A) // row_C = col_C(T) = cublas(col_B(T) * col_A(T)) = cublas(row_B, row_A) const int m = dshape[0], n = dshape.ProdShape(1, dshape.ndim()), k = wshape[0]; CmpType alpha = 1.0f; CmpType beta = 0.0f; const hipDataType src_type = mshadow::DataType<SrcType>::kCudaFlag; const hipDataType dst_type = mshadow::DataType<DstType>::kCudaFlag; const hipDataType cmp_type = mshadow::DataType<CmpType>::kCudaFlag; CUBLAS_CALL(hipblasGemmEx(s->blas_handle_, HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n, &alpha, weight.dptr_, src_type, n, data.dptr_, src_type, n, &beta, out.dptr_, dst_type, k, cmp_type, CUBLAS_GEMM_DFALT)); Kernel<QuantizationRangeForS8S8MultiplicationStruct, gpu>::Launch( s, 1, outputs[1].dptr<float>(), outputs[2].dptr<float>(), inputs[num_inputs].dptr<float>(), inputs[num_inputs + 1].dptr<float>(), inputs[num_inputs + 2].dptr<float>(), inputs[num_inputs + 3].dptr<float>()); if (!param.no_bias) { const TBlob& bias = inputs[2]; Kernel<QuantizedBiasAddKernel, gpu>::Launch(s, out.Size(), k, out.dptr<int32_t>(), bias.dptr<int8_t>(), outputs[1].dptr<float>(), outputs[2].dptr<float>(), inputs[7].dptr<float>(), inputs[8].dptr<float>()); } #else LOG(FATAL) << "QuantizedFullyConnectedForwardGPU only supports CUDA >= 8.0"; #endif // TORCH_HIP_VERSION >= 8000 } NNVM_REGISTER_OP(_contrib_quantized_fully_connected) .set_attr<FCompute>("FCompute<gpu>", QuantizedFullyConnectedForwardGPU<int8_t, int32_t, int32_t>); } // namespace op } // namespace mxnet
3087810ff41f7faf77ad7273142588f1f8455001.cu
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file quantized_fully_connected.cu * \brief * \author Ziheng Jiang, Jun Wu */ #include "./quantization_utils.h" #include "../mxnet_op.h" #include "../nn/fully_connected-inl.h" namespace mxnet { namespace op { #if CUDA_VERSION >= 8000 // value + bias_value * (range1 / limit_range1) * (limit_range2 / range2) struct QuantizedBiasAddKernel { MSHADOW_XINLINE static void Map(int i, size_t k, int32_t* out, const int8_t* bias, const float* min_out, const float* max_out, const float* min_bias, const float* max_bias) { typedef int32_t T1; typedef int8_t T2; using mshadow::red::limits::MaxValue; using mshadow::red::limits::MinValue; float float_for_one_out_quant = MaxAbs(*min_out, *max_out) / static_cast<double>(MaxValue<T1>()); float float_for_one_bias_quant = MaxAbs(*min_bias, *max_bias) / static_cast<double>(MaxValue<T2>()); out[i] = (out[i] * float_for_one_out_quant + bias[i % k] * float_for_one_bias_quant) / float_for_one_out_quant; } }; #endif // CUDA_VERSION >= 8000 template <typename SrcType, typename DstType, typename CmpType> void QuantizedFullyConnectedForwardGPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { #if CUDA_VERSION >= 8000 const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed); using namespace mshadow; using namespace mxnet_op; size_t num_inputs = param.no_bias ? 
2 : 3; CHECK_EQ(inputs.size(), num_inputs * 3); CHECK_EQ(outputs.size(), 3U); Stream<gpu>* s = ctx.get_stream<gpu>(); CHECK_EQ(s->blas_handle_ownership_, Stream<gpu>::OwnHandle); const TBlob& data = inputs[0]; const TBlob& weight = inputs[1]; const TBlob& out = outputs[0]; mxnet::TShape dshape = data.shape_; mxnet::TShape wshape = weight.shape_; mxnet::TShape oshape = out.shape_; // (m, n) * (k, n).T = (m, k) // A * B.T = C if (dshape.ndim() != 2) { CHECK(param.flatten) << "Currently, QuantizedFullyConnected Op only supports flatten=true " << "when ishape.ndim()!=2 for GPU."; } // row_C = col_C(T) = cublas(col_B * col_A(T)) = cublas(row_B(T), row_A) // row_C = col_C(T) = cublas(col_B(T) * col_A(T)) = cublas(row_B, row_A) const int m = dshape[0], n = dshape.ProdShape(1, dshape.ndim()), k = wshape[0]; CmpType alpha = 1.0f; CmpType beta = 0.0f; const cudaDataType src_type = mshadow::DataType<SrcType>::kCudaFlag; const cudaDataType dst_type = mshadow::DataType<DstType>::kCudaFlag; const cudaDataType cmp_type = mshadow::DataType<CmpType>::kCudaFlag; CUBLAS_CALL(cublasGemmEx(s->blas_handle_, CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, weight.dptr_, src_type, n, data.dptr_, src_type, n, &beta, out.dptr_, dst_type, k, cmp_type, CUBLAS_GEMM_DFALT)); Kernel<QuantizationRangeForS8S8MultiplicationStruct, gpu>::Launch( s, 1, outputs[1].dptr<float>(), outputs[2].dptr<float>(), inputs[num_inputs].dptr<float>(), inputs[num_inputs + 1].dptr<float>(), inputs[num_inputs + 2].dptr<float>(), inputs[num_inputs + 3].dptr<float>()); if (!param.no_bias) { const TBlob& bias = inputs[2]; Kernel<QuantizedBiasAddKernel, gpu>::Launch(s, out.Size(), k, out.dptr<int32_t>(), bias.dptr<int8_t>(), outputs[1].dptr<float>(), outputs[2].dptr<float>(), inputs[7].dptr<float>(), inputs[8].dptr<float>()); } #else LOG(FATAL) << "QuantizedFullyConnectedForwardGPU only supports CUDA >= 8.0"; #endif // CUDA_VERSION >= 8000 } NNVM_REGISTER_OP(_contrib_quantized_fully_connected) .set_attr<FCompute>("FCompute<gpu>", QuantizedFullyConnectedForwardGPU<int8_t, int32_t, int32_t>); } // namespace op } // namespace mxnet
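QuantizedBiasAddKernel in the file above adds an int8-quantized bias to an int32-quantized output by converting both to real values using per-type step sizes, MaxAbs(min, max) / MaxValue<T>, and requantizing the sum back to the output scale. A scalar C++ sketch of that arithmetic, with illustrative names and without the mshadow types:

// Scalar sketch of the rescaling done by QuantizedBiasAddKernel above.
// float_per_out / float_per_bias are the real-valued sizes of one
// quantization step for the int32 output and int8 bias, respectively.
#include <cstdint>
#include <cmath>
#include <limits>

int32_t add_quantized_bias(int32_t out, int8_t bias,
                           float min_out, float max_out,
                           float min_bias, float max_bias)
{
    float float_per_out  = std::fmax(std::fabs(min_out),  std::fabs(max_out)) /
                           static_cast<float>(std::numeric_limits<int32_t>::max());
    float float_per_bias = std::fmax(std::fabs(min_bias), std::fabs(max_bias)) /
                           static_cast<float>(std::numeric_limits<int8_t>::max());
    // Convert both terms to real values, add, then requantize to the output scale.
    return static_cast<int32_t>(
        (out * float_per_out + bias * float_per_bias) / float_per_out);
}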
256c6bcd7952541a73461ebfbbbdc719a1f1fa5e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zgetrf_batched_smallsq_noshfl.cu, normal z -> s, Mon Jun 25 18:24:15 2018 */ #include "magma_internal.h" #include "magma_templates.h" #include "sync.cuh" #include "shuffle.cuh" #include "batched_kernel_param.h" // This kernel uses registers for matrix storage, shared mem. for communication. // It also uses lazy swap. extern __shared__ float zdata[]; template<int N, int NPOW2> __global__ void sgetrf_batched_smallsq_noshfl_kernel( float** dA_array, int ldda, magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int batchid = blockIdx.x * blockDim.y + ty; if(batchid >= batchCount) return; float* dA = dA_array[batchid]; magma_int_t* ipiv = ipiv_array[batchid]; magma_int_t* info = &info_array[batchid]; float rA[N] = {MAGMA_S_ZERO}; float reg = MAGMA_S_ZERO; int max_id, rowid = tx; int linfo = 0; float rx_abs_max = MAGMA_D_ZERO; float *sx = (float*)(zdata); float* dsx = (float*)(sx + blockDim.y * NPOW2); int* sipiv = (int*)(dsx + blockDim.y * NPOW2); sx += ty * NPOW2; dsx += ty * NPOW2; sipiv += ty * NPOW2; // read if( tx < N ){ #pragma unroll for(int i = 0; i < N; i++){ rA[i] = dA[ i * ldda + tx ]; } } #pragma unroll for(int i = 0; i < N; i++){ // isamax and find pivot dsx[ rowid ] = fabs(MAGMA_S_REAL( rA[i] )) + fabs(MAGMA_S_IMAG( rA[i] )); magmablas_syncwarp(); rx_abs_max = dsx[i]; max_id = i; #pragma unroll for(int j = i+1; j < N; j++){ if( dsx[j] > rx_abs_max){ max_id = j; rx_abs_max = dsx[j]; } } linfo = ( rx_abs_max == MAGMA_D_ZERO ) ? i+1 : 0; if(rowid == max_id){ sipiv[i] = max_id; rowid = i; #pragma unroll for(int j = i; j < N; j++){ sx[j] = rA[j]; } } else if(rowid == i){ rowid = max_id; } magmablas_syncwarp(); reg = MAGMA_S_DIV(MAGMA_S_ONE, sx[i] ); // scal and ger if( rowid > i ){ rA[i] *= reg; #pragma unroll for(int j = i+1; j < N; j++){ rA[j] -= rA[i] * sx[j]; } } magmablas_syncwarp(); } if(tx == 0){ (*info) = (magma_int_t)( linfo ); } // write if(tx < N) { ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); // fortran indexing #pragma unroll for(int i = 0; i < N; i++){ dA[ i * ldda + rowid ] = rA[i]; } } } /***************************************************************************//** Purpose ------- sgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A using partial pivoting with row interchanges. This routine can deal only with square matrices of size up to 32 The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 3 BLAS version of the algorithm. This is a batched version that factors batchCount M-by-N matrices in parallel. dA, ipiv, and info become arrays with one entry per matrix. Arguments --------- @param[in] n INTEGER The size of each matrix A. N >= 0. @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a REAL array on the GPU, dimension (LDDA,N). On entry, each pointer is an M-by-N matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. 
@param[in] ldda INTEGER The leading dimension of each array A. LDDA >= max(1,M). @param[out] ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices. Each is an INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value or another error occured, such as memory allocation failed. - > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_getrf_batched *******************************************************************************/ extern "C" magma_int_t magma_sgetrf_batched_smallsq_noshfl( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t arginfo = 0; magma_int_t m = n; if( (m < 0) || ( m > 32 ) ){ arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( m == 0) return 0; const magma_int_t ntcol = magma_get_sgetrf_batched_ntcol(m, n); magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int); shmem += ntcol * magma_ceilpow2(m) * sizeof(float); shmem += ntcol * magma_ceilpow2(m) * sizeof(float); dim3 threads(magma_ceilpow2(m), ntcol, 1); const magma_int_t gridx = magma_ceildiv(batchCount, ntcol); dim3 grid(gridx, 1, 1); switch(m){ case 1:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 1, magma_ceilpow2( 1)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 2:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 2, magma_ceilpow2( 2)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 3:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 3, magma_ceilpow2( 3)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 4:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 4, magma_ceilpow2( 4)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 5:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 5, magma_ceilpow2( 5)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 6:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 6, magma_ceilpow2( 6)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 7:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 7, magma_ceilpow2( 7)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 8:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 8, magma_ceilpow2( 8)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 9:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel< 9, magma_ceilpow2( 9)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), 
dA_array, ldda, ipiv_array, info_array, batchCount); break; case 10:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<10, magma_ceilpow2(10)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 11:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<11, magma_ceilpow2(11)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 12:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<12, magma_ceilpow2(12)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 13:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<13, magma_ceilpow2(13)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 14:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<14, magma_ceilpow2(14)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 15:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<15, magma_ceilpow2(15)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 16:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<16, magma_ceilpow2(16)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 17:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<17, magma_ceilpow2(17)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 18:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<18, magma_ceilpow2(18)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 19:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<19, magma_ceilpow2(19)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 20:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<20, magma_ceilpow2(20)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 21:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<21, magma_ceilpow2(21)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 22:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<22, magma_ceilpow2(22)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 23:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<23, magma_ceilpow2(23)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 24:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<24, magma_ceilpow2(24)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 25:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<25, magma_ceilpow2(25)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 26:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<26, magma_ceilpow2(26)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, 
ipiv_array, info_array, batchCount); break; case 27:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<27, magma_ceilpow2(27)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 28:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<28, magma_ceilpow2(28)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 29:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<29, magma_ceilpow2(29)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 30:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<30, magma_ceilpow2(30)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 31:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<31, magma_ceilpow2(31)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; case 32:hipLaunchKernelGGL(( sgetrf_batched_smallsq_noshfl_kernel<32, magma_ceilpow2(32)>), dim3(grid), dim3(threads), shmem, queue->cuda_stream(), dA_array, ldda, ipiv_array, info_array, batchCount); break; default: printf("error: size %lld is not supported\n", (long long) m); } return arginfo; }
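The Purpose block in the MAGMA file above describes, for each batch entry, an LU factorization A = P*L*U with partial pivoting on a small column-major matrix. For reference only, a plain CPU sketch of unblocked LU with partial pivoting (1-based pivots, LAPACK-style info); this is a readability aid under the stated assumptions, not MAGMA's register/shared-memory kernel.

// Illustrative CPU reference for what one batch entry computes:
// unblocked LU with partial pivoting on a column-major n x n matrix A
// with leading dimension lda. ipiv uses 1-based indices like the kernel.
#include <cmath>
#include <algorithm>

int lu_small_reference(int n, float* A, int lda, int* ipiv)
{
    int info = 0;
    for (int j = 0; j < n; ++j) {
        int p = j;                              // pivot row: largest |A(i,j)|, i >= j
        for (int i = j + 1; i < n; ++i)
            if (std::fabs(A[i + j * lda]) > std::fabs(A[p + j * lda])) p = i;
        ipiv[j] = p + 1;                        // Fortran-style index
        if (A[p + j * lda] == 0.0f) { if (!info) info = j + 1; continue; }
        if (p != j)                             // swap full rows j and p
            for (int k = 0; k < n; ++k)
                std::swap(A[j + k * lda], A[p + k * lda]);
        for (int i = j + 1; i < n; ++i) {       // scale column below the pivot
            A[i + j * lda] /= A[j + j * lda];
            for (int k = j + 1; k < n; ++k)     // rank-1 update of trailing block
                A[i + k * lda] -= A[i + j * lda] * A[j + k * lda];
        }
    }
    return info;                                // 0 on success, j+1 if U(j,j) == 0
}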
256c6bcd7952541a73461ebfbbbdc719a1f1fa5e.cu
/* -- MAGMA (version 2.4.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date June 2018 @author Azzam Haidar @author Ahmad Abdelfattah @generated from magmablas/zgetrf_batched_smallsq_noshfl.cu, normal z -> s, Mon Jun 25 18:24:15 2018 */ #include "magma_internal.h" #include "magma_templates.h" #include "sync.cuh" #include "shuffle.cuh" #include "batched_kernel_param.h" // This kernel uses registers for matrix storage, shared mem. for communication. // It also uses lazy swap. extern __shared__ float zdata[]; template<int N, int NPOW2> __global__ void sgetrf_batched_smallsq_noshfl_kernel( float** dA_array, int ldda, magma_int_t** ipiv_array, magma_int_t *info_array, int batchCount) { const int tx = threadIdx.x; const int ty = threadIdx.y; const int batchid = blockIdx.x * blockDim.y + ty; if(batchid >= batchCount) return; float* dA = dA_array[batchid]; magma_int_t* ipiv = ipiv_array[batchid]; magma_int_t* info = &info_array[batchid]; float rA[N] = {MAGMA_S_ZERO}; float reg = MAGMA_S_ZERO; int max_id, rowid = tx; int linfo = 0; float rx_abs_max = MAGMA_D_ZERO; float *sx = (float*)(zdata); float* dsx = (float*)(sx + blockDim.y * NPOW2); int* sipiv = (int*)(dsx + blockDim.y * NPOW2); sx += ty * NPOW2; dsx += ty * NPOW2; sipiv += ty * NPOW2; // read if( tx < N ){ #pragma unroll for(int i = 0; i < N; i++){ rA[i] = dA[ i * ldda + tx ]; } } #pragma unroll for(int i = 0; i < N; i++){ // isamax and find pivot dsx[ rowid ] = fabs(MAGMA_S_REAL( rA[i] )) + fabs(MAGMA_S_IMAG( rA[i] )); magmablas_syncwarp(); rx_abs_max = dsx[i]; max_id = i; #pragma unroll for(int j = i+1; j < N; j++){ if( dsx[j] > rx_abs_max){ max_id = j; rx_abs_max = dsx[j]; } } linfo = ( rx_abs_max == MAGMA_D_ZERO ) ? i+1 : 0; if(rowid == max_id){ sipiv[i] = max_id; rowid = i; #pragma unroll for(int j = i; j < N; j++){ sx[j] = rA[j]; } } else if(rowid == i){ rowid = max_id; } magmablas_syncwarp(); reg = MAGMA_S_DIV(MAGMA_S_ONE, sx[i] ); // scal and ger if( rowid > i ){ rA[i] *= reg; #pragma unroll for(int j = i+1; j < N; j++){ rA[j] -= rA[i] * sx[j]; } } magmablas_syncwarp(); } if(tx == 0){ (*info) = (magma_int_t)( linfo ); } // write if(tx < N) { ipiv[ tx ] = (magma_int_t)(sipiv[tx] + 1); // fortran indexing #pragma unroll for(int i = 0; i < N; i++){ dA[ i * ldda + rowid ] = rA[i]; } } } /***************************************************************************//** Purpose ------- sgetrf_batched_smallsq_noshfl computes the LU factorization of a square N-by-N matrix A using partial pivoting with row interchanges. This routine can deal only with square matrices of size up to 32 The factorization has the form A = P * L * U where P is a permutation matrix, L is lower triangular with unit diagonal elements (lower trapezoidal if m > n), and U is upper triangular (upper trapezoidal if m < n). This is the right-looking Level 3 BLAS version of the algorithm. This is a batched version that factors batchCount M-by-N matrices in parallel. dA, ipiv, and info become arrays with one entry per matrix. Arguments --------- @param[in] n INTEGER The size of each matrix A. N >= 0. @param[in,out] dA_array Array of pointers, dimension (batchCount). Each is a REAL array on the GPU, dimension (LDDA,N). On entry, each pointer is an M-by-N matrix to be factored. On exit, the factors L and U from the factorization A = P*L*U; the unit diagonal elements of L are not stored. @param[in] ldda INTEGER The leading dimension of each array A. LDDA >= max(1,M). 
@param[out] ipiv_array Array of pointers, dimension (batchCount), for corresponding matrices. Each is an INTEGER array, dimension (min(M,N)) The pivot indices; for 1 <= i <= min(M,N), row i of the matrix was interchanged with row IPIV(i). @param[out] info_array Array of INTEGERs, dimension (batchCount), for corresponding matrices. - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value or another error occured, such as memory allocation failed. - > 0: if INFO = i, U(i,i) is exactly zero. The factorization has been completed, but the factor U is exactly singular, and division by zero will occur if it is used to solve a system of equations. @param[in] batchCount INTEGER The number of matrices to operate on. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_getrf_batched *******************************************************************************/ extern "C" magma_int_t magma_sgetrf_batched_smallsq_noshfl( magma_int_t n, float** dA_array, magma_int_t ldda, magma_int_t** ipiv_array, magma_int_t* info_array, magma_int_t batchCount, magma_queue_t queue ) { magma_int_t arginfo = 0; magma_int_t m = n; if( (m < 0) || ( m > 32 ) ){ arginfo = -1; } if (arginfo != 0) { magma_xerbla( __func__, -(arginfo) ); return arginfo; } if( m == 0) return 0; const magma_int_t ntcol = magma_get_sgetrf_batched_ntcol(m, n); magma_int_t shmem = ntcol * magma_ceilpow2(m) * sizeof(int); shmem += ntcol * magma_ceilpow2(m) * sizeof(float); shmem += ntcol * magma_ceilpow2(m) * sizeof(float); dim3 threads(magma_ceilpow2(m), ntcol, 1); const magma_int_t gridx = magma_ceildiv(batchCount, ntcol); dim3 grid(gridx, 1, 1); switch(m){ case 1: sgetrf_batched_smallsq_noshfl_kernel< 1, magma_ceilpow2( 1)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 2: sgetrf_batched_smallsq_noshfl_kernel< 2, magma_ceilpow2( 2)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 3: sgetrf_batched_smallsq_noshfl_kernel< 3, magma_ceilpow2( 3)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 4: sgetrf_batched_smallsq_noshfl_kernel< 4, magma_ceilpow2( 4)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 5: sgetrf_batched_smallsq_noshfl_kernel< 5, magma_ceilpow2( 5)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 6: sgetrf_batched_smallsq_noshfl_kernel< 6, magma_ceilpow2( 6)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 7: sgetrf_batched_smallsq_noshfl_kernel< 7, magma_ceilpow2( 7)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 8: sgetrf_batched_smallsq_noshfl_kernel< 8, magma_ceilpow2( 8)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 9: sgetrf_batched_smallsq_noshfl_kernel< 9, magma_ceilpow2( 9)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 10: sgetrf_batched_smallsq_noshfl_kernel<10, magma_ceilpow2(10)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 11: sgetrf_batched_smallsq_noshfl_kernel<11, magma_ceilpow2(11)><<<grid, threads, shmem, 
queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 12: sgetrf_batched_smallsq_noshfl_kernel<12, magma_ceilpow2(12)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 13: sgetrf_batched_smallsq_noshfl_kernel<13, magma_ceilpow2(13)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 14: sgetrf_batched_smallsq_noshfl_kernel<14, magma_ceilpow2(14)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 15: sgetrf_batched_smallsq_noshfl_kernel<15, magma_ceilpow2(15)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 16: sgetrf_batched_smallsq_noshfl_kernel<16, magma_ceilpow2(16)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 17: sgetrf_batched_smallsq_noshfl_kernel<17, magma_ceilpow2(17)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 18: sgetrf_batched_smallsq_noshfl_kernel<18, magma_ceilpow2(18)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 19: sgetrf_batched_smallsq_noshfl_kernel<19, magma_ceilpow2(19)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 20: sgetrf_batched_smallsq_noshfl_kernel<20, magma_ceilpow2(20)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 21: sgetrf_batched_smallsq_noshfl_kernel<21, magma_ceilpow2(21)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 22: sgetrf_batched_smallsq_noshfl_kernel<22, magma_ceilpow2(22)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 23: sgetrf_batched_smallsq_noshfl_kernel<23, magma_ceilpow2(23)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 24: sgetrf_batched_smallsq_noshfl_kernel<24, magma_ceilpow2(24)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 25: sgetrf_batched_smallsq_noshfl_kernel<25, magma_ceilpow2(25)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 26: sgetrf_batched_smallsq_noshfl_kernel<26, magma_ceilpow2(26)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 27: sgetrf_batched_smallsq_noshfl_kernel<27, magma_ceilpow2(27)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 28: sgetrf_batched_smallsq_noshfl_kernel<28, magma_ceilpow2(28)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 29: sgetrf_batched_smallsq_noshfl_kernel<29, magma_ceilpow2(29)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 30: sgetrf_batched_smallsq_noshfl_kernel<30, magma_ceilpow2(30)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 31: sgetrf_batched_smallsq_noshfl_kernel<31, magma_ceilpow2(31)><<<grid, threads, 
shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; case 32: sgetrf_batched_smallsq_noshfl_kernel<32, magma_ceilpow2(32)><<<grid, threads, shmem, queue->cuda_stream()>>>(dA_array, ldda, ipiv_array, info_array, batchCount); break; default: printf("error: size %lld is not supported\n", (long long) m); } return arginfo; }
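The pair ending here is MAGMA's batched LU factorization for square matrices of order at most 32; the host wrapper validates n, sizes the shared memory, and dispatches on n to a size-templated kernel, and the hipified copy differs only in that every kernel<<<grid, threads, shmem, stream>>>(args) launch is rewritten as hipLaunchKernelGGL(kernel, dim3(grid), dim3(threads), shmem, stream, args) with the same arguments. The sketch below is a hedged illustration (not MAGMA sample code) of how a caller might build the pointer-array arguments when the batch and pivots live in contiguous device buffers; the helper name factor_small_batch is mine, magma_v2.h is assumed only for the magma_int_t and magma_queue_t types, and the routine is re-declared here to match the extern "C" definition above.

// Hedged sketch: prepare pointer-array arguments for the batched routine above
// when the n x n matrices and the pivots live in contiguous device buffers.
#include <cstddef>
#include <vector>
#include <cuda_runtime.h>
#include "magma_v2.h"   // assumed: provides magma_int_t and magma_queue_t

extern "C" magma_int_t magma_sgetrf_batched_smallsq_noshfl(
    magma_int_t n, float** dA_array, magma_int_t ldda,
    magma_int_t** ipiv_array, magma_int_t* info_array,
    magma_int_t batchCount, magma_queue_t queue);

void factor_small_batch(float* dA_contig, magma_int_t n, magma_int_t ldda,
                        magma_int_t* dIpiv_contig, magma_int_t* dInfo,
                        magma_int_t batchCount, magma_queue_t queue)
{
    // Per-matrix pointers into the contiguous device buffers, built on the host.
    std::vector<float*>       hA(batchCount);
    std::vector<magma_int_t*> hIpiv(batchCount);
    for (magma_int_t i = 0; i < batchCount; ++i) {
        hA[i]    = dA_contig    + (size_t)i * ldda * n;
        hIpiv[i] = dIpiv_contig + (size_t)i * n;
    }

    // The kernel dereferences dA_array / ipiv_array on the GPU, so the pointer
    // tables themselves must be copied to device memory.
    float**       dA_array    = nullptr;
    magma_int_t** dIpiv_array = nullptr;
    cudaMalloc((void**)&dA_array,    batchCount * sizeof(float*));
    cudaMalloc((void**)&dIpiv_array, batchCount * sizeof(magma_int_t*));
    cudaMemcpy(dA_array,    hA.data(),    batchCount * sizeof(float*),       cudaMemcpyHostToDevice);
    cudaMemcpy(dIpiv_array, hIpiv.data(), batchCount * sizeof(magma_int_t*), cudaMemcpyHostToDevice);

    // n must be in [0, 32]; larger sizes fall through to the error message above.
    magma_sgetrf_batched_smallsq_noshfl(n, dA_array, ldda, dIpiv_array, dInfo,
                                        batchCount, queue);

    cudaFree(dA_array);
    cudaFree(dIpiv_array);
}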
89e4c1ecc674779b494cd959ea604fa080304da0.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "bestFilter.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *Params = NULL; hipMalloc(&Params, XSIZE*YSIZE); const float *data = NULL; hipMalloc(&data, XSIZE*YSIZE); const float *mu = NULL; hipMalloc(&mu, XSIZE*YSIZE); float *err = NULL; hipMalloc(&err, XSIZE*YSIZE); float *eloss = NULL; hipMalloc(&eloss, XSIZE*YSIZE); int *ftype = NULL; hipMalloc(&ftype, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( bestFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,data,mu,err,eloss,ftype); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( bestFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,data,mu,err,eloss,ftype); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( bestFilter), dim3(gridBlock),dim3(threadBlock), 0, 0, Params,data,mu,err,eloss,ftype); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
89e4c1ecc674779b494cd959ea604fa080304da0.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "bestFilter.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *Params = NULL; cudaMalloc(&Params, XSIZE*YSIZE); const float *data = NULL; cudaMalloc(&data, XSIZE*YSIZE); const float *mu = NULL; cudaMalloc(&mu, XSIZE*YSIZE); float *err = NULL; cudaMalloc(&err, XSIZE*YSIZE); float *eloss = NULL; cudaMalloc(&eloss, XSIZE*YSIZE); int *ftype = NULL; cudaMalloc(&ftype, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); bestFilter<<<gridBlock,threadBlock>>>(Params,data,mu,err,eloss,ftype); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { bestFilter<<<gridBlock,threadBlock>>>(Params,data,mu,err,eloss,ftype); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { bestFilter<<<gridBlock,threadBlock>>>(Params,data,mu,err,eloss,ftype); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
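In the benchmark harness above, the timed loop enqueues 1000 asynchronous launches and reads the steady_clock end timestamp immediately after the last enqueue, without a synchronization, so the reported time can under-count actual kernel execution. The sketch below is my own, with a stand-in kernel rather than bestFilter, and shows the CUDA-event pattern commonly used for this kind of measurement: record a start event, enqueue the loop, record a stop event, and synchronize on the stop event before reading the elapsed time.

// Hedged sketch: event-based timing of a kernel loop (stand-in kernel, not bestFilter).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummyKernel(float* x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
    const int n = 1 << 20;
    float* d_x = nullptr;
    cudaMalloc((void**)&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Warm-up launch so one-time initialization cost is excluded from the measurement.
    dummyKernel<<<grid, block>>>(d_x, n);
    cudaDeviceSynchronize();

    cudaEventRecord(start);
    for (int iter = 0; iter < 1000; ++iter) {
        dummyKernel<<<grid, block>>>(d_x, n);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until all 1000 launches have actually finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("avg kernel time: %f ms\n", ms / 1000.0f);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}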
66e6cf23ffe3d23040b9982b211c8d00ffab8da3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <caffe2/core/context_gpu.h> #include "caffe2/operator/affine_scale_op.h" namespace caffe2 { namespace { __global__ void AffineScaleKernel(const int N, const int C, const float* X, const float* M, const float* S, float* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] * S[i / C] + M[i / C]; } } __global__ void AffineScaleInverseKernel(const int N, const int C, const float* X, const float* M, const float* S, float* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = (X[i] - M[i / C]) / (S[i / C] + 1e-8); } } } // namespace template <> bool AffineScaleOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& M = Input(1); auto& S = Input(2); auto* Y = Output(0); DCHECK_EQ(M.size(), X.dim(0)); DCHECK_EQ(S.size(), X.dim(0)); Y->ResizeLike(X); if (X.size() > 0) { auto size = X.size() / X.dim(0); if (inverse_) { hipLaunchKernelGGL(( AffineScaleInverseKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(), Y->mutable_data<float>()); } else { hipLaunchKernelGGL(( AffineScaleKernel), dim3(CAFFE_GET_BLOCKS(X.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(), Y->mutable_data<float>()); } } return true; } namespace { __global__ void AffineScaleGradientKernel(const int N, const int C, const float* dY, const float* S, float* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] * S[i / C]; } } __global__ void AffineScaleInverseGradientKernel(const int N, const int C, const float* dY, const float* S, float* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] / (S[i / C] + 1e-8); } } } // namespace template <> bool AffineScaleGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& S = Input(1); auto& dY = Input(2); auto* dX = Output(0); DCHECK_EQ(S.size(), X.dim(0)); DCHECK_EQ(dY.size(), X.size()); dX->ResizeLike(X); if (X.size() > 0) { auto size = X.size() / X.dim(0); if (inverse_) { hipLaunchKernelGGL(( AffineScaleInverseGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dY.size(), size, dY.data<float>(), S.data<float>(), dX->mutable_data<float>()); } else { hipLaunchKernelGGL(( AffineScaleGradientKernel), dim3(CAFFE_GET_BLOCKS(dY.size())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), dY.size(), size, dY.data<float>(), S.data<float>(), dX->mutable_data<float>()); } } return true; } REGISTER_CUDA_OPERATOR(AffineScale, AffineScaleOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(AffineScaleGradient, AffineScaleGradientOp<float, CUDAContext>); } // namespace caffe2
66e6cf23ffe3d23040b9982b211c8d00ffab8da3.cu
#include <caffe2/core/context_gpu.h> #include "caffe2/operator/affine_scale_op.h" namespace caffe2 { namespace { __global__ void AffineScaleKernel(const int N, const int C, const float* X, const float* M, const float* S, float* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = X[i] * S[i / C] + M[i / C]; } } __global__ void AffineScaleInverseKernel(const int N, const int C, const float* X, const float* M, const float* S, float* Y) { CUDA_1D_KERNEL_LOOP(i, N) { Y[i] = (X[i] - M[i / C]) / (S[i / C] + 1e-8); } } } // namespace template <> bool AffineScaleOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& M = Input(1); auto& S = Input(2); auto* Y = Output(0); DCHECK_EQ(M.size(), X.dim(0)); DCHECK_EQ(S.size(), X.dim(0)); Y->ResizeLike(X); if (X.size() > 0) { auto size = X.size() / X.dim(0); if (inverse_) { AffineScaleInverseKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(), Y->mutable_data<float>()); } else { AffineScaleKernel<<<CAFFE_GET_BLOCKS(X.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.size(), size, X.data<float>(), M.data<float>(), S.data<float>(), Y->mutable_data<float>()); } } return true; } namespace { __global__ void AffineScaleGradientKernel(const int N, const int C, const float* dY, const float* S, float* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] * S[i / C]; } } __global__ void AffineScaleInverseGradientKernel(const int N, const int C, const float* dY, const float* S, float* dX) { CUDA_1D_KERNEL_LOOP(i, N) { dX[i] = dY[i] / (S[i / C] + 1e-8); } } } // namespace template <> bool AffineScaleGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& S = Input(1); auto& dY = Input(2); auto* dX = Output(0); DCHECK_EQ(S.size(), X.dim(0)); DCHECK_EQ(dY.size(), X.size()); dX->ResizeLike(X); if (X.size() > 0) { auto size = X.size() / X.dim(0); if (inverse_) { AffineScaleInverseGradientKernel<<<CAFFE_GET_BLOCKS(dY.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dY.size(), size, dY.data<float>(), S.data<float>(), dX->mutable_data<float>()); } else { AffineScaleGradientKernel<<<CAFFE_GET_BLOCKS(dY.size()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( dY.size(), size, dY.data<float>(), S.data<float>(), dX->mutable_data<float>()); } } return true; } REGISTER_CUDA_OPERATOR(AffineScale, AffineScaleOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(AffineScaleGradient, AffineScaleGradientOp<float, CUDAContext>); } // namespace caffe2
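The Caffe2 pair above applies a per-sample affine transform on the GPU, with the mean M and scale S indexed by i / C (C being the number of elements per sample) and an epsilon of 1e-8 guarding the inverse. The following CPU reference is my own sketch, not part of Caffe2, and is only meant to spell out what the forward and inverse kernels compute.

// CPU reference (sketch) for the AffineScale forward/inverse kernels above.
// N = total element count, C = elements per sample, M and S are per-sample.
#include <cstddef>

void affine_scale_cpu(std::size_t N, std::size_t C,
                      const float* X, const float* M, const float* S,
                      float* Y, bool inverse) {
    for (std::size_t i = 0; i < N; ++i) {
        const std::size_t sample = i / C;   // same broadcast rule as i / C in the kernels
        if (inverse) {
            Y[i] = (X[i] - M[sample]) / (S[sample] + 1e-8f);
        } else {
            Y[i] = X[i] * S[sample] + M[sample];
        }
    }
}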
6226acf48e180f8dcee81308f0d7ba2a7438e8d2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2012-2013 The Ohio State University. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> #include <string.h> #include <unistd.h> #include <time.h> #include <algorithm> #include <vector> #include "scanImpl.cu" #include "../include/common.h" #include "../include/gpuCudaLib.h" using namespace std; //#define TEST 1 #define CHECK_POINTER(p) do { \ if(p == NULL){ \ perror("Failed to allocate host memory"); \ exit(-1); \ }} while(0) const int know_stop_size=10000010; vector<int > know_stop_num[know_stop_size]; type nlz(utype x){ type n; if (x == 0) return(32); n = 1; if ((x >> 16) == 0) {n = n +16; x = x <<16;} if ((x >> 24) == 0) {n = n + 8; x = x << 8;} if ((x >> 28) == 0) {n = n + 4; x = x << 4;} if ((x >> 30) == 0) {n = n + 2; x = x << 2;} n = n - (x >> 31); } __global__ void static equal(int * a, int n, unsigned int constC){ int offset = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=offset; i<n; i+=stride){ a[i] = constC; } } __global__ void static genScanFilter_int_lth_bit(int * col,int n, int *constC,int group_size, int * lt, int * eq){ int offset = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=offset; i < n; i+=stride){ lt[i] = lt[i] | (eq[i] & ~constC[i/group_size] & col[i]); eq[i] = eq[i] & ~(col[i] ^ constC[i/group_size]); //printf(" %d %u %u %u\n",i,lt[i],eq[i],col[i]); } } inline hipError_t checkCuda(hipError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result)); assert(result == hipSuccess); } #endif return result; } inline int bit_constC(int where,int j){ int constC = (((1U << (31 - j )) & where)>>(31 - j)); if(constC != 0) constC = (1L << 32) - 1; return constC; } void profilebitweavscan(int *h_a, int *h_b, int *d, int *lt, int *eq, int *know_stop_len_cpu, int *know_stop_constC_cpu, int early_size, int group_size, int n, int *gpu_constC, int *constC, int queryC, char *desc, unsigned int loopTotal) { dim3 block(256); dim3 grid(2048); float time,stime; // events for timing hipEvent_t startEvent, stopEvent; checkCuda( hipEventCreate(&startEvent) ); checkCuda( hipEventCreate(&stopEvent) ); stime=0; int bytes=n * sizeof(int); for(int loop = 1; loop <= loopTotal; loop++){ checkCuda( hipEventRecord(startEvent, 0) ); #ifndef TEST queryC = rand()% (1U<<31); #endif for(int i = 0 ; i < n/group_size; ++i){ assert(queryC > 0); vector<int>::iterator ii = lower_bound(know_stop_num[i]. 
begin(),know_stop_num[i].end(),queryC); know_stop_constC_cpu[i] = *ii; int last_constC = *(--ii); know_stop_len_cpu[i] =nlz(last_constC ^ know_stop_constC_cpu[i]) + 1; // printf(" %d %d ",last_constC,know_stop_constC_cpu[i], know_stop_len_cpu[i]); } unsigned int c = 0; for(int i = 0;i < 32;i++) c += (1u << i); hipLaunchKernelGGL(( equal), dim3(grid),dim3(block), 0, 0, lt, n/32, 0) ; hipLaunchKernelGGL(( equal), dim3(grid),dim3(block), 0, 0, eq, n/32, c) ; checkCuda(hipDeviceSynchronize()); double len_max=0.0, len_sum = 0.0; for(int k = 0; k < n ; k += early_size ){ //printf("%d \n", know_stop_len_cpu[k / (early_size / 32)]); int len = 0; for(int i = 0; i < early_size / group_size; i++) len = max (len , know_stop_len_cpu[k / group_size + i]); checkCuda( hipMemcpy(d + k, h_a + k, early_size / 32 *4 * len, hipMemcpyHostToDevice) ); len_max=max(len_max,(double)len); len_sum=len_sum + len; for(int j = 0; j < len; ++j){ for(int i = 0; i < early_size / group_size; i ++ ) { constC[i] = bit_constC(know_stop_constC_cpu[k / group_size + i], j); //printf("constC=%d\n",constC[i]); } checkCuda( hipMemcpy(gpu_constC, constC, early_size / group_size *4 , hipMemcpyHostToDevice) ); int place = k + early_size / 32 * j; hipLaunchKernelGGL(( genScanFilter_int_lth_bit), dim3(grid),dim3(block), 0, 0, d + place, early_size / 32, gpu_constC, group_size/32, lt + k/32, eq + k/32); checkCuda(hipDeviceSynchronize()); } } printf("%.2f %.2f\n",len_max, len_sum*1.0/(n/early_size)); checkCuda( hipMemcpy(h_b, lt, n / 32 * 4, hipMemcpyDeviceToHost) ); checkCuda( hipMemcpy(h_b + n / 32 , eq, n / 32 * 4, hipMemcpyDeviceToHost) ); checkCuda(hipDeviceSynchronize()); checkCuda( hipEventRecord(stopEvent, 0) ); checkCuda( hipEventSynchronize(stopEvent) ); checkCuda( hipEventElapsedTime(&time, startEvent, stopEvent) ); stime += time; //printf("%f\n" ,bytes * 1e-6/(stime / loop)); //printf("%f\n",stime); } printf("%d %f\n" ,group_size,bytes * 1e-6/(stime / loopTotal)); checkCuda( hipEventDestroy(startEvent) ); checkCuda( hipEventDestroy(stopEvent) ); } int main(int argc, char ** argv) { #ifdef TEST freopen("scan.in","r",stdin); freopen("scan_smallstore.out","w",stdout); #endif dim3 block(256); dim3 grid(2048); int inputN; sscanf(argv[1],"%d",&inputN); unsigned int nElements = inputN; #ifdef TEST scanf("%d",&nElements); #endif const unsigned int bytes = nElements * sizeof(int); // host arrays int *h_aPageable, *h_bPageable,*h_bitPageable,*know_stop_len_cpu,*know_stop_constC_cpu; int *h_aPinned, *h_bPinned; // device array int *d_a,*lt,*eq; // allocate and initialize h_aPageable = (int*)malloc(bytes ); h_bPageable = (int*)malloc(bytes ); h_bitPageable =(int *)malloc(bytes ); know_stop_len_cpu = (int *)malloc(bytes ); know_stop_constC_cpu = (int *)malloc(bytes ); // host pageable checkCuda( hipHostMalloc((void**)&h_aPinned, bytes ) ); // host pinned checkCuda( hipHostMalloc((void**)&h_bPinned, bytes ) ); checkCuda( hipMalloc((void**)&d_a, bytes ) ); // device checkCuda( hipMalloc((void**)&lt, bytes ) ); // device return checkCuda( hipMalloc((void**)&eq, bytes ) ); int early_size = 1024*1024; int group_size = 1024*64; sscanf(argv[2],"%d",&early_size); sscanf(argv[3],"%d",&group_size); srand(time(0)); for (int i = 0; i < nElements; ++i) h_aPageable[i] = rand()%(1U<<31); #ifdef TEST for (int i = 0; i < nElements; ++i) scanf("%d",h_aPageable + i); #endif for (int i = 0;i < nElements/ group_size; i++){ know_stop_num[i].push_back(0); know_stop_num[i].push_back((1U<<31)-1); //0----2^31-1 for(int j=0;j < group_size; j++) 
know_stop_num[i].push_back(h_aPageable[i * group_size + j]); sort(know_stop_num[i].begin(),know_stop_num[i].end()); know_stop_num[i].erase(unique(know_stop_num[i].begin(), know_stop_num[i].end()), know_stop_num[i].end()); } for (int i = 0; i < nElements; i += early_size) for(int j = 31; j >= 0; --j) for(int k = 0; k < early_size; ++k) { h_bitPageable[i + (31 -j) * early_size / 32 + k / 32] += (((h_aPageable[i + k] &(1<<j))>>j)<<(31 - k % 32)); } // for(int i = 0;i < nElements; i++) // for(int j = 0 ;j < 32;j++)h_bitPageable[i] = rand()%(1<<31); memcpy(h_aPinned, h_aPageable, bytes ); memset(h_bPageable, 0, bytes); memset(h_bPinned, 0, bytes); memset(know_stop_len_cpu, 0, bytes); // output device info and transfer size hipDeviceProp_t prop; checkCuda( hipGetDeviceProperties(&prop, 0) ); int constC = rand()%(1U<<31); #ifdef TEST scanf("%d",&constC); #endif for(int i = 0 ; i < nElements/group_size; ++i){ assert(constC > 0); know_stop_constC_cpu[i] = *lower_bound(know_stop_num[i]. begin(),know_stop_num[i].end(),constC); int last_constC = *(--lower_bound(know_stop_num[i]. begin(),know_stop_num[i].end(),constC)); know_stop_len_cpu[i] =nlz(last_constC ^ know_stop_constC_cpu[i]) + 1; // printf(" %d %d ",last_constC,know_stop_constC_cpu[i], know_stop_len_cpu[i]); } // perform scan eq // profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20); //profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20); int *gpu_constC,*cpu_constC; checkCuda( hipMalloc((void**)&gpu_constC, bytes) ); cpu_constC=(int*)malloc(bytes); profilebitweavscan(h_bitPageable, h_bPageable, d_a, lt, eq, know_stop_len_cpu, know_stop_constC_cpu,early_size, group_size, nElements,gpu_constC,cpu_constC,constC,"Pageable",1); // printf("constC=%d\n",constC); // for(int i = 0; i < nElements; i++) printf("%u ",h_aPageable[i]);printf("\n"); // for(int i = 0; i < nElements; i++) printf("%u ",((h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n"); // for(int i = 0; i < nElements; i++) printf("%u ",((h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n"); // //for(int i = 0; i < nElements; i++) printf("%3u ",h_bitPageable[i]);printf("\n"); #ifdef TEST for(int i = 0; i < nElements; i++) { int x =(h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32); int y =(h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32); if(x ==0 && y == 0) printf("%d\n",1); else printf("%d\n", 0); // printf("%d|%d\n",x,y); } #endif // cleanup hipFree(lt); hipFree(eq); hipHostFree(h_aPinned); hipHostFree(h_bPinned); free(h_aPageable); }
6226acf48e180f8dcee81308f0d7ba2a7438e8d2.cu
/* Copyright (c) 2012-2013 The Ohio State University. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <stdlib.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> #include <string.h> #include <unistd.h> #include <time.h> #include <algorithm> #include <vector> #include "scanImpl.cu" #include "../include/common.h" #include "../include/gpuCudaLib.h" using namespace std; //#define TEST 1 #define CHECK_POINTER(p) do { \ if(p == NULL){ \ perror("Failed to allocate host memory"); \ exit(-1); \ }} while(0) const int know_stop_size=10000010; vector<int > know_stop_num[know_stop_size]; type nlz(utype x){ type n; if (x == 0) return(32); n = 1; if ((x >> 16) == 0) {n = n +16; x = x <<16;} if ((x >> 24) == 0) {n = n + 8; x = x << 8;} if ((x >> 28) == 0) {n = n + 4; x = x << 4;} if ((x >> 30) == 0) {n = n + 2; x = x << 2;} n = n - (x >> 31); } __global__ void static equal(int * a, int n, unsigned int constC){ int offset = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=offset; i<n; i+=stride){ a[i] = constC; } } __global__ void static genScanFilter_int_lth_bit(int * col,int n, int *constC,int group_size, int * lt, int * eq){ int offset = blockIdx.x*blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for(int i=offset; i < n; i+=stride){ lt[i] = lt[i] | (eq[i] & ~constC[i/group_size] & col[i]); eq[i] = eq[i] & ~(col[i] ^ constC[i/group_size]); //printf(" %d %u %u %u\n",i,lt[i],eq[i],col[i]); } } inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } inline int bit_constC(int where,int j){ int constC = (((1U << (31 - j )) & where)>>(31 - j)); if(constC != 0) constC = (1L << 32) - 1; return constC; } void profilebitweavscan(int *h_a, int *h_b, int *d, int *lt, int *eq, int *know_stop_len_cpu, int *know_stop_constC_cpu, int early_size, int group_size, int n, int *gpu_constC, int *constC, int queryC, char *desc, unsigned int loopTotal) { dim3 block(256); dim3 grid(2048); float time,stime; // events for timing cudaEvent_t startEvent, stopEvent; checkCuda( cudaEventCreate(&startEvent) ); checkCuda( cudaEventCreate(&stopEvent) ); stime=0; int bytes=n * sizeof(int); for(int loop = 1; loop <= loopTotal; loop++){ checkCuda( cudaEventRecord(startEvent, 0) ); #ifndef TEST queryC = rand()% (1U<<31); #endif for(int i = 0 ; i < n/group_size; ++i){ assert(queryC > 0); vector<int>::iterator ii = lower_bound(know_stop_num[i]. 
begin(),know_stop_num[i].end(),queryC); know_stop_constC_cpu[i] = *ii; int last_constC = *(--ii); know_stop_len_cpu[i] =nlz(last_constC ^ know_stop_constC_cpu[i]) + 1; // printf(" %d %d ",last_constC,know_stop_constC_cpu[i], know_stop_len_cpu[i]); } unsigned int c = 0; for(int i = 0;i < 32;i++) c += (1u << i); equal<<<grid,block>>>(lt, n/32, 0) ; equal<<<grid,block>>>(eq, n/32, c) ; checkCuda(cudaThreadSynchronize()); double len_max=0.0, len_sum = 0.0; for(int k = 0; k < n ; k += early_size ){ //printf("%d \n", know_stop_len_cpu[k / (early_size / 32)]); int len = 0; for(int i = 0; i < early_size / group_size; i++) len = max (len , know_stop_len_cpu[k / group_size + i]); checkCuda( cudaMemcpy(d + k, h_a + k, early_size / 32 *4 * len, cudaMemcpyHostToDevice) ); len_max=max(len_max,(double)len); len_sum=len_sum + len; for(int j = 0; j < len; ++j){ for(int i = 0; i < early_size / group_size; i ++ ) { constC[i] = bit_constC(know_stop_constC_cpu[k / group_size + i], j); //printf("constC=%d\n",constC[i]); } checkCuda( cudaMemcpy(gpu_constC, constC, early_size / group_size *4 , cudaMemcpyHostToDevice) ); int place = k + early_size / 32 * j; genScanFilter_int_lth_bit<<<grid,block>>>(d + place, early_size / 32, gpu_constC, group_size/32, lt + k/32, eq + k/32); checkCuda(cudaThreadSynchronize()); } } printf("%.2f %.2f\n",len_max, len_sum*1.0/(n/early_size)); checkCuda( cudaMemcpy(h_b, lt, n / 32 * 4, cudaMemcpyDeviceToHost) ); checkCuda( cudaMemcpy(h_b + n / 32 , eq, n / 32 * 4, cudaMemcpyDeviceToHost) ); checkCuda(cudaThreadSynchronize()); checkCuda( cudaEventRecord(stopEvent, 0) ); checkCuda( cudaEventSynchronize(stopEvent) ); checkCuda( cudaEventElapsedTime(&time, startEvent, stopEvent) ); stime += time; //printf("%f\n" ,bytes * 1e-6/(stime / loop)); //printf("%f\n",stime); } printf("%d %f\n" ,group_size,bytes * 1e-6/(stime / loopTotal)); checkCuda( cudaEventDestroy(startEvent) ); checkCuda( cudaEventDestroy(stopEvent) ); } int main(int argc, char ** argv) { #ifdef TEST freopen("scan.in","r",stdin); freopen("scan_smallstore.out","w",stdout); #endif dim3 block(256); dim3 grid(2048); int inputN; sscanf(argv[1],"%d",&inputN); unsigned int nElements = inputN; #ifdef TEST scanf("%d",&nElements); #endif const unsigned int bytes = nElements * sizeof(int); // host arrays int *h_aPageable, *h_bPageable,*h_bitPageable,*know_stop_len_cpu,*know_stop_constC_cpu; int *h_aPinned, *h_bPinned; // device array int *d_a,*lt,*eq; // allocate and initialize h_aPageable = (int*)malloc(bytes ); h_bPageable = (int*)malloc(bytes ); h_bitPageable =(int *)malloc(bytes ); know_stop_len_cpu = (int *)malloc(bytes ); know_stop_constC_cpu = (int *)malloc(bytes ); // host pageable checkCuda( cudaMallocHost((void**)&h_aPinned, bytes ) ); // host pinned checkCuda( cudaMallocHost((void**)&h_bPinned, bytes ) ); checkCuda( cudaMalloc((void**)&d_a, bytes ) ); // device checkCuda( cudaMalloc((void**)&lt, bytes ) ); // device return checkCuda( cudaMalloc((void**)&eq, bytes ) ); int early_size = 1024*1024; int group_size = 1024*64; sscanf(argv[2],"%d",&early_size); sscanf(argv[3],"%d",&group_size); srand(time(0)); for (int i = 0; i < nElements; ++i) h_aPageable[i] = rand()%(1U<<31); #ifdef TEST for (int i = 0; i < nElements; ++i) scanf("%d",h_aPageable + i); #endif for (int i = 0;i < nElements/ group_size; i++){ know_stop_num[i].push_back(0); know_stop_num[i].push_back((1U<<31)-1); //0----2^31-1 for(int j=0;j < group_size; j++) know_stop_num[i].push_back(h_aPageable[i * group_size + j]); 
sort(know_stop_num[i].begin(),know_stop_num[i].end()); know_stop_num[i].erase(unique(know_stop_num[i].begin(), know_stop_num[i].end()), know_stop_num[i].end()); } for (int i = 0; i < nElements; i += early_size) for(int j = 31; j >= 0; --j) for(int k = 0; k < early_size; ++k) { h_bitPageable[i + (31 -j) * early_size / 32 + k / 32] += (((h_aPageable[i + k] &(1<<j))>>j)<<(31 - k % 32)); } // for(int i = 0;i < nElements; i++) // for(int j = 0 ;j < 32;j++)h_bitPageable[i] = rand()%(1<<31); memcpy(h_aPinned, h_aPageable, bytes ); memset(h_bPageable, 0, bytes); memset(h_bPinned, 0, bytes); memset(know_stop_len_cpu, 0, bytes); // output device info and transfer size cudaDeviceProp prop; checkCuda( cudaGetDeviceProperties(&prop, 0) ); int constC = rand()%(1U<<31); #ifdef TEST scanf("%d",&constC); #endif for(int i = 0 ; i < nElements/group_size; ++i){ assert(constC > 0); know_stop_constC_cpu[i] = *lower_bound(know_stop_num[i]. begin(),know_stop_num[i].end(),constC); int last_constC = *(--lower_bound(know_stop_num[i]. begin(),know_stop_num[i].end(),constC)); know_stop_len_cpu[i] =nlz(last_constC ^ know_stop_constC_cpu[i]) + 1; // printf(" %d %d ",last_constC,know_stop_constC_cpu[i], know_stop_len_cpu[i]); } // perform scan eq // profilescan(h_aPageable, h_bPageable, d_a, filter, nElements, constC,"Pageable",20); //profilescan(h_aPinned, h_bPinned, d_a, filter,nElements, constC,"Pinned",20); int *gpu_constC,*cpu_constC; checkCuda( cudaMalloc((void**)&gpu_constC, bytes) ); cpu_constC=(int*)malloc(bytes); profilebitweavscan(h_bitPageable, h_bPageable, d_a, lt, eq, know_stop_len_cpu, know_stop_constC_cpu,early_size, group_size, nElements,gpu_constC,cpu_constC,constC,"Pageable",1); // printf("constC=%d\n",constC); // for(int i = 0; i < nElements; i++) printf("%u ",h_aPageable[i]);printf("\n"); // for(int i = 0; i < nElements; i++) printf("%u ",((h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n"); // for(int i = 0; i < nElements; i++) printf("%u ",((h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32)));printf("\n"); // //for(int i = 0; i < nElements; i++) printf("%3u ",h_bitPageable[i]);printf("\n"); #ifdef TEST for(int i = 0; i < nElements; i++) { int x =(h_bPageable[i/32] & (1u << (31 - i % 32)))>> (31 - i % 32); int y =(h_bPageable[i/32 + nElements/32] & (1u << (31 - i % 32)))>> (31 - i % 32); if(x ==0 && y == 0) printf("%d\n",1); else printf("%d\n", 0); // printf("%d|%d\n",x,y); } #endif // cleanup cudaFree(lt); cudaFree(eq); cudaFreeHost(h_aPinned); cudaFreeHost(h_bPinned); free(h_aPageable); }
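The scan pair above evaluates a column-versus-constant predicate on a bit-sliced layout: each 32-bit word of a bit plane packs the same bit position (most significant bit first) of 32 values, and per plane the running words are updated as lt |= eq & ~c & col and eq &= ~(col ^ c). With that polarity, the first differing bit where the column has 1 and the constant has 0 sets lt, so lt actually marks values greater than the constant, and the TEST block recovers value < constant as (lt bit == 0 && eq bit == 0); the per-group early stop scans only nlz(prev ^ chosen) + 1 planes. The scalar sketch below is my own (helper names included) and uses the GCC/Clang __builtin_clz for the leading-zero count.

// Scalar sketch of the bit-sliced comparison used above. planes[j] packs bit j
// (MSB first) of 32 values into one word; cbit[j] is bit j of the constant
// broadcast to all 32 lanes (0x00000000 or 0xFFFFFFFF). After the loop, gt
// marks lanes whose value is greater than the constant and eq marks lanes
// equal to it, so "value < constant" per lane is ~(gt | eq).
#include <cstdint>

void bitsliced_compare(const uint32_t* planes, const uint32_t* cbit,
                       int num_planes, uint32_t* gt_out, uint32_t* eq_out) {
    uint32_t gt = 0u;
    uint32_t eq = ~0u;                       // every lane still equal so far
    for (int j = 0; j < num_planes; ++j) {
        const uint32_t x = planes[j];
        gt |= eq & ~cbit[j] & x;             // first differing bit: value has 1, constant has 0
        eq &= ~(x ^ cbit[j]);                // keep only lanes whose bits still agree
    }
    *gt_out = gt;
    *eq_out = eq;
}

// Early-stop plane count for a group, as computed on the host above: scan up to
// and including the first bit where the chosen constant differs from its
// predecessor in the group's sorted value set.
static inline int planes_needed(uint32_t prev_const, uint32_t chosen_const) {
    return __builtin_clz(prev_const ^ chosen_const) + 1;   // undefined if the arguments are equal
}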
cc650bd8c83a4bb25bf1bd110adb44049e6bdc08.hip
// !!! This is a file automatically generated by hipify!!! #ifndef __HIPCC__ #define __HIPCC__ #endif #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/device_functions.h> #include <hip/hip_runtime_api.h> #include "stdio.h" #include <stdlib.h> #include <time.h> #include <chrono> #include <iostream> #include <algorithm> using namespace std; const int d_size = 1024; const int block_size = 16; int *d; int *default_d; int *true_result; int *device_d; dim3 ud_threads(block_size, block_size, 1); dim3 ud_grid(1, 1, 1); dim3 sd_threads(block_size, block_size, 1); dim3 sd_grid(d_size / block_size, 2, 1); dim3 dd_threads(block_size, block_size, 1); dim3 dd_grid(d_size / block_size, d_size / block_size, 1); void init() { d = new int[d_size * d_size]; default_d = new int[d_size * d_size]; true_result = new int[d_size * d_size]; srand(time(NULL)); for (int i = 0; i < d_size * d_size; i++) { d[i] = rand() % 100 + 1; } copy(d, d + d_size * d_size, default_d); } void print_first_elements() { for (int i = 0; i < 10; i++) { cout << d[i] << " "; } cout << endl; } void set_default_d() { copy(default_d, default_d + d_size * d_size, d); } void set_true_result() { copy(d, d + d_size * d_size, true_result); } bool equals(int *first, int *second) { if (sizeof(first) != sizeof(second)) return false; for (int i = 0; i < sizeof(first) / sizeof(int); i++) { if (first[i] != second[i]) return false; } return true; } void print_equals_matrix(int *first, int *second) { if (sizeof(first) != sizeof(second)) return; int count = 0; for (int i = 0; i < d_size; i++) { for (int j = 0; j < d_size; j++) { bool temprary = first[i * d_size + j] == second[i * d_size + j] ? true : false; cout << temprary << " "; count += temprary; } cout << endl; } cout << "COUNT " << count; } void print(int * array) { for (int i = 0; i < d_size; i++) { for (int j = 0; j < d_size; j++) { cout << array[i * d_size + j] << "\t"; } cout << endl; } } __global__ void ud_gpu_floyd(int *d, int k_bl) { __shared__ int leadBlock[block_size * block_size]; int i = block_size * k_bl + threadIdx.x; int j = block_size * k_bl + threadIdx.y; leadBlock[threadIdx.x * block_size + threadIdx.y] = d[i * d_size + j]; __syncthreads(); int local_i = threadIdx.x; int local_j = threadIdx.y; #pragma unroll for (int local_k = 0; local_k < block_size; local_k++) { leadBlock[local_i * block_size + local_j] = min(leadBlock[local_i * block_size + local_j], leadBlock[local_i * block_size + local_k] + leadBlock[local_k * block_size + local_j]); } d[i * d_size + j] = leadBlock[threadIdx.x * block_size + threadIdx.y]; } __global__ void sd_gpu_floyd(int *d, int k_bl) { if (blockIdx.x == k_bl) return; //ignore lead block __shared__ int leadBlock[block_size * block_size]; leadBlock[threadIdx.x * block_size + threadIdx.y] = d[(block_size * k_bl + threadIdx.x) * d_size + block_size * k_bl + threadIdx.y]; bool isColomn = blockIdx.y == 0 ? true : false; int d_i, d_j; d_i = block_size * (isColomn ? blockIdx.x : k_bl) + threadIdx.x; d_j = block_size * (isColomn ? k_bl : blockIdx.x) + threadIdx.y; __shared__ int currentBlock[block_size * block_size]; currentBlock[threadIdx.x * block_size + threadIdx.y] = d[d_i * d_size + d_j]; __syncthreads(); #pragma unroll for (int local_k = 0; local_k < block_size; local_k++) { currentBlock[threadIdx.x * block_size + threadIdx.y] = isColomn ? 
min(currentBlock[threadIdx.x * block_size + threadIdx.y], currentBlock[threadIdx.x * block_size + local_k] + leadBlock[local_k * block_size + threadIdx.y]) : min(currentBlock[threadIdx.x * block_size + threadIdx.y], leadBlock[threadIdx.x * block_size + local_k] + currentBlock[local_k * block_size + threadIdx.y]); } d[d_i * d_size + d_j] = currentBlock[threadIdx.x * block_size + threadIdx.y]; } __global__ void dd_gpu_floyd(int *d, int k_bl) { if (blockIdx.x == k_bl || blockIdx.y == k_bl) return; //ignore lead raw and column blocks __shared__ int leadColumnBlock[block_size * block_size]; int column_i_gl = block_size * k_bl + threadIdx.x; int column_j_gl = block_size * blockIdx.y + threadIdx.y; leadColumnBlock[threadIdx.x * block_size + threadIdx.y] = d[column_i_gl * d_size + column_j_gl]; __shared__ int leadRawBlock[block_size * block_size]; int raw_i_gl = block_size * blockIdx.x + threadIdx.x; int raw_j_gl = block_size * k_bl + threadIdx.y; leadRawBlock[threadIdx.x * block_size + threadIdx.y] = d[raw_i_gl * d_size + raw_j_gl]; int currentValue = d[(block_size * blockIdx.x + threadIdx.x) * d_size + block_size * blockIdx.y + threadIdx.y]; __syncthreads(); #pragma unroll for (int local_k = 0; local_k < block_size; local_k++) { currentValue = min(currentValue, leadColumnBlock[local_k * block_size + threadIdx.y] + leadRawBlock[threadIdx.x * block_size + local_k]); } d[(block_size * blockIdx.x + threadIdx.x) * d_size + block_size * blockIdx.y + threadIdx.y] = currentValue; } void cpu_floyd() { auto start = std::chrono::steady_clock::now(); for (int k = 0; k < d_size; k++) { for (int i = 0; i < d_size; i++) { for (int j = 0; j < d_size; j++) { d[i * d_size + j] = min(d[i * d_size + j], d[i * d_size + k] + d[k * d_size + j]); } } } auto duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now() - start); cout << "cpu_floyd " << duration.count() << endl; set_true_result(); } inline void gpu_floyd(int k_bl) { ud_gpu_floyd << <ud_grid, ud_threads >> >(device_d, k_bl); sd_gpu_floyd << <sd_grid, sd_threads >> >(device_d, k_bl); dd_gpu_floyd << <dd_grid, dd_threads >> >(device_d, k_bl); } void gpu_floyd_launch() { set_default_d(); auto start = std::chrono::steady_clock::now(); hipSetDevice(0); hipMalloc(&device_d, d_size * d_size * sizeof(int)); hipMemcpy(device_d, d, d_size * d_size * sizeof(int), hipMemcpyHostToDevice); int *currentBlockPtr; hipMalloc(&currentBlockPtr, d_size * d_size * sizeof(int)); for (int k = 0; k < d_size / block_size; k++) { gpu_floyd(k); } hipMemcpy(d, device_d, d_size * d_size * sizeof(int), hipMemcpyDeviceToHost); hipFree(device_d); hipDeviceReset(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now() - start); cout << "gpu_floyd " << duration.count() << endl; } int main() { init(); cpu_floyd(); gpu_floyd_launch(); cout << (equals(d, true_result) ? "SUCCES" : "NOT SUCCES"); getchar(); return 0; }
cc650bd8c83a4bb25bf1bd110adb44049e6bdc08.cu
#ifndef __CUDACC__ #define __CUDACC__ #endif #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda.h> #include <device_functions.h> #include <cuda_runtime_api.h> #include "stdio.h" #include <stdlib.h> #include <time.h> #include <chrono> #include <iostream> #include <algorithm> using namespace std; const int d_size = 1024; const int block_size = 16; int *d; int *default_d; int *true_result; int *device_d; dim3 ud_threads(block_size, block_size, 1); dim3 ud_grid(1, 1, 1); dim3 sd_threads(block_size, block_size, 1); dim3 sd_grid(d_size / block_size, 2, 1); dim3 dd_threads(block_size, block_size, 1); dim3 dd_grid(d_size / block_size, d_size / block_size, 1); void init() { d = new int[d_size * d_size]; default_d = new int[d_size * d_size]; true_result = new int[d_size * d_size]; srand(time(NULL)); for (int i = 0; i < d_size * d_size; i++) { d[i] = rand() % 100 + 1; } copy(d, d + d_size * d_size, default_d); } void print_first_elements() { for (int i = 0; i < 10; i++) { cout << d[i] << " "; } cout << endl; } void set_default_d() { copy(default_d, default_d + d_size * d_size, d); } void set_true_result() { copy(d, d + d_size * d_size, true_result); } bool equals(int *first, int *second) { if (sizeof(first) != sizeof(second)) return false; for (int i = 0; i < sizeof(first) / sizeof(int); i++) { if (first[i] != second[i]) return false; } return true; } void print_equals_matrix(int *first, int *second) { if (sizeof(first) != sizeof(second)) return; int count = 0; for (int i = 0; i < d_size; i++) { for (int j = 0; j < d_size; j++) { bool temprary = first[i * d_size + j] == second[i * d_size + j] ? true : false; cout << temprary << " "; count += temprary; } cout << endl; } cout << "COUNT " << count; } void print(int * array) { for (int i = 0; i < d_size; i++) { for (int j = 0; j < d_size; j++) { cout << array[i * d_size + j] << "\t"; } cout << endl; } } __global__ void ud_gpu_floyd(int *d, int k_bl) { __shared__ int leadBlock[block_size * block_size]; int i = block_size * k_bl + threadIdx.x; int j = block_size * k_bl + threadIdx.y; leadBlock[threadIdx.x * block_size + threadIdx.y] = d[i * d_size + j]; __syncthreads(); int local_i = threadIdx.x; int local_j = threadIdx.y; #pragma unroll for (int local_k = 0; local_k < block_size; local_k++) { leadBlock[local_i * block_size + local_j] = min(leadBlock[local_i * block_size + local_j], leadBlock[local_i * block_size + local_k] + leadBlock[local_k * block_size + local_j]); } d[i * d_size + j] = leadBlock[threadIdx.x * block_size + threadIdx.y]; } __global__ void sd_gpu_floyd(int *d, int k_bl) { if (blockIdx.x == k_bl) return; //ignore lead block __shared__ int leadBlock[block_size * block_size]; leadBlock[threadIdx.x * block_size + threadIdx.y] = d[(block_size * k_bl + threadIdx.x) * d_size + block_size * k_bl + threadIdx.y]; bool isColomn = blockIdx.y == 0 ? true : false; int d_i, d_j; d_i = block_size * (isColomn ? blockIdx.x : k_bl) + threadIdx.x; d_j = block_size * (isColomn ? k_bl : blockIdx.x) + threadIdx.y; __shared__ int currentBlock[block_size * block_size]; currentBlock[threadIdx.x * block_size + threadIdx.y] = d[d_i * d_size + d_j]; __syncthreads(); #pragma unroll for (int local_k = 0; local_k < block_size; local_k++) { currentBlock[threadIdx.x * block_size + threadIdx.y] = isColomn ? 
min(currentBlock[threadIdx.x * block_size + threadIdx.y], currentBlock[threadIdx.x * block_size + local_k] + leadBlock[local_k * block_size + threadIdx.y]) : min(currentBlock[threadIdx.x * block_size + threadIdx.y], leadBlock[threadIdx.x * block_size + local_k] + currentBlock[local_k * block_size + threadIdx.y]); } d[d_i * d_size + d_j] = currentBlock[threadIdx.x * block_size + threadIdx.y]; } __global__ void dd_gpu_floyd(int *d, int k_bl) { if (blockIdx.x == k_bl || blockIdx.y == k_bl) return; //ignore lead raw and column blocks __shared__ int leadColumnBlock[block_size * block_size]; int column_i_gl = block_size * k_bl + threadIdx.x; int column_j_gl = block_size * blockIdx.y + threadIdx.y; leadColumnBlock[threadIdx.x * block_size + threadIdx.y] = d[column_i_gl * d_size + column_j_gl]; __shared__ int leadRawBlock[block_size * block_size]; int raw_i_gl = block_size * blockIdx.x + threadIdx.x; int raw_j_gl = block_size * k_bl + threadIdx.y; leadRawBlock[threadIdx.x * block_size + threadIdx.y] = d[raw_i_gl * d_size + raw_j_gl]; int currentValue = d[(block_size * blockIdx.x + threadIdx.x) * d_size + block_size * blockIdx.y + threadIdx.y]; __syncthreads(); #pragma unroll for (int local_k = 0; local_k < block_size; local_k++) { currentValue = min(currentValue, leadColumnBlock[local_k * block_size + threadIdx.y] + leadRawBlock[threadIdx.x * block_size + local_k]); } d[(block_size * blockIdx.x + threadIdx.x) * d_size + block_size * blockIdx.y + threadIdx.y] = currentValue; } void cpu_floyd() { auto start = std::chrono::steady_clock::now(); for (int k = 0; k < d_size; k++) { for (int i = 0; i < d_size; i++) { for (int j = 0; j < d_size; j++) { d[i * d_size + j] = min(d[i * d_size + j], d[i * d_size + k] + d[k * d_size + j]); } } } auto duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now() - start); cout << "cpu_floyd " << duration.count() << endl; set_true_result(); } inline void gpu_floyd(int k_bl) { ud_gpu_floyd << <ud_grid, ud_threads >> >(device_d, k_bl); sd_gpu_floyd << <sd_grid, sd_threads >> >(device_d, k_bl); dd_gpu_floyd << <dd_grid, dd_threads >> >(device_d, k_bl); } void gpu_floyd_launch() { set_default_d(); auto start = std::chrono::steady_clock::now(); cudaSetDevice(0); cudaMalloc(&device_d, d_size * d_size * sizeof(int)); cudaMemcpy(device_d, d, d_size * d_size * sizeof(int), cudaMemcpyHostToDevice); int *currentBlockPtr; cudaMalloc(&currentBlockPtr, d_size * d_size * sizeof(int)); for (int k = 0; k < d_size / block_size; k++) { gpu_floyd(k); } cudaMemcpy(d, device_d, d_size * d_size * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(device_d); cudaDeviceReset(); auto duration = std::chrono::duration_cast<std::chrono::milliseconds> (std::chrono::steady_clock::now() - start); cout << "gpu_floyd " << duration.count() << endl; } int main() { init(); cpu_floyd(); gpu_floyd_launch(); cout << (equals(d, true_result) ? "SUCCES" : "NOT SUCCES"); getchar(); return 0; }
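One caveat in the blocked Floyd-Warshall pair above: equals() takes its operands as int*, so sizeof(first) is the pointer size and the loop inspects only sizeof(int*)/sizeof(int) elements rather than the whole matrix. A minimal element-wise check over all d_size * d_size entries might look like the sketch below (function name mine).

// Hedged sketch: element-wise comparison over the full matrix.
#include <cstddef>

bool matrices_equal(const int* a, const int* b, std::size_t n_elems) {
    for (std::size_t i = 0; i < n_elems; ++i) {
        if (a[i] != b[i]) return false;
    }
    return true;
}

// usage with the globals from the file above:
//   matrices_equal(d, true_result, (std::size_t)d_size * d_size)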
ac8cacacc58eb25f04e8614ee9f2313102a409cb.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright (c) 2021 by Contributors * \file graph/sampling/get_node_types_gpu.cu * \brief DGL sampler */ #include <dgl/array.h> #include <dgl/base_heterograph.h> #include <dgl/runtime/device_api.h> #include <hip/hip_runtime.h> #include <utility> #include "randomwalks_impl.h" namespace dgl { using namespace dgl::runtime; using namespace dgl::aten; namespace sampling { namespace impl { template<DLDeviceType XPU, typename IdxType> TypeArray GetNodeTypesFromMetapath( const HeteroGraphPtr hg, const TypeArray metapath) { uint64_t num_etypes = metapath->shape[0]; auto cpu_ctx = DGLContext{kDLCPU, 0}; auto metapath_ctx = metapath->ctx; // use default stream hipStream_t stream = 0; TypeArray h_result = TypeArray::Empty( {metapath->shape[0] + 1}, metapath->dtype, cpu_ctx); auto h_result_data = h_result.Ptr<IdxType>(); auto h_metapath = metapath.CopyTo(cpu_ctx, stream); DeviceAPI::Get(metapath_ctx)->StreamSync(metapath_ctx, stream); const IdxType *h_metapath_data = h_metapath.Ptr<IdxType>(); dgl_type_t curr_type = hg->GetEndpointTypes(h_metapath_data[0]).first; h_result_data[0] = curr_type; for (uint64_t i = 0; i < num_etypes; ++i) { auto src_dst_type = hg->GetEndpointTypes(h_metapath_data[i]); dgl_type_t srctype = src_dst_type.first; dgl_type_t dsttype = src_dst_type.second; if (srctype != curr_type) { LOG(FATAL) << "source of edge type #" << i << " does not match destination of edge type #" << i - 1; } curr_type = dsttype; h_result_data[i + 1] = dsttype; } auto result = h_result.CopyTo(metapath->ctx, stream); DeviceAPI::Get(metapath_ctx)->StreamSync(metapath_ctx, stream); return result; } template TypeArray GetNodeTypesFromMetapath<kDLGPU, int32_t>( const HeteroGraphPtr hg, const TypeArray metapath); template TypeArray GetNodeTypesFromMetapath<kDLGPU, int64_t>( const HeteroGraphPtr hg, const TypeArray metapath); }; // namespace impl }; // namespace sampling }; // namespace dgl
ac8cacacc58eb25f04e8614ee9f2313102a409cb.cu
/*! * Copyright (c) 2021 by Contributors * \file graph/sampling/get_node_types_gpu.cu * \brief DGL sampler */ #include <dgl/array.h> #include <dgl/base_heterograph.h> #include <dgl/runtime/device_api.h> #include <cuda_runtime.h> #include <utility> #include "randomwalks_impl.h" namespace dgl { using namespace dgl::runtime; using namespace dgl::aten; namespace sampling { namespace impl { template<DLDeviceType XPU, typename IdxType> TypeArray GetNodeTypesFromMetapath( const HeteroGraphPtr hg, const TypeArray metapath) { uint64_t num_etypes = metapath->shape[0]; auto cpu_ctx = DGLContext{kDLCPU, 0}; auto metapath_ctx = metapath->ctx; // use default stream cudaStream_t stream = 0; TypeArray h_result = TypeArray::Empty( {metapath->shape[0] + 1}, metapath->dtype, cpu_ctx); auto h_result_data = h_result.Ptr<IdxType>(); auto h_metapath = metapath.CopyTo(cpu_ctx, stream); DeviceAPI::Get(metapath_ctx)->StreamSync(metapath_ctx, stream); const IdxType *h_metapath_data = h_metapath.Ptr<IdxType>(); dgl_type_t curr_type = hg->GetEndpointTypes(h_metapath_data[0]).first; h_result_data[0] = curr_type; for (uint64_t i = 0; i < num_etypes; ++i) { auto src_dst_type = hg->GetEndpointTypes(h_metapath_data[i]); dgl_type_t srctype = src_dst_type.first; dgl_type_t dsttype = src_dst_type.second; if (srctype != curr_type) { LOG(FATAL) << "source of edge type #" << i << " does not match destination of edge type #" << i - 1; } curr_type = dsttype; h_result_data[i + 1] = dsttype; } auto result = h_result.CopyTo(metapath->ctx, stream); DeviceAPI::Get(metapath_ctx)->StreamSync(metapath_ctx, stream); return result; } template TypeArray GetNodeTypesFromMetapath<kDLGPU, int32_t>( const HeteroGraphPtr hg, const TypeArray metapath); template TypeArray GetNodeTypesFromMetapath<kDLGPU, int64_t>( const HeteroGraphPtr hg, const TypeArray metapath); }; // namespace impl }; // namespace sampling }; // namespace dgl
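GetNodeTypesFromMetapath above copies the metapath of edge types to the host, walks it, and returns the node-type sequence (one entry longer than the metapath), raising a fatal error if the source type of an edge does not match the destination type of the previous one. The stand-alone sketch below illustrates that mapping on the CPU; the EdgeEndpoints struct and the example types are hypothetical stand-ins for hg->GetEndpointTypes(), not DGL API.

// Hypothetical stand-in for hg->GetEndpointTypes(etype): (src node type, dst node type).
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

struct EdgeEndpoints { int64_t src; int64_t dst; };

std::vector<int64_t> node_types_from_metapath(const std::vector<EdgeEndpoints>& etypes,
                                               const std::vector<int64_t>& metapath) {
    std::vector<int64_t> result;
    if (metapath.empty()) return result;
    result.reserve(metapath.size() + 1);
    int64_t curr = etypes[metapath[0]].src;
    result.push_back(curr);
    for (std::size_t i = 0; i < metapath.size(); ++i) {
        const EdgeEndpoints& e = etypes[metapath[i]];
        if (e.src != curr) {
            std::fprintf(stderr, "edge type #%zu does not start at the previous node type\n", i);
            return {};
        }
        curr = e.dst;
        result.push_back(curr);
    }
    // e.g. metapath [writes, cites] over author->paper, paper->paper gives [author, paper, paper]
    return result;
}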
420d24b42df896040ce55324d797155033ce4d5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <stdio.h> #include <climits> #define BLOCK_SIZE 512 /* * Definitions: * d[i] = shortest path so far from source to i * U = unvisited verts * F = frontier verts * del = biggest d[i] (i from U) that we can add to frontier * del[i] = min(d[u] + del[u] for all u in U) (ith iteration) * del[u] = minimum weight of its outgoing edges * */ // find min edge out __global__ void findAllMins(int* adjMat, int* outVec, size_t gSize) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; int ind = globalThreadId * gSize; int min = INT_MAX; if(globalThreadId < gSize) { for(int i = 0; i < gSize; i++) { if(adjMat[ind + i] < min && adjMat[ind + i] > 0) { min = adjMat[ind + i]; } } outVec[globalThreadId] = min; } } /* * forall i in parallel do * if (F[i]) * forall j predecessor of i do * if (U[j]) * c[i]= min(c[i],c[j]+w[j,i]); * */ __global__ void relax(int* U, int* F, int* d, size_t gSize, int* adjMat) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; if (globalThreadId < gSize) { if (F[globalThreadId]) { for (int i = 0; i < gSize; i++) { if(adjMat[globalThreadId*gSize + i] && i != globalThreadId && U[i]) { atomicMin(&d[i], d[globalThreadId] + adjMat[globalThreadId * gSize + i]); } } } } } __global__ void min(int* U, int* d, int* outDel, int* minOutEdges, size_t gSize, int useD) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; int pos1 = 2*globalThreadId; int pos2 = 2*globalThreadId + 1; int val1, val2; if(pos1 < gSize) { val1 = minOutEdges[pos1] + (useD ? d[pos1] : 0); if(pos2 < gSize) { val2 = minOutEdges[pos2] + (useD ? d[pos2] : 0); val1 = val1 <= 0 ? INT_MAX : val1; val2 = val2 <= 0 ? INT_MAX : val2; if(useD) { val1 = U[pos1] ? val1 : INT_MAX; val2 = U[pos2] ? val2 : INT_MAX; } if(val1 > val2) { outDel[globalThreadId] = val2; } else{ outDel[globalThreadId] = val1; } } else { val1 = val1 <= 0 ? INT_MAX : val1; if(useD) { val1 = U[pos1] ? 
val1 : INT_MAX; } outDel[globalThreadId] = val1; } } } /* * F[tid] = false * if(U[tid] and d[tid] < del) * U[tid] = false * F[tid] = true * */ __global__ void update(int* U, int* F, int* d, int* del, size_t gSize) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; if (globalThreadId < gSize) { F[globalThreadId] = 0; if(U[globalThreadId] && d[globalThreadId] < del[0]) { U[globalThreadId] = 0; F[globalThreadId] = 1; } } } /* * U[tid] = true * F[tid] = false * d[tid] = -1 */ __global__ void init(int* U, int* F, int* d, int startNode, size_t gSize) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; if (globalThreadId < gSize) { U[globalThreadId] = 1; F[globalThreadId] = 0; d[globalThreadId] = INT_MAX; } if(globalThreadId == 0) { d[globalThreadId] = 0; U[globalThreadId] = 0; F[globalThreadId] = 1; } } void doShortest(int* adjMat, int* shortestOut, size_t gSize, int startingNode, int* _d_adjMat, int* _d_outVec, int* _d_unvisited, int* _d_frontier, int* _d_estimates, int* _d_delta, int* _d_minOutEdge) { int del; int numBlocks = (gSize / BLOCK_SIZE) + 1; // O(n) but total algo is larger than O(n) so who cares hipLaunchKernelGGL(( findAllMins), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, _d_adjMat, _d_minOutEdge, gSize); /* * pseudo-code algo * * init<<<>>>(U, F, d) * while(del != -1) * relax<<<>>>(U, F, d) * del = min<<<>>>(U, d) * update<<<>>>(U, F, d, del) */ int curSize = gSize; int dFlag; int* _d_minTemp1; int* _d_minTemp2; int* tempDebug = (int*) malloc(sizeof(int) * gSize); hipMalloc((void**) &_d_minTemp1 , sizeof(int) * gSize); hipLaunchKernelGGL(( init), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, _d_unvisited, _d_frontier, _d_estimates, startingNode, gSize); do { dFlag = 1; curSize = gSize; hipMemcpy(_d_minTemp1, _d_minOutEdge, sizeof(int) * gSize, hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( relax), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, _d_unvisited, _d_frontier, _d_estimates, gSize, _d_adjMat); do { hipLaunchKernelGGL(( min), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, _d_unvisited, _d_estimates, _d_delta, _d_minTemp1, curSize, dFlag); _d_minTemp2 = _d_minTemp1; _d_minTemp1 = _d_delta; _d_delta = _d_minTemp2; curSize /= 2; dFlag = 0; } while (curSize > 0); _d_minTemp2 = _d_minTemp1; _d_minTemp1 = _d_delta; _d_delta = _d_minTemp2; hipLaunchKernelGGL(( update), dim3(numBlocks), dim3(BLOCK_SIZE), 0, 0, _d_unvisited, _d_frontier, _d_estimates, _d_delta, gSize); hipMemcpy(&del, _d_delta, sizeof(int), hipMemcpyDeviceToHost); } while(del != INT_MAX); hipMemcpy(shortestOut, _d_estimates, sizeof(int) * gSize, hipMemcpyDeviceToHost); #ifndef NO_PRINT for(int i = 0; i < gSize; i++){ printf("shotest path from %d to %d is %d long.\n", startingNode, i, shortestOut[i]); } printf("\n"); #endif hipFree(_d_minTemp1); }
420d24b42df896040ce55324d797155033ce4d5f.cu
#include <iostream> #include <stdio.h> #include <climits> #define BLOCK_SIZE 512 /* * Definitions: * d[i] = shortest path so far from source to i * U = unvisited verts * F = frontier verts * del = biggest d[i] (i from U) that we can add to frontier * del[i] = min(d[u] + del[u] for all u in U) (ith iteration) * del[u] = minimum weight of its outgoing edges * */ // find min edge out __global__ void findAllMins(int* adjMat, int* outVec, size_t gSize) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; int ind = globalThreadId * gSize; int min = INT_MAX; if(globalThreadId < gSize) { for(int i = 0; i < gSize; i++) { if(adjMat[ind + i] < min && adjMat[ind + i] > 0) { min = adjMat[ind + i]; } } outVec[globalThreadId] = min; } } /* * forall i in parallel do * if (F[i]) * forall j predecessor of i do * if (U[j]) * c[i]= min(c[i],c[j]+w[j,i]); * */ __global__ void relax(int* U, int* F, int* d, size_t gSize, int* adjMat) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; if (globalThreadId < gSize) { if (F[globalThreadId]) { for (int i = 0; i < gSize; i++) { if(adjMat[globalThreadId*gSize + i] && i != globalThreadId && U[i]) { atomicMin(&d[i], d[globalThreadId] + adjMat[globalThreadId * gSize + i]); } } } } } __global__ void min(int* U, int* d, int* outDel, int* minOutEdges, size_t gSize, int useD) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; int pos1 = 2*globalThreadId; int pos2 = 2*globalThreadId + 1; int val1, val2; if(pos1 < gSize) { val1 = minOutEdges[pos1] + (useD ? d[pos1] : 0); if(pos2 < gSize) { val2 = minOutEdges[pos2] + (useD ? d[pos2] : 0); val1 = val1 <= 0 ? INT_MAX : val1; val2 = val2 <= 0 ? INT_MAX : val2; if(useD) { val1 = U[pos1] ? val1 : INT_MAX; val2 = U[pos2] ? val2 : INT_MAX; } if(val1 > val2) { outDel[globalThreadId] = val2; } else{ outDel[globalThreadId] = val1; } } else { val1 = val1 <= 0 ? INT_MAX : val1; if(useD) { val1 = U[pos1] ? 
val1 : INT_MAX; } outDel[globalThreadId] = val1; } } } /* * F[tid] = false * if(U[tid] and d[tid] < del) * U[tid] = false * F[tid] = true * */ __global__ void update(int* U, int* F, int* d, int* del, size_t gSize) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; if (globalThreadId < gSize) { F[globalThreadId] = 0; if(U[globalThreadId] && d[globalThreadId] < del[0]) { U[globalThreadId] = 0; F[globalThreadId] = 1; } } } /* * U[tid] = true * F[tid] = false * d[tid] = INT_MAX (and d[startNode] = 0) */ __global__ void init(int* U, int* F, int* d, int startNode, size_t gSize) { int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x; if (globalThreadId < gSize) { U[globalThreadId] = 1; F[globalThreadId] = 0; d[globalThreadId] = INT_MAX; } if(globalThreadId == startNode) { d[globalThreadId] = 0; U[globalThreadId] = 0; F[globalThreadId] = 1; } } void doShortest(int* adjMat, int* shortestOut, size_t gSize, int startingNode, int* _d_adjMat, int* _d_outVec, int* _d_unvisited, int* _d_frontier, int* _d_estimates, int* _d_delta, int* _d_minOutEdge) { int del; int numBlocks = (gSize / BLOCK_SIZE) + 1; // O(n) but total algo is larger than O(n) so who cares findAllMins<<<numBlocks, BLOCK_SIZE>>>(_d_adjMat, _d_minOutEdge, gSize); /* * pseudo-code algo * * init<<<>>>(U, F, d) * while(del != -1) * relax<<<>>>(U, F, d) * del = min<<<>>>(U, d) * update<<<>>>(U, F, d, del) */ int curSize = gSize; int dFlag; int* _d_minTemp1; int* _d_minTemp2; cudaMalloc((void**) &_d_minTemp1 , sizeof(int) * gSize); init<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_frontier, _d_estimates, startingNode, gSize); do { dFlag = 1; curSize = gSize; cudaMemcpy(_d_minTemp1, _d_minOutEdge, sizeof(int) * gSize, cudaMemcpyDeviceToDevice); relax<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_frontier, _d_estimates, gSize, _d_adjMat); do { min<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_estimates, _d_delta, _d_minTemp1, curSize, dFlag); _d_minTemp2 = _d_minTemp1; _d_minTemp1 = _d_delta; _d_delta = _d_minTemp2; curSize /= 2; dFlag = 0; } while (curSize > 0); _d_minTemp2 = _d_minTemp1; _d_minTemp1 = _d_delta; _d_delta = _d_minTemp2; update<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_frontier, _d_estimates, _d_delta, gSize); cudaMemcpy(&del, _d_delta, sizeof(int), cudaMemcpyDeviceToHost); } while(del != INT_MAX); cudaMemcpy(shortestOut, _d_estimates, sizeof(int) * gSize, cudaMemcpyDeviceToHost); #ifndef NO_PRINT for(int i = 0; i < gSize; i++){ printf("shortest path from %d to %d is %d long.\n", startingNode, i, shortestOut[i]); } printf("\n"); #endif cudaFree(_d_minTemp1); }
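The doShortest driver above (in both the HIP and CUDA variants) leaves every device buffer allocation to the caller. The sketch below shows one way the host side might be wired up; only the doShortest signature comes from the file, while the runSSSPExample name, the 4-vertex graph, and the buffer sizes are illustrative assumptions (a dense row-major adjacency matrix in which 0 means "no edge", matching how findAllMins and relax read it).

#include <cuda_runtime.h>

// Signature copied from the file above; the function copies results into shortestOut itself.
void doShortest(int* adjMat, int* shortestOut, size_t gSize, int startingNode,
                int* _d_adjMat, int* _d_outVec, int* _d_unvisited, int* _d_frontier,
                int* _d_estimates, int* _d_delta, int* _d_minOutEdge);

void runSSSPExample() {
  const int gSize = 4;
  // Dense row-major adjacency matrix; 0 means "no edge".
  int adjMat[gSize * gSize] = {
      0, 5, 1, 0,
      5, 0, 2, 9,
      1, 2, 0, 3,
      0, 9, 3, 0};
  int shortestOut[gSize];

  int *d_adjMat, *d_outVec, *d_unvisited, *d_frontier, *d_estimates, *d_delta, *d_minOutEdge;
  cudaMalloc(&d_adjMat, gSize * gSize * sizeof(int));
  cudaMalloc(&d_outVec, gSize * sizeof(int));
  cudaMalloc(&d_unvisited, gSize * sizeof(int));
  cudaMalloc(&d_frontier, gSize * sizeof(int));
  cudaMalloc(&d_estimates, gSize * sizeof(int));
  cudaMalloc(&d_delta, gSize * sizeof(int));
  cudaMalloc(&d_minOutEdge, gSize * sizeof(int));
  cudaMemcpy(d_adjMat, adjMat, gSize * gSize * sizeof(int), cudaMemcpyHostToDevice);

  // Single-source shortest paths from vertex 0; distances come back in shortestOut.
  doShortest(adjMat, shortestOut, gSize, /*startingNode=*/0,
             d_adjMat, d_outVec, d_unvisited, d_frontier,
             d_estimates, d_delta, d_minOutEdge);

  cudaFree(d_adjMat); cudaFree(d_outVec); cudaFree(d_unvisited); cudaFree(d_frontier);
  cudaFree(d_estimates); cudaFree(d_delta); cudaFree(d_minOutEdge);
}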
3fa3dea92a2f082b74905f9ba710fb17327c79bb.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2020-2022 by XGBoost contributors */ #include <gtest/gtest.h> #include "../../../../src/tree/gpu_hist/evaluate_splits.cuh" #include "../../helpers.h" #include "../../histogram_helpers.h" #include "../test_evaluate_splits.h" // TestPartitionBasedSplit namespace xgboost { namespace tree { namespace { auto ZeroParam() { auto args = Args{{"min_child_weight", "0"}, {"lambda", "0"}}; TrainParam tparam; tparam.UpdateAllowUnknown(args); return tparam; } } // anonymous namespace void TestEvaluateSingleSplit(bool is_categorical) { GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); GPUTrainingParam param{tparam}; common::HistogramCuts cuts; cuts.cut_values_.HostVector() = std::vector<float>{1.0, 2.0, 11.0, 12.0}; cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 2, 4}; cuts.min_vals_.HostVector() = std::vector<float>{0.0, 0.0}; cuts.cut_ptrs_.SetDevice(0); cuts.cut_values_.SetDevice(0); cuts.min_vals_.SetDevice(0); thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1}; // Setup gradients so that second feature gets higher gain thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}}; thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0); dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical); common::Span<FeatureType> d_feature_types; if (is_categorical) { auto max_cat = *std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end()); cuts.SetCategorical(true, max_cat); d_feature_types = dh::ToSpan(feature_types); } EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, d_feature_types, cuts.cut_ptrs_.ConstDeviceSpan(), cuts.cut_values_.ConstDeviceSpan(), cuts.min_vals_.ConstDeviceSpan(), }; GPUHistEvaluator evaluator{ tparam, static_cast<bst_feature_t>(feature_set.size()), 0}; evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split; EXPECT_EQ(result.findex, 1); EXPECT_EQ(result.fvalue, 11.0); EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad()); EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess()); } TEST(GpuHist, EvaluateSingleSplit) { TestEvaluateSingleSplit(false); } TEST(GpuHist, EvaluateCategoricalSplit) { TestEvaluateSingleSplit(true); } TEST(GpuHist, EvaluateSingleSplitMissing) { GradientPairPrecise parent_sum(1.0, 1.5); TrainParam tparam = ZeroParam(); GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0}; thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{{-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0); EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), 
dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator(tparam, feature_set.size(), 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split; EXPECT_EQ(result.findex, 0); EXPECT_EQ(result.fvalue, 1.0); EXPECT_EQ(result.dir, kRightDir); EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5)); EXPECT_EQ(result.right_sum, GradientPairPrecise(1.5, 1.0)); } TEST(GpuHist, EvaluateSingleSplitEmpty) { TrainParam tparam = ZeroParam(); GPUHistEvaluator evaluator(tparam, 1, 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}).split; EXPECT_EQ(result.findex, -1); EXPECT_LT(result.loss_chg, 0.0f); } // Feature 0 has a better split, but the algorithm must select feature 1 TEST(GpuHist, EvaluateSingleSplitFeatureSampling) { GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); tparam.UpdateAllowUnknown(Args{}); GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{1}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2, 4}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0, 11.0, 12.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0, 10.0}; thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{ {-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(2, 0); EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split; EXPECT_EQ(result.findex, 1); EXPECT_EQ(result.fvalue, 11.0); EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5)); EXPECT_EQ(result.right_sum, GradientPairPrecise(0.5, 0.5)); } // Features 0 and 1 have identical gain, the algorithm must select 0 TEST(GpuHist, EvaluateSingleSplitBreakTies) { GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); tparam.UpdateAllowUnknown(Args{}); GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2, 4}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0, 11.0, 12.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0, 10.0}; thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{ {-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(2, 0); EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs).split; EXPECT_EQ(result.findex, 0); EXPECT_EQ(result.fvalue, 1.0); } TEST(GpuHist, EvaluateSplits) { thrust::device_vector<DeviceSplitCandidate> out_splits(2); GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); tparam.UpdateAllowUnknown(Args{}); 
GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2, 4}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0, 11.0, 12.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0, 0.0}; thrust::device_vector<GradientPairPrecise> feature_histogram_left = std::vector<GradientPairPrecise>{ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}}; thrust::device_vector<GradientPairPrecise> feature_histogram_right = std::vector<GradientPairPrecise>{ {-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0); EvaluateSplitInputs input_left{ 1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram_left)}; EvaluateSplitInputs input_right{ 2,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram_right)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator{ tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0}; dh::device_vector<EvaluateSplitInputs> inputs = std::vector<EvaluateSplitInputs>{input_left,input_right}; evaluator.LaunchEvaluateSplits(input_left.feature_set.size(),dh::ToSpan(inputs),shared_inputs, evaluator.GetEvaluator(), dh::ToSpan(out_splits)); DeviceSplitCandidate result_left = out_splits[0]; EXPECT_EQ(result_left.findex, 1); EXPECT_EQ(result_left.fvalue, 11.0); DeviceSplitCandidate result_right = out_splits[1]; EXPECT_EQ(result_right.findex, 0); EXPECT_EQ(result_right.fvalue, 1.0); } TEST_F(TestPartitionBasedSplit, GpuHist) { dh::device_vector<FeatureType> ft{std::vector<FeatureType>{FeatureType::kCategorical}}; GPUHistEvaluator evaluator{param_, static_cast<bst_feature_t>(info_.num_col_), 0}; cuts_.cut_ptrs_.SetDevice(0); cuts_.cut_values_.SetDevice(0); cuts_.min_vals_.SetDevice(0); evaluator.Reset(cuts_, dh::ToSpan(ft), info_.num_col_, param_, 0); dh::device_vector<GradientPairPrecise> d_hist(hist_[0].size()); auto node_hist = hist_[0]; dh::safe_cuda(hipMemcpy(d_hist.data().get(), node_hist.data(), node_hist.size_bytes(), hipMemcpyHostToDevice)); dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}}; EvaluateSplitInputs input{0, 0, total_gpair_, dh::ToSpan(feature_set), dh::ToSpan(d_hist)}; EvaluateSplitSharedInputs shared_inputs{ GPUTrainingParam{param_}, dh::ToSpan(ft), cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(), cuts_.min_vals_.ConstDeviceSpan(), }; auto split = evaluator.EvaluateSingleSplit(input, shared_inputs).split; ASSERT_NEAR(split.loss_chg, best_score_, 1e-16); } } // namespace tree } // namespace xgboost
3fa3dea92a2f082b74905f9ba710fb17327c79bb.cu
/*! * Copyright 2020-2022 by XGBoost contributors */ #include <gtest/gtest.h> #include "../../../../src/tree/gpu_hist/evaluate_splits.cuh" #include "../../helpers.h" #include "../../histogram_helpers.h" #include "../test_evaluate_splits.h" // TestPartitionBasedSplit namespace xgboost { namespace tree { namespace { auto ZeroParam() { auto args = Args{{"min_child_weight", "0"}, {"lambda", "0"}}; TrainParam tparam; tparam.UpdateAllowUnknown(args); return tparam; } } // anonymous namespace void TestEvaluateSingleSplit(bool is_categorical) { GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); GPUTrainingParam param{tparam}; common::HistogramCuts cuts; cuts.cut_values_.HostVector() = std::vector<float>{1.0, 2.0, 11.0, 12.0}; cuts.cut_ptrs_.HostVector() = std::vector<uint32_t>{0, 2, 4}; cuts.min_vals_.HostVector() = std::vector<float>{0.0, 0.0}; cuts.cut_ptrs_.SetDevice(0); cuts.cut_values_.SetDevice(0); cuts.min_vals_.SetDevice(0); thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1}; // Setup gradients so that second feature gets higher gain thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}}; thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0); dh::device_vector<FeatureType> feature_types(feature_set.size(), FeatureType::kCategorical); common::Span<FeatureType> d_feature_types; if (is_categorical) { auto max_cat = *std::max_element(cuts.cut_values_.HostVector().begin(), cuts.cut_values_.HostVector().end()); cuts.SetCategorical(true, max_cat); d_feature_types = dh::ToSpan(feature_types); } EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, d_feature_types, cuts.cut_ptrs_.ConstDeviceSpan(), cuts.cut_values_.ConstDeviceSpan(), cuts.min_vals_.ConstDeviceSpan(), }; GPUHistEvaluator evaluator{ tparam, static_cast<bst_feature_t>(feature_set.size()), 0}; evaluator.Reset(cuts, dh::ToSpan(feature_types), feature_set.size(), tparam, 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split; EXPECT_EQ(result.findex, 1); EXPECT_EQ(result.fvalue, 11.0); EXPECT_FLOAT_EQ(result.left_sum.GetGrad() + result.right_sum.GetGrad(), parent_sum.GetGrad()); EXPECT_FLOAT_EQ(result.left_sum.GetHess() + result.right_sum.GetHess(), parent_sum.GetHess()); } TEST(GpuHist, EvaluateSingleSplit) { TestEvaluateSingleSplit(false); } TEST(GpuHist, EvaluateCategoricalSplit) { TestEvaluateSingleSplit(true); } TEST(GpuHist, EvaluateSingleSplitMissing) { GradientPairPrecise parent_sum(1.0, 1.5); TrainParam tparam = ZeroParam(); GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0}; thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{{-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0); EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator(tparam, 
feature_set.size(), 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split; EXPECT_EQ(result.findex, 0); EXPECT_EQ(result.fvalue, 1.0); EXPECT_EQ(result.dir, kRightDir); EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5)); EXPECT_EQ(result.right_sum, GradientPairPrecise(1.5, 1.0)); } TEST(GpuHist, EvaluateSingleSplitEmpty) { TrainParam tparam = ZeroParam(); GPUHistEvaluator evaluator(tparam, 1, 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(EvaluateSplitInputs{}, EvaluateSplitSharedInputs{}).split; EXPECT_EQ(result.findex, -1); EXPECT_LT(result.loss_chg, 0.0f); } // Feature 0 has a better split, but the algorithm must select feature 1 TEST(GpuHist, EvaluateSingleSplitFeatureSampling) { GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); tparam.UpdateAllowUnknown(Args{}); GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{1}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2, 4}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0, 11.0, 12.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0, 10.0}; thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{ {-10.0, 0.5}, {10.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(2, 0); EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input, shared_inputs).split; EXPECT_EQ(result.findex, 1); EXPECT_EQ(result.fvalue, 11.0); EXPECT_EQ(result.left_sum, GradientPairPrecise(-0.5, 0.5)); EXPECT_EQ(result.right_sum, GradientPairPrecise(0.5, 0.5)); } // Features 0 and 1 have identical gain, the algorithm must select 0 TEST(GpuHist, EvaluateSingleSplitBreakTies) { GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); tparam.UpdateAllowUnknown(Args{}); GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> feature_set = std::vector<bst_feature_t>{0, 1}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2, 4}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0, 11.0, 12.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0, 10.0}; thrust::device_vector<GradientPairPrecise> feature_histogram = std::vector<GradientPairPrecise>{ {-0.5, 0.5}, {0.5, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(2, 0); EvaluateSplitInputs input{1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator(tparam, feature_min_values.size(), 0); DeviceSplitCandidate result = evaluator.EvaluateSingleSplit(input,shared_inputs).split; EXPECT_EQ(result.findex, 0); EXPECT_EQ(result.fvalue, 1.0); } TEST(GpuHist, EvaluateSplits) { thrust::device_vector<DeviceSplitCandidate> out_splits(2); GradientPairPrecise parent_sum(0.0, 1.0); TrainParam tparam = ZeroParam(); tparam.UpdateAllowUnknown(Args{}); GPUTrainingParam param{tparam}; thrust::device_vector<bst_feature_t> 
feature_set = std::vector<bst_feature_t>{0, 1}; thrust::device_vector<uint32_t> feature_segments = std::vector<bst_row_t>{0, 2, 4}; thrust::device_vector<float> feature_values = std::vector<float>{1.0, 2.0, 11.0, 12.0}; thrust::device_vector<float> feature_min_values = std::vector<float>{0.0, 0.0}; thrust::device_vector<GradientPairPrecise> feature_histogram_left = std::vector<GradientPairPrecise>{ {-0.5, 0.5}, {0.5, 0.5}, {-1.0, 0.5}, {1.0, 0.5}}; thrust::device_vector<GradientPairPrecise> feature_histogram_right = std::vector<GradientPairPrecise>{ {-1.0, 0.5}, {1.0, 0.5}, {-0.5, 0.5}, {0.5, 0.5}}; thrust::device_vector<int> monotonic_constraints(feature_set.size(), 0); EvaluateSplitInputs input_left{ 1,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram_left)}; EvaluateSplitInputs input_right{ 2,0, parent_sum, dh::ToSpan(feature_set), dh::ToSpan(feature_histogram_right)}; EvaluateSplitSharedInputs shared_inputs{ param, {}, dh::ToSpan(feature_segments), dh::ToSpan(feature_values), dh::ToSpan(feature_min_values), }; GPUHistEvaluator evaluator{ tparam, static_cast<bst_feature_t>(feature_min_values.size()), 0}; dh::device_vector<EvaluateSplitInputs> inputs = std::vector<EvaluateSplitInputs>{input_left,input_right}; evaluator.LaunchEvaluateSplits(input_left.feature_set.size(),dh::ToSpan(inputs),shared_inputs, evaluator.GetEvaluator(), dh::ToSpan(out_splits)); DeviceSplitCandidate result_left = out_splits[0]; EXPECT_EQ(result_left.findex, 1); EXPECT_EQ(result_left.fvalue, 11.0); DeviceSplitCandidate result_right = out_splits[1]; EXPECT_EQ(result_right.findex, 0); EXPECT_EQ(result_right.fvalue, 1.0); } TEST_F(TestPartitionBasedSplit, GpuHist) { dh::device_vector<FeatureType> ft{std::vector<FeatureType>{FeatureType::kCategorical}}; GPUHistEvaluator evaluator{param_, static_cast<bst_feature_t>(info_.num_col_), 0}; cuts_.cut_ptrs_.SetDevice(0); cuts_.cut_values_.SetDevice(0); cuts_.min_vals_.SetDevice(0); evaluator.Reset(cuts_, dh::ToSpan(ft), info_.num_col_, param_, 0); dh::device_vector<GradientPairPrecise> d_hist(hist_[0].size()); auto node_hist = hist_[0]; dh::safe_cuda(cudaMemcpy(d_hist.data().get(), node_hist.data(), node_hist.size_bytes(), cudaMemcpyHostToDevice)); dh::device_vector<bst_feature_t> feature_set{std::vector<bst_feature_t>{0}}; EvaluateSplitInputs input{0, 0, total_gpair_, dh::ToSpan(feature_set), dh::ToSpan(d_hist)}; EvaluateSplitSharedInputs shared_inputs{ GPUTrainingParam{param_}, dh::ToSpan(ft), cuts_.cut_ptrs_.ConstDeviceSpan(), cuts_.cut_values_.ConstDeviceSpan(), cuts_.min_vals_.ConstDeviceSpan(), }; auto split = evaluator.EvaluateSingleSplit(input, shared_inputs).split; ASSERT_NEAR(split.loss_chg, best_score_, 1e-16); } } // namespace tree } // namespace xgboost
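A quick way to see why TestEvaluateSingleSplit expects findex == 1 and fvalue == 11.0 is to evaluate the split gain by hand. The sketch below uses the textbook XGBoost split score with lambda = 0 and min_child_weight = 0 (the ZeroParam settings); the real evaluator lives in evaluate_splits.cuh and is assumed, not shown, to reduce to this form, so the function name and the formula constants here are illustrative.

#include <cstdio>

// Textbook split score: G_L^2/(H_L + lambda) + G_R^2/(H_R + lambda) - G^2/(H + lambda).
static double SplitGain(double gl, double hl, double gr, double hr,
                        double g, double h, double lambda = 0.0) {
  return gl * gl / (hl + lambda) + gr * gr / (gr == gr ? hr + lambda : hr) - g * g / (h + lambda);
}

void CheckSingleSplitByHand() {
  // Parent sum is (grad = 0.0, hess = 1.0); lambda = 0 from ZeroParam().
  // Feature 0 histogram bins: {-0.5, 0.5} and {0.5, 0.5}.
  double gain_f0 = SplitGain(-0.5, 0.5, 0.5, 0.5, 0.0, 1.0);   // = 1.0
  // Feature 1 histogram bins: {-1.0, 0.5} and {1.0, 0.5}.
  double gain_f1 = SplitGain(-1.0, 0.5, 1.0, 0.5, 0.0, 1.0);   // = 4.0
  std::printf("gain(feature 0) = %.1f, gain(feature 1) = %.1f\n", gain_f0, gain_f1);
  // Feature 1 wins, so the test expects findex == 1 with fvalue == 11.0,
  // the cut value separating the two feature-1 bins.
}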
7503bfd3bce35fee473a218e756267b55f297476.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "pytorch_cuda_helper.hpp" #include "sigmoid_focal_loss_cuda_kernel.cuh" #include "softmax_focal_loss_cuda_kernel.cuh" void SigmoidFocalLossForwardCUDAKernelLauncher(Tensor input, Tensor target, Tensor weight, Tensor output, const float gamma, const float alpha) { int output_size = output.numel(); int num_classes = input.size(1); AT_ASSERTM(target.max().item<long>() <= (long)num_classes, "target label should smaller or equal than num classes"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(input.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "sigmoid_focal_loss_forward_cuda_kernel", [&] { hipLaunchKernelGGL(( sigmoid_focal_loss_forward_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, input.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(hipGetLastError()); } void SigmoidFocalLossBackwardCUDAKernelLauncher(Tensor input, Tensor target, Tensor weight, Tensor grad_input, const float gamma, const float alpha) { int output_size = grad_input.numel(); int num_classes = input.size(1); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_input.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "sigmoid_focal_loss_backward_cuda_kernel", [&] { hipLaunchKernelGGL(( sigmoid_focal_loss_backward_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, input.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(), grad_input.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(hipGetLastError()); } void SoftmaxFocalLossForwardCUDAKernelLauncher(Tensor softmax, Tensor target, Tensor weight, Tensor output, const float gamma, const float alpha) { int output_size = output.numel(); int num_classes = softmax.size(1); AT_ASSERTM(target.max().item<long>() <= (long)num_classes, "target label should smaller or equal than num classes"); at::hip::HIPGuardMasqueradingAsCUDA device_guard(softmax.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( softmax.scalar_type(), "softmax_focal_loss_forward_cuda_kernel", [&] { hipLaunchKernelGGL(( softmax_focal_loss_forward_cuda_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, softmax.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(hipGetLastError()); } void SoftmaxFocalLossBackwardCUDAKernelLauncher(Tensor softmax, Tensor target, Tensor weight, Tensor buff, Tensor grad_input, const float gamma, const float alpha) { int num_classes = softmax.size(1); int output_size = buff.numel(); at::hip::HIPGuardMasqueradingAsCUDA device_guard(grad_input.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_input.scalar_type(), "softmax_focal_loss_backward_cuda1_kernel", [&] { hipLaunchKernelGGL(( softmax_focal_loss_backward_cuda1_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, softmax.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), 
weight.data_ptr<scalar_t>(), buff.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(hipGetLastError()); output_size = grad_input.numel(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_input.scalar_type(), "softmax_focal_loss_backward_cuda2_kernel", [&] { hipLaunchKernelGGL(( softmax_focal_loss_backward_cuda2_kernel<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, stream, output_size, softmax.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), buff.data_ptr<scalar_t>(), grad_input.data_ptr<scalar_t>(), num_classes); }); AT_CUDA_CHECK(hipGetLastError()); }
7503bfd3bce35fee473a218e756267b55f297476.cu
#include "pytorch_cuda_helper.hpp" #include "sigmoid_focal_loss_cuda_kernel.cuh" #include "softmax_focal_loss_cuda_kernel.cuh" void SigmoidFocalLossForwardCUDAKernelLauncher(Tensor input, Tensor target, Tensor weight, Tensor output, const float gamma, const float alpha) { int output_size = output.numel(); int num_classes = input.size(1); AT_ASSERTM(target.max().item<long>() <= (long)num_classes, "target label should smaller or equal than num classes"); at::cuda::CUDAGuard device_guard(input.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "sigmoid_focal_loss_forward_cuda_kernel", [&] { sigmoid_focal_loss_forward_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( output_size, input.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(cudaGetLastError()); } void SigmoidFocalLossBackwardCUDAKernelLauncher(Tensor input, Tensor target, Tensor weight, Tensor grad_input, const float gamma, const float alpha) { int output_size = grad_input.numel(); int num_classes = input.size(1); at::cuda::CUDAGuard device_guard(grad_input.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "sigmoid_focal_loss_backward_cuda_kernel", [&] { sigmoid_focal_loss_backward_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( output_size, input.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(), grad_input.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(cudaGetLastError()); } void SoftmaxFocalLossForwardCUDAKernelLauncher(Tensor softmax, Tensor target, Tensor weight, Tensor output, const float gamma, const float alpha) { int output_size = output.numel(); int num_classes = softmax.size(1); AT_ASSERTM(target.max().item<long>() <= (long)num_classes, "target label should smaller or equal than num classes"); at::cuda::CUDAGuard device_guard(softmax.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( softmax.scalar_type(), "softmax_focal_loss_forward_cuda_kernel", [&] { softmax_focal_loss_forward_cuda_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( output_size, softmax.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(), output.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(cudaGetLastError()); } void SoftmaxFocalLossBackwardCUDAKernelLauncher(Tensor softmax, Tensor target, Tensor weight, Tensor buff, Tensor grad_input, const float gamma, const float alpha) { int num_classes = softmax.size(1); int output_size = buff.numel(); at::cuda::CUDAGuard device_guard(grad_input.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_input.scalar_type(), "softmax_focal_loss_backward_cuda1_kernel", [&] { softmax_focal_loss_backward_cuda1_kernel<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( output_size, softmax.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), weight.data_ptr<scalar_t>(), buff.data_ptr<scalar_t>(), gamma, alpha, num_classes); }); AT_CUDA_CHECK(cudaGetLastError()); output_size = grad_input.numel(); AT_DISPATCH_FLOATING_TYPES_AND_HALF( grad_input.scalar_type(), "softmax_focal_loss_backward_cuda2_kernel", [&] { softmax_focal_loss_backward_cuda2_kernel<scalar_t> 
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK, 0, stream>>>( output_size, softmax.data_ptr<scalar_t>(), target.data_ptr<int64_t>(), buff.data_ptr<scalar_t>(), grad_input.data_ptr<scalar_t>(), num_classes); }); AT_CUDA_CHECK(cudaGetLastError()); }
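The launchers above only dispatch to kernels defined in sigmoid_focal_loss_cuda_kernel.cuh and softmax_focal_loss_cuda_kernel.cuh, which are not part of this file pair. As a rough sketch of what the sigmoid forward pass computes per logit, the standard focal loss FL(p_t) = -alpha_t * (1 - p_t)^gamma * log(p_t) can be written as below; the target encoding, the omitted weight tensor, and the numerical clamping are assumptions and are not guaranteed to match mmcv's actual kernel.

#include <cstdint>

// Grid-stride sketch; one thread handles one (sample, class) logit.
__global__ void sigmoid_focal_loss_forward_sketch(int nthreads, const float* input,
                                                  const int64_t* target, float* output,
                                                  float gamma, float alpha, int num_classes) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads;
       i += blockDim.x * gridDim.x) {
    int n = i / num_classes;                    // sample index
    int c = i % num_classes;                    // class index of this logit
    float p = 1.0f / (1.0f + expf(-input[i]));  // sigmoid probability
    bool positive = (target[n] == c);           // assumed one-vs-all target encoding
    float pt = positive ? p : 1.0f - p;
    float at = positive ? alpha : 1.0f - alpha;
    output[i] = -at * powf(1.0f - pt, gamma) * logf(fmaxf(pt, 1e-12f));
  }
}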
4b6e81b2411e98f2d944c4e808aa89f12846314e.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "computeSumPerBlock.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; hipMalloc(&a, sizeof(int)*XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( computeSumPerBlock), dim3(gridBlock),dim3(threadBlock), 0, 0, a,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( computeSumPerBlock), dim3(gridBlock),dim3(threadBlock), 0, 0, a,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( computeSumPerBlock), dim3(gridBlock),dim3(threadBlock), 0, 0, a,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; hipFree(a); } }}
4b6e81b2411e98f2d944c4e808aa89f12846314e.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "computeSumPerBlock.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; cudaMalloc(&a, sizeof(int)*XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); computeSumPerBlock<<<gridBlock,threadBlock>>>(a,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { computeSumPerBlock<<<gridBlock,threadBlock>>>(a,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { computeSumPerBlock<<<gridBlock,threadBlock>>>(a,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; cudaFree(a); } }}
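Note that the timed loop above only enqueues 1000 asynchronous launches and reads steady_clock without a device synchronization, so for short kernels it largely measures launch overhead. A hedged alternative is event-based timing around the whole batch, sketched below; the timeWithEvents name is illustrative, and the computeSumPerBlock parameter types are assumed from the call site above.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void computeSumPerBlock(int *a, int N);   // defined in computeSumPerBlock.cu (signature assumed)

// Times kIters launches with CUDA events; synchronizing on the stop event
// means the figure includes kernel execution, not just enqueue time.
void timeWithEvents(dim3 gridBlock, dim3 threadBlock, int *a, int N) {
  const int kIters = 1000;
  cudaEvent_t start_evt, stop_evt;
  cudaEventCreate(&start_evt);
  cudaEventCreate(&stop_evt);

  cudaEventRecord(start_evt);
  for (int i = 0; i < kIters; ++i) {
    computeSumPerBlock<<<gridBlock, threadBlock>>>(a, N);
  }
  cudaEventRecord(stop_evt);
  cudaEventSynchronize(stop_evt);   // wait until all kIters launches have finished

  float total_ms = 0.0f;
  cudaEventElapsedTime(&total_ms, start_evt, stop_evt);
  printf("[%f us per launch]\n", total_ms * 1000.0f / kIters);

  cudaEventDestroy(start_evt);
  cudaEventDestroy(stop_evt);
}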
a906aa832c468ca552259c59bb7706056e4bd04a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _COMPACT_KERNEL_H_ #define _COMPACT_KERNEL_H_ __global__ void scan_kernel(float *out, float *in, int n) { /* Allocated shared memory for storing the scan array */ __shared__ float temp[2 * NUM_ELEMENTS]; int tid = threadIdx.x; /* Indices for the ping-pong buffers */ int pout = 0; int pin = 1; /* Load the in array from global memory into shared memory */ if (tid > 0) temp[pout * n + tid] = in[tid - 1]; else temp[pout * n + tid] = 0; int offset; for (offset = 1; offset < n; offset *= 2) { pout = 1 - pout; pin = 1 - pout; __syncthreads(); temp[pout * n + tid] = temp[pin * n + tid]; if (tid >= offset) temp[pout * n + tid] += temp[pin * n + tid - offset]; } __syncthreads(); out[tid] = temp[pout * n + tid]; } __global__ void compact_kernel(int *out, int *in, int n, int *new_n, int *scanned_flag) { __shared__ int temp[NUM_ELEMENTS]; __shared__ int flag[2 * NUM_ELEMENTS]; int tid = threadIdx.x; /* Load input elements from global memory into shared memory */ if (tid < NUM_ELEMENTS) temp[tid] = in[tid]; else temp[tid] = 0; __syncthreads(); /* Examine element in the temp array and if > 0 flag it as such */ int pout = 0; int pin = 1; if (tid > 0) { if (temp[tid - 1] > 0) flag[pout * n + tid] = 1; else flag[pout * n + tid] = 0; } else flag[pout * n + tid] = 0; __syncthreads(); /* Scan the flag array to generate addresses for the output elements. * We are performing an exclusive scan. */ int offset; for (offset = 1; offset < n; offset *= 2) { pout = 1 - pout; pin = 1 - pout; __syncthreads(); flag[pout * n + tid] = flag[pin * n + tid]; if (tid >= offset) flag[pout * n + tid] += flag[pin * n + tid - offset]; } __syncthreads(); /* Write out the scanned flag values */ scanned_flag[tid] = flag[pout * n + tid]; /* Write output elements to their corresponding addresses */ if (temp[tid] > 0) out[flag[pout * n + tid]] = temp[tid]; /* Calculate number of compacted elements */ if (tid == (blockDim.x - 1)) { if (temp[tid] > 0) *new_n = flag[pout * n + tid] + 1; else *new_n = flag[pout * n + tid]; } } #endif /* _COMPACT_KERNEL_H_ */
a906aa832c468ca552259c59bb7706056e4bd04a.cu
#ifndef _COMPACT_KERNEL_H_ #define _COMPACT_KERNEL_H_ __global__ void scan_kernel(float *out, float *in, int n) { /* Allocated shared memory for storing the scan array */ __shared__ float temp[2 * NUM_ELEMENTS]; int tid = threadIdx.x; /* Indices for the ping-pong buffers */ int pout = 0; int pin = 1; /* Load the in array from global memory into shared memory */ if (tid > 0) temp[pout * n + tid] = in[tid - 1]; else temp[pout * n + tid] = 0; int offset; for (offset = 1; offset < n; offset *= 2) { pout = 1 - pout; pin = 1 - pout; __syncthreads(); temp[pout * n + tid] = temp[pin * n + tid]; if (tid >= offset) temp[pout * n + tid] += temp[pin * n + tid - offset]; } __syncthreads(); out[tid] = temp[pout * n + tid]; } __global__ void compact_kernel(int *out, int *in, int n, int *new_n, int *scanned_flag) { __shared__ int temp[NUM_ELEMENTS]; __shared__ int flag[2 * NUM_ELEMENTS]; int tid = threadIdx.x; /* Load input elements from global memory into shared memory */ if (tid < NUM_ELEMENTS) temp[tid] = in[tid]; else temp[tid] = 0; __syncthreads(); /* Examine element in the temp array and if > 0 flag it as such */ int pout = 0; int pin = 1; if (tid > 0) { if (temp[tid - 1] > 0) flag[pout * n + tid] = 1; else flag[pout * n + tid] = 0; } else flag[pout * n + tid] = 0; __syncthreads(); /* Scan the flag array to generate addresses for the output elements. * We are performing an exclusive scan. */ int offset; for (offset = 1; offset < n; offset *= 2) { pout = 1 - pout; pin = 1 - pout; __syncthreads(); flag[pout * n + tid] = flag[pin * n + tid]; if (tid >= offset) flag[pout * n + tid] += flag[pin * n + tid - offset]; } __syncthreads(); /* Write out the scanned flag values */ scanned_flag[tid] = flag[pout * n + tid]; /* Write output elements to their corresponding addresses */ if (temp[tid] > 0) out[flag[pout * n + tid]] = temp[tid]; /* Calculate number of compacted elements */ if (tid == (blockDim.x - 1)) { if (temp[tid] > 0) *new_n = flag[pout * n + tid] + 1; else *new_n = flag[pout * n + tid]; } } #endif /* _COMPACT_KERNEL_H_ */
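Both kernels above keep their scans entirely in one block's shared memory, so they assume a single block whose thread count equals the element count. A host-side usage sketch for compact_kernel follows; it assumes it is compiled in the same translation unit as the kernels (so NUM_ELEMENTS is already defined, e.g. to 256), and the "keep every third element" input plus the runCompactExample name are purely illustrative.

#include <cstdio>
#include <cuda_runtime.h>

void runCompactExample() {
  int h_in[NUM_ELEMENTS];
  for (int i = 0; i < NUM_ELEMENTS; ++i)
    h_in[i] = (i % 3 == 0) ? i : 0;   // a mix of zero and positive entries

  int *d_in, *d_out, *d_flags, *d_new_n;
  cudaMalloc(&d_in, NUM_ELEMENTS * sizeof(int));
  cudaMalloc(&d_out, NUM_ELEMENTS * sizeof(int));
  cudaMalloc(&d_flags, NUM_ELEMENTS * sizeof(int));
  cudaMalloc(&d_new_n, sizeof(int));
  cudaMemcpy(d_in, h_in, NUM_ELEMENTS * sizeof(int), cudaMemcpyHostToDevice);

  // One block, one thread per element, exactly as the shared-memory layout expects.
  compact_kernel<<<1, NUM_ELEMENTS>>>(d_out, d_in, NUM_ELEMENTS, d_new_n, d_flags);

  int new_n = 0;
  cudaMemcpy(&new_n, d_new_n, sizeof(int), cudaMemcpyDeviceToHost);
  printf("compacted %d positive elements\n", new_n);

  cudaFree(d_in); cudaFree(d_out); cudaFree(d_flags); cudaFree(d_new_n);
}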
4d1072ec7547b61064f37568a604fdcceb622b04.hip
// !!! This is a file automatically generated by hipify!!! /****************************************************************************** * * (C) Copyright 2014 The Board of Trustees of the * Florida Institute of Technology * All Rights Reserved * * Lab Image Filters ******************************************************************************/ #include "VideoFilters.h" //CUDA #include <helper_cuda.h> #include <helper_functions.h> #include <device_launch_parameters.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <helper_string.h> #include <hip/hip_texture_types.h> #include <texture_fetch_functions.h> /*TEXTURES*/ texture<uchar4, 2, hipReadModeNormalizedFloat> texImage_rgb; hipChannelFormatDesc uchar4tex = hipCreateChannelDesc<uchar4>(); hipArray *cu_image; extern "C" void CUDA_CreateMemoryArray(int imageW,int imageH){ hipMallocArray(&cu_image, &uchar4tex, imageW, imageH); } extern "C" void CUDA_BindTextureToArray(){ hipBindTextureToArray(texImage_rgb,cu_image); } extern "C" void CUDA_FreeArrays(){ hipFreeArray(cu_image); } extern "C" void CUDA_MemcpyToArray(uchar4 *src,int imageW,int imageH){ hipMemcpyToArray( cu_image, 0, 0,src, imageW * imageH * sizeof(uchar4), hipMemcpyDeviceToDevice); } /*************************************** Box Filter *****************************************/ __constant__ float MeanKernel[9] = {1,1,1, 1,1,1, 1,1,1}; /*************************************** Median Filter Kernel Function *****************************************/ __global__ void MeanFilter(uchar4 *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x=(float)ix+0.5f; const float y=(float)iy+0.5f; int win_W=1; if(ix < w && iy < h){ float4 pixval; float3 sum; sum.x=0.0f; sum.y=0.0f; sum.z=0.0f; int k=0; for(int ii=-win_W;ii<=win_W;ii++){ for(int jj=-win_W;jj<=win_W;jj++){ pixval=tex2D(texImage_rgb,x+ii,y+jj); sum.x+=pixval.x*MeanKernel[k]; sum.y+=pixval.y*MeanKernel[k]; sum.z+=pixval.z*MeanKernel[k]; k++; } } Image_dev[w*iy+ix].x=(unsigned char)((sum.x/9)*255); Image_dev[w*iy+ix].y=(unsigned char)((sum.y/9)*255); Image_dev[w*iy+ix].z=(unsigned char)((sum.z/9)*255); } } /*************************************** Median Filter Calling Function *****************************************/ extern "C" void CUDA_MeanFilter(uchar4 *Image_dev,int imageW,int imageH,dim3 grid,dim3 threads){ hipLaunchKernelGGL(( MeanFilter), dim3(grid),dim3(threads), 0, 0, Image_dev,imageW,imageH); } /*************************************** * Gaussian Filter *****************************************/ __constant__ float GaussianKernel[9] = {1,2,1, 2,4,2, 1,2,1}; __constant__ float GaussianKernel55[25] = {1,4,7,4,1, 4,16,26,16,4, 7,26,41,26,7, 4,16,26,16,4, 1,4,7,4,1}; /*************************************** Gaussian Filter Kernel Function *****************************************/ __global__ void GaussianFilter(uchar4 *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; //int win_W = 1; int win_W = 2; //int normalizingFactor = 16; int normalizingFactor_55 = 273; if (ix < w && iy < h){ float4 pixval; float3 sum; sum.x = 0.0f; sum.y = 0.0f; sum.z = 0.0f; int k = 0; for (int ii = -win_W; ii <= win_W; ii++){ for (int jj = -win_W; jj <= win_W; jj++){ pixval = tex2D(texImage_rgb, x + ii, y + jj); //sum.x += pixval.x*GaussianKernel[k]; //sum.y += pixval.y*GaussianKernel[k]; 
//sum.z += pixval.z*GaussianKernel[k]; sum.x += pixval.x*GaussianKernel55[k]; sum.y += pixval.y*GaussianKernel55[k]; sum.z += pixval.z*GaussianKernel55[k]; k++; } } //Image_dev[w*iy + ix].x = (unsigned char)((sum.x / normalizingFactor) * 255); //Image_dev[w*iy + ix].y = (unsigned char)((sum.y / normalizingFactor) * 255); //Image_dev[w*iy + ix].z = (unsigned char)((sum.z / normalizingFactor) * 255); Image_dev[w*iy + ix].x = (unsigned char)((sum.x / normalizingFactor_55) * 255); Image_dev[w*iy + ix].y = (unsigned char)((sum.y / normalizingFactor_55) * 255); Image_dev[w*iy + ix].z = (unsigned char)((sum.z / normalizingFactor_55) * 255); } } /*************************************** Gaussian Filter Calling Function *****************************************/ extern "C" void CUDA_GaussianFilter(uchar4 *Image_dev, int imageW, int imageH, dim3 grid, dim3 threads){ GaussianFilter << <grid, threads >> >(Image_dev, imageW, imageH); } /*************************************** * Laplacian Filter *****************************************/ __constant__ float LaplacianKernel[9] = { 0, 1, 0, 1, -4, 1, 0, 1, 0 }; /*************************************** Laplacian Filter Kernel Function *****************************************/ __global__ void LaplacianFilter(float *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; int win_W = 1; if (ix < w && iy < h){ float4 pixval; float sum = 0.0f; int k = 0; for (int ii = -win_W; ii <= win_W; ii++){ for (int jj = -win_W; jj <= win_W; jj++){ pixval = tex2D(texImage_rgb, x + ii, y + jj); float Intensidad = (pixval.x + pixval.y + pixval.z) / 3.0f; sum += Intensidad*LaplacianKernel[k]; k++; } } Image_dev[w*iy + ix] = sum; } } /*************************************** Laplacian Filter Calling Function *****************************************/ void CUDA_LaplacianFilter(float *Image_dev, int imageW, int imageH, dim3 grid, dim3 threads){ LaplacianFilter << <grid, threads >> >(Image_dev, imageW, imageH); }
4d1072ec7547b61064f37568a604fdcceb622b04.cu
/****************************************************************************** * * (C) Copyright 2014 The Board of Trustees of the * Florida Institute of Technology * All Rights Reserved * * Lab Image Filters ******************************************************************************/ #include "VideoFilters.h" //CUDA #include <helper_cuda.h> #include <helper_functions.h> #include <device_launch_parameters.h> #include <cuda_runtime.h> #include <cuda.h> #include <helper_string.h> #include <cuda_texture_types.h> #include <texture_fetch_functions.h> /*TEXTURES*/ texture<uchar4, 2, cudaReadModeNormalizedFloat> texImage_rgb; cudaChannelFormatDesc uchar4tex = cudaCreateChannelDesc<uchar4>(); cudaArray *cu_image; extern "C" void CUDA_CreateMemoryArray(int imageW,int imageH){ cudaMallocArray(&cu_image, &uchar4tex, imageW, imageH); } extern "C" void CUDA_BindTextureToArray(){ cudaBindTextureToArray(texImage_rgb,cu_image); } extern "C" void CUDA_FreeArrays(){ cudaFreeArray(cu_image); } extern "C" void CUDA_MemcpyToArray(uchar4 *src,int imageW,int imageH){ cudaMemcpyToArray( cu_image, 0, 0,src, imageW * imageH * sizeof(uchar4), cudaMemcpyDeviceToDevice); } /*************************************** Box Filter *****************************************/ __constant__ float MeanKernel[9] = {1,1,1, 1,1,1, 1,1,1}; /*************************************** Median Filter Kernel Function *****************************************/ __global__ void MeanFilter(uchar4 *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x=(float)ix+0.5f; const float y=(float)iy+0.5f; int win_W=1; if(ix < w && iy < h){ float4 pixval; float3 sum; sum.x=0.0f; sum.y=0.0f; sum.z=0.0f; int k=0; for(int ii=-win_W;ii<=win_W;ii++){ for(int jj=-win_W;jj<=win_W;jj++){ pixval=tex2D(texImage_rgb,x+ii,y+jj); sum.x+=pixval.x*MeanKernel[k]; sum.y+=pixval.y*MeanKernel[k]; sum.z+=pixval.z*MeanKernel[k]; k++; } } Image_dev[w*iy+ix].x=(unsigned char)((sum.x/9)*255); Image_dev[w*iy+ix].y=(unsigned char)((sum.y/9)*255); Image_dev[w*iy+ix].z=(unsigned char)((sum.z/9)*255); } } /*************************************** Median Filter Calling Function *****************************************/ extern "C" void CUDA_MeanFilter(uchar4 *Image_dev,int imageW,int imageH,dim3 grid,dim3 threads){ MeanFilter<<<grid,threads>>>(Image_dev,imageW,imageH); } /*************************************** * Gaussian Filter *****************************************/ __constant__ float GaussianKernel[9] = {1,2,1, 2,4,2, 1,2,1}; __constant__ float GaussianKernel55[25] = {1,4,7,4,1, 4,16,26,16,4, 7,26,41,26,7, 4,16,26,16,4, 1,4,7,4,1}; /*************************************** Gaussian Filter Kernel Function *****************************************/ __global__ void GaussianFilter(uchar4 *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; //int win_W = 1; int win_W = 2; //int normalizingFactor = 16; int normalizingFactor_55 = 273; if (ix < w && iy < h){ float4 pixval; float3 sum; sum.x = 0.0f; sum.y = 0.0f; sum.z = 0.0f; int k = 0; for (int ii = -win_W; ii <= win_W; ii++){ for (int jj = -win_W; jj <= win_W; jj++){ pixval = tex2D(texImage_rgb, x + ii, y + jj); //sum.x += pixval.x*GaussianKernel[k]; //sum.y += pixval.y*GaussianKernel[k]; //sum.z += pixval.z*GaussianKernel[k]; sum.x += pixval.x*GaussianKernel55[k]; sum.y += 
pixval.y*GaussianKernel55[k]; sum.z += pixval.z*GaussianKernel55[k]; k++; } } //Image_dev[w*iy + ix].x = (unsigned char)((sum.x / normalizingFactor) * 255); //Image_dev[w*iy + ix].y = (unsigned char)((sum.y / normalizingFactor) * 255); //Image_dev[w*iy + ix].z = (unsigned char)((sum.z / normalizingFactor) * 255); Image_dev[w*iy + ix].x = (unsigned char)((sum.x / normalizingFactor_55) * 255); Image_dev[w*iy + ix].y = (unsigned char)((sum.y / normalizingFactor_55) * 255); Image_dev[w*iy + ix].z = (unsigned char)((sum.z / normalizingFactor_55) * 255); } } /*************************************** Gaussian Filter Calling Function *****************************************/ extern "C" void CUDA_GaussianFilter(uchar4 *Image_dev, int imageW, int imageH, dim3 grid, dim3 threads){ GaussianFilter << <grid, threads >> >(Image_dev, imageW, imageH); } /*************************************** * Laplacian Filter *****************************************/ __constant__ float LaplacianKernel[9] = { 0, 1, 0, 1, -4, 1, 0, 1, 0 }; /*************************************** Laplacian Filter Kernel Function *****************************************/ __global__ void LaplacianFilter(float *Image_dev, int w, int h){ const int ix = blockDim.x * blockIdx.x + threadIdx.x; const int iy = blockDim.y * blockIdx.y + threadIdx.y; const float x = (float)ix + 0.5f; const float y = (float)iy + 0.5f; int win_W = 1; if (ix < w && iy < h){ float4 pixval; float sum = 0.0f; int k = 0; for (int ii = -win_W; ii <= win_W; ii++){ for (int jj = -win_W; jj <= win_W; jj++){ pixval = tex2D(texImage_rgb, x + ii, y + jj); float Intensidad = (pixval.x + pixval.y + pixval.z) / 3.0f; sum += Intensidad*LaplacianKernel[k]; k++; } } Image_dev[w*iy + ix] = sum; } } /*************************************** Laplacian Filter Calling Function *****************************************/ void CUDA_LaplacianFilter(float *Image_dev, int imageW, int imageH, dim3 grid, dim3 threads){ LaplacianFilter << <grid, threads >> >(Image_dev, imageW, imageH); }
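The filter file exports its texture setup and launch helpers with C linkage, so a caller is expected to chain them per frame. The sketch below shows one plausible per-frame pipeline; the CUDA_* declarations are copied from the file, while the boxFilterFrame name, the uchar4 device frame buffer, and the 16x16 block shape are assumptions of this sketch.

#include <cuda_runtime.h>   // dim3, uchar4

extern "C" void CUDA_CreateMemoryArray(int imageW, int imageH);
extern "C" void CUDA_BindTextureToArray();
extern "C" void CUDA_FreeArrays();
extern "C" void CUDA_MemcpyToArray(uchar4 *src, int imageW, int imageH);
extern "C" void CUDA_MeanFilter(uchar4 *Image_dev, int imageW, int imageH, dim3 grid, dim3 threads);

// d_frame is a device buffer of imageW*imageH uchar4 pixels.
void boxFilterFrame(uchar4 *d_frame, int imageW, int imageH) {
  CUDA_CreateMemoryArray(imageW, imageH);       // backing cudaArray for the texture

  CUDA_MemcpyToArray(d_frame, imageW, imageH);  // stage the current frame
  CUDA_BindTextureToArray();                    // texImage_rgb now samples this frame

  dim3 threads(16, 16);                         // block shape chosen for this sketch
  dim3 grid((imageW + threads.x - 1) / threads.x,
            (imageH + threads.y - 1) / threads.y);
  CUDA_MeanFilter(d_frame, imageW, imageH, grid, threads);  // overwrite frame in place

  CUDA_FreeArrays();
}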
40d99c3b30d3b86821b517632df61ad362f4dc8f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #pragma once #include"cuda_helper.cuh" __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; // Choose which GPU to run on, change this on a multi-GPU system. checkCudaErrors(hipSetDevice(0)); // Allocate GPU buffers for three vectors (two input, one output) . checkCudaErrors(hipMalloc((void**)&dev_c, size * sizeof(int))); checkCudaErrors(hipMalloc((void**)&dev_a, size * sizeof(int))); checkCudaErrors(hipMalloc((void**)&dev_b, size * sizeof(int))); // Copy input vectors from host memory to GPU buffers. checkCudaErrors(hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice)); // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel) , dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel checkCudaErrors(hipGetLastError()); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. checkCudaErrors(hipDeviceSynchronize()); // Copy output vector from GPU buffer to host memory. checkCudaErrors(hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost)); checkCudaErrors(hipFree(dev_c)); checkCudaErrors(hipFree(dev_a)); checkCudaErrors(hipFree(dev_b)); } void _addTest(){ const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. addWithCuda(c, a, b, arraySize); printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); checkCudaErrors(hipDeviceReset()); } //int main(){ // _addTest(); // return 0; //}
40d99c3b30d3b86821b517632df61ad362f4dc8f.cu
#pragma once #include"cuda_helper.cuh" __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } // Helper function for using CUDA to add vectors in parallel. void addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; // Choose which GPU to run on, change this on a multi-GPU system. checkCudaErrors(cudaSetDevice(0)); // Allocate GPU buffers for three vectors (two input, one output) . checkCudaErrors(cudaMalloc((void**)&dev_c, size * sizeof(int))); checkCudaErrors(cudaMalloc((void**)&dev_a, size * sizeof(int))); checkCudaErrors(cudaMalloc((void**)&dev_b, size * sizeof(int))); // Copy input vectors from host memory to GPU buffers. checkCudaErrors(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice)); // Launch a kernel on the GPU with one thread for each element. addKernel <<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel checkCudaErrors(cudaGetLastError()); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. checkCudaErrors(cudaDeviceSynchronize()); // Copy output vector from GPU buffer to host memory. checkCudaErrors(cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaFree(dev_c)); checkCudaErrors(cudaFree(dev_a)); checkCudaErrors(cudaFree(dev_b)); } void _addTest(){ const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. addWithCuda(c, a, b, arraySize); printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); checkCudaErrors(cudaDeviceReset()); } //int main(){ // _addTest(); // return 0; //}
22fb49c62632138b13557a962dab6d42e3261526.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07 and 08 for the basics of tensor op gemm kernels. On NVIDIA Ampere architecture, most concept still holds. The two main differences are 1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere. 2. NVIDIA Ampere architecture uses cp_async() to build multistage software pipeline to better hide latency (see include/cutlass/gemm/threadblock/mma_multistage.h) Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h) data types in tensor cores. One big advantage is that we can load in fp32 data and convert them implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional fp32 data by using NVIDIA Ampere architecture. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. 
using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = float; // <- data type of elements in input matrix A using ElementInputB = float; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 4; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; return -1; } hipDeviceProp_t props; hipError_t error = hipGetDeviceProperties(&props, 0); if (error != hipSuccess) { std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; // Return 0 so tests are considered passing if run on unsupported platforms. 
return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; // Returning zero so this test passes when built on older Toolkits. return 0; } else { return run(); } }
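// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): CUTLASS_CHECK() comes from
// the example's "helper.h", which is not reproduced in this listing. A minimal
// stand-in is assumed to look roughly like the macro below (cutlassGetStatusString()
// is provided by cutlass/cutlass.h); treat it as a sketch, not the verified header.
// ---------------------------------------------------------------------------
#include <cstdlib>   // exit(); <iostream> is already included by the example

#define CUTLASS_CHECK(status)                                                   \
    {                                                                           \
        cutlass::Status error = status;                                         \
        if (error != cutlass::Status::kSuccess) {                               \
            std::cerr << "Got CUTLASS error: " << cutlassGetStatusString(error) \
                      << " at " << __FILE__ << ":" << __LINE__ << std::endl;    \
            exit(EXIT_FAILURE);                                                 \
        }                                                                       \
    }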
22fb49c62632138b13557a962dab6d42e3261526.cu
/*************************************************************************************************** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted * provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list of * conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other materials * provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used * to endorse or promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TOR (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** Please check example 07 and 08 for the basics of tensor op gemm kernels. On NVIDIA Ampere architecture, most concept still holds. The two main differences are 1. NVIDIA Ampere architecture introduces a new series of tensor core instructions (see include/cutlass/arch/mma_sm80.h) which are more efficient on Ampere. 2. NVIDIA Ampere architecture uses cp_async() to build multistage software pipeline to better hide latency (see include/cutlass/gemm/threadblock/mma_multistage.h) Moreover, NVIDIA Ampere architecture starts supporting tfloat32 (see include/cutlass/tfloat32.h) data types in tensor cores. One big advantage is that we can load in fp32 data and convert them implicitly to tf32 inside the GEMM kernel which means no change is needed to accelerate traditional fp32 data by using NVIDIA Ampere architecture. */ #include <iostream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/tensor_view_io.h" #include "helper.h" // The code section below describes datatype for input, output matrices and computation between // elements in input matrices. using ElementAccumulator = float; // <- data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations using ElementInputA = float; // <- data type of elements in input matrix A using ElementInputB = float; // <- data type of elements in input matrix B using ElementOutput = float; // <- data type of elements in output matrix D // The code section below describes matrix layout of input and output matrices. 
Column Major for // Matrix A, Row Major for Matrix B and Row Major for Matrix C using LayoutInputA = cutlass::layout::RowMajor; using LayoutInputB = cutlass::layout::ColumnMajor; using LayoutOutput = cutlass::layout::RowMajor; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ShapeMMAThreadBlock = cutlass::gemm::GemmShape<128, 128, 16>; // <- threadblock tile M = 128, N = 128, K = 16 // This code section describes tile size a warp will compute using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 16>; // <- warp tile M = 64, N = 64, K = 16 // This code section describes the size of MMA op using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 8>; // <- MMA Op tile M = 16, N = 8, K = 8 // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ?? // This code section describes the epilogue part of the kernel using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // <- data type of output matrix 128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized // memory access. For a byte, it's 16 // elements. This becomes the vector width of // math instructions in the epilogue too ElementAccumulator, // <- data type of accumulator ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function // Number of pipelines you want to use constexpr int NumStages = 4; using Gemm = cutlass::gemm::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, SmArch, ShapeMMAThreadBlock, ShapeMMAWarp, ShapeMMAOp, EpilogueOp, SwizzleThreadBlock, NumStages>; int run() { // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; return -1; } cudaDeviceProp props; cudaError_t error = cudaGetDeviceProperties(&props, 0); if (error != cudaSuccess) { std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl; return -1; } if (!((props.major * 10 + props.minor) >= 80)) { std::cerr << "Turing Tensor Core operations must be run on a machine with compute capability at least 80." << std::endl; // Return 0 so tests are considered passing if run on unsupported platforms. 
return 0; } const int length_m = 5120; const int length_n = 4096; const int length_k = 4096; // Create a tuple of problem size for matrix multiplication cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k); // Initialize tensors using CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a( problem_size.mk()); // <- Create matrix A with dimensions M x K cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b( problem_size.kn()); // <- Create matrix B with dimensions K x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c( problem_size.mn()); // <- Create matrix C with dimensions M x N cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d( problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from // reference kernel // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1, ElementInputA(4), ElementInputA(-4), 0); // <- Fill matrix A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 1, ElementInputB(4), ElementInputB(-4), 0); // <- Fill matrix B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 1, ElementOutput(4), ElementOutput(-4), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); // Initialize alpha and beta for dot product computation ElementComputeEpilogue alpha = ElementComputeEpilogue(1); ElementComputeEpilogue beta = ElementComputeEpilogue(0); // Split K dimension into 1 partitions int split_k_slices = 1; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication tensor_a.device_ref(), // <- reference to matrix A on device tensor_b.device_ref(), // <- reference to matrix B on device tensor_c.device_ref(), // <- reference to matrix C on device tensor_d.device_ref(), // <- reference to matrix D on device {alpha, beta}, // <- tuple of alpha and beta split_k_slices}; // <- k-dimension split factor // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Initialize CUTLASS kernel with arguments and workspace pointer cutlass::Status status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(status); // Launch initialized CUTLASS kernel status = gemm_op(); CUTLASS_CHECK(status); // Create instantiation for device reference gemm kernel cutlass::reference::device::Gemm<ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementComputeEpilogue> gemm_device; // Launch device reference gemm kernel gemm_device(problem_size, alpha, tensor_a.device_ref(), tensor_b.device_ref(), beta, tensor_c.device_ref(), tensor_ref_d.device_ref()); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); // Check if output from CUTLASS kernel and reference kernel are equal or not bool passed = cutlass::reference::host::TensorEquals( tensor_d.host_view(), tensor_ref_d.host_view()); std::cout << (passed ? "Passed" : "Failed") << std::endl; return (passed ? 0 : -1); } int main() { // Ampere Tensor Core operations exposed with mma.sync and ldmatrix are first available // in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11.0 Toolkit to run these examples. if (!(__CUDACC_VER_MAJOR__ >= 11)) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; // Returning zero so this test passes when built on older Toolkits. return 0; } else { return run(); } }
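// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the math the reference
// check above verifies, written as a naive host GEMM for the layouts used in this
// example (A row-major M x K, B column-major K x N, C/D row-major M x N):
//     D = alpha * A * B + beta * C
// The helper name is ours, for illustration only. Note the CUTLASS kernel runs in
// tf32 on Ampere, so a straight fp32 host loop is a conceptual reference, not a
// bit-exact one.
// ---------------------------------------------------------------------------
void naive_gemm_host(int M, int N, int K, float alpha,
                     const float* A,   // row-major:    A(m, k) = A[m * K + k]
                     const float* B,   // column-major: B(k, n) = B[k + n * K]
                     float beta,
                     const float* C,   // row-major:    C(m, n) = C[m * N + n]
                     float* D) {       // row-major:    D(m, n) = D[m * N + n]
    for (int m = 0; m < M; ++m) {
        for (int n = 0; n < N; ++n) {
            float acc = 0.0f;
            for (int k = 0; k < K; ++k) {
                acc += A[m * K + k] * B[k + n * K];
            }
            D[m * N + n] = alpha * acc + beta * C[m * N + n];
        }
    }
}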
3246d10ab0ae2150760bc3d00d74bf173c4320c4.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <ATen/ATen.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> // for the older gpus atomicAdd with double arguments does not exist /* #if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__) static __inline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); } while (assumed != old); return __longlong_as_double(old); } #endif */ namespace{ template <typename scalar_t> __global__ void forward_face_index_map_cuda_kernel_1( const scalar_t* __restrict__ faces, scalar_t* __restrict__ faces_inv, int batch_size, int num_faces, int image_size) { /* batch number, face, number, image size, face[v012][RGB] */ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * num_faces) { return; } const int is = image_size; const scalar_t* face = &faces[i * 9]; scalar_t* face_inv_g = &faces_inv[i * 9]; /* return if backside */ if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) return; /* p[num][xy]: x, y is normalized from [-1, 1] to [0, is - 1]. */ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = 0.5 * (face[3 * num + dim] * is + is - 1); } } /* compute face_inv */ scalar_t face_inv[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; scalar_t face_inv_denominator = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); for (int k = 0; k < 9; k++) { face_inv[k] /= face_inv_denominator; } /* set to global memory */ for (int k = 0; k < 9; k++) { face_inv_g[k] = face_inv[k]; } } template <typename scalar_t> __global__ void forward_face_index_map_cuda_kernel_2( const scalar_t* faces, scalar_t* faces_inv, int32_t* __restrict__ face_index_map, scalar_t* __restrict__ weight_map, scalar_t* __restrict__ depth_map, scalar_t* __restrict__ face_inv_map, int batch_size, int num_faces, int image_size, scalar_t near, scalar_t far, int return_rgb, int return_alpha, int return_depth) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int is = image_size; const int nf = num_faces; const int bn = i / (is * is); const int pn = i % (is * is); const int yi = pn / is; const int xi = pn % is; const scalar_t yp = (2. * yi + 1 - is) / is; const scalar_t xp = (2. 
* xi + 1 - is) / is; const scalar_t* face = &faces[bn * nf * 9] - 9; scalar_t* face_inv = &faces_inv[bn * nf * 9] - 9; scalar_t depth_min = far; int face_index_min = -1; scalar_t weight_min[3]; scalar_t face_inv_min[9]; for (int fn = 0; fn < nf; fn++) { /* go to next face */ face += 9; face_inv += 9; /* return if backside */ if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) continue; /* check [py, px] is inside the face */ if (((yp - face[1]) * (face[3] - face[0]) < (xp - face[0]) * (face[4] - face[1])) || ((yp - face[4]) * (face[6] - face[3]) < (xp - face[3]) * (face[7] - face[4])) || ((yp - face[7]) * (face[0] - face[6]) < (xp - face[6]) * (face[1] - face[7]))) continue; /* compute w = face_inv * p */ scalar_t w[3]; w[0] = face_inv[3 * 0 + 0] * xi + face_inv[3 * 0 + 1] * yi + face_inv[3 * 0 + 2]; w[1] = face_inv[3 * 1 + 0] * xi + face_inv[3 * 1 + 1] * yi + face_inv[3 * 1 + 2]; w[2] = face_inv[3 * 2 + 0] * xi + face_inv[3 * 2 + 1] * yi + face_inv[3 * 2 + 2]; /* sum(w) -> 1, 0 < w < 1 */ scalar_t w_sum = 0; for (int k = 0; k < 3; k++) { w[k] = min(max(w[k], 0.), 1.); w_sum += w[k]; } for (int k = 0; k < 3; k++) { w[k] /= w_sum; } /* compute 1 / zp = sum(w / z) */ const scalar_t zp = 1. / (w[0] / face[2] + w[1] / face[5] + w[2] / face[8]); if (zp <= near || far <= zp) { continue; } /* check z-buffer */ if (zp < depth_min) { depth_min = zp; face_index_min = fn; for (int k = 0; k < 3; k++) { weight_min[k] = w[k]; } if (return_depth) { for (int k = 0; k < 9; k++) { face_inv_min[k] = face_inv[k]; } } } } /* set to global memory */ if (0 <= face_index_min) { depth_map[i] = depth_min; face_index_map[i] = face_index_min; for (int k = 0; k < 3; k++) { weight_map[3 * i + k] = weight_min[k]; } if (return_depth) { for (int k = 0; k < 9; k++) { face_inv_map[9 * i + k] = face_inv_min[k]; } } } } template <typename scalar_t> __global__ void forward_texture_sampling_cuda_kernel( const scalar_t* faces, const scalar_t* textures, const int32_t* face_index_map, const scalar_t* weight_map, const scalar_t* depth_map, scalar_t* rgb_map, int32_t* sampling_index_map, scalar_t* sampling_weight_map, size_t batch_size, int num_faces, int image_size, int texture_size, scalar_t eps) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int face_index = face_index_map[i]; if (face_index >= 0) { /* from global variables: batch number, num of faces, image_size, face[v012][RGB], pixel[RGB], weight[v012], texture[ts][ts][ts][RGB], sampling indices[8], sampling_weights[8]; */ const int bn = i / (image_size * image_size); const int nf = num_faces; const int ts = texture_size; const scalar_t* face = &faces[(bn * nf + face_index) * 9]; const scalar_t* texture = &textures[(bn * nf + face_index) * ts * ts * ts * 3]; scalar_t* pixel = &rgb_map[i * 3]; const scalar_t* weight = &weight_map[i * 3]; const scalar_t depth = depth_map[i]; int32_t* sampling_indices = &sampling_index_map[i * 8]; scalar_t* sampling_weights = &sampling_weight_map[i * 8]; /* get texture index (float) */ scalar_t texture_index_float[3]; for (int k = 0; k < 3; k++) { scalar_t tif = weight[k] * (ts - 1) * (depth / (face[3 * k + 2])); tif = max(tif, 0.); tif = min(tif, ts - 1 - eps); texture_index_float[k] = tif; } /* blend */ scalar_t new_pixel[3] = {0, 0, 0}; for (int pn = 0; pn < 8; pn++) { scalar_t w = 1; // weight int texture_index_int[3]; // index in source (int) for (int k = 0; k < 3; k++) { if ((pn >> k) % 2 == 0) { w *= 1 - (texture_index_float[k] - 
(int)texture_index_float[k]); texture_index_int[k] = (int)texture_index_float[k]; } else { w *= texture_index_float[k] - (int)texture_index_float[k]; texture_index_int[k] = (int)texture_index_float[k] + 1; } } int isc = texture_index_int[0] * ts * ts + texture_index_int[1] * ts + texture_index_int[2]; for (int k = 0; k < 3; k++) new_pixel[k] += w * texture[isc * 3 + k]; sampling_indices[pn] = isc; sampling_weights[pn] = w; } for (int k = 0; k < 3; k++) pixel[k] = new_pixel[k]; } } template <typename scalar_t> __global__ void backward_pixel_map_cuda_kernel( const scalar_t* faces, int32_t* face_index_map, scalar_t* rgb_map, scalar_t* alpha_map, scalar_t* grad_rgb_map, scalar_t* grad_alpha_map, scalar_t* grad_faces, size_t batch_size, size_t num_faces, int image_size, scalar_t eps, int return_rgb, int return_alpha) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * num_faces) { return; } const int bn = i / num_faces; const int fn = i % num_faces; const int is = image_size; const scalar_t* face = &faces[i * 9]; scalar_t grad_face[9] = {}; /* check backside */ if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) return; /* for each edge */ for (int edge_num = 0; edge_num < 3; edge_num++) { /* set points of target edge */ int pi[3]; scalar_t pp[3][2]; for (int num = 0; num < 3; num++) pi[num] = (edge_num + num) % 3; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { pp[num][dim] = 0.5 * (face[3 * pi[num] + dim] * is + is - 1); } } /* for dy, dx */ for (int axis = 0; axis < 2; axis++) { /* */ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = pp[num][(dim + axis) % 2]; } } /* set direction */ int direction; if (axis == 0) { if (p[0][0] < p[1][0]) direction = -1; else direction = 1; } else { if (p[0][0] < p[1][0]) direction = 1; else direction = -1; } /* along edge */ int d0_from, d0_to; d0_from = max(ceil(min(p[0][0], p[1][0])), 0.); d0_to = min(max(p[0][0], p[1][0]), is - 1.); for (int d0 = d0_from; d0 <= d0_to; d0++) { /* get cross point */ int d1_in, d1_out; const scalar_t d1_cross = (p[1][1] - p[0][1]) / (p[1][0] - p[0][0]) * (d0 - p[0][0]) + p[0][1]; if (0 < direction) d1_in = floor(d1_cross); else d1_in = ceil(d1_cross); d1_out = d1_in + direction; /* continue if cross point is not shown */ if (d1_in < 0 || is <= d1_in) continue; if (d1_out < 0 || is <= d1_out) continue; /* get color of in-pixel and out-pixel */ scalar_t alpha_in; scalar_t alpha_out; scalar_t *rgb_in; scalar_t *rgb_out; int map_index_in, map_index_out; if (axis == 0) { map_index_in = bn * is * is + d1_in * is + d0; map_index_out = bn * is * is + d1_out * is + d0; } else { map_index_in = bn * is * is + d0 * is + d1_in; map_index_out = bn * is * is + d0 * is + d1_out; } if (return_alpha) { alpha_in = alpha_map[map_index_in]; alpha_out = alpha_map[map_index_out]; } if (return_rgb) { rgb_in = &rgb_map[map_index_in * 3]; rgb_out = &rgb_map[map_index_out * 3]; } /* out */ bool is_in_fn = (face_index_map[map_index_in] == fn); if (is_in_fn) { int d1_limit; if (0 < direction) d1_limit = is - 1; else d1_limit = 0; int d1_from = max(min(d1_out, d1_limit), 0); int d1_to = min(max(d1_out, d1_limit), is - 1); scalar_t* alpha_map_p; scalar_t* grad_alpha_map_p; scalar_t* rgb_map_p; scalar_t* grad_rgb_map_p; int map_offset, map_index_from; if (axis == 0) { map_offset = is; map_index_from = bn * is * is + d1_from * is + d0; } else { map_offset = 1; map_index_from = bn * is * is + d0 * is + d1_from; } if 
(return_alpha) { alpha_map_p = &alpha_map[map_index_from]; grad_alpha_map_p = &grad_alpha_map[map_index_from]; } if (return_rgb) { rgb_map_p = &rgb_map[map_index_from * 3]; grad_rgb_map_p = &grad_rgb_map[map_index_from * 3]; } for (int d1 = d1_from; d1 <= d1_to; d1++) { scalar_t diff_grad = 0; if (return_alpha) { diff_grad += (*alpha_map_p - alpha_in) * *grad_alpha_map_p; } if (return_rgb) { for (int k = 0; k < 3; k++) diff_grad += (rgb_map_p[k] - rgb_in[k]) * grad_rgb_map_p[k]; } if (return_alpha) { alpha_map_p += map_offset; grad_alpha_map_p += map_offset; } if (return_rgb) { rgb_map_p += 3 * map_offset; grad_rgb_map_p += 3 * map_offset; } if (diff_grad <= 0) continue; if (p[1][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (p[1][0] - d0) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? dist + eps : dist - eps; grad_face[pi[0] * 3 + (1 - axis)] -= diff_grad / dist; } if (p[0][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (d0 - p[0][0]) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? dist + eps : dist - eps; grad_face[pi[1] * 3 + (1 - axis)] -= diff_grad / dist; } } } /* in */ { int d1_limit; scalar_t d0_cross2; if ((d0 - p[0][0]) * (d0 - p[2][0]) < 0) { d0_cross2 = (p[2][1] - p[0][1]) / (p[2][0] - p[0][0]) * (d0 - p[0][0]) + p[0][1]; } else { d0_cross2 = (p[1][1] - p[2][1]) / (p[1][0] - p[2][0]) * (d0 - p[2][0]) + p[2][1]; } if (0 < direction) d1_limit = ceil(d0_cross2); else d1_limit = floor(d0_cross2); int d1_from = max(min(d1_in, d1_limit), 0); int d1_to = min(max(d1_in, d1_limit), is - 1); int* face_index_map_p; scalar_t* alpha_map_p; scalar_t* grad_alpha_map_p; scalar_t* rgb_map_p; scalar_t* grad_rgb_map_p; int map_index_from; int map_offset; if (axis == 0) map_offset = is; else map_offset = 1; if (axis == 0) { map_index_from = bn * is * is + d1_from * is + d0; } else { map_index_from = bn * is * is + d0 * is + d1_from; } face_index_map_p = &face_index_map[map_index_from] - map_offset; if (return_alpha) { alpha_map_p = &alpha_map[map_index_from] - map_offset; grad_alpha_map_p = &grad_alpha_map[map_index_from] - map_offset; } if (return_rgb) { rgb_map_p = &rgb_map[map_index_from * 3] - 3 * map_offset; grad_rgb_map_p = &grad_rgb_map[map_index_from * 3] - 3 * map_offset; } for (int d1 = d1_from; d1 <= d1_to; d1++) { face_index_map_p += map_offset; if (return_alpha) { alpha_map_p += map_offset; grad_alpha_map_p += map_offset; } if (return_rgb) { rgb_map_p += 3 * map_offset; grad_rgb_map_p += 3 * map_offset; } if (*face_index_map_p != fn) continue; scalar_t diff_grad = 0; if (return_alpha) { diff_grad += (*alpha_map_p - alpha_out) * *grad_alpha_map_p; } if (return_rgb) { for (int k = 0; k < 3; k++) diff_grad += (rgb_map_p[k] - rgb_out[k]) * grad_rgb_map_p[k]; } if (diff_grad <= 0) continue; if (p[1][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (p[1][0] - d0) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? dist + eps : dist - eps; grad_face[pi[0] * 3 + (1 - axis)] -= diff_grad / dist; } if (p[0][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (d0 - p[0][0]) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? 
dist + eps : dist - eps; grad_face[pi[1] * 3 + (1 - axis)] -= diff_grad / dist; } } } } } } /* set to global gradient variable */ for (int k = 0; k < 9; k++) grad_faces[i * 9 + k] = grad_face[k]; } template <typename scalar_t> __global__ void backward_textures_cuda_kernel( const int32_t* face_index_map, scalar_t* sampling_weight_map, int32_t* sampling_index_map, scalar_t* grad_rgb_map, scalar_t* grad_textures, size_t batch_size, size_t num_faces, int image_size, size_t texture_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int face_index = face_index_map[i]; if (0 <= face_index) { int is = image_size; int nf = num_faces; int ts = texture_size; int bn = i / (is * is); // batch number [0 -> bs] scalar_t* grad_texture = &grad_textures[(bn * nf + face_index) * ts * ts * ts * 3]; scalar_t* sampling_weight_map_p = &sampling_weight_map[i * 8]; int* sampling_index_map_p = &sampling_index_map[i * 8]; for (int pn = 0; pn < 8; pn++) { scalar_t w = *sampling_weight_map_p++; int isc = *sampling_index_map_p++; scalar_t* grad_texture_p = &grad_texture[isc * 3]; scalar_t* grad_rgb_map_p = &grad_rgb_map[i * 3]; for (int k = 0; k < 3; k++) atomicAdd(grad_texture_p++, w * *grad_rgb_map_p++); } } } template <typename scalar_t> __global__ void backward_depth_map_cuda_kernel( const scalar_t* faces, const scalar_t* depth_map, const int32_t* face_index_map, const scalar_t* face_inv_map, const scalar_t* weight_map, scalar_t* grad_depth_map, scalar_t* grad_faces, size_t batch_size, size_t num_faces, int image_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int fn = face_index_map[i]; if (0 <= fn) { const int nf = num_faces; const int is = image_size; const int bn = i / (is * is); const scalar_t* face = &faces[(bn * nf + fn) * 9]; const scalar_t depth = depth_map[i]; const scalar_t depth2 = depth * depth; const scalar_t* face_inv = &face_inv_map[i * 9]; const scalar_t* weight = &weight_map[i * 3]; const scalar_t grad_depth = grad_depth_map[i]; scalar_t* grad_face = &grad_faces[(bn * nf + fn) * 9]; /* derivative wrt z */ for (int k = 0; k < 3; k++) { const scalar_t z_k = face[3 * k + 2]; atomicAdd(&grad_face[3 * k + 2], grad_depth * weight[k] * depth2 / (z_k * z_k)); } /* derivative wrt x, y */ scalar_t tmp[3] = {}; for (int k = 0; k < 3; k++) { for (int l = 0; l < 3; l++) { tmp[k] += -face_inv[3 * l + k] / face[3 * l + 2]; } } for (int k = 0; k < 3; k++) { for (int l = 0; l < 2; l++) { // k: point number, l: dimension atomicAdd(&grad_face[3 * k + l], -grad_depth * tmp[l] * weight[k] * depth2 * is / 2); } } } } } std::vector<at::Tensor> forward_face_index_map_cuda( at::Tensor faces, at::Tensor face_index_map, at::Tensor weight_map, at::Tensor depth_map, at::Tensor face_inv_map, at::Tensor faces_inv, int image_size, float near, float far, int return_rgb, int return_alpha, int return_depth) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const int threads = 512; const dim3 blocks_1 ((batch_size * num_faces - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_face_index_map_cuda_1", ([&] { hipLaunchKernelGGL(( forward_face_index_map_cuda_kernel_1<scalar_t>), dim3(blocks_1), dim3(threads), 0, 0, faces.data<scalar_t>(), faces_inv.data<scalar_t>(), batch_size, num_faces, image_size); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in forward_face_index_map_1: %s\n", hipGetErrorString(err)); 
const dim3 blocks_2 ((batch_size * image_size * image_size - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_face_index_map_cuda_2", ([&] { hipLaunchKernelGGL(( forward_face_index_map_cuda_kernel_2<scalar_t>), dim3(blocks_2), dim3(threads), 0, 0, faces.data<scalar_t>(), faces_inv.data<scalar_t>(), face_index_map.data<int32_t>(), weight_map.data<scalar_t>(), depth_map.data<scalar_t>(), face_inv_map.data<scalar_t>(), (int) batch_size, (int) num_faces, (int) image_size, (scalar_t) near, (scalar_t) far, return_rgb, return_alpha, return_depth); })); err = hipGetLastError(); if (err != hipSuccess) printf("Error in forward_face_index_map_2: %s\n", hipGetErrorString(err)); return {face_index_map, weight_map, depth_map, face_inv_map}; } std::vector<at::Tensor> forward_texture_sampling_cuda( at::Tensor faces, at::Tensor textures, at::Tensor face_index_map, at::Tensor weight_map, at::Tensor depth_map, at::Tensor rgb_map, at::Tensor sampling_index_map, at::Tensor sampling_weight_map, int image_size, float eps) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const auto texture_size = textures.size(2); const int threads = 512; const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_texture_sampling_cuda", ([&] { hipLaunchKernelGGL(( forward_texture_sampling_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, faces.data<scalar_t>(), textures.data<scalar_t>(), face_index_map.data<int32_t>(), weight_map.data<scalar_t>(), depth_map.data<scalar_t>(), rgb_map.data<scalar_t>(), sampling_index_map.data<int32_t>(), sampling_weight_map.data<scalar_t>(), batch_size, num_faces, image_size, texture_size, eps); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in forward_texture_sampling: %s\n", hipGetErrorString(err)); return {rgb_map, sampling_index_map, sampling_weight_map}; } at::Tensor backward_pixel_map_cuda( at::Tensor faces, at::Tensor face_index_map, at::Tensor rgb_map, at::Tensor alpha_map, at::Tensor grad_rgb_map, at::Tensor grad_alpha_map, at::Tensor grad_faces, int image_size, float eps, int return_rgb, int return_alpha) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const int threads = 512; const dim3 blocks ((batch_size * num_faces - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_pixel_map_cuda", ([&] { hipLaunchKernelGGL(( backward_pixel_map_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, faces.data<scalar_t>(), face_index_map.data<int32_t>(), rgb_map.data<scalar_t>(), alpha_map.data<scalar_t>(), grad_rgb_map.data<scalar_t>(), grad_alpha_map.data<scalar_t>(), grad_faces.data<scalar_t>(), batch_size, num_faces, image_size, (scalar_t) eps, return_rgb, return_alpha); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in backward_pixel_map: %s\n", hipGetErrorString(err)); return grad_faces; } at::Tensor backward_textures_cuda( at::Tensor face_index_map, at::Tensor sampling_weight_map, at::Tensor sampling_index_map, at::Tensor grad_rgb_map, at::Tensor grad_textures, int num_faces) { const auto batch_size = face_index_map.size(0); const auto image_size = face_index_map.size(1); const auto texture_size = grad_textures.size(2); const int threads = 512; const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(sampling_weight_map.type(), "backward_textures_cuda", ([&] { hipLaunchKernelGGL(( 
backward_textures_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, face_index_map.data<int32_t>(), sampling_weight_map.data<scalar_t>(), sampling_index_map.data<int32_t>(), grad_rgb_map.data<scalar_t>(), grad_textures.data<scalar_t>(), batch_size, num_faces, image_size, texture_size); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in backward_textures: %s\n", hipGetErrorString(err)); return grad_textures; } at::Tensor backward_depth_map_cuda( at::Tensor faces, at::Tensor depth_map, at::Tensor face_index_map, at::Tensor face_inv_map, at::Tensor weight_map, at::Tensor grad_depth_map, at::Tensor grad_faces, int image_size) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const int threads = 512; const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_depth_map_cuda", ([&] { hipLaunchKernelGGL(( backward_depth_map_cuda_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, faces.data<scalar_t>(), depth_map.data<scalar_t>(), face_index_map.data<int32_t>(), face_inv_map.data<scalar_t>(), weight_map.data<scalar_t>(), grad_depth_map.data<scalar_t>(), grad_faces.data<scalar_t>(), batch_size, num_faces, image_size); })); hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error in backward_depth_map: %s\n", hipGetErrorString(err)); return grad_faces; }
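// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original file): the per-pixel core of
// forward_face_index_map_cuda_kernel_2, pulled out as plain host code for
// readability. The struct and function names are ours; the kernel above is the
// real implementation. Given the 3x3 face_inv matrix produced by kernel_1, a
// pixel (xi, yi), and the face's vertex depths face[2], face[5], face[8], it
// computes clamped, normalized barycentric weights and the interpolated depth
// via 1 / zp = sum_k(w[k] / z[k]).
// ---------------------------------------------------------------------------
#include <algorithm>

struct PixelHit { float w[3]; float depth; };

inline PixelHit barycentric_depth(const float face_inv[9], const float face[9],
                                  int xi, int yi) {
    PixelHit hit;
    float w_sum = 0.0f;
    for (int k = 0; k < 3; ++k) {
        float wk = face_inv[3 * k + 0] * xi
                 + face_inv[3 * k + 1] * yi
                 + face_inv[3 * k + 2];
        wk = std::min(std::max(wk, 0.0f), 1.0f);   // clamp each weight to [0, 1]
        hit.w[k] = wk;
        w_sum += wk;
    }
    for (int k = 0; k < 3; ++k) hit.w[k] /= w_sum;  // normalize so sum(w) == 1
    hit.depth = 1.0f / (hit.w[0] / face[2] + hit.w[1] / face[5] + hit.w[2] / face[8]);
    return hit;
}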
3246d10ab0ae2150760bc3d00d74bf173c4320c4.cu
#include <iostream> #include <ATen/ATen.h> #include <cuda.h> #include <cuda_runtime.h> // for the older gpus atomicAdd with double arguments does not exist /* #if __CUDA_ARCH__ < 600 and defined(__CUDA_ARCH__) static __inline__ __device__ double atomicAdd(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*)address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed))); // Note: uses integer comparison to avoid hang in case of NaN (since NaN != NaN) } while (assumed != old); } while (assumed != old); return __longlong_as_double(old); } #endif */ namespace{ template <typename scalar_t> __global__ void forward_face_index_map_cuda_kernel_1( const scalar_t* __restrict__ faces, scalar_t* __restrict__ faces_inv, int batch_size, int num_faces, int image_size) { /* batch number, face, number, image size, face[v012][RGB] */ const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * num_faces) { return; } const int is = image_size; const scalar_t* face = &faces[i * 9]; scalar_t* face_inv_g = &faces_inv[i * 9]; /* return if backside */ if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) return; /* p[num][xy]: x, y is normalized from [-1, 1] to [0, is - 1]. */ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = 0.5 * (face[3 * num + dim] * is + is - 1); } } /* compute face_inv */ scalar_t face_inv[9] = { p[1][1] - p[2][1], p[2][0] - p[1][0], p[1][0] * p[2][1] - p[2][0] * p[1][1], p[2][1] - p[0][1], p[0][0] - p[2][0], p[2][0] * p[0][1] - p[0][0] * p[2][1], p[0][1] - p[1][1], p[1][0] - p[0][0], p[0][0] * p[1][1] - p[1][0] * p[0][1]}; scalar_t face_inv_denominator = ( p[2][0] * (p[0][1] - p[1][1]) + p[0][0] * (p[1][1] - p[2][1]) + p[1][0] * (p[2][1] - p[0][1])); for (int k = 0; k < 9; k++) { face_inv[k] /= face_inv_denominator; } /* set to global memory */ for (int k = 0; k < 9; k++) { face_inv_g[k] = face_inv[k]; } } template <typename scalar_t> __global__ void forward_face_index_map_cuda_kernel_2( const scalar_t* faces, scalar_t* faces_inv, int32_t* __restrict__ face_index_map, scalar_t* __restrict__ weight_map, scalar_t* __restrict__ depth_map, scalar_t* __restrict__ face_inv_map, int batch_size, int num_faces, int image_size, scalar_t near, scalar_t far, int return_rgb, int return_alpha, int return_depth) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int is = image_size; const int nf = num_faces; const int bn = i / (is * is); const int pn = i % (is * is); const int yi = pn / is; const int xi = pn % is; const scalar_t yp = (2. * yi + 1 - is) / is; const scalar_t xp = (2. 
* xi + 1 - is) / is; const scalar_t* face = &faces[bn * nf * 9] - 9; scalar_t* face_inv = &faces_inv[bn * nf * 9] - 9; scalar_t depth_min = far; int face_index_min = -1; scalar_t weight_min[3]; scalar_t face_inv_min[9]; for (int fn = 0; fn < nf; fn++) { /* go to next face */ face += 9; face_inv += 9; /* return if backside */ if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) continue; /* check [py, px] is inside the face */ if (((yp - face[1]) * (face[3] - face[0]) < (xp - face[0]) * (face[4] - face[1])) || ((yp - face[4]) * (face[6] - face[3]) < (xp - face[3]) * (face[7] - face[4])) || ((yp - face[7]) * (face[0] - face[6]) < (xp - face[6]) * (face[1] - face[7]))) continue; /* compute w = face_inv * p */ scalar_t w[3]; w[0] = face_inv[3 * 0 + 0] * xi + face_inv[3 * 0 + 1] * yi + face_inv[3 * 0 + 2]; w[1] = face_inv[3 * 1 + 0] * xi + face_inv[3 * 1 + 1] * yi + face_inv[3 * 1 + 2]; w[2] = face_inv[3 * 2 + 0] * xi + face_inv[3 * 2 + 1] * yi + face_inv[3 * 2 + 2]; /* sum(w) -> 1, 0 < w < 1 */ scalar_t w_sum = 0; for (int k = 0; k < 3; k++) { w[k] = min(max(w[k], 0.), 1.); w_sum += w[k]; } for (int k = 0; k < 3; k++) { w[k] /= w_sum; } /* compute 1 / zp = sum(w / z) */ const scalar_t zp = 1. / (w[0] / face[2] + w[1] / face[5] + w[2] / face[8]); if (zp <= near || far <= zp) { continue; } /* check z-buffer */ if (zp < depth_min) { depth_min = zp; face_index_min = fn; for (int k = 0; k < 3; k++) { weight_min[k] = w[k]; } if (return_depth) { for (int k = 0; k < 9; k++) { face_inv_min[k] = face_inv[k]; } } } } /* set to global memory */ if (0 <= face_index_min) { depth_map[i] = depth_min; face_index_map[i] = face_index_min; for (int k = 0; k < 3; k++) { weight_map[3 * i + k] = weight_min[k]; } if (return_depth) { for (int k = 0; k < 9; k++) { face_inv_map[9 * i + k] = face_inv_min[k]; } } } } template <typename scalar_t> __global__ void forward_texture_sampling_cuda_kernel( const scalar_t* faces, const scalar_t* textures, const int32_t* face_index_map, const scalar_t* weight_map, const scalar_t* depth_map, scalar_t* rgb_map, int32_t* sampling_index_map, scalar_t* sampling_weight_map, size_t batch_size, int num_faces, int image_size, int texture_size, scalar_t eps) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int face_index = face_index_map[i]; if (face_index >= 0) { /* from global variables: batch number, num of faces, image_size, face[v012][RGB], pixel[RGB], weight[v012], texture[ts][ts][ts][RGB], sampling indices[8], sampling_weights[8]; */ const int bn = i / (image_size * image_size); const int nf = num_faces; const int ts = texture_size; const scalar_t* face = &faces[(bn * nf + face_index) * 9]; const scalar_t* texture = &textures[(bn * nf + face_index) * ts * ts * ts * 3]; scalar_t* pixel = &rgb_map[i * 3]; const scalar_t* weight = &weight_map[i * 3]; const scalar_t depth = depth_map[i]; int32_t* sampling_indices = &sampling_index_map[i * 8]; scalar_t* sampling_weights = &sampling_weight_map[i * 8]; /* get texture index (float) */ scalar_t texture_index_float[3]; for (int k = 0; k < 3; k++) { scalar_t tif = weight[k] * (ts - 1) * (depth / (face[3 * k + 2])); tif = max(tif, 0.); tif = min(tif, ts - 1 - eps); texture_index_float[k] = tif; } /* blend */ scalar_t new_pixel[3] = {0, 0, 0}; for (int pn = 0; pn < 8; pn++) { scalar_t w = 1; // weight int texture_index_int[3]; // index in source (int) for (int k = 0; k < 3; k++) { if ((pn >> k) % 2 == 0) { w *= 1 - (texture_index_float[k] - 
(int)texture_index_float[k]); texture_index_int[k] = (int)texture_index_float[k]; } else { w *= texture_index_float[k] - (int)texture_index_float[k]; texture_index_int[k] = (int)texture_index_float[k] + 1; } } int isc = texture_index_int[0] * ts * ts + texture_index_int[1] * ts + texture_index_int[2]; for (int k = 0; k < 3; k++) new_pixel[k] += w * texture[isc * 3 + k]; sampling_indices[pn] = isc; sampling_weights[pn] = w; } for (int k = 0; k < 3; k++) pixel[k] = new_pixel[k]; } } template <typename scalar_t> __global__ void backward_pixel_map_cuda_kernel( const scalar_t* faces, int32_t* face_index_map, scalar_t* rgb_map, scalar_t* alpha_map, scalar_t* grad_rgb_map, scalar_t* grad_alpha_map, scalar_t* grad_faces, size_t batch_size, size_t num_faces, int image_size, scalar_t eps, int return_rgb, int return_alpha) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * num_faces) { return; } const int bn = i / num_faces; const int fn = i % num_faces; const int is = image_size; const scalar_t* face = &faces[i * 9]; scalar_t grad_face[9] = {}; /* check backside */ if ((face[7] - face[1]) * (face[3] - face[0]) < (face[4] - face[1]) * (face[6] - face[0])) return; /* for each edge */ for (int edge_num = 0; edge_num < 3; edge_num++) { /* set points of target edge */ int pi[3]; scalar_t pp[3][2]; for (int num = 0; num < 3; num++) pi[num] = (edge_num + num) % 3; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { pp[num][dim] = 0.5 * (face[3 * pi[num] + dim] * is + is - 1); } } /* for dy, dx */ for (int axis = 0; axis < 2; axis++) { /* */ scalar_t p[3][2]; for (int num = 0; num < 3; num++) { for (int dim = 0; dim < 2; dim++) { p[num][dim] = pp[num][(dim + axis) % 2]; } } /* set direction */ int direction; if (axis == 0) { if (p[0][0] < p[1][0]) direction = -1; else direction = 1; } else { if (p[0][0] < p[1][0]) direction = 1; else direction = -1; } /* along edge */ int d0_from, d0_to; d0_from = max(ceil(min(p[0][0], p[1][0])), 0.); d0_to = min(max(p[0][0], p[1][0]), is - 1.); for (int d0 = d0_from; d0 <= d0_to; d0++) { /* get cross point */ int d1_in, d1_out; const scalar_t d1_cross = (p[1][1] - p[0][1]) / (p[1][0] - p[0][0]) * (d0 - p[0][0]) + p[0][1]; if (0 < direction) d1_in = floor(d1_cross); else d1_in = ceil(d1_cross); d1_out = d1_in + direction; /* continue if cross point is not shown */ if (d1_in < 0 || is <= d1_in) continue; if (d1_out < 0 || is <= d1_out) continue; /* get color of in-pixel and out-pixel */ scalar_t alpha_in; scalar_t alpha_out; scalar_t *rgb_in; scalar_t *rgb_out; int map_index_in, map_index_out; if (axis == 0) { map_index_in = bn * is * is + d1_in * is + d0; map_index_out = bn * is * is + d1_out * is + d0; } else { map_index_in = bn * is * is + d0 * is + d1_in; map_index_out = bn * is * is + d0 * is + d1_out; } if (return_alpha) { alpha_in = alpha_map[map_index_in]; alpha_out = alpha_map[map_index_out]; } if (return_rgb) { rgb_in = &rgb_map[map_index_in * 3]; rgb_out = &rgb_map[map_index_out * 3]; } /* out */ bool is_in_fn = (face_index_map[map_index_in] == fn); if (is_in_fn) { int d1_limit; if (0 < direction) d1_limit = is - 1; else d1_limit = 0; int d1_from = max(min(d1_out, d1_limit), 0); int d1_to = min(max(d1_out, d1_limit), is - 1); scalar_t* alpha_map_p; scalar_t* grad_alpha_map_p; scalar_t* rgb_map_p; scalar_t* grad_rgb_map_p; int map_offset, map_index_from; if (axis == 0) { map_offset = is; map_index_from = bn * is * is + d1_from * is + d0; } else { map_offset = 1; map_index_from = bn * is * is + d0 * is + d1_from; } if 
(return_alpha) { alpha_map_p = &alpha_map[map_index_from]; grad_alpha_map_p = &grad_alpha_map[map_index_from]; } if (return_rgb) { rgb_map_p = &rgb_map[map_index_from * 3]; grad_rgb_map_p = &grad_rgb_map[map_index_from * 3]; } for (int d1 = d1_from; d1 <= d1_to; d1++) { scalar_t diff_grad = 0; if (return_alpha) { diff_grad += (*alpha_map_p - alpha_in) * *grad_alpha_map_p; } if (return_rgb) { for (int k = 0; k < 3; k++) diff_grad += (rgb_map_p[k] - rgb_in[k]) * grad_rgb_map_p[k]; } if (return_alpha) { alpha_map_p += map_offset; grad_alpha_map_p += map_offset; } if (return_rgb) { rgb_map_p += 3 * map_offset; grad_rgb_map_p += 3 * map_offset; } if (diff_grad <= 0) continue; if (p[1][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (p[1][0] - d0) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? dist + eps : dist - eps; grad_face[pi[0] * 3 + (1 - axis)] -= diff_grad / dist; } if (p[0][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (d0 - p[0][0]) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? dist + eps : dist - eps; grad_face[pi[1] * 3 + (1 - axis)] -= diff_grad / dist; } } } /* in */ { int d1_limit; scalar_t d0_cross2; if ((d0 - p[0][0]) * (d0 - p[2][0]) < 0) { d0_cross2 = (p[2][1] - p[0][1]) / (p[2][0] - p[0][0]) * (d0 - p[0][0]) + p[0][1]; } else { d0_cross2 = (p[1][1] - p[2][1]) / (p[1][0] - p[2][0]) * (d0 - p[2][0]) + p[2][1]; } if (0 < direction) d1_limit = ceil(d0_cross2); else d1_limit = floor(d0_cross2); int d1_from = max(min(d1_in, d1_limit), 0); int d1_to = min(max(d1_in, d1_limit), is - 1); int* face_index_map_p; scalar_t* alpha_map_p; scalar_t* grad_alpha_map_p; scalar_t* rgb_map_p; scalar_t* grad_rgb_map_p; int map_index_from; int map_offset; if (axis == 0) map_offset = is; else map_offset = 1; if (axis == 0) { map_index_from = bn * is * is + d1_from * is + d0; } else { map_index_from = bn * is * is + d0 * is + d1_from; } face_index_map_p = &face_index_map[map_index_from] - map_offset; if (return_alpha) { alpha_map_p = &alpha_map[map_index_from] - map_offset; grad_alpha_map_p = &grad_alpha_map[map_index_from] - map_offset; } if (return_rgb) { rgb_map_p = &rgb_map[map_index_from * 3] - 3 * map_offset; grad_rgb_map_p = &grad_rgb_map[map_index_from * 3] - 3 * map_offset; } for (int d1 = d1_from; d1 <= d1_to; d1++) { face_index_map_p += map_offset; if (return_alpha) { alpha_map_p += map_offset; grad_alpha_map_p += map_offset; } if (return_rgb) { rgb_map_p += 3 * map_offset; grad_rgb_map_p += 3 * map_offset; } if (*face_index_map_p != fn) continue; scalar_t diff_grad = 0; if (return_alpha) { diff_grad += (*alpha_map_p - alpha_out) * *grad_alpha_map_p; } if (return_rgb) { for (int k = 0; k < 3; k++) diff_grad += (rgb_map_p[k] - rgb_out[k]) * grad_rgb_map_p[k]; } if (diff_grad <= 0) continue; if (p[1][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (p[1][0] - d0) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? dist + eps : dist - eps; grad_face[pi[0] * 3 + (1 - axis)] -= diff_grad / dist; } if (p[0][0] != d0) { scalar_t dist = (p[1][0] - p[0][0]) / (d0 - p[0][0]) * (d1 - d1_cross) * 2. / is; dist = (0 < dist) ? 
dist + eps : dist - eps; grad_face[pi[1] * 3 + (1 - axis)] -= diff_grad / dist; } } } } } } /* set to global gradient variable */ for (int k = 0; k < 9; k++) grad_faces[i * 9 + k] = grad_face[k]; } template <typename scalar_t> __global__ void backward_textures_cuda_kernel( const int32_t* face_index_map, scalar_t* sampling_weight_map, int32_t* sampling_index_map, scalar_t* grad_rgb_map, scalar_t* grad_textures, size_t batch_size, size_t num_faces, int image_size, size_t texture_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int face_index = face_index_map[i]; if (0 <= face_index) { int is = image_size; int nf = num_faces; int ts = texture_size; int bn = i / (is * is); // batch number [0 -> bs] scalar_t* grad_texture = &grad_textures[(bn * nf + face_index) * ts * ts * ts * 3]; scalar_t* sampling_weight_map_p = &sampling_weight_map[i * 8]; int* sampling_index_map_p = &sampling_index_map[i * 8]; for (int pn = 0; pn < 8; pn++) { scalar_t w = *sampling_weight_map_p++; int isc = *sampling_index_map_p++; scalar_t* grad_texture_p = &grad_texture[isc * 3]; scalar_t* grad_rgb_map_p = &grad_rgb_map[i * 3]; for (int k = 0; k < 3; k++) atomicAdd(grad_texture_p++, w * *grad_rgb_map_p++); } } } template <typename scalar_t> __global__ void backward_depth_map_cuda_kernel( const scalar_t* faces, const scalar_t* depth_map, const int32_t* face_index_map, const scalar_t* face_inv_map, const scalar_t* weight_map, scalar_t* grad_depth_map, scalar_t* grad_faces, size_t batch_size, size_t num_faces, int image_size) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= batch_size * image_size * image_size) { return; } const int fn = face_index_map[i]; if (0 <= fn) { const int nf = num_faces; const int is = image_size; const int bn = i / (is * is); const scalar_t* face = &faces[(bn * nf + fn) * 9]; const scalar_t depth = depth_map[i]; const scalar_t depth2 = depth * depth; const scalar_t* face_inv = &face_inv_map[i * 9]; const scalar_t* weight = &weight_map[i * 3]; const scalar_t grad_depth = grad_depth_map[i]; scalar_t* grad_face = &grad_faces[(bn * nf + fn) * 9]; /* derivative wrt z */ for (int k = 0; k < 3; k++) { const scalar_t z_k = face[3 * k + 2]; atomicAdd(&grad_face[3 * k + 2], grad_depth * weight[k] * depth2 / (z_k * z_k)); } /* derivative wrt x, y */ scalar_t tmp[3] = {}; for (int k = 0; k < 3; k++) { for (int l = 0; l < 3; l++) { tmp[k] += -face_inv[3 * l + k] / face[3 * l + 2]; } } for (int k = 0; k < 3; k++) { for (int l = 0; l < 2; l++) { // k: point number, l: dimension atomicAdd(&grad_face[3 * k + l], -grad_depth * tmp[l] * weight[k] * depth2 * is / 2); } } } } } std::vector<at::Tensor> forward_face_index_map_cuda( at::Tensor faces, at::Tensor face_index_map, at::Tensor weight_map, at::Tensor depth_map, at::Tensor face_inv_map, at::Tensor faces_inv, int image_size, float near, float far, int return_rgb, int return_alpha, int return_depth) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const int threads = 512; const dim3 blocks_1 ((batch_size * num_faces - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_face_index_map_cuda_1", ([&] { forward_face_index_map_cuda_kernel_1<scalar_t><<<blocks_1, threads>>>( faces.data<scalar_t>(), faces_inv.data<scalar_t>(), batch_size, num_faces, image_size); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in forward_face_index_map_1: %s\n", cudaGetErrorString(err)); const dim3 blocks_2 ((batch_size 
* image_size * image_size - 1) / threads +1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_face_index_map_cuda_2", ([&] { forward_face_index_map_cuda_kernel_2<scalar_t><<<blocks_2, threads>>>( faces.data<scalar_t>(), faces_inv.data<scalar_t>(), face_index_map.data<int32_t>(), weight_map.data<scalar_t>(), depth_map.data<scalar_t>(), face_inv_map.data<scalar_t>(), (int) batch_size, (int) num_faces, (int) image_size, (scalar_t) near, (scalar_t) far, return_rgb, return_alpha, return_depth); })); err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in forward_face_index_map_2: %s\n", cudaGetErrorString(err)); return {face_index_map, weight_map, depth_map, face_inv_map}; } std::vector<at::Tensor> forward_texture_sampling_cuda( at::Tensor faces, at::Tensor textures, at::Tensor face_index_map, at::Tensor weight_map, at::Tensor depth_map, at::Tensor rgb_map, at::Tensor sampling_index_map, at::Tensor sampling_weight_map, int image_size, float eps) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const auto texture_size = textures.size(2); const int threads = 512; const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "forward_texture_sampling_cuda", ([&] { forward_texture_sampling_cuda_kernel<scalar_t><<<blocks, threads>>>( faces.data<scalar_t>(), textures.data<scalar_t>(), face_index_map.data<int32_t>(), weight_map.data<scalar_t>(), depth_map.data<scalar_t>(), rgb_map.data<scalar_t>(), sampling_index_map.data<int32_t>(), sampling_weight_map.data<scalar_t>(), batch_size, num_faces, image_size, texture_size, eps); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in forward_texture_sampling: %s\n", cudaGetErrorString(err)); return {rgb_map, sampling_index_map, sampling_weight_map}; } at::Tensor backward_pixel_map_cuda( at::Tensor faces, at::Tensor face_index_map, at::Tensor rgb_map, at::Tensor alpha_map, at::Tensor grad_rgb_map, at::Tensor grad_alpha_map, at::Tensor grad_faces, int image_size, float eps, int return_rgb, int return_alpha) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const int threads = 512; const dim3 blocks ((batch_size * num_faces - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_pixel_map_cuda", ([&] { backward_pixel_map_cuda_kernel<scalar_t><<<blocks, threads>>>( faces.data<scalar_t>(), face_index_map.data<int32_t>(), rgb_map.data<scalar_t>(), alpha_map.data<scalar_t>(), grad_rgb_map.data<scalar_t>(), grad_alpha_map.data<scalar_t>(), grad_faces.data<scalar_t>(), batch_size, num_faces, image_size, (scalar_t) eps, return_rgb, return_alpha); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in backward_pixel_map: %s\n", cudaGetErrorString(err)); return grad_faces; } at::Tensor backward_textures_cuda( at::Tensor face_index_map, at::Tensor sampling_weight_map, at::Tensor sampling_index_map, at::Tensor grad_rgb_map, at::Tensor grad_textures, int num_faces) { const auto batch_size = face_index_map.size(0); const auto image_size = face_index_map.size(1); const auto texture_size = grad_textures.size(2); const int threads = 512; const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(sampling_weight_map.type(), "backward_textures_cuda", ([&] { backward_textures_cuda_kernel<scalar_t><<<blocks, threads>>>( face_index_map.data<int32_t>(), sampling_weight_map.data<scalar_t>(), sampling_index_map.data<int32_t>(), 
grad_rgb_map.data<scalar_t>(), grad_textures.data<scalar_t>(), batch_size, num_faces, image_size, texture_size); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in backward_textures: %s\n", cudaGetErrorString(err)); return grad_textures; } at::Tensor backward_depth_map_cuda( at::Tensor faces, at::Tensor depth_map, at::Tensor face_index_map, at::Tensor face_inv_map, at::Tensor weight_map, at::Tensor grad_depth_map, at::Tensor grad_faces, int image_size) { const auto batch_size = faces.size(0); const auto num_faces = faces.size(1); const int threads = 512; const dim3 blocks ((batch_size * image_size * image_size - 1) / threads + 1); AT_DISPATCH_FLOATING_TYPES(faces.type(), "backward_depth_map_cuda", ([&] { backward_depth_map_cuda_kernel<scalar_t><<<blocks, threads>>>( faces.data<scalar_t>(), depth_map.data<scalar_t>(), face_index_map.data<int32_t>(), face_inv_map.data<scalar_t>(), weight_map.data<scalar_t>(), grad_depth_map.data<scalar_t>(), grad_faces.data<scalar_t>(), batch_size, num_faces, image_size); })); cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error in backward_depth_map: %s\n", cudaGetErrorString(err)); return grad_faces; }
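The wrappers above all follow the same launch shape: a 1-D grid of (count - 1) / threads + 1 blocks of 512 threads, a kernel launch, then a cudaGetLastError check, with dtype dispatch handled by AT_DISPATCH_FLOATING_TYPES. A minimal standalone sketch of that launch-and-check pattern, using a hypothetical fill_kernel rather than any of the rasterization kernels:

#include <cstdio>
#include <cuda_runtime.h>

// Hypothetical kernel: one thread per element, same indexing scheme as the
// kernels above (i = blockIdx.x * blockDim.x + threadIdx.x, with a bounds guard).
__global__ void fill_kernel(float* out, float value, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    out[i] = value;
}

int main()
{
    const int n = 1000;
    float* d_out = nullptr;
    cudaMalloc(&d_out, n * sizeof(float));

    const int threads = 512;                    // same block size as above
    const dim3 blocks((n - 1) / threads + 1);   // ceil(n / threads)
    fill_kernel<<<blocks, threads>>>(d_out, 1.0f, n);

    // Same post-launch check as the wrappers above.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error in fill_kernel: %s\n", cudaGetErrorString(err));

    cudaFree(d_out);
    return 0;
}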
e88e84b63803523954724dfcdd191e80813640ea.hip
// !!! This is a file automatically generated by hipify!!! /* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/bboxUtils.h" #include "common/kernel.h" #include "rocblas.h" #include "hip/hip_runtime.h" #include <hipcub/hipcub.hpp> #include <stdint.h> #define CUDA_MEM_ALIGN 256 // HASH unsigned int hash(const void* array_, size_t size) { // Apply hashing only when debugging RPN codes. if (DEBUG_ENABLE) { const char* array_const; char* array; PLUGIN_CHECK_CUDA(hipHostMalloc((void**) &array, size)); PLUGIN_CHECK_CUDA(hipMemcpy(array, array_, size, hipMemcpyDeviceToHost)); array_const = array; unsigned int hash = 45599; for (size_t i = 0; i < size; i++) { unsigned int value = array_const[i]; hash = hash * 1487 + value; hash = hash * 317; hash = hash % 105359; } return hash; } else { return 0; } } // ALIGNPTR int8_t* alignPtr(int8_t* ptr, uintptr_t to) { uintptr_t addr = (uintptr_t) ptr; if (addr % to) { addr += to - addr % to; } return (int8_t*) addr; } // NEXTWORKSPACEPTR int8_t* nextWorkspacePtr(int8_t* ptr, uintptr_t previousWorkspaceSize) { uintptr_t addr = (uintptr_t) ptr; addr += previousWorkspaceSize; return alignPtr((int8_t*) addr, CUDA_MEM_ALIGN); } // CALCULATE TOTAL WORKSPACE SIZE size_t calculateTotalWorkspaceSize(size_t* workspaces, int count) { size_t total = 0; for (int i = 0; i < count; i++) { total += workspaces[i]; if (workspaces[i] % CUDA_MEM_ALIGN) { total += CUDA_MEM_ALIGN - (workspaces[i] % CUDA_MEM_ALIGN); } } return total; } using nvinfer1::DataType; // DATA TYPE SIZE size_t dataTypeSize(const DataType dtype) { switch (dtype) { case DataType::kINT8: return sizeof(char); case DataType::kHALF: return sizeof(short); case DataType::kFLOAT: return sizeof(float); default: return 0; } } // CUB /* size_t cubSortFloatIntPairsWorkspaceSize(int num_items, int num_segments) { size_t temp_storage_bytes = 0; hipcub::DeviceSegmentedRadixSort::SortPairsDescending( (int *)NULL, temp_storage_bytes, (const float *)NULL, (float *)NULL, (const int *)NULL, (int *)NULL, num_items, // # items num_segments, // # segments (const int *)NULL, (const int *)NULL); return temp_storage_bytes; } size_t cubSortFloatBboxInfoPairsWorkspaceSize(int num_items, int num_segments) { size_t temp_storage_bytes = 0; hipcub::DeviceSegmentedRadixSort::SortPairsDescending( (int *)NULL, temp_storage_bytes, (const float *)NULL, (float *)NULL, (const BboxInfo<float> *)NULL, (BboxInfo<float> *)NULL, num_items, // # items num_segments, // # segments (const int *)NULL, (const int *)NULL); return temp_storage_bytes; } */ template <unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void setUniformOffsets_kernel( const int num_segments, const int offset, int* d_offsets) { const int idx = blockIdx.x * nthds_per_cta + threadIdx.x; if (idx <= num_segments) d_offsets[idx] = idx * offset; } void setUniformOffsets( hipStream_t stream, const int num_segments, const int 
offset, int* d_offsets) { const int BS = 32; const int GS = (num_segments + 1 + BS - 1) / BS; hipLaunchKernelGGL(( setUniformOffsets_kernel<BS>), dim3(GS), dim3(BS), 0, stream, num_segments, offset, d_offsets); } const char* cublasGetErrorString(hipblasStatus_t error) { switch (error) { case HIPBLAS_STATUS_SUCCESS: return "HIPBLAS_STATUS_SUCCESS"; case HIPBLAS_STATUS_NOT_INITIALIZED: return "HIPBLAS_STATUS_NOT_INITIALIZED"; case HIPBLAS_STATUS_ALLOC_FAILED: return "HIPBLAS_STATUS_ALLOC_FAILED"; case HIPBLAS_STATUS_INVALID_VALUE: return "HIPBLAS_STATUS_INVALID_VALUE"; case HIPBLAS_STATUS_ARCH_MISMATCH: return "HIPBLAS_STATUS_ARCH_MISMATCH"; case HIPBLAS_STATUS_MAPPING_ERROR: return "HIPBLAS_STATUS_MAPPING_ERROR"; case HIPBLAS_STATUS_EXECUTION_FAILED: return "HIPBLAS_STATUS_EXECUTION_FAILED"; case HIPBLAS_STATUS_INTERNAL_ERROR: return "HIPBLAS_STATUS_INTERNAL_ERROR"; #if TORCH_HIP_VERSION >= 6000 case HIPBLAS_STATUS_NOT_SUPPORTED: return "HIPBLAS_STATUS_NOT_SUPPORTED"; #endif #if TORCH_HIP_VERSION >= 6050 case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; #endif } return "Unknown cublas status"; }
e88e84b63803523954724dfcdd191e80813640ea.cu
/* * SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: Apache-2.0 * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "common/bboxUtils.h" #include "common/kernel.h" #include "cublas_v2.h" #include "cuda.h" #include <cub/cub.cuh> #include <stdint.h> #define CUDA_MEM_ALIGN 256 // HASH unsigned int hash(const void* array_, size_t size) { // Apply hashing only when debugging RPN codes. if (DEBUG_ENABLE) { const char* array_const; char* array; PLUGIN_CHECK_CUDA(cudaMallocHost((void**) &array, size)); PLUGIN_CHECK_CUDA(cudaMemcpy(array, array_, size, cudaMemcpyDeviceToHost)); array_const = array; unsigned int hash = 45599; for (size_t i = 0; i < size; i++) { unsigned int value = array_const[i]; hash = hash * 1487 + value; hash = hash * 317; hash = hash % 105359; } return hash; } else { return 0; } } // ALIGNPTR int8_t* alignPtr(int8_t* ptr, uintptr_t to) { uintptr_t addr = (uintptr_t) ptr; if (addr % to) { addr += to - addr % to; } return (int8_t*) addr; } // NEXTWORKSPACEPTR int8_t* nextWorkspacePtr(int8_t* ptr, uintptr_t previousWorkspaceSize) { uintptr_t addr = (uintptr_t) ptr; addr += previousWorkspaceSize; return alignPtr((int8_t*) addr, CUDA_MEM_ALIGN); } // CALCULATE TOTAL WORKSPACE SIZE size_t calculateTotalWorkspaceSize(size_t* workspaces, int count) { size_t total = 0; for (int i = 0; i < count; i++) { total += workspaces[i]; if (workspaces[i] % CUDA_MEM_ALIGN) { total += CUDA_MEM_ALIGN - (workspaces[i] % CUDA_MEM_ALIGN); } } return total; } using nvinfer1::DataType; // DATA TYPE SIZE size_t dataTypeSize(const DataType dtype) { switch (dtype) { case DataType::kINT8: return sizeof(char); case DataType::kHALF: return sizeof(short); case DataType::kFLOAT: return sizeof(float); default: return 0; } } // CUB /* size_t cubSortFloatIntPairsWorkspaceSize(int num_items, int num_segments) { size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending( (int *)NULL, temp_storage_bytes, (const float *)NULL, (float *)NULL, (const int *)NULL, (int *)NULL, num_items, // # items num_segments, // # segments (const int *)NULL, (const int *)NULL); return temp_storage_bytes; } size_t cubSortFloatBboxInfoPairsWorkspaceSize(int num_items, int num_segments) { size_t temp_storage_bytes = 0; cub::DeviceSegmentedRadixSort::SortPairsDescending( (int *)NULL, temp_storage_bytes, (const float *)NULL, (float *)NULL, (const BboxInfo<float> *)NULL, (BboxInfo<float> *)NULL, num_items, // # items num_segments, // # segments (const int *)NULL, (const int *)NULL); return temp_storage_bytes; } */ template <unsigned nthds_per_cta> __launch_bounds__(nthds_per_cta) __global__ void setUniformOffsets_kernel( const int num_segments, const int offset, int* d_offsets) { const int idx = blockIdx.x * nthds_per_cta + threadIdx.x; if (idx <= num_segments) d_offsets[idx] = idx * offset; } void setUniformOffsets( cudaStream_t stream, const int num_segments, const int offset, int* d_offsets) { const int BS = 32; const int GS = (num_segments + 1 + 
BS - 1) / BS; setUniformOffsets_kernel<BS><<<GS, BS, 0, stream>>>(num_segments, offset, d_offsets); } const char* cublasGetErrorString(cublasStatus_t error) { switch (error) { case CUBLAS_STATUS_SUCCESS: return "CUBLAS_STATUS_SUCCESS"; case CUBLAS_STATUS_NOT_INITIALIZED: return "CUBLAS_STATUS_NOT_INITIALIZED"; case CUBLAS_STATUS_ALLOC_FAILED: return "CUBLAS_STATUS_ALLOC_FAILED"; case CUBLAS_STATUS_INVALID_VALUE: return "CUBLAS_STATUS_INVALID_VALUE"; case CUBLAS_STATUS_ARCH_MISMATCH: return "CUBLAS_STATUS_ARCH_MISMATCH"; case CUBLAS_STATUS_MAPPING_ERROR: return "CUBLAS_STATUS_MAPPING_ERROR"; case CUBLAS_STATUS_EXECUTION_FAILED: return "CUBLAS_STATUS_EXECUTION_FAILED"; case CUBLAS_STATUS_INTERNAL_ERROR: return "CUBLAS_STATUS_INTERNAL_ERROR"; #if CUDA_VERSION >= 6000 case CUBLAS_STATUS_NOT_SUPPORTED: return "CUBLAS_STATUS_NOT_SUPPORTED"; #endif #if CUDA_VERSION >= 6050 case CUBLAS_STATUS_LICENSE_ERROR: return "CUBLAS_STATUS_LICENSE_ERROR"; #endif } return "Unknown cublas status"; }
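alignPtr and nextWorkspacePtr above exist to carve several aligned sub-buffers out of a single workspace allocation, with calculateTotalWorkspaceSize reporting how much to allocate up front. A small host-only sketch of how the two pointer helpers compose; the buffer sizes here are made up:

#include <cstdint>
#include <cstdio>

#define CUDA_MEM_ALIGN 256

// Adapted from the file above: round a pointer up to the next CUDA_MEM_ALIGN
// boundary, and step past a previously used sub-buffer.
int8_t* alignPtr(int8_t* ptr, uintptr_t to)
{
    uintptr_t addr = (uintptr_t) ptr;
    if (addr % to) addr += to - addr % to;
    return (int8_t*) addr;
}

int8_t* nextWorkspacePtr(int8_t* ptr, uintptr_t previousWorkspaceSize)
{
    return alignPtr(ptr + previousWorkspaceSize, CUDA_MEM_ALIGN);
}

int main()
{
    // Hypothetical sizes for two temporaries packed into one workspace.
    // calculateTotalWorkspaceSize(sizes, 2) above would report 1024 + 4096
    // bytes, since each entry is padded up to the alignment.
    size_t sizes[2] = {1000, 4096};
    int8_t* workspace = new int8_t[1024 + 4096 + CUDA_MEM_ALIGN];

    int8_t* buf0 = alignPtr(workspace, CUDA_MEM_ALIGN); // first aligned buffer
    int8_t* buf1 = nextWorkspacePtr(buf0, sizes[0]);    // next aligned buffer

    printf("offset of buf1: %td bytes\n", buf1 - buf0); // prints 1024

    delete[] workspace;
    return 0;
}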
75a89bc184884ba6466841d326b93a532c52e367.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #if __linux__ && defined(__INTEL_COMPILER) #define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend) #endif #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <map> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include "csv.hpp" // typedef std::basic_string<char,std::char_traits<char>,tbb::tbb_allocator<char> > MyString; #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <algorithm> #include <cstdlib> #include "util.h" using namespace std; // static MyString* Data; void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } int main( int argc, char* argv[] ) { int N = atoi(argv[2]); std::string timestamp; int counter = 0; std::map<unsigned long long, int> mp; thrust::host_vector<unsigned long long> h_vec_1(N); thrust::host_vector<unsigned long long> h_vec_2(N); thrust::device_vector<unsigned long long> d_vec_1(N); thrust::device_vector<unsigned long long> d_vec_2(N); size_t nBytes = N * sizeof(unsigned long long); /* unsigned long long *h_A, *h_B, *hostRef, *gpuRef; h_A = (unsigned long long *)malloc(nBytes); h_B = (unsigned long long *)malloc(nBytes); hostRef = (unsigned long long *)malloc(nBytes); gpuRef = (unsigned long long *)malloc(nBytes); */ // Data = new MyString[N]; const string csv_file = std::string(argv[1]); vector<vector<string>> data; try { Csv objCsv(csv_file); if (!objCsv.getCsv(data)) { cout << "read ERROR" << endl; return 1; } for (int row = 0; row < data.size(); row++) { vector<string> rec = data[row]; timestamp = rec[0]; std::string pair = rec[1]; h_vec_1[row] = stoull(rec[0].c_str()); /* h_vec_1[row] = stoull(string(pair.c_str())); h_vec_2[row] = 1; // atol(pair.c_str()); */ // h_A[row] = rec[0]; } thrust::copy(h_vec_1.begin(), h_vec_1.end(), d_vec_1.begin()); thrust::copy(h_vec_1.begin(), h_vec_1.end(), d_vec_2.begin()); for (int i = 0; i < N; i++) { std::cout << h_vec_1[i] << std::endl; unsigned long long f = h_vec_1[i]; auto ff = [=] __device__ (unsigned long long x) {return x - f ;}; thrust::transform(d_vec_1.begin(),d_vec_1.end(),d_vec_2.begin(),ff); counter = 0; for(int j = 0; j < N; j++) { if (d_vec_2[j]==0) { // std::cout << d_vec_1[i] << "," << f << "," << d_vec_2[j] << std::endl; counter = counter + 1; } } // std::cout << d_vec_1[i] << "," << counter << std::endl; mp[d_vec_1[i]] = counter; } for(auto x : mp) { std::cout << x.first << "," << x.second << "\n"; } } catch (...) { cout << "EXCEPTION!" << endl; return 1; } // delete[] Data; return 0; }
75a89bc184884ba6466841d326b93a532c52e367.cu
#if __linux__ && defined(__INTEL_COMPILER) #define __sync_fetch_and_add(ptr,addend) _InterlockedExchangeAdd(const_cast<void*>(reinterpret_cast<volatile void*>(ptr)), addend) #endif #include <string> #include <cstring> #include <cctype> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <bitset> #include <map> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include "csv.hpp" // typedef std::basic_string<char,std::char_traits<char>,tbb::tbb_allocator<char> > MyString; #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <algorithm> #include <cstdlib> #include "util.h" using namespace std; // static MyString* Data; void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } __global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i]; } int main( int argc, char* argv[] ) { int N = atoi(argv[2]); std::string timestamp; int counter = 0; std::map<unsigned long long, int> mp; thrust::host_vector<unsigned long long> h_vec_1(N); thrust::host_vector<unsigned long long> h_vec_2(N); thrust::device_vector<unsigned long long> d_vec_1(N); thrust::device_vector<unsigned long long> d_vec_2(N); size_t nBytes = N * sizeof(unsigned long long); /* unsigned long long *h_A, *h_B, *hostRef, *gpuRef; h_A = (unsigned long long *)malloc(nBytes); h_B = (unsigned long long *)malloc(nBytes); hostRef = (unsigned long long *)malloc(nBytes); gpuRef = (unsigned long long *)malloc(nBytes); */ // Data = new MyString[N]; const string csv_file = std::string(argv[1]); vector<vector<string>> data; try { Csv objCsv(csv_file); if (!objCsv.getCsv(data)) { cout << "read ERROR" << endl; return 1; } for (int row = 0; row < data.size(); row++) { vector<string> rec = data[row]; timestamp = rec[0]; std::string pair = rec[1]; h_vec_1[row] = stoull(rec[0].c_str()); /* h_vec_1[row] = stoull(string(pair.c_str())); h_vec_2[row] = 1; // atol(pair.c_str()); */ // h_A[row] = rec[0]; } thrust::copy(h_vec_1.begin(), h_vec_1.end(), d_vec_1.begin()); thrust::copy(h_vec_1.begin(), h_vec_1.end(), d_vec_2.begin()); for (int i = 0; i < N; i++) { std::cout << h_vec_1[i] << std::endl; unsigned long long f = h_vec_1[i]; auto ff = [=] __device__ (unsigned long long x) {return x - f ;}; thrust::transform(d_vec_1.begin(),d_vec_1.end(),d_vec_2.begin(),ff); counter = 0; for(int j = 0; j < N; j++) { if (d_vec_2[j]==0) { // std::cout << d_vec_1[i] << "," << f << "," << d_vec_2[j] << std::endl; counter = counter + 1; } } // std::cout << d_vec_1[i] << "," << counter << std::endl; mp[d_vec_1[i]] = counter; } for(auto x : mp) { std::cout << x.first << "," << x.second << "\n"; } } catch (...) { cout << "EXCEPTION!" << endl; return 1; } // delete[] Data; return 0; }
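The duplicate-counting loop above launches one thrust::transform per element and then reads d_vec_2[j] back one element at a time, where every such read is an implicit device-to-host copy. A sketch of the same count using thrust::count, which keeps the comparison entirely on the device; the sample values below stand in for the CSV timestamps:

#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <iostream>
#include <vector>

int main()
{
    // Hypothetical stand-in for the timestamp column read from the CSV above.
    std::vector<unsigned long long> ts = {10, 20, 10, 30, 10};
    thrust::device_vector<unsigned long long> d_ts(ts.begin(), ts.end());

    // For each value, count its duplicates in one device-side call rather
    // than transforming and reading elements back individually.
    for (unsigned long long v : ts) {
        const auto c = thrust::count(d_ts.begin(), d_ts.end(), v);
        std::cout << v << "," << c << "\n";
    }
    return 0;
}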
a89aa18743270d87fcc231016ef7b0a0db09b548.hip
// !!! This is a file automatically generated by hipify!!!
// nvcc compute_capability.cu -o /tmp/compute_capability
#include <stdio.h>

int main(int, char**)
{
    unsigned dev = 0;
    hipDeviceProp_t p;
    hipGetDeviceProperties(&p, dev);
    printf("%d%d\n", p.major, p.minor);
}
a89aa18743270d87fcc231016ef7b0a0db09b548.cu
// nvcc compute_capability.cu -o /tmp/compute_capability
#include <stdio.h>

int main(int, char**)
{
    unsigned dev = 0;
    cudaDeviceProp p;
    cudaGetDeviceProperties(&p, dev);
    printf("%d%d\n", p.major, p.minor);
}
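Both variants above print the two digits of the compute capability (for example 86 on a compute 8.6 device) but ignore the API return codes. A slightly more defensive sketch, assuming the CUDA spelling of the calls:

#include <stdio.h>
#include <cuda_runtime.h>

int main(int, char**)
{
    int count = 0;
    // Bail out cleanly if no device is visible instead of printing garbage.
    if (cudaGetDeviceCount(&count) != cudaSuccess || count == 0) {
        fprintf(stderr, "no CUDA device found\n");
        return 1;
    }
    cudaDeviceProp p;
    if (cudaGetDeviceProperties(&p, 0) != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed\n");
        return 1;
    }
    printf("%d%d\n", p.major, p.minor);  // e.g. "86" for compute capability 8.6
    return 0;
}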
16956a9a6220ab23e9e62ff7399408f501f32632.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion // A common way to represent color images is known as RGBA - the color // is specified by how much Red, Grean and Blue is in it. // The 'A' stands for Alpha and is used for transparency, it will be // ignored in this homework. // Each channel Red, Blue, Green and Alpha is represented by one byte. // Since we are using one byte for each color there are 256 different // possible values for each color. This means we use 4 bytes per pixel. // Greyscale images are represented by a single intensity value per pixel // which is one byte in size. // To convert an image from color to grayscale one simple method is to // set the intensity to the average of the RGB channels. But we will // use a more sophisticated method that takes into account how the eye // perceives color and weights the channels unequally. // The eye responds most strongly to green followed by red and then blue. // The NTSC (National Television System Committee) recommends the following // formula for color to greyscale conversion: // I = .299f * R + .587f * G + .114f * B // Notice the trailing f's on the numbers which indicate that they are // single precision floating point constants and not double precision // constants. // You should fill in the kernel as well as set the block and grid sizes // so that the entire image is processed. #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { // Get corresponding offset for thread. int rowIdx = blockIdx.x; int colIdx = threadIdx.x; uchar4 rgba = rgbaImage[rowIdx * numCols + colIdx]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[rowIdx * numCols + colIdx] = channelSum; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { // There are numRows blocks. const dim3 gridSize(numRows); // Each block has numCols threads. const dim3 blockSize(numCols); hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
16956a9a6220ab23e9e62ff7399408f501f32632.cu
// Homework 1 // Color to Greyscale Conversion // A common way to represent color images is known as RGBA - the color // is specified by how much Red, Grean and Blue is in it. // The 'A' stands for Alpha and is used for transparency, it will be // ignored in this homework. // Each channel Red, Blue, Green and Alpha is represented by one byte. // Since we are using one byte for each color there are 256 different // possible values for each color. This means we use 4 bytes per pixel. // Greyscale images are represented by a single intensity value per pixel // which is one byte in size. // To convert an image from color to grayscale one simple method is to // set the intensity to the average of the RGB channels. But we will // use a more sophisticated method that takes into account how the eye // perceives color and weights the channels unequally. // The eye responds most strongly to green followed by red and then blue. // The NTSC (National Television System Committee) recommends the following // formula for color to greyscale conversion: // I = .299f * R + .587f * G + .114f * B // Notice the trailing f's on the numbers which indicate that they are // single precision floating point constants and not double precision // constants. // You should fill in the kernel as well as set the block and grid sizes // so that the entire image is processed. #include "utils.h" #include <stdio.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { // Get corresponding offset for thread. int rowIdx = blockIdx.x; int colIdx = threadIdx.x; uchar4 rgba = rgbaImage[rowIdx * numCols + colIdx]; float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z; greyImage[rowIdx * numCols + colIdx] = channelSum; } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { // There are numRows blocks. const dim3 gridSize(numRows); // Each block has numCols threads. const dim3 blockSize(numCols); rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
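The launch above maps one block per row and numCols threads per block, so it relies on numCols staying within the 1024-threads-per-block limit. A sketch of a 2-D tiling that removes that assumption (not part of the homework files); the kernel body applies the same NTSC weights described in the comments:

#include <cuda_runtime.h>

// 2-D variant: each thread handles one pixel addressed by (row, col).
__global__ void rgba_to_greyscale_2d(const uchar4* const rgbaImage,
                                     unsigned char* const greyImage,
                                     int numRows, int numCols)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= numCols || row >= numRows) return;   // guard partial blocks
    const uchar4 rgba = rgbaImage[row * numCols + col];
    // I = .299 R + .587 G + .114 B, as in the comments above.
    greyImage[row * numCols + col] =
        static_cast<unsigned char>(.299f * rgba.x + .587f * rgba.y + .114f * rgba.z);
}

void launch_rgba_to_greyscale_2d(const uchar4* d_rgbaImage,
                                 unsigned char* d_greyImage,
                                 int numRows, int numCols)
{
    const dim3 blockSize(16, 16);                   // 256 threads per block
    const dim3 gridSize((numCols + blockSize.x - 1) / blockSize.x,
                        (numRows + blockSize.y - 1) / blockSize.y);
    rgba_to_greyscale_2d<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage,
                                                  numRows, numCols);
    cudaDeviceSynchronize();
}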
e5ca16d163552a6dfe33f488b66c48516a538945.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/operators/math/unary.hpp" #include "lbann/base.hpp" #include "lbann/utils/gpu/helpers.hpp" #include "common_hip.cuh" namespace lbann { namespace { // ========================================================= // Operator objects for entry-wise unary layers // ========================================================= // Note: Unary operator corresponds to forward prop step // (\f$ y = f(x) \f$) and binary operator corresponds to // back prop step // (\f$ \frac{dL}{dx} = \frac{dL}{dy} f'(x) \f$). /** Logical not operator. */ template <typename DataT> struct LogicalNotOpImpl { inline __device__ DataT operator()(DataT const& x) const { auto const& b = x != DataT(0.0) && !gpu_lib::isnan(x); return !b ? DataT(1.0) : DataT(0.0); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Negative operator. */ template <typename DataT> struct NegativeOpImpl { inline __device__ DataT operator()(DataT const& x) const { return -x; } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& dy) const { return -dy; } }; /** Sign operator. */ template <typename DataT> struct SignOpImpl { inline __device__ DataT operator()(DataT const& x) const { DataT const zero = 0.; DataT const one = 1.; if (x > zero) { return one; } else if (x < zero) { return -one; } else { return zero; } } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Round operator. */ template <typename DataT> struct RoundOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::round(x); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Ceiling operator. */ template <typename DataT> struct CeilOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::ceil(x); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Floor operator. */ template <typename DataT> struct FloorOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::floor(x); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Reciprocal operator. 
*/ template <typename DataT> struct ReciprocalOpImpl { inline __device__ DataT operator()(DataT const& x) const { return DataT(1.) / x; } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { if (dy == DataT(0.0)) { return DataT(0.0); } else { return -dy / (x * x); } } }; /** Square operator. */ template <typename DataT> struct SquareOpImpl { inline __device__ DataT operator()(DataT const& x) const { return x * x; } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(2.) * x * dy; } }; /** Square root operator. */ template <typename DataT> struct SqrtOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sqrt(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(2.) * gpu_lib::sqrt(x)); } }; /** Reciprocal square root operator. */ template <typename DataT> struct RsqrtOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::rsqrt(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& s = gpu_lib::sqrt(x); return -dy / (DataT(2.) * x * s); } }; /** Safe reciprocal operator. * If a standard reciprocal produces an infinity or NaN, zero is * output instead. */ template <typename DataT> struct SafeReciprocalOpImpl { inline __device__ DataT operator()(DataT const& x) const { auto const& y = DataT(1.) / x; if (gpu_lib::isfinite(y)) { return y; } else { return DataT(0.0); } } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& y = DataT(1.) / x; if (gpu_lib::isfinite(y)) { return -dy * y * y; } else { return DataT(0.0); } } }; /** Exponential operator. */ template <typename DataT> struct ExpOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::exp(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::exp(x); } }; /** Exponential minus one operator. */ template <typename DataT> struct Expm1OpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::expm1(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::exp(x); } }; /** Natural logarithm operator. */ template <typename DataT> struct LogOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::log(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / x; } }; /** Natural logarithm one plus operator. */ template <typename DataT> struct Log1pOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::log1p(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (x + DataT(1.0)); } }; /** Cosine operator. */ template <typename DataT> struct CosOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::cos(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy * gpu_lib::sin(x); } }; /** Sine operator. */ template <typename DataT> struct SinOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sin(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::cos(x); } }; /** Tangent operator. 
*/ template <typename DataT> struct TanOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::tan(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& c = gpu_lib::cos(x); return dy / (c * c); } }; /** Arccosine operator. */ template <typename DataT> struct AcosOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::acos(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy / gpu_lib::sqrt(DataT(1.0) - x * x); } }; /** Arcsine operator. */ template <typename DataT> struct AsinOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::asin(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / gpu_lib::sqrt(DataT(1.0) - x * x); } }; /** Arctangent operator. */ template <typename DataT> struct AtanOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::atan(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(1.0) + x * x); } }; /** Hyperbolic cosine operator. */ template <typename DataT> struct CoshOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::cosh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::sinh(x); } }; /** Hyperbolic sine operator. */ template <typename DataT> struct SinhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sinh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::cosh(x); } }; /** Hyperbolic tangent operator. */ template <typename DataT> struct TanhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::tanh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& c = gpu_lib::cosh(x); return dy / (c * c); } }; /** Hyperbolic arccosine operator. */ template <typename DataT> struct AcoshOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::acosh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy / (gpu_lib::sqrt(x - DataT(1.0)) * gpu_lib::sqrt(x + DataT(1.0))); } }; /** Hyperbolic arcsine operator. */ template <typename DataT> struct AsinhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::asinh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / gpu_lib::sqrt(DataT(1.0) + x * x); } }; /** Hyperbolic arctangent operator. */ template <typename DataT> struct AtanhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::atanh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(1.0) - x * x); } }; /** Error function operator. */ template <typename DataT> struct ErfOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::erf(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { DataT const two_rsqrt_pi(1.12837916709551257389); return dy * two_rsqrt_pi * gpu_lib::exp(-x * x); } }; /** Inverse error function operator. 
*/ template <typename DataT> struct ErfInvOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::erfinv(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { DataT const half_sqrt_pi(0.88622692545275801364); auto const& y = gpu_lib::erfinv(x); return dy * half_sqrt_pi * gpu_lib::exp(y * y); } }; } // namespace // Template instantiation #define DEFINE_COMPUTE_OPS(OP_NAME) \ template <typename DataT, El::Device Device> \ void OP_NAME##Operator<DataT, Device>::fp_compute_local( \ std::vector<ConstLocalInputTensorType> inputs, \ std::vector<LocalOutputTensorType> outputs) const \ { \ LBANN_ASSERT_DEBUG(inputs.size() == 1); \ LBANN_ASSERT_DEBUG(outputs.size() == 1); \ auto const& input = inputs.front().data(); \ auto& output = outputs.front().data(); \ El::EntrywiseMap(input, output, OP_NAME##OpImpl<DataT>{}); \ } \ template <typename DataT, El::Device Device> \ void OP_NAME##Operator<DataT, Device>::bp_compute_local( \ std::vector<ConstLocalInputTensorType> inputs, \ std::vector<ConstLocalOutputTensorType> grads_wrt_outputs, \ std::vector<LocalInputTensorType> grads_wrt_inputs) const \ { \ LBANN_ASSERT_DEBUG(inputs.size() == 1); \ LBANN_ASSERT_DEBUG(grads_wrt_outputs.size() == 1); \ LBANN_ASSERT_DEBUG(grads_wrt_inputs.size() == 1); \ auto const& input = inputs.front().data(); \ auto const& grad_wrt_output = grads_wrt_outputs.front().data(); \ auto& grad_wrt_input = grads_wrt_inputs.front().data(); \ internal::EntrywiseZipInto(input, \ grad_wrt_output, \ grad_wrt_input, \ OP_NAME##OpImpl<DataT>{}); \ } DEFINE_COMPUTE_OPS(Acos) DEFINE_COMPUTE_OPS(Acosh) DEFINE_COMPUTE_OPS(Asin) DEFINE_COMPUTE_OPS(Asinh) DEFINE_COMPUTE_OPS(Atan) DEFINE_COMPUTE_OPS(Atanh) DEFINE_COMPUTE_OPS(Ceil) DEFINE_COMPUTE_OPS(Cos) DEFINE_COMPUTE_OPS(Cosh) DEFINE_COMPUTE_OPS(Erf) DEFINE_COMPUTE_OPS(ErfInv) DEFINE_COMPUTE_OPS(Exp) DEFINE_COMPUTE_OPS(Expm1) DEFINE_COMPUTE_OPS(Floor) DEFINE_COMPUTE_OPS(Log) DEFINE_COMPUTE_OPS(Log1p) DEFINE_COMPUTE_OPS(LogicalNot) DEFINE_COMPUTE_OPS(Negative) DEFINE_COMPUTE_OPS(Reciprocal) DEFINE_COMPUTE_OPS(Round) DEFINE_COMPUTE_OPS(Rsqrt) DEFINE_COMPUTE_OPS(SafeReciprocal) DEFINE_COMPUTE_OPS(Sign) DEFINE_COMPUTE_OPS(Sin) DEFINE_COMPUTE_OPS(Sinh) DEFINE_COMPUTE_OPS(Sqrt) DEFINE_COMPUTE_OPS(Square) DEFINE_COMPUTE_OPS(Tan) DEFINE_COMPUTE_OPS(Tanh) #define PROTO(T) \ template class AcosOperator<T, El::Device::GPU>; \ template class AcoshOperator<T, El::Device::GPU>; \ template class AsinOperator<T, El::Device::GPU>; \ template class AsinhOperator<T, El::Device::GPU>; \ template class AtanOperator<T, El::Device::GPU>; \ template class AtanhOperator<T, El::Device::GPU>; \ template class CeilOperator<T, El::Device::GPU>; \ template class CosOperator<T, El::Device::GPU>; \ template class CoshOperator<T, El::Device::GPU>; \ template class ErfInvOperator<T, El::Device::GPU>; \ template class ErfOperator<T, El::Device::GPU>; \ template class ExpOperator<T, El::Device::GPU>; \ template class Expm1Operator<T, El::Device::GPU>; \ template class FloorOperator<T, El::Device::GPU>; \ template class Log1pOperator<T, El::Device::GPU>; \ template class LogOperator<T, El::Device::GPU>; \ template class LogicalNotOperator<T, El::Device::GPU>; \ template class NegativeOperator<T, El::Device::GPU>; \ template class ReciprocalOperator<T, El::Device::GPU>; \ template class RoundOperator<T, El::Device::GPU>; \ template class RsqrtOperator<T, El::Device::GPU>; \ template class SafeReciprocalOperator<T, El::Device::GPU>; \ template class SignOperator<T, 
El::Device::GPU>; \ template class SinOperator<T, El::Device::GPU>; \ template class SinhOperator<T, El::Device::GPU>; \ template class SqrtOperator<T, El::Device::GPU>; \ template class SquareOperator<T, El::Device::GPU>; \ template class TanOperator<T, El::Device::GPU>; \ template class TanhOperator<T, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
e5ca16d163552a6dfe33f488b66c48516a538945.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #include "lbann/operators/math/unary.hpp" #include "lbann/base.hpp" #include "lbann/utils/gpu/helpers.hpp" #include "common.cuh" namespace lbann { namespace { // ========================================================= // Operator objects for entry-wise unary layers // ========================================================= // Note: Unary operator corresponds to forward prop step // (\f$ y = f(x) \f$) and binary operator corresponds to // back prop step // (\f$ \frac{dL}{dx} = \frac{dL}{dy} f'(x) \f$). /** Logical not operator. */ template <typename DataT> struct LogicalNotOpImpl { inline __device__ DataT operator()(DataT const& x) const { auto const& b = x != DataT(0.0) && !gpu_lib::isnan(x); return !b ? DataT(1.0) : DataT(0.0); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Negative operator. */ template <typename DataT> struct NegativeOpImpl { inline __device__ DataT operator()(DataT const& x) const { return -x; } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& dy) const { return -dy; } }; /** Sign operator. */ template <typename DataT> struct SignOpImpl { inline __device__ DataT operator()(DataT const& x) const { DataT const zero = 0.; DataT const one = 1.; if (x > zero) { return one; } else if (x < zero) { return -one; } else { return zero; } } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Round operator. */ template <typename DataT> struct RoundOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::round(x); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Ceiling operator. */ template <typename DataT> struct CeilOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::ceil(x); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Floor operator. */ template <typename DataT> struct FloorOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::floor(x); } inline __device__ DataT operator()(DataT const& /*x*/, DataT const& /*dy*/) const { return DataT(0.0); } }; /** Reciprocal operator. 
*/ template <typename DataT> struct ReciprocalOpImpl { inline __device__ DataT operator()(DataT const& x) const { return DataT(1.) / x; } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { if (dy == DataT(0.0)) { return DataT(0.0); } else { return -dy / (x * x); } } }; /** Square operator. */ template <typename DataT> struct SquareOpImpl { inline __device__ DataT operator()(DataT const& x) const { return x * x; } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return DataT(2.) * x * dy; } }; /** Square root operator. */ template <typename DataT> struct SqrtOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sqrt(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(2.) * gpu_lib::sqrt(x)); } }; /** Reciprocal square root operator. */ template <typename DataT> struct RsqrtOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::rsqrt(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& s = gpu_lib::sqrt(x); return -dy / (DataT(2.) * x * s); } }; /** Safe reciprocal operator. * If a standard reciprocal produces an infinity or NaN, zero is * output instead. */ template <typename DataT> struct SafeReciprocalOpImpl { inline __device__ DataT operator()(DataT const& x) const { auto const& y = DataT(1.) / x; if (gpu_lib::isfinite(y)) { return y; } else { return DataT(0.0); } } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& y = DataT(1.) / x; if (gpu_lib::isfinite(y)) { return -dy * y * y; } else { return DataT(0.0); } } }; /** Exponential operator. */ template <typename DataT> struct ExpOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::exp(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::exp(x); } }; /** Exponential minus one operator. */ template <typename DataT> struct Expm1OpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::expm1(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::exp(x); } }; /** Natural logarithm operator. */ template <typename DataT> struct LogOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::log(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / x; } }; /** Natural logarithm one plus operator. */ template <typename DataT> struct Log1pOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::log1p(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (x + DataT(1.0)); } }; /** Cosine operator. */ template <typename DataT> struct CosOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::cos(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy * gpu_lib::sin(x); } }; /** Sine operator. */ template <typename DataT> struct SinOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sin(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::cos(x); } }; /** Tangent operator. 
*/ template <typename DataT> struct TanOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::tan(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& c = gpu_lib::cos(x); return dy / (c * c); } }; /** Arccosine operator. */ template <typename DataT> struct AcosOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::acos(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy / gpu_lib::sqrt(DataT(1.0) - x * x); } }; /** Arcsine operator. */ template <typename DataT> struct AsinOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::asin(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / gpu_lib::sqrt(DataT(1.0) - x * x); } }; /** Arctangent operator. */ template <typename DataT> struct AtanOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::atan(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(1.0) + x * x); } }; /** Hyperbolic cosine operator. */ template <typename DataT> struct CoshOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::cosh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::sinh(x); } }; /** Hyperbolic sine operator. */ template <typename DataT> struct SinhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::sinh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy * gpu_lib::cosh(x); } }; /** Hyperbolic tangent operator. */ template <typename DataT> struct TanhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::tanh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { auto const& c = gpu_lib::cosh(x); return dy / (c * c); } }; /** Hyperbolic arccosine operator. */ template <typename DataT> struct AcoshOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::acosh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return -dy / (gpu_lib::sqrt(x - DataT(1.0)) * gpu_lib::sqrt(x + DataT(1.0))); } }; /** Hyperbolic arcsine operator. */ template <typename DataT> struct AsinhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::asinh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / gpu_lib::sqrt(DataT(1.0) + x * x); } }; /** Hyperbolic arctangent operator. */ template <typename DataT> struct AtanhOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::atanh(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { return dy / (DataT(1.0) - x * x); } }; /** Error function operator. */ template <typename DataT> struct ErfOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::erf(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { DataT const two_rsqrt_pi(1.12837916709551257389); return dy * two_rsqrt_pi * gpu_lib::exp(-x * x); } }; /** Inverse error function operator. 
*/ template <typename DataT> struct ErfInvOpImpl { inline __device__ DataT operator()(DataT const& x) const { return gpu_lib::erfinv(x); } inline __device__ DataT operator()(DataT const& x, DataT const& dy) const { DataT const half_sqrt_pi(0.88622692545275801364); auto const& y = gpu_lib::erfinv(x); return dy * half_sqrt_pi * gpu_lib::exp(y * y); } }; } // namespace // Template instantiation #define DEFINE_COMPUTE_OPS(OP_NAME) \ template <typename DataT, El::Device Device> \ void OP_NAME##Operator<DataT, Device>::fp_compute_local( \ std::vector<ConstLocalInputTensorType> inputs, \ std::vector<LocalOutputTensorType> outputs) const \ { \ LBANN_ASSERT_DEBUG(inputs.size() == 1); \ LBANN_ASSERT_DEBUG(outputs.size() == 1); \ auto const& input = inputs.front().data(); \ auto& output = outputs.front().data(); \ El::EntrywiseMap(input, output, OP_NAME##OpImpl<DataT>{}); \ } \ template <typename DataT, El::Device Device> \ void OP_NAME##Operator<DataT, Device>::bp_compute_local( \ std::vector<ConstLocalInputTensorType> inputs, \ std::vector<ConstLocalOutputTensorType> grads_wrt_outputs, \ std::vector<LocalInputTensorType> grads_wrt_inputs) const \ { \ LBANN_ASSERT_DEBUG(inputs.size() == 1); \ LBANN_ASSERT_DEBUG(grads_wrt_outputs.size() == 1); \ LBANN_ASSERT_DEBUG(grads_wrt_inputs.size() == 1); \ auto const& input = inputs.front().data(); \ auto const& grad_wrt_output = grads_wrt_outputs.front().data(); \ auto& grad_wrt_input = grads_wrt_inputs.front().data(); \ internal::EntrywiseZipInto(input, \ grad_wrt_output, \ grad_wrt_input, \ OP_NAME##OpImpl<DataT>{}); \ } DEFINE_COMPUTE_OPS(Acos) DEFINE_COMPUTE_OPS(Acosh) DEFINE_COMPUTE_OPS(Asin) DEFINE_COMPUTE_OPS(Asinh) DEFINE_COMPUTE_OPS(Atan) DEFINE_COMPUTE_OPS(Atanh) DEFINE_COMPUTE_OPS(Ceil) DEFINE_COMPUTE_OPS(Cos) DEFINE_COMPUTE_OPS(Cosh) DEFINE_COMPUTE_OPS(Erf) DEFINE_COMPUTE_OPS(ErfInv) DEFINE_COMPUTE_OPS(Exp) DEFINE_COMPUTE_OPS(Expm1) DEFINE_COMPUTE_OPS(Floor) DEFINE_COMPUTE_OPS(Log) DEFINE_COMPUTE_OPS(Log1p) DEFINE_COMPUTE_OPS(LogicalNot) DEFINE_COMPUTE_OPS(Negative) DEFINE_COMPUTE_OPS(Reciprocal) DEFINE_COMPUTE_OPS(Round) DEFINE_COMPUTE_OPS(Rsqrt) DEFINE_COMPUTE_OPS(SafeReciprocal) DEFINE_COMPUTE_OPS(Sign) DEFINE_COMPUTE_OPS(Sin) DEFINE_COMPUTE_OPS(Sinh) DEFINE_COMPUTE_OPS(Sqrt) DEFINE_COMPUTE_OPS(Square) DEFINE_COMPUTE_OPS(Tan) DEFINE_COMPUTE_OPS(Tanh) #define PROTO(T) \ template class AcosOperator<T, El::Device::GPU>; \ template class AcoshOperator<T, El::Device::GPU>; \ template class AsinOperator<T, El::Device::GPU>; \ template class AsinhOperator<T, El::Device::GPU>; \ template class AtanOperator<T, El::Device::GPU>; \ template class AtanhOperator<T, El::Device::GPU>; \ template class CeilOperator<T, El::Device::GPU>; \ template class CosOperator<T, El::Device::GPU>; \ template class CoshOperator<T, El::Device::GPU>; \ template class ErfInvOperator<T, El::Device::GPU>; \ template class ErfOperator<T, El::Device::GPU>; \ template class ExpOperator<T, El::Device::GPU>; \ template class Expm1Operator<T, El::Device::GPU>; \ template class FloorOperator<T, El::Device::GPU>; \ template class Log1pOperator<T, El::Device::GPU>; \ template class LogOperator<T, El::Device::GPU>; \ template class LogicalNotOperator<T, El::Device::GPU>; \ template class NegativeOperator<T, El::Device::GPU>; \ template class ReciprocalOperator<T, El::Device::GPU>; \ template class RoundOperator<T, El::Device::GPU>; \ template class RsqrtOperator<T, El::Device::GPU>; \ template class SafeReciprocalOperator<T, El::Device::GPU>; \ template class SignOperator<T, 
El::Device::GPU>; \ template class SinOperator<T, El::Device::GPU>; \ template class SinhOperator<T, El::Device::GPU>; \ template class SqrtOperator<T, El::Device::GPU>; \ template class SquareOperator<T, El::Device::GPU>; \ template class TanOperator<T, El::Device::GPU>; \ template class TanhOperator<T, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
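Each *OpImpl above pairs the forward map y = f(x) with the chain-rule factor, so that bp_compute_local produces dL/dx = dL/dy * f'(x). As a quick host-side check of one of those pairs: the Tanh backward form dy / cosh(x)^2 agrees with the equivalent dy * (1 - tanh(x)^2). The struct below is a hypothetical double-precision mirror of that operator pair, not part of LBANN:

#include <cmath>
#include <cstdio>

// Forward y = tanh(x); backward dx = dy / cosh(x)^2, matching TanhOpImpl above.
struct TanhRef {
    double operator()(double x) const { return std::tanh(x); }
    double operator()(double x, double dy) const {
        const double c = std::cosh(x);
        return dy / (c * c);
    }
};

int main()
{
    TanhRef op;
    const double x = 0.7, dy = 1.0;
    const double y = op(x);
    // The two equivalent forms of the derivative should agree closely.
    printf("dy/cosh^2 = %.12f\n", op(x, dy));
    printf("1 - y*y   = %.12f\n", dy * (1.0 - y * y));
    return 0;
}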
4a7bb888ce8f695170ccc6ad8dc0634a0f6891f4.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cblas.h> #include <iostream> #include "tiger/utils/math_function.hpp" template <typename Dtype> __global__ void show_data(Dtype* data, int N){ int i = threadIdx.x; if(i < N){ printf("%f\n", data[i]); } } template <typename Dtype> void test_tiger_gpu_gemm_notrans(){ // create host memory space and initialize int M = 5; int K = 4; int N = 3; Dtype* A = new Dtype[M * K]; Dtype* B = new Dtype[K * N]; Dtype* C = new Dtype[M * N]; for(int i = 0; i < M * K; i++){ A[i] = i + 1; } for(int i = 0; i < K * N; i++){ B[i] = i + 1; } for(int i = 0; i < M * N; i++){ C[i] = 0; } // create device memory space and initialize Dtype* d_A; Dtype* d_B; Dtype* d_C; hipMalloc((Dtype**)&d_A, M * K * sizeof(Dtype)); hipMalloc((Dtype**)&d_B, K * N * sizeof(Dtype)); hipMalloc((Dtype**)&d_C, M * N * sizeof(Dtype)); hipMemcpy(d_A, A, M * K * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_B, B, K * N * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_C, C, M * N * sizeof(Dtype), hipMemcpyDefault); tiger::tiger_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M, N, K, Dtype(1), d_A, d_B, Dtype(1), d_C); // transfer data from device to host hipMemcpy(C, d_C, M * N * sizeof(Dtype), hipMemcpyDefault); for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ std::cout << C[i * N + j] << " "; } std::cout << std::endl; } std::cout << std::endl; hipFree(d_A); hipFree(d_B); hipFree(d_C); free(A); free(B); free(C); } template <typename Dtype> void test_tiger_gpu_gemm_trans(){ int M = 3; int N = 2; Dtype* A = new Dtype[M * N]; Dtype* B = new Dtype[M * N]; Dtype* C = new Dtype[M * M]; for(int i = 0; i < M * M; i++){ C[i] = 0; } for(int i = 0; i < M * N; i++){ A[i] = i; B[i] = i; } // crate device memory space and initialize Dtype* d_A; Dtype* d_B; Dtype* d_C; hipMalloc((Dtype**)&d_A, M * N * sizeof(Dtype)); hipMalloc((Dtype**)&d_B, M * N * sizeof(Dtype)); hipMalloc((Dtype**)&d_C, M * M * sizeof(Dtype)); hipMemcpy(d_A, A, M * N * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_B, B, M * N * sizeof(Dtype), hipMemcpyDefault); // d_C hipMemcpy(d_C, C, M * M * sizeof(Dtype), hipMemcpyDefault); tiger::tiger_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 3, 3, 2, Dtype(1), d_A, d_B, Dtype(1), d_C); //transfer data from device to host hipMemcpy(C, d_C, M * M * sizeof(Dtype), hipMemcpyDefault); for(int i = 0; i < M; i++){ for(int j = 0; j < M; j++){ std::cout << C[i * M + j] << " "; } std::cout << std::endl; } std::cout << std::endl; // free host memroy sapce and device memroy space free(A); free(B); free(C); hipFree(d_A); hipFree(d_B); hipFree(d_C); } template <typename Dtype> void test_tiger_gpu_gemv_notrans(){ int M = 3; int N = 2; Dtype* A = new Dtype[M * N]; Dtype* x = new Dtype[N]; Dtype* y = new Dtype[M]; for(int i = 0; i < M * N; i++){ A[i] = i; } for(int i = 0; i < N; i++){ x[i] = i; } for(int i = 0; i < M; i++){ y[i] = 0; } //create device memory space and initialize Dtype* d_A; Dtype* d_x; Dtype* d_y; hipMalloc((Dtype**)&d_A, M * N * sizeof(Dtype)); hipMalloc((Dtype**)&d_x, N * sizeof(Dtype)); hipMalloc((Dtype**)&d_y, M * sizeof(Dtype)); hipMemcpy(d_A, A, M * N * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_x, x, N * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_y, y, M * sizeof(Dtype), hipMemcpyDefault); tiger::tiger_gpu_gemv<Dtype>(CblasNoTrans, M, N, Dtype(1), d_A, d_x, Dtype(1), d_y); //transfer data from device to host hipMemcpy(y, d_y, M * sizeof(Dtype), hipMemcpyDefault); for(int i = 0; i < M; i++){ std::cout << 
y[i] << " "; } std::cout << std::endl; free(A); free(x); free(y); hipFree(d_A); hipFree(d_x); hipFree(d_y); } template <typename Dtype> void test_tiger_gpu_gemv_trans(){ int M = 3; int N = 2; Dtype* A = new Dtype[M * N]; Dtype* x = new Dtype[M]; Dtype* y = new Dtype[N]; for(int i = 0; i < M * N; i++){ A[i] = i; } for(int i = 0; i < M; i++){ x[i] = i; } for(int i = 0; i < N; i++){ y[i] = 0; } //create device memory space and initialize Dtype* d_A; Dtype* d_x; Dtype* d_y; hipMalloc((Dtype**)&d_A, M * N * sizeof(Dtype)); hipMalloc((Dtype**)&d_x, M * sizeof(Dtype)); hipMalloc((Dtype**)&d_y, N * sizeof(Dtype)); hipMemcpy(d_A, A, M * N * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_x, x, M * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_y, y, N * sizeof(Dtype), hipMemcpyDefault); tiger::tiger_gpu_gemv<Dtype>(CblasTrans, M, N, Dtype(1), d_A, d_x, Dtype(1), d_y); //transfer data from device to host hipMemcpy(y, d_y, N * sizeof(Dtype), hipMemcpyDefault); for(int i = 0; i < N; i++){ std::cout << y[i] << " "; } std::cout << std::endl; free(A); free(x); free(y); hipFree(d_A); hipFree(d_x); hipFree(d_y); } template <typename Dtype> void test_tiger_gpu_axpy(){ int N = 10; Dtype alpha = 2; Dtype* X = new Dtype[N]; Dtype* Y = new Dtype[N]; for(int i = 0; i < N; i++){ X[i] = i; Y[i] = 0; } Dtype* d_X; Dtype* d_Y; hipMalloc((Dtype**)&d_X, N * sizeof(Dtype)); hipMalloc((Dtype**)&d_Y, N * sizeof(Dtype)); hipMemcpy(d_X, X, N * sizeof(Dtype), hipMemcpyDefault); hipMemcpy(d_Y, Y, N * sizeof(Dtype), hipMemcpyDefault); tiger::tiger_gpu_axpy(N, alpha, d_X, d_Y); hipMemcpy(Y, d_Y, N * sizeof(Dtype), hipMemcpyDefault); for(int i = 0; i < N; i++){ std::cout << Y[i] << std::endl; } free(X); free(Y); hipFree(d_X); hipFree(d_Y); } template <typename Dtype> void test_dot(){ std::vector<int> shape_vec{2,2,2,2}; } int main(){ std::cout << "float no transpose no transpose" << std::endl; test_tiger_gpu_gemm_notrans<float>(); std::cout << "double no transpose no transpose " << std::endl; test_tiger_gpu_gemm_notrans<double>(); std::cout << "float no transpose transpose " << std::endl; test_tiger_gpu_gemm_trans<float>(); std::cout << "double no transpose transpose " << std::endl; test_tiger_gpu_gemm_trans<double>(); std::cout << "test sgemm function " << std::endl; std::cout << "float no transpose" << std::endl; test_tiger_gpu_gemv_notrans<float>(); std::cout << "double no transpose " << std::endl; test_tiger_gpu_gemv_notrans<double>(); std::cout << "float transpose " << std::endl; test_tiger_gpu_gemv_trans<float>(); std::cout << "double transpose " << std::endl; test_tiger_gpu_gemv_trans<double>(); std::cout << "test axpy function " << std::endl; test_tiger_gpu_axpy<float>(); test_tiger_gpu_axpy<double>(); test_dot<float>(); }
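The hipified test above calls hipMalloc/hipMemcpy without checking return codes and releases new[]-allocated host buffers with free(). A minimal sketch of a checked round trip using the same HIP runtime APIs (HIP_CHECK and checked_roundtrip are illustrative names, not part of the tiger sources):

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Abort with a readable message if a HIP runtime call fails.
#define HIP_CHECK(call)                                                \
  do {                                                                 \
    hipError_t err_ = (call);                                          \
    if (err_ != hipSuccess) {                                          \
      fprintf(stderr, "HIP error %s at %s:%d\n",                       \
              hipGetErrorString(err_), __FILE__, __LINE__);            \
      exit(EXIT_FAILURE);                                              \
    }                                                                  \
  } while (0)

template <typename Dtype>
void checked_roundtrip(int n) {
  Dtype* host = new Dtype[n];  // allocated with new[] ...
  for (int i = 0; i < n; ++i) host[i] = static_cast<Dtype>(i);
  Dtype* dev = nullptr;
  HIP_CHECK(hipMalloc(reinterpret_cast<void**>(&dev), n * sizeof(Dtype)));
  HIP_CHECK(hipMemcpy(dev, host, n * sizeof(Dtype), hipMemcpyDefault));
  HIP_CHECK(hipMemcpy(host, dev, n * sizeof(Dtype), hipMemcpyDefault));
  HIP_CHECK(hipFree(dev));
  delete[] host;  // ... so released with delete[], not free()
}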
4a7bb888ce8f695170ccc6ad8dc0634a0f6891f4.cu
#include <cuda_runtime.h> #include <cuda.h> #include <cblas.h> #include <iostream> #include "tiger/utils/math_function.hpp" template <typename Dtype> __global__ void show_data(Dtype* data, int N){ int i = threadIdx.x; if(i < N){ printf("%f\n", data[i]); } } template <typename Dtype> void test_tiger_gpu_gemm_notrans(){ // create host memory space and initialize int M = 5; int K = 4; int N = 3; Dtype* A = new Dtype[M * K]; Dtype* B = new Dtype[K * N]; Dtype* C = new Dtype[M * N]; for(int i = 0; i < M * K; i++){ A[i] = i + 1; } for(int i = 0; i < K * N; i++){ B[i] = i + 1; } for(int i = 0; i < M * N; i++){ C[i] = 0; } // create device memory space and initialize Dtype* d_A; Dtype* d_B; Dtype* d_C; cudaMalloc((Dtype**)&d_A, M * K * sizeof(Dtype)); cudaMalloc((Dtype**)&d_B, K * N * sizeof(Dtype)); cudaMalloc((Dtype**)&d_C, M * N * sizeof(Dtype)); cudaMemcpy(d_A, A, M * K * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_B, B, K * N * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_C, C, M * N * sizeof(Dtype), cudaMemcpyDefault); tiger::tiger_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M, N, K, Dtype(1), d_A, d_B, Dtype(1), d_C); // transfer data from device to host cudaMemcpy(C, d_C, M * N * sizeof(Dtype), cudaMemcpyDefault); for(int i = 0; i < M; i++){ for(int j = 0; j < N; j++){ std::cout << C[i * N + j] << " "; } std::cout << std::endl; } std::cout << std::endl; cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); free(A); free(B); free(C); } template <typename Dtype> void test_tiger_gpu_gemm_trans(){ int M = 3; int N = 2; Dtype* A = new Dtype[M * N]; Dtype* B = new Dtype[M * N]; Dtype* C = new Dtype[M * M]; for(int i = 0; i < M * M; i++){ C[i] = 0; } for(int i = 0; i < M * N; i++){ A[i] = i; B[i] = i; } // crate device memory space and initialize Dtype* d_A; Dtype* d_B; Dtype* d_C; cudaMalloc((Dtype**)&d_A, M * N * sizeof(Dtype)); cudaMalloc((Dtype**)&d_B, M * N * sizeof(Dtype)); cudaMalloc((Dtype**)&d_C, M * M * sizeof(Dtype)); cudaMemcpy(d_A, A, M * N * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_B, B, M * N * sizeof(Dtype), cudaMemcpyDefault); // 记得要给d_C矩阵进行初始化 cudaMemcpy(d_C, C, M * M * sizeof(Dtype), cudaMemcpyDefault); tiger::tiger_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans, 3, 3, 2, Dtype(1), d_A, d_B, Dtype(1), d_C); //transfer data from device to host cudaMemcpy(C, d_C, M * M * sizeof(Dtype), cudaMemcpyDefault); for(int i = 0; i < M; i++){ for(int j = 0; j < M; j++){ std::cout << C[i * M + j] << " "; } std::cout << std::endl; } std::cout << std::endl; // free host memroy sapce and device memroy space free(A); free(B); free(C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } template <typename Dtype> void test_tiger_gpu_gemv_notrans(){ int M = 3; int N = 2; Dtype* A = new Dtype[M * N]; Dtype* x = new Dtype[N]; Dtype* y = new Dtype[M]; for(int i = 0; i < M * N; i++){ A[i] = i; } for(int i = 0; i < N; i++){ x[i] = i; } for(int i = 0; i < M; i++){ y[i] = 0; } //create device memory space and initialize Dtype* d_A; Dtype* d_x; Dtype* d_y; cudaMalloc((Dtype**)&d_A, M * N * sizeof(Dtype)); cudaMalloc((Dtype**)&d_x, N * sizeof(Dtype)); cudaMalloc((Dtype**)&d_y, M * sizeof(Dtype)); cudaMemcpy(d_A, A, M * N * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_x, x, N * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_y, y, M * sizeof(Dtype), cudaMemcpyDefault); tiger::tiger_gpu_gemv<Dtype>(CblasNoTrans, M, N, Dtype(1), d_A, d_x, Dtype(1), d_y); //transfer data from device to host cudaMemcpy(y, d_y, M * sizeof(Dtype), cudaMemcpyDefault); for(int i = 0; i < M; i++){ std::cout << y[i] << " "; } std::cout 
<< std::endl; free(A); free(x); free(y); cudaFree(d_A); cudaFree(d_x); cudaFree(d_y); } template <typename Dtype> void test_tiger_gpu_gemv_trans(){ int M = 3; int N = 2; Dtype* A = new Dtype[M * N]; Dtype* x = new Dtype[M]; Dtype* y = new Dtype[N]; for(int i = 0; i < M * N; i++){ A[i] = i; } for(int i = 0; i < M; i++){ x[i] = i; } for(int i = 0; i < N; i++){ y[i] = 0; } //create device memory space and initialize Dtype* d_A; Dtype* d_x; Dtype* d_y; cudaMalloc((Dtype**)&d_A, M * N * sizeof(Dtype)); cudaMalloc((Dtype**)&d_x, M * sizeof(Dtype)); cudaMalloc((Dtype**)&d_y, N * sizeof(Dtype)); cudaMemcpy(d_A, A, M * N * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_x, x, M * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_y, y, N * sizeof(Dtype), cudaMemcpyDefault); tiger::tiger_gpu_gemv<Dtype>(CblasTrans, M, N, Dtype(1), d_A, d_x, Dtype(1), d_y); //transfer data from device to host cudaMemcpy(y, d_y, N * sizeof(Dtype), cudaMemcpyDefault); for(int i = 0; i < N; i++){ std::cout << y[i] << " "; } std::cout << std::endl; free(A); free(x); free(y); cudaFree(d_A); cudaFree(d_x); cudaFree(d_y); } template <typename Dtype> void test_tiger_gpu_axpy(){ int N = 10; Dtype alpha = 2; Dtype* X = new Dtype[N]; Dtype* Y = new Dtype[N]; for(int i = 0; i < N; i++){ X[i] = i; Y[i] = 0; } Dtype* d_X; Dtype* d_Y; cudaMalloc((Dtype**)&d_X, N * sizeof(Dtype)); cudaMalloc((Dtype**)&d_Y, N * sizeof(Dtype)); cudaMemcpy(d_X, X, N * sizeof(Dtype), cudaMemcpyDefault); cudaMemcpy(d_Y, Y, N * sizeof(Dtype), cudaMemcpyDefault); tiger::tiger_gpu_axpy(N, alpha, d_X, d_Y); cudaMemcpy(Y, d_Y, N * sizeof(Dtype), cudaMemcpyDefault); for(int i = 0; i < N; i++){ std::cout << Y[i] << std::endl; } free(X); free(Y); cudaFree(d_X); cudaFree(d_Y); } template <typename Dtype> void test_dot(){ std::vector<int> shape_vec{2,2,2,2}; } int main(){ std::cout << "float no transpose no transpose" << std::endl; test_tiger_gpu_gemm_notrans<float>(); std::cout << "double no transpose no transpose " << std::endl; test_tiger_gpu_gemm_notrans<double>(); std::cout << "float no transpose transpose " << std::endl; test_tiger_gpu_gemm_trans<float>(); std::cout << "double no transpose transpose " << std::endl; test_tiger_gpu_gemm_trans<double>(); std::cout << "test sgemm function " << std::endl; std::cout << "float no transpose" << std::endl; test_tiger_gpu_gemv_notrans<float>(); std::cout << "double no transpose " << std::endl; test_tiger_gpu_gemv_notrans<double>(); std::cout << "float transpose " << std::endl; test_tiger_gpu_gemv_trans<float>(); std::cout << "double transpose " << std::endl; test_tiger_gpu_gemv_trans<double>(); std::cout << "test axpy function " << std::endl; test_tiger_gpu_axpy<float>(); test_tiger_gpu_axpy<double>(); test_dot<float>(); }
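A host-side reference for the no-transpose GEMM test above, useful for checking the printed output. It assumes tiger_gpu_gemm computes row-major C = alpha * A * B + beta * C, which matches the test's C[i * N + j] indexing but is not stated in the original file; cpu_gemm_notrans is an illustrative helper, not a tiger API:

// Plain CPU reference: C += A (MxK, row-major) * B (KxN, row-major).
template <typename Dtype>
void cpu_gemm_notrans(int M, int N, int K,
                      const Dtype* A, const Dtype* B, Dtype* C) {
  for (int i = 0; i < M; ++i) {
    for (int j = 0; j < N; ++j) {
      Dtype acc = 0;
      for (int k = 0; k < K; ++k) {
        acc += A[i * K + k] * B[k * N + j];
      }
      C[i * N + j] += acc;  // the test passes beta = 1 and zero-initializes C
    }
  }
}

// With A = 1..20 (5x4) and B = 1..12 (4x3) as initialized in the test,
// the first row of C evaluates to 70 80 90.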
a14c5db5748915728a44b4e12dbe960c1c09aa8a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/layout_utils.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/flags.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/batch_norm_kernel.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/norm_utils.h" #include "paddle/phi/kernels/funcs/reduce_function.h" #include "paddle/phi/kernels/gpu/batch_norm_utils.h" #ifdef __HIPCC__ #define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim) #else #define LAUNCH_BOUNDS(BlockDim) #endif DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace phi { template <typename T> using CudnnDataType = paddle::platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, phi::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> static __global__ void KeBNRestoreData(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C; auto y_i = static_cast<BatchNormParamType<T>>(y[i]); auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c]; x[i] = static_cast<T>(x_i); } } template <typename T> class InplaceHelper { public: void operator()(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y, int grid2, const int block, const gpuStream_t &stream) { PADDLE_ENFORCE_EQ(x, y, phi::errors::InvalidArgument( "X and Y should be inplaced in inplace mode")); hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream, layout, x, scale, bias, mean, variance, epsilon, C, M, num, y); } }; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward( const T *dy, const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *saved_mean, const BatchNormParamType<T> *saved_inv_variance, const int C, const int N, const int HxW, const double epsilon, T *dx, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; __shared__ typename BlockReduce::TempStorage mean_storage; __shared__ typename BlockReduce::TempStorage variance_storeage; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; __shared__ BatchNormParamType<T> dscale_val; __shared__ BatchNormParamType<T> dbias_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); if (saved_mean && saved_inv_variance) { if (threadIdx.x == 0) { inv_var_val = 
saved_inv_variance[i]; mean_val = saved_mean[i]; } } else { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } x_sum = BlockReduce(mean_storage).Reduce(x_sum, hipcub::Sum()); x_square_sum = BlockReduce(variance_storeage).Reduce(x_square_sum, hipcub::Sum()); if (threadIdx.x == 0) { mean_val = x_sum / inner_size; inv_var_val = 1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon); } } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); ds_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val); db_sum += dy_i; } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum()); if (threadIdx.x == 0) { dscale_val = ds_sum * inv_var_val; dbias_val = db_sum; dscale[i] = dscale_val; dbias[i] = dbias_val; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = scale[i] * inv_var_val * (static_cast<BatchNormParamType<T>>(dy[index]) - dbias_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_val) * inv_var_val * dscale_val / inner_size); } } } template <typename T> __device__ __forceinline__ void BlockReduceByVetical( BatchNormParamType<T> x_sum, BatchNormParamType<T> x_square_sum, BatchNormParamType<T> *smem_sum, BatchNormParamType<T> *smem_square_sum, BatchNormParamType<T> *x_sum_out, BatchNormParamType<T> *x_square_sum_out) { int tid = threadIdx.x + threadIdx.y * blockDim.x; #pragma unroll for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { if (threadIdx.y < offset * 2) { smem_sum[tid] = x_sum; smem_square_sum[tid] = x_square_sum; } __syncthreads(); if (threadIdx.y < offset) { int pair_tid = tid + offset * blockDim.x; x_sum += smem_sum[pair_tid]; x_square_sum += smem_square_sum[pair_tid]; } } if (threadIdx.y == 0) { *x_sum_out = x_sum; *x_square_sum_out = x_square_sum; } } template <typename T, int BlockDim> static __global__ void BNBackward2DChannelLastStage1( const T *x, const int C, const int N, const int HxW, const double epsilon, BatchNormParamType<T> *block_data_ptr, BatchNormParamType<T> *compute_mean, BatchNormParamType<T> *compute_inv_var, int *flag_ptr) { int outer_size = C; int inner_size = N * HxW; __shared__ BatchNormParamType<T> smem_sum[BlockDim]; __shared__ BatchNormParamType<T> smem_square_sum[BlockDim]; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; int outer_loop_stride = gridDim.x * blockDim.x; int inner_loop_stride = gridDim.y * blockDim.y; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size; i += outer_loop_stride) { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size; j += 
inner_loop_stride) { const int index = j * outer_size + i; BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } // vertical block sum BlockReduceByVetical<T>(x_sum, x_square_sum, &smem_sum[0], &smem_square_sum[0], &x_sum, &x_square_sum); if (gridDim.y > 1) { volatile BatchNormParamType<T> *staging_sum = block_data_ptr; volatile BatchNormParamType<T> *staging_square_sum = &block_data_ptr[C * gridDim.y]; // write block data to global memory if (threadIdx.y == 0) { staging_sum[i + blockIdx.y * C] = x_sum; staging_square_sum[i + blockIdx.y * C] = x_square_sum; } // make sure write is visible to all blocks __threadfence(); __syncthreads(); __shared__ bool is_last_block_done; // mark block done if (threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&flag_ptr[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y - 1)); } __syncthreads(); if (is_last_block_done) { x_sum = static_cast<BatchNormParamType<T>>(0); x_square_sum = static_cast<BatchNormParamType<T>>(0); // thread sum for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { x_sum += staging_sum[i + y * C]; x_square_sum += staging_square_sum[i + y * C]; } // vertical block sum BlockReduceByVetical<T>(x_sum, x_square_sum, &smem_sum[0], &smem_square_sum[0], &x_sum, &x_square_sum); // final compute if (threadIdx.y == 0) { BatchNormParamType<T> compute_mean_val = x_sum / inner_size; BatchNormParamType<T> variance_val = x_square_sum / inner_size - compute_mean_val * compute_mean_val; BatchNormParamType<T> compute_inv_var_val = 1 / sqrt(variance_val + epsilon); compute_mean[i] = compute_mean_val; compute_inv_var[i] = compute_inv_var_val; } } } } } template <typename T, int BlockDim> static __global__ void BNBackward2DChannelLastStage2( const T *dy, const T *x, const BatchNormParamType<T> *means, const BatchNormParamType<T> *variances, const int C, const int N, const int HxW, const double epsilon, BatchNormParamType<T> *block_data_ptr, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias, int *flag_ptr) { int outer_size = C; int inner_size = N * HxW; __shared__ BatchNormParamType<T> smem_ds_sum[BlockDim]; __shared__ BatchNormParamType<T> smem_db_sum[BlockDim]; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; int outer_loop_stride = gridDim.x * blockDim.x; int inner_loop_stride = gridDim.y * blockDim.y; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size; i += outer_loop_stride) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> mean_val = means[i]; BatchNormParamType<T> inv_var_val = variances[i]; for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size; j += inner_loop_stride) { const int index = j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); ds_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val); db_sum += dy_i; } // vertical block sum BlockReduceByVetical<T>( ds_sum, db_sum, &smem_ds_sum[0], &smem_db_sum[0], &ds_sum, &db_sum); if (gridDim.y > 1) { volatile BatchNormParamType<T> *staging_ds_sum = block_data_ptr; volatile BatchNormParamType<T> *staging_db_sum = &block_data_ptr[C * gridDim.y]; // write block data to global memory if (threadIdx.y == 0) { staging_ds_sum[i + blockIdx.y * C] = ds_sum; staging_db_sum[i + blockIdx.y * C] = db_sum; } // make sure write is visible to all blocks __threadfence(); __syncthreads(); 
__shared__ bool is_last_block_done; // mark block done if (threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&flag_ptr[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y - 1)); } __syncthreads(); if (is_last_block_done) { ds_sum = static_cast<BatchNormParamType<T>>(0); db_sum = static_cast<BatchNormParamType<T>>(0); // thread sum for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { ds_sum += staging_ds_sum[i + y * C]; db_sum += staging_db_sum[i + y * C]; } // vertical block sum BlockReduceByVetical<T>( ds_sum, db_sum, &smem_ds_sum[0], &smem_db_sum[0], &ds_sum, &db_sum); // final compute if (threadIdx.y == 0) { dscale[i] = ds_sum * inv_var_val; dbias[i] = db_sum; } } } } } template <typename T, int BlockDim> static __global__ void BNBackward2DChannelLastStage3( const T *dy, const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *dscales, const BatchNormParamType<T> *dbias, const BatchNormParamType<T> *means, const BatchNormParamType<T> *variances, const int C, const int N, const int HxW, const double epsilon, T *dx) { const int outer_size = C; const int inner_size = N * HxW; int outer_loop_stride = gridDim.x * blockDim.x; int inner_loop_stride = gridDim.y * blockDim.y; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size; i += outer_loop_stride) { BatchNormParamType<T> mean_val = means[i]; BatchNormParamType<T> inv_var_val = variances[i]; BatchNormParamType<T> dscale_val = dscales[i]; BatchNormParamType<T> dbias_val = dbias[i]; for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size; j += inner_loop_stride) { const int index = j * outer_size + i; dx[index] = scale[i] * inv_var_val * (static_cast<BatchNormParamType<T>>(dy[index]) - dbias_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_val) * inv_var_val * dscale_val / inner_size); } } } template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData( const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *mean, const T *x, const BatchNormParamType<T> *variance, const int C, const int N, const int HxW, T *dx) { const int outer_size = C; const int inner_size = N * HxW; typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage dy_storage; __shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage; __shared__ BatchNormParamType<T> dy_sum_val; __shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> inv_var_i = variance[i]; BatchNormParamType<T> mean_i = mean[i]; BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> dy_x_sub_mean_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); dy_sum += dy_i; dy_x_sub_mean_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); } dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum()); dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage) .Reduce(dy_x_sub_mean_sum, hipcub::Sum()); if (threadIdx.x == 0) { dy_sum_val = dy_sum; dy_x_sub_mean_sum_val = dy_x_sub_mean_sum; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = (static_cast<BatchNormParamType<T>>(dy[index]) - dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_i) * dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) * scale[i] * inv_var_i; } } } template <typename T, typename Context> void BatchNormGradRawKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon_f, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, bool is_inplace, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { double epsilon = static_cast<double>(epsilon_f); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const auto *d_y = &y_grad; auto *d_x = x_grad; auto *d_scale = scale_grad; auto *d_bias = bias_grad; use_global_stats = is_test || use_global_stats; const auto &x_dims = x.dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, phi::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5." "But received: the size of input's dimensions is [%d]," "the dimensions of input is [%s]", x_dims.size(), x_dims)); int N, C, H, W, D; phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output if (d_x) { ctx.template Alloc<T>(d_x); } if (d_scale && d_bias) { ctx.template Alloc<BatchNormParamType<T>>(d_scale); ctx.template Alloc<BatchNormParamType<T>>(d_bias); } PADDLE_ENFORCE_EQ( scale.dims().size(), 1UL, phi::errors::InvalidArgument( "The size of scale's dimensions must equal to 1. But received: " "the size of scale's dimensions is [%d], the dimensions of scale " "is [%s].", scale.dims().size(), scale.dims())); PADDLE_ENFORCE_EQ( scale.dims()[0], C, phi::errors::InvalidArgument( "The first dimension of scale must equal to Channels[%d]. But " "received: the first dimension of scale is [%d]", C, scale.dims()[0])); auto dtype = paddle::platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; // TODO(wangran16): wait for MIOpen to improve the performance of BN // HIP do not support compute format of NHWC // auto compute_format = DataLayout::kNCHW; #else const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent && (reserve_space.get_ptr() != nullptr); auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; #endif DenseTensor transformed_x(x.type()); DenseTensor transformed_d_y(d_y->type()); DenseTensor transformed_d_x; if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x); TransToChannelFirst<Context, T>(ctx, &x, &transformed_x); ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); if (d_x) { ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x); } } else { transformed_x.ShareDataWith(x); transformed_d_y.ShareDataWith(*d_y); if (d_x) { transformed_d_x.ShareDataWith(*d_x); } } std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } const int num = transformed_x.numel(); #ifdef HIPCC const int block = 256; #else const int block = 512; #endif int max_threads = ctx.GetMaxPhysicalThreadCount(); const int max_blocks = ::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = ::min(C, max_blocks); auto stream = ctx.stream(); InplaceHelper<T> inplace_functor; if (!use_global_stats) { if ((N * H * W * D) == 1) { if (d_x) { paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); } phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor; functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // miopenTensorDescriptor_t data_desc_; // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor( &bn_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = ::max(epsilon, CUDNN_BN_MIN_EPSILON); #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // mode_ = miopenBNSpatial; #elif CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #endif // CUDNN_VERSION_MIN(7, 0, 1) #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType<T>::type, // x_dims.size() > 3 ? 
x_dims.size() : 4, const_cast<int *>(dims.data()), // const_cast<int *>(strides.data()))); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_, // data_desc_, mode_)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); #endif const auto *saved_mean_data = saved_mean.template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_variance.template data<BatchNormParamType<T>>(); if (is_inplace) { inplace_functor(compute_format, transformed_x.data<T>(), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, epsilon, C, H * W * D, num, transformed_x.data<T>(), grid2, block, stream); } // This branch calls CUDNN APIs if (d_x && d_scale && d_bias) { #ifdef PADDLE_WITH_HIP if (compute_format == DataLayout::kNCHW) { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenBatchNormalizationBackward( // dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), data_desc_, // transformed_x.template data<T>(), data_desc_, // transformed_d_y.template data<T>(), data_desc_, // transformed_d_x.template mutable_data<T>(ctx.GetPlace()), // bn_param_desc_, scale->template data<BatchNormParamType<T>>(), // d_scale->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // d_bias->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // epsilon, saved_mean_data, saved_var_data)); #else // CUDNN only support small batch size // const size_t CUDNN_PER_ACTIVATION_THRESHOLD = 131070; const size_t CUDNN_PER_ACTIVATION_THRESHOLD = 10240; const size_t CUDNN_SPATIAL_THRESHOLD = 880801; const bool use_native_kernel = ((x_dims.size() == 2 && N >= CUDNN_PER_ACTIVATION_THRESHOLD) || (x_dims.size() == 3 && N >= CUDNN_SPATIAL_THRESHOLD)); if (use_native_kernel) { if (x_dims.size() == 2) { dim3 block; dim3 grid; const int block_size = 512; const int MAX_GRID_SIZE = 128; const int WARP_SIZE = 32; // init intermediate storage DenseTensor block_data_tensor; DenseTensor flag_tensor; DenseTensor compute_mean_tensor = phi::Empty<BatchNormParamType<T>, Context>(ctx, {C}); DenseTensor compute_inv_var_tensor = phi::Empty<BatchNormParamType<T>, Context>(ctx, {C}); BatchNormParamType<T> *block_data_ptr = nullptr; int *flag_ptr = 
nullptr; int block_x = ::min(phi::funcs::details::GetLastPow2(C), WARP_SIZE); int block_y = ::min(phi::funcs::details::GetLastPow2(N * H * W * D / 16), block_size / block_x); if (block_x * block_y != block_size) { block_x = ::min(phi::funcs::details::GetLastPow2(C), block_size / block_y); } int grid_x = (C + block_x - 1) / block_x; int grid_y = ::min((N * H * W * D + block_y * 16 - 1) / (block_y * 16), MAX_GRID_SIZE); block.x = block_x; block.y = block_y; grid.x = grid_x; grid.y = grid_y; if (grid.y > 1) { block_data_tensor = phi::Empty<BatchNormParamType<T>, Context>( ctx, {2 * C * grid.y}); flag_tensor = phi::Empty<int, Context>(ctx, {grid.x}); block_data_ptr = block_data_tensor.data<BatchNormParamType<T>>(); flag_ptr = flag_tensor.data<int>(); funcs::SetConstant<Context, int> set_zero; set_zero(ctx, &flag_tensor, static_cast<int>(0)); } // 1. reduce_sum(x) => mean, inv_var auto *mean_ptr = saved_mean_data == nullptr ? compute_mean_tensor.data<BatchNormParamType<T>>() : saved_mean_data; auto *variance_ptr = saved_var_data == nullptr ? compute_inv_var_tensor.data<BatchNormParamType<T>>() : saved_var_data; if (saved_mean_data == nullptr) { hipLaunchKernelGGL(( BNBackward2DChannelLastStage1<T, block_size>) , dim3(grid), dim3(block), 0, ctx.stream(), transformed_x.template data<T>(), C, N, H * W * D, epsilon, block_data_ptr, compute_mean_tensor.data<BatchNormParamType<T>>(), compute_inv_var_tensor.data<BatchNormParamType<T>>(), flag_ptr); } // 2. reduce_sum(x, dy, mean) => dscale, dbias hipLaunchKernelGGL(( BNBackward2DChannelLastStage2<T, block_size>) , dim3(grid), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), mean_ptr, variance_ptr, C, N, H * W * D, epsilon, block_data_ptr, ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias), flag_ptr); // 3. 
elementwise_mul(scale, mean, inv_var, dy, dscale, dbias) => dx hipLaunchKernelGGL(( BNBackward2DChannelLastStage3<T, block_size>) , dim3(grid), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>(), mean_ptr, variance_ptr, C, N, H * W * D, epsilon, transformed_d_x.template data<T>()); } else { if (compute_format == DataLayout::kNCHW) { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { hipLaunchKernelGGL(( BNBackward<T, block, DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, ctx.stream(), transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } } } else { #if CUDNN_VERSION_MIN(7, 4, 1) size_t workspace_size = 0; void *workspace_ptr = nullptr; DenseTensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/nullptr, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); workspace_tensor.Resize({static_cast<int64_t>(workspace_size)}); workspace_ptr = static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/transformed_x.template data<T>(), /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/data_desc_, /*dyData=*/transformed_d_y.template data<T>(), /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/data_desc_, /*dxData=*/ctx.template Alloc<T>(&transformed_d_x), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale.template data<BatchNormParamType<T>>(), /*bnBiasData=*/nullptr, /*dBnScaleData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_scale), /*dBnBiasData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_bias), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesc=*/nullptr, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/ const_cast<uint8_t *>(reserve_space->template data<uint8_t>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackward( ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), 
CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template data<T>(), data_desc_, transformed_d_y.template data<T>(), data_desc_, ctx.template Alloc<T>(&transformed_d_x), bn_param_desc_, scale.template data<BatchNormParamType<T>>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias), epsilon, saved_mean_data, saved_var_data)); #endif // CUDNN_VERSION_MIN(7, 4, 1) } #endif if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x); } } else { // This branch call CUDA kernels if (compute_format == DataLayout::kNCHW) { if (d_x) { hipLaunchKernelGGL(( BNBackwardData<T, block, phi::DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, ctx.stream(), d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { hipLaunchKernelGGL(( BNBackwardData<T, block, phi::DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, ctx.stream(), d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. 
PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor( bn_param_desc_)); #endif } else { const auto *running_mean = mean.get_ptr(); const auto *running_var = variance.get_ptr(); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); if (is_inplace) { auto px = x; inplace_functor(data_layout, ctx.template Alloc<T>(&px), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), running_mean_data, running_var_data, epsilon, C, H * W * D, num, x.data<T>(), grid2, block, stream); } if (compute_format == DataLayout::kNCHW) { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData<T, phi::DataLayout::kNCHW>) , dim3(grid1), dim3(block), 0, stream, d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { hipLaunchKernelGGL(( KeBNBackwardData<T, phi::DataLayout::kNHWC>) , dim3(grid1), dim3(block), 0, stream, d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { hipLaunchKernelGGL(( KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC>) , dim3(grid2), dim3(block), 0, stream, d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } template <typename T, typename Context> void BatchNormGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon, const std::string &data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { BatchNormGradRawKernel<T, Context>(dev_ctx, x, scale, bias, mean, variance, saved_mean, saved_variance, reserve_space, y_grad, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics, fuse_with_relu, false, x_grad, scale_grad, bias_grad); } template <typename T, typename Context> void BatchNormDoubleGradKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const DenseTensor &y_grad, const DenseTensor &x_grad_grad, const DenseTensor &scale_grad_grad, const DenseTensor &bias_grad_grad, float momentum, float epsilon, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *y_grad_grad) { PADDLE_ENFORCE_EQ(is_test, false, phi::errors::InvalidArgument( "`is_test = True` CANNOT be used in train 
program. If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const DenseTensor *running_mean = nullptr; const DenseTensor *running_variance = nullptr; if (use_global_stats) { running_mean = mean.get_ptr(); running_variance = variance.get_ptr(); } paddle::operators::NormDoubleGradFunctor<Context, T>(ctx, data_layout, &x, &scale, &y_grad, &saved_mean, &saved_variance, running_mean, running_variance, epsilon, use_global_stats, &x_grad_grad, &scale_grad_grad, &bias_grad_grad, x_grad, scale_grad, y_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } #endif #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #else PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #endif
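The BNBackward kernel in the file above reduces, per channel, ds = sum(dy * (x - mean)) and db = sum(dy), writes dscale = ds * inv_var and dbias = db, and then forms dx from those sums. A scalar host-side sketch of the same per-channel math, for reference only (bn_backward_channel_ref is an illustrative name, not a Paddle/phi API):

#include <cstddef>
#include <vector>

// m = N * HxW elements of one channel; mean and inv_var are the saved
// (or freshly computed) batch statistics for that channel.
void bn_backward_channel_ref(const std::vector<float>& x,
                             const std::vector<float>& dy,
                             float mean, float inv_var, float scale,
                             float* dscale, float* dbias,
                             std::vector<float>* dx) {
  const float m = static_cast<float>(x.size());
  float ds = 0.f, db = 0.f;
  for (std::size_t j = 0; j < x.size(); ++j) {
    ds += dy[j] * (x[j] - mean);   // sum of dy * (x - mean)
    db += dy[j];                   // sum of dy
  }
  *dscale = ds * inv_var;
  *dbias = db;
  dx->resize(x.size());
  for (std::size_t j = 0; j < x.size(); ++j) {
    // Matches the kernel's dx formula with dscale = ds * inv_var substituted in.
    (*dx)[j] = scale * inv_var *
               (dy[j] - db / m - (x[j] - mean) * inv_var * inv_var * ds / m);
  }
}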
a14c5db5748915728a44b4e12dbe960c1c09aa8a.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/operators/layout_utils.h" #include "paddle/fluid/operators/norm_utils.cu.h" #include "paddle/fluid/platform/device/gpu/gpu_dnn.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/flags.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/batch_norm_kernel.h" #include "paddle/phi/kernels/empty_kernel.h" #include "paddle/phi/kernels/funcs/eigen/common.h" #include "paddle/phi/kernels/funcs/norm_utils.h" #include "paddle/phi/kernels/funcs/reduce_function.h" #include "paddle/phi/kernels/gpu/batch_norm_utils.h" #ifdef __HIPCC__ #define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim) #else #define LAUNCH_BOUNDS(BlockDim) #endif DECLARE_bool(cudnn_batchnorm_spatial_persistent); namespace phi { template <typename T> using CudnnDataType = paddle::platform::CudnnDataType<T>; template <typename T> using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias( const T *dy, const T *x, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, const double epsilon, const int N, const int C, const int HxW, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon); BatchNormParamType<T> mean_i = mean[i]; for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); db_sum += static_cast<BatchNormParamType<T>>(dy[index]); } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale[i] = ds_sum * inv_var_i; dbias[i] = db_sum; } __syncthreads(); } } template <typename T, phi::DataLayout layout> static __global__ void KeBNBackwardData(const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *variance, const double epsilon, const int C, const int HxW, const int num, T *dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C; BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon); dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) * scale[c] * inv_var); } } template <typename T> static __global__ void KeBNRestoreData(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = gid; i < num; i += stride) { const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C; auto y_i = static_cast<BatchNormParamType<T>>(y[i]); auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c]; x[i] = static_cast<T>(x_i); } } template <typename T> class InplaceHelper { public: void operator()(const phi::DataLayout layout, T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *bias, const BatchNormParamType<T> *mean, const BatchNormParamType<T> *variance, double epsilon, int C, int M, const int num, const T *y, int grid2, const int block, const gpuStream_t &stream) { PADDLE_ENFORCE_EQ(x, y, phi::errors::InvalidArgument( "X and Y should be inplaced in inplace mode")); KeBNRestoreData<<<grid2, block, 0, stream>>>( layout, x, scale, bias, mean, variance, epsilon, C, M, num, y); } }; template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward( const T *dy, const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *saved_mean, const BatchNormParamType<T> *saved_inv_variance, const int C, const int N, const int HxW, const double epsilon, T *dx, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage ds_storage; __shared__ typename BlockReduce::TempStorage db_storage; __shared__ typename BlockReduce::TempStorage mean_storage; __shared__ typename BlockReduce::TempStorage variance_storeage; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; __shared__ BatchNormParamType<T> dscale_val; __shared__ BatchNormParamType<T> dbias_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); if (saved_mean && saved_inv_variance) { if (threadIdx.x == 0) { inv_var_val = saved_inv_variance[i]; mean_val = saved_mean[i]; 
} } else { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum()); x_square_sum = BlockReduce(variance_storeage).Reduce(x_square_sum, cub::Sum()); if (threadIdx.x == 0) { mean_val = x_sum / inner_size; inv_var_val = 1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon); } } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); ds_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val); db_sum += dy_i; } ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum()); db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum()); if (threadIdx.x == 0) { dscale_val = ds_sum * inv_var_val; dbias_val = db_sum; dscale[i] = dscale_val; dbias[i] = dbias_val; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = scale[i] * inv_var_val * (static_cast<BatchNormParamType<T>>(dy[index]) - dbias_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_val) * inv_var_val * dscale_val / inner_size); } } } template <typename T> __device__ __forceinline__ void BlockReduceByVetical( BatchNormParamType<T> x_sum, BatchNormParamType<T> x_square_sum, BatchNormParamType<T> *smem_sum, BatchNormParamType<T> *smem_square_sum, BatchNormParamType<T> *x_sum_out, BatchNormParamType<T> *x_square_sum_out) { int tid = threadIdx.x + threadIdx.y * blockDim.x; #pragma unroll for (int offset = blockDim.y / 2; offset > 0; offset >>= 1) { if (threadIdx.y < offset * 2) { smem_sum[tid] = x_sum; smem_square_sum[tid] = x_square_sum; } __syncthreads(); if (threadIdx.y < offset) { int pair_tid = tid + offset * blockDim.x; x_sum += smem_sum[pair_tid]; x_square_sum += smem_square_sum[pair_tid]; } } if (threadIdx.y == 0) { *x_sum_out = x_sum; *x_square_sum_out = x_square_sum; } } template <typename T, int BlockDim> static __global__ void BNBackward2DChannelLastStage1( const T *x, const int C, const int N, const int HxW, const double epsilon, BatchNormParamType<T> *block_data_ptr, BatchNormParamType<T> *compute_mean, BatchNormParamType<T> *compute_inv_var, int *flag_ptr) { int outer_size = C; int inner_size = N * HxW; __shared__ BatchNormParamType<T> smem_sum[BlockDim]; __shared__ BatchNormParamType<T> smem_square_sum[BlockDim]; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; int outer_loop_stride = gridDim.x * blockDim.x; int inner_loop_stride = gridDim.y * blockDim.y; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size; i += outer_loop_stride) { BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> x_square_sum = static_cast<BatchNormParamType<T>>(0); for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size; j += inner_loop_stride) { const int index = j * outer_size + i; 
BatchNormParamType<T> x_i = static_cast<BatchNormParamType<T>>(x[index]); x_sum += x_i; x_square_sum += x_i * x_i; } // vertical block sum BlockReduceByVetical<T>(x_sum, x_square_sum, &smem_sum[0], &smem_square_sum[0], &x_sum, &x_square_sum); if (gridDim.y > 1) { volatile BatchNormParamType<T> *staging_sum = block_data_ptr; volatile BatchNormParamType<T> *staging_square_sum = &block_data_ptr[C * gridDim.y]; // write block data to global memory if (threadIdx.y == 0) { staging_sum[i + blockIdx.y * C] = x_sum; staging_square_sum[i + blockIdx.y * C] = x_square_sum; } // make sure write is visible to all blocks __threadfence(); __syncthreads(); __shared__ bool is_last_block_done; // mark block done if (threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&flag_ptr[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y - 1)); } __syncthreads(); if (is_last_block_done) { x_sum = static_cast<BatchNormParamType<T>>(0); x_square_sum = static_cast<BatchNormParamType<T>>(0); // thread sum for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { x_sum += staging_sum[i + y * C]; x_square_sum += staging_square_sum[i + y * C]; } // vertical block sum BlockReduceByVetical<T>(x_sum, x_square_sum, &smem_sum[0], &smem_square_sum[0], &x_sum, &x_square_sum); // final compute if (threadIdx.y == 0) { BatchNormParamType<T> compute_mean_val = x_sum / inner_size; BatchNormParamType<T> variance_val = x_square_sum / inner_size - compute_mean_val * compute_mean_val; BatchNormParamType<T> compute_inv_var_val = 1 / sqrt(variance_val + epsilon); compute_mean[i] = compute_mean_val; compute_inv_var[i] = compute_inv_var_val; } } } } } template <typename T, int BlockDim> static __global__ void BNBackward2DChannelLastStage2( const T *dy, const T *x, const BatchNormParamType<T> *means, const BatchNormParamType<T> *variances, const int C, const int N, const int HxW, const double epsilon, BatchNormParamType<T> *block_data_ptr, BatchNormParamType<T> *dscale, BatchNormParamType<T> *dbias, int *flag_ptr) { int outer_size = C; int inner_size = N * HxW; __shared__ BatchNormParamType<T> smem_ds_sum[BlockDim]; __shared__ BatchNormParamType<T> smem_db_sum[BlockDim]; __shared__ BatchNormParamType<T> inv_var_val; __shared__ BatchNormParamType<T> mean_val; int outer_loop_stride = gridDim.x * blockDim.x; int inner_loop_stride = gridDim.y * blockDim.y; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size; i += outer_loop_stride) { BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> mean_val = means[i]; BatchNormParamType<T> inv_var_val = variances[i]; for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size; j += inner_loop_stride) { const int index = j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); ds_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val); db_sum += dy_i; } // vertical block sum BlockReduceByVetical<T>( ds_sum, db_sum, &smem_ds_sum[0], &smem_db_sum[0], &ds_sum, &db_sum); if (gridDim.y > 1) { volatile BatchNormParamType<T> *staging_ds_sum = block_data_ptr; volatile BatchNormParamType<T> *staging_db_sum = &block_data_ptr[C * gridDim.y]; // write block data to global memory if (threadIdx.y == 0) { staging_ds_sum[i + blockIdx.y * C] = ds_sum; staging_db_sum[i + blockIdx.y * C] = db_sum; } // make sure write is visible to all blocks __threadfence(); __syncthreads(); __shared__ bool is_last_block_done; // mark block done if 
(threadIdx.x == 0 && threadIdx.y == 0) { int old = atomicAdd(&flag_ptr[blockIdx.x], 1); is_last_block_done = (old == (gridDim.y - 1)); } __syncthreads(); if (is_last_block_done) { ds_sum = static_cast<BatchNormParamType<T>>(0); db_sum = static_cast<BatchNormParamType<T>>(0); // thread sum for (int y = threadIdx.y; y < gridDim.y; y += blockDim.y) { ds_sum += staging_ds_sum[i + y * C]; db_sum += staging_db_sum[i + y * C]; } // vertical block sum BlockReduceByVetical<T>( ds_sum, db_sum, &smem_ds_sum[0], &smem_db_sum[0], &ds_sum, &db_sum); // final compute if (threadIdx.y == 0) { dscale[i] = ds_sum * inv_var_val; dbias[i] = db_sum; } } } } } template <typename T, int BlockDim> static __global__ void BNBackward2DChannelLastStage3( const T *dy, const T *x, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *dscales, const BatchNormParamType<T> *dbias, const BatchNormParamType<T> *means, const BatchNormParamType<T> *variances, const int C, const int N, const int HxW, const double epsilon, T *dx) { const int outer_size = C; const int inner_size = N * HxW; int outer_loop_stride = gridDim.x * blockDim.x; int inner_loop_stride = gridDim.y * blockDim.y; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < outer_size; i += outer_loop_stride) { BatchNormParamType<T> mean_val = means[i]; BatchNormParamType<T> inv_var_val = variances[i]; BatchNormParamType<T> dscale_val = dscales[i]; BatchNormParamType<T> dbias_val = dbias[i]; for (int j = blockIdx.y * blockDim.y + threadIdx.y; j < inner_size; j += inner_loop_stride) { const int index = j * outer_size + i; dx[index] = scale[i] * inv_var_val * (static_cast<BatchNormParamType<T>>(dy[index]) - dbias_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_val) * inv_var_val * dscale_val / inner_size); } } } template <typename T, int BlockDim, phi::DataLayout layout> static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData( const T *dy, const BatchNormParamType<T> *scale, const BatchNormParamType<T> *mean, const T *x, const BatchNormParamType<T> *variance, const int C, const int N, const int HxW, T *dx) { const int outer_size = C; const int inner_size = N * HxW; typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce; __shared__ typename BlockReduce::TempStorage dy_storage; __shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage; __shared__ BatchNormParamType<T> dy_sum_val; __shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val; for (int i = blockIdx.x; i < outer_size; i += gridDim.x) { BatchNormParamType<T> inv_var_i = variance[i]; BatchNormParamType<T> mean_i = mean[i]; BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0); BatchNormParamType<T> dy_x_sub_mean_sum = static_cast<BatchNormParamType<T>>(0); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? (j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; BatchNormParamType<T> dy_i = static_cast<BatchNormParamType<T>>(dy[index]); dy_sum += dy_i; dy_x_sub_mean_sum += dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i); } dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum()); dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage) .Reduce(dy_x_sub_mean_sum, cub::Sum()); if (threadIdx.x == 0) { dy_sum_val = dy_sum; dy_x_sub_mean_sum_val = dy_x_sub_mean_sum; } __syncthreads(); for (int j = threadIdx.x; j < inner_size; j += blockDim.x) { const int index = layout == phi::DataLayout::kNCHW ? 
(j / HxW * C + i) * HxW + j % HxW : j * outer_size + i; dx[index] = (static_cast<BatchNormParamType<T>>(dy[index]) - dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) - (static_cast<BatchNormParamType<T>>(x[index]) - mean_i) * dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) * scale[i] * inv_var_i; } } } template <typename T, typename Context> void BatchNormGradRawKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon_f, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, bool is_inplace, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { double epsilon = static_cast<double>(epsilon_f); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const auto *d_y = &y_grad; auto *d_x = x_grad; auto *d_scale = scale_grad; auto *d_bias = bias_grad; use_global_stats = is_test || use_global_stats; const auto &x_dims = x.dims(); PADDLE_ENFORCE_EQ( x_dims.size() >= 2 && x_dims.size() <= 5, true, phi::errors::InvalidArgument( "The size of input's dimensions should be between 2 and 5." "But received: the size of input's dimensions is [%d]," "the dimensions of input is [%s]", x_dims.size(), x_dims)); int N, C, H, W, D; phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); // init output if (d_x) { ctx.template Alloc<T>(d_x); } if (d_scale && d_bias) { ctx.template Alloc<BatchNormParamType<T>>(d_scale); ctx.template Alloc<BatchNormParamType<T>>(d_bias); } PADDLE_ENFORCE_EQ( scale.dims().size(), 1UL, phi::errors::InvalidArgument( "The size of scale's dimensions must equal to 1. But received: " "the size of scale's dimensions is [%d], the dimensions of scale " "is [%s].", scale.dims().size(), scale.dims())); PADDLE_ENFORCE_EQ( scale.dims()[0], C, phi::errors::InvalidArgument( "The first dimension of scale must equal to Channels[%d]. But " "received: the first dimension of scale is [%d]", C, scale.dims()[0])); auto dtype = paddle::platform::CudnnDataType<T>::type; #ifdef PADDLE_WITH_HIP auto compute_format = data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW; // TODO(wangran16): wait for MIOpen to improve the performance of BN // HIP do not support compute format of NHWC // auto compute_format = DataLayout::kNCHW; #else const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF && FLAGS_cudnn_batchnorm_spatial_persistent && (reserve_space.get_ptr() != nullptr); auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC ? 
DataLayout::kNHWC : DataLayout::kNCHW; #endif DenseTensor transformed_x(x.type()); DenseTensor transformed_d_y(d_y->type()); DenseTensor transformed_d_x; if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW && x_dims.size() > 2) { VLOG(3) << "Transform input tensor from NHWC to NCHW."; ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x); TransToChannelFirst<Context, T>(ctx, &x, &transformed_x); ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y); if (d_x) { ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x); } } else { transformed_x.ShareDataWith(x); transformed_d_y.ShareDataWith(*d_y); if (d_x) { transformed_d_x.ShareDataWith(*d_x); } } std::vector<int> dims; std::vector<int> strides; if (compute_format == DataLayout::kNCHW) { dims = {N, C, H, W, D}; strides = {C * H * W * D, H * W * D, W * D, D, 1}; } else { dims = {N, C, H, W, D}; strides = {H * W * C * D, 1, W * D * C, D * C, C}; } const int num = transformed_x.numel(); #ifdef HIPCC const int block = 256; #else const int block = 512; #endif int max_threads = ctx.GetMaxPhysicalThreadCount(); const int max_blocks = std::max(max_threads / block, 1); int grid1 = (num + block - 1) / block; int grid2 = std::min(C, max_blocks); auto stream = ctx.stream(); InplaceHelper<T> inplace_functor; if (!use_global_stats) { if ((N * H * W * D) == 1) { if (d_x) { paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x); } phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor; functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0)); functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0)); return; } // ------------------- cudnn descriptors --------------------- #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // miopenTensorDescriptor_t data_desc_; // miopenTensorDescriptor_t bn_param_desc_; // miopenBatchNormMode_t mode_; // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_)); #else cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; cudnnBatchNormMode_t mode_; PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnCreateTensorDescriptor( &bn_param_desc_)); #endif if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) { LOG(ERROR) << "Provided epsilon is smaller than " << "CUDNN_BN_MIN_EPSILON. Setting it to " << "CUDNN_BN_MIN_EPSILON instead."; } epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON); #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // mode_ = miopenBNSpatial; #elif CUDNN_VERSION_MIN(7, 0, 1) if (FLAGS_cudnn_batchnorm_spatial_persistent) { mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT; } else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #else if (H == 1 && W == 1) { mode_ = CUDNN_BATCHNORM_PER_ACTIVATION; } else { mode_ = CUDNN_BATCHNORM_SPATIAL; } #endif // CUDNN_VERSION_MIN(7, 0, 1) #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor( // data_desc_, CudnnDataType<T>::type, // x_dims.size() > 3 ? 
x_dims.size() : 4, const_cast<int *>(dims.data()), // const_cast<int *>(strides.data()))); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_, // data_desc_, mode_)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnSetTensorNdDescriptor( data_desc_, CudnnDataType<T>::type, x_dims.size() > 3 ? x_dims.size() : 4, dims.data(), strides.data())); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); #endif const auto *saved_mean_data = saved_mean.template data<BatchNormParamType<T>>(); const auto *saved_var_data = saved_variance.template data<BatchNormParamType<T>>(); if (is_inplace) { inplace_functor(compute_format, transformed_x.data<T>(), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, epsilon, C, H * W * D, num, transformed_x.data<T>(), grid2, block, stream); } // This branch calls CUDNN APIs if (d_x && d_scale && d_bias) { #ifdef PADDLE_WITH_HIP if (compute_format == DataLayout::kNCHW) { BNBackward<T, block, DataLayout::kNCHW> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { BNBackward<T, block, DataLayout::kNHWC> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } // TODO(wangran16): wait for MIOpen to improve the performance of BN // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenBatchNormalizationBackward( // dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), // CudnnDataType<T>::kZero(), data_desc_, // transformed_x.template data<T>(), data_desc_, // transformed_d_y.template data<T>(), data_desc_, // transformed_d_x.template mutable_data<T>(ctx.GetPlace()), // bn_param_desc_, scale->template data<BatchNormParamType<T>>(), // d_scale->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // d_bias->template mutable_data<BatchNormParamType<T>>( // ctx.GetPlace()), // epsilon, saved_mean_data, saved_var_data)); #else // CUDNN only support small batch size // const size_t CUDNN_PER_ACTIVATION_THRESHOLD = 131070; const size_t CUDNN_PER_ACTIVATION_THRESHOLD = 10240; const size_t CUDNN_SPATIAL_THRESHOLD = 880801; const bool use_native_kernel = ((x_dims.size() == 2 && N >= CUDNN_PER_ACTIVATION_THRESHOLD) || (x_dims.size() == 3 && N >= CUDNN_SPATIAL_THRESHOLD)); if (use_native_kernel) { if (x_dims.size() == 2) { dim3 block; dim3 grid; const int block_size = 512; const int MAX_GRID_SIZE = 128; const int WARP_SIZE = 32; // init intermediate storage DenseTensor block_data_tensor; DenseTensor flag_tensor; DenseTensor compute_mean_tensor = phi::Empty<BatchNormParamType<T>, Context>(ctx, {C}); DenseTensor compute_inv_var_tensor = phi::Empty<BatchNormParamType<T>, Context>(ctx, {C}); BatchNormParamType<T> *block_data_ptr = nullptr; int *flag_ptr = nullptr; int block_x = 
std::min(phi::funcs::details::GetLastPow2(C), WARP_SIZE); int block_y = std::min(phi::funcs::details::GetLastPow2(N * H * W * D / 16), block_size / block_x); if (block_x * block_y != block_size) { block_x = std::min(phi::funcs::details::GetLastPow2(C), block_size / block_y); } int grid_x = (C + block_x - 1) / block_x; int grid_y = std::min((N * H * W * D + block_y * 16 - 1) / (block_y * 16), MAX_GRID_SIZE); block.x = block_x; block.y = block_y; grid.x = grid_x; grid.y = grid_y; if (grid.y > 1) { block_data_tensor = phi::Empty<BatchNormParamType<T>, Context>( ctx, {2 * C * grid.y}); flag_tensor = phi::Empty<int, Context>(ctx, {grid.x}); block_data_ptr = block_data_tensor.data<BatchNormParamType<T>>(); flag_ptr = flag_tensor.data<int>(); funcs::SetConstant<Context, int> set_zero; set_zero(ctx, &flag_tensor, static_cast<int>(0)); } // 1. reduce_sum(x) => mean, inv_var auto *mean_ptr = saved_mean_data == nullptr ? compute_mean_tensor.data<BatchNormParamType<T>>() : saved_mean_data; auto *variance_ptr = saved_var_data == nullptr ? compute_inv_var_tensor.data<BatchNormParamType<T>>() : saved_var_data; if (saved_mean_data == nullptr) { BNBackward2DChannelLastStage1<T, block_size> <<<grid, block, 0, ctx.stream()>>>( transformed_x.template data<T>(), C, N, H * W * D, epsilon, block_data_ptr, compute_mean_tensor.data<BatchNormParamType<T>>(), compute_inv_var_tensor.data<BatchNormParamType<T>>(), flag_ptr); } // 2. reduce_sum(x, dy, mean) => dscale, dbias BNBackward2DChannelLastStage2<T, block_size> <<<grid, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), mean_ptr, variance_ptr, C, N, H * W * D, epsilon, block_data_ptr, ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias), flag_ptr); // 3. 
elementwise_mul(scale, mean, inv_var, dy, dscale, dbias) => dx BNBackward2DChannelLastStage3<T, block_size> <<<grid, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>(), mean_ptr, variance_ptr, C, N, H * W * D, epsilon, transformed_d_x.template data<T>()); } else { if (compute_format == DataLayout::kNCHW) { BNBackward<T, block, DataLayout::kNCHW> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } else { BNBackward<T, block, DataLayout::kNHWC> <<<grid2, block, 0, ctx.stream()>>>( transformed_d_y.template data<T>(), transformed_x.template data<T>(), scale.template data<BatchNormParamType<T>>(), saved_mean_data, saved_var_data, C, N, H * W * D, epsilon, transformed_d_x.template data<T>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias)); } } } else { #if CUDNN_VERSION_MIN(7, 4, 1) size_t workspace_size = 0; void *workspace_ptr = nullptr; DenseTensor workspace_tensor; auto reserve_space_size = reserve_space->memory_size(); // --------------- cudnn batchnorm workspace --------------- PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload:: cudnnGetBatchNormalizationBackwardExWorkspaceSize( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnIps=*/CUDNN_BATCHNORM_OPS_BN, /*xDesc=*/data_desc_, /*yDesc=*/data_desc_, /*dyDesc=*/data_desc_, /*dzDesc=*/nullptr, /*dxDesc=*/data_desc_, /*bnScaleBiasMeanVarDesc=*/bn_param_desc_, /*activationDesc=*/nullptr, /*sizeInBytes=*/&workspace_size)); workspace_tensor.Resize({static_cast<int64_t>(workspace_size)}); workspace_ptr = static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackwardEx( /*handle=*/ctx.cudnn_handle(), /*mode=*/mode_, /*bnOps=*/CUDNN_BATCHNORM_OPS_BN, /*alphaDataDiff=*/CudnnDataType<T>::kOne(), /*betaDataDiff=*/CudnnDataType<T>::kZero(), /*alphaParamDiff=*/CudnnDataType<T>::kOne(), /*betaParamDiff=*/CudnnDataType<T>::kZero(), /*xDesc=*/data_desc_, /*xData=*/transformed_x.template data<T>(), /*yDesc=*/nullptr, /*yData=*/nullptr, /*dyDesc=*/data_desc_, /*dyData=*/transformed_d_y.template data<T>(), /*dzDesc=*/nullptr, /*dzData=*/nullptr, /*dxDesc=*/data_desc_, /*dxData=*/ctx.template Alloc<T>(&transformed_d_x), /*dBnScaleBiasDesc=*/bn_param_desc_, /*bnScaleData=*/scale.template data<BatchNormParamType<T>>(), /*bnBiasData=*/nullptr, /*dBnScaleData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_scale), /*dBnBiasData=*/ ctx.template Alloc<BatchNormParamType<T>>(d_bias), /*epsilon=*/epsilon, /*savedMean=*/saved_mean_data, /*savedInvVariance=*/saved_var_data, /*activationDesc=*/nullptr, /*workspace=*/workspace_ptr, /*workSpaceSizeInBytes=*/workspace_size, /*reserveSpace=*/ const_cast<uint8_t *>(reserve_space->template data<uint8_t>()), /*reserveSpaceSizeInBytes=*/reserve_space_size)); #else PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnBatchNormalizationBackward( ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(), CudnnDataType<T>::kZero(), data_desc_, transformed_x.template 
data<T>(), data_desc_, transformed_d_y.template data<T>(), data_desc_, ctx.template Alloc<T>(&transformed_d_x), bn_param_desc_, scale.template data<BatchNormParamType<T>>(), ctx.template Alloc<BatchNormParamType<T>>(d_scale), ctx.template Alloc<BatchNormParamType<T>>(d_bias), epsilon, saved_mean_data, saved_var_data)); #endif // CUDNN_VERSION_MIN(7, 4, 1) } #endif if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW) { VLOG(3) << "Transform batchnorm output from NCHW to NHWC"; TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x); } } else { // This branch call CUDA kernels if (compute_format == DataLayout::kNCHW) { if (d_x) { BNBackwardData<T, block, phi::DataLayout::kNCHW> <<<grid2, block, 0, ctx.stream()>>>( d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { BNBackwardData<T, block, phi::DataLayout::kNHWC> <<<grid2, block, 0, ctx.stream()>>>( d_y->data<T>(), scale.data<BatchNormParamType<T>>(), saved_mean_data, x.data<T>(), saved_var_data, C, N, H * W * D, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), saved_mean_data, saved_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } #ifdef PADDLE_WITH_HIP // TODO(wangran16): wait for MIOpen to improve the performance of BN // clean when exit. // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(data_desc_)); // PADDLE_ENFORCE_GPU_SUCCESS( // platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_)); #else // clean when exit. 
PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_)); PADDLE_ENFORCE_GPU_SUCCESS( paddle::platform::dynload::cudnnDestroyTensorDescriptor( bn_param_desc_)); #endif } else { const auto *running_mean = mean.get_ptr(); const auto *running_var = variance.get_ptr(); const auto *running_mean_data = running_mean->template data<BatchNormParamType<T>>(); const auto *running_var_data = running_var->template data<BatchNormParamType<T>>(); if (is_inplace) { auto px = x; inplace_functor(data_layout, ctx.template Alloc<T>(&px), scale.template data<BatchNormParamType<T>>(), bias.template data<BatchNormParamType<T>>(), running_mean_data, running_var_data, epsilon, C, H * W * D, num, x.data<T>(), grid2, block, stream); } if (compute_format == DataLayout::kNCHW) { if (d_x) { KeBNBackwardData<T, phi::DataLayout::kNCHW> <<<grid1, block, 0, stream>>>(d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNCHW> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } else { if (d_x) { KeBNBackwardData<T, phi::DataLayout::kNHWC> <<<grid1, block, 0, stream>>>(d_y->data<T>(), scale.data<BatchNormParamType<T>>(), running_var_data, epsilon, C, H * W, num, d_x->data<T>()); } if (d_scale && d_bias) { KeBNBackwardScaleBias<T, block, phi::DataLayout::kNHWC> <<<grid2, block, 0, stream>>>( d_y->data<T>(), x.data<T>(), running_mean_data, running_var_data, epsilon, N, C, H * W * D, d_scale->data<BatchNormParamType<T>>(), d_bias->data<BatchNormParamType<T>>()); } } } } template <typename T, typename Context> void BatchNormGradKernel(const Context &dev_ctx, const DenseTensor &x, const DenseTensor &scale, const DenseTensor &bias, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const paddle::optional<DenseTensor> &reserve_space, const DenseTensor &y_grad, float momentum, float epsilon, const std::string &data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *bias_grad) { BatchNormGradRawKernel<T, Context>(dev_ctx, x, scale, bias, mean, variance, saved_mean, saved_variance, reserve_space, y_grad, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics, fuse_with_relu, false, x_grad, scale_grad, bias_grad); } template <typename T, typename Context> void BatchNormDoubleGradKernel(const Context &ctx, const DenseTensor &x, const DenseTensor &scale, const paddle::optional<DenseTensor> &mean, const paddle::optional<DenseTensor> &variance, const DenseTensor &saved_mean, const DenseTensor &saved_variance, const DenseTensor &y_grad, const DenseTensor &x_grad_grad, const DenseTensor &scale_grad_grad, const DenseTensor &bias_grad_grad, float momentum, float epsilon, const std::string &data_layout_str, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu, DenseTensor *x_grad, DenseTensor *scale_grad, DenseTensor *y_grad_grad) { PADDLE_ENFORCE_EQ(is_test, false, phi::errors::InvalidArgument( "`is_test = True` CANNOT be used in train program. 
If " "you want to use global status in pre_train model, " "please set `use_global_stats = True`")); const DataLayout data_layout = paddle::framework::StringToDataLayout(data_layout_str); const DenseTensor *running_mean = nullptr; const DenseTensor *running_variance = nullptr; if (use_global_stats) { running_mean = mean.get_ptr(); running_variance = variance.get_ptr(); } paddle::operators::NormDoubleGradFunctor<Context, T>(ctx, data_layout, &x, &scale, &y_grad, &saved_mean, &saved_variance, running_mean, running_variance, epsilon, use_global_stats, &x_grad_grad, &scale_grad_grad, &bias_grad_grad, x_grad, scale_grad, y_grad_grad); } } // namespace phi #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, phi::dtype::float16) {} PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(batch_norm_grad, GPU, ALL_LAYOUT, phi::BatchNormGradKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } PD_REGISTER_KERNEL(batch_norm_grad_raw, GPU, ALL_LAYOUT, phi::BatchNormGradRawKernel, float, double, phi::dtype::float16) { if (kernel_key.dtype() == phi::DataType::FLOAT16) { kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad } } #endif #ifdef PADDLE_WITH_HIP PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #else PD_REGISTER_KERNEL(batch_norm_grad_grad, GPU, ALL_LAYOUT, phi::BatchNormDoubleGradKernel, float, double) {} #endif
528c81eba1d0d39ca8b705efa4301dcd2c9bcbb1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2023, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "cuda/components/sorting.cuh" #include <memory> #include <random> #include <gtest/gtest.h> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/executor.hpp> #include "cuda/test/utils.hpp" namespace { using gko::kernels::cuda::bitonic_sort; using gko::kernels::cuda::config; constexpr int num_elements = 2048; constexpr int num_local = 4; constexpr auto num_threads = num_elements / num_local; __global__ void test_sort_shared(gko::int32* data) { gko::int32 local[num_local]; __shared__ gko::int32 sh_local[num_elements]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<num_elements, num_local>(local, sh_local); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } __global__ void test_sort_warp(gko::int32* data) { gko::int32 local[num_local]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<config::warp_size * num_local, num_local>( local, static_cast<gko::int32*>(nullptr)); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } class Sorting : public CudaTestFixture { protected: Sorting() : rng(123456), ref_shared(ref, num_elements), ref_warp(ref), ddata(exec) { // we want some duplicate elements std::uniform_int_distribution<gko::int32> dist(0, num_elements / 2); for (int i = 0; i < num_elements; ++i) { ref_shared.get_data()[i] = dist(rng); } ddata = gko::array<gko::int32>{exec, ref_shared}; ref_warp = ref_shared; std::sort(ref_shared.get_data(), ref_shared.get_data() + num_elements); std::sort(ref_warp.get_data(), ref_warp.get_data() + (config::warp_size * num_local)); } std::default_random_engine rng; gko::array<gko::int32> ref_shared; gko::array<gko::int32> ref_warp; gko::array<gko::int32> ddata; }; 
TEST_F(Sorting, CudaBitonicSortWarp) { hipLaunchKernelGGL(( test_sort_warp), dim3(1), dim3(config::warp_size), 0, exec->get_stream(), ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_warp.get_const_data(); GKO_ASSERT_ARRAY_EQ(ddata, ref_warp); } TEST_F(Sorting, CudaBitonicSortShared) { hipLaunchKernelGGL(( test_sort_shared), dim3(1), dim3(num_threads), 0, exec->get_stream(), ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_shared.get_const_data(); GKO_ASSERT_ARRAY_EQ(ddata, ref_shared); } } // namespace
528c81eba1d0d39ca8b705efa4301dcd2c9bcbb1.cu
/*******************************<GINKGO LICENSE>****************************** Copyright (c) 2017-2023, the Ginkgo authors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************<GINKGO LICENSE>*******************************/ #include "cuda/components/sorting.cuh" #include <memory> #include <random> #include <gtest/gtest.h> #include <ginkgo/core/base/array.hpp> #include <ginkgo/core/base/executor.hpp> #include "cuda/test/utils.hpp" namespace { using gko::kernels::cuda::bitonic_sort; using gko::kernels::cuda::config; constexpr int num_elements = 2048; constexpr int num_local = 4; constexpr auto num_threads = num_elements / num_local; __global__ void test_sort_shared(gko::int32* data) { gko::int32 local[num_local]; __shared__ gko::int32 sh_local[num_elements]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<num_elements, num_local>(local, sh_local); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } __global__ void test_sort_warp(gko::int32* data) { gko::int32 local[num_local]; for (int i = 0; i < num_local; ++i) { local[i] = data[threadIdx.x * num_local + i]; } bitonic_sort<config::warp_size * num_local, num_local>( local, static_cast<gko::int32*>(nullptr)); for (int i = 0; i < num_local; ++i) { data[threadIdx.x * num_local + i] = local[i]; } } class Sorting : public CudaTestFixture { protected: Sorting() : rng(123456), ref_shared(ref, num_elements), ref_warp(ref), ddata(exec) { // we want some duplicate elements std::uniform_int_distribution<gko::int32> dist(0, num_elements / 2); for (int i = 0; i < num_elements; ++i) { ref_shared.get_data()[i] = dist(rng); } ddata = gko::array<gko::int32>{exec, ref_shared}; ref_warp = ref_shared; std::sort(ref_shared.get_data(), ref_shared.get_data() + num_elements); std::sort(ref_warp.get_data(), ref_warp.get_data() + (config::warp_size * num_local)); } std::default_random_engine rng; gko::array<gko::int32> ref_shared; gko::array<gko::int32> ref_warp; gko::array<gko::int32> ddata; }; TEST_F(Sorting, CudaBitonicSortWarp) { test_sort_warp<<<1, config::warp_size, 0, 
exec->get_stream()>>>( ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_warp.get_const_data(); GKO_ASSERT_ARRAY_EQ(ddata, ref_warp); } TEST_F(Sorting, CudaBitonicSortShared) { test_sort_shared<<<1, num_threads, 0, exec->get_stream()>>>( ddata.get_data()); ddata.set_executor(ref); auto data_ptr = ddata.get_const_data(); auto ref_ptr = ref_shared.get_const_data(); GKO_ASSERT_ARRAY_EQ(ddata, ref_shared); } } // namespace
5b7292543c3485e576d14c79bdbdee3c44c2120d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#include "preprocess_hip.cuh"

__device__ float divergence(const float* pz, const float* py, const float* px,
                            long idx, long z, long y, long x,
                            long size2d, int3 shape, float3 spacing)
{
    float _div = 0.0f;
    long _idx;
    if ( z - 1 >= 0 ) {
        _idx = (z - 1) * size2d + y * shape.x + x;
        _div += (pz[idx] - pz[_idx]);
    } else {
        _div += pz[idx];
    }
    if ( y - 1 >= 0 ) {
        _idx = z * size2d + (y - 1) * shape.x + x;
        _div += (py[idx] - py[_idx]);
    } else {
        _div += py[idx];
    }
    if ( x - 1 >= 0 ) {
        _idx = z * size2d + y * shape.x + (x - 1);
        _div += (px[idx] - px[_idx]);
    } else {
        _div += px[idx];
    }
    return _div;
}

__device__ void gradient(const float* u, float* grad,
                         long idx, long z, long y, long x,
                         long size2d, int3 shape, float3 spacing)
{
    float uidx = u[idx];
    if ( z + 1 < shape.z ) {
        grad[0] = (u[(z+1)*size2d + y*shape.x + x] - uidx);
    }
    if ( y + 1 < shape.y ) {
        grad[1] = (u[z*size2d + (y+1)*shape.x + x] - uidx);
    }
    if ( x + 1 < shape.x ) {
        grad[2] = (u[z*size2d + y*shape.x + (x+1)] - uidx);
    }
}

__global__ void update_u(const float* f, const float* pz, const float* py, const float* px,
                         float* u, float tau, float lambda, int3 shape, float3 spacing)
{
    long idx = blockDim.x * blockIdx.x + threadIdx.x;
    long plane = shape.y * shape.x;
    if ( idx >= plane * shape.z )
        return;

    long t = idx % plane;
    long z = idx / plane;
    long y = t / shape.x;
    long x = t % shape.x;

    float _div = divergence(pz, py, px, idx, z, y, x, plane, shape, spacing);
    float r = u[idx] * (1.0f - tau) + tau * (f[idx] + lambda * _div);
    u[idx] = r;
}

__global__ void update_p(const float* u, float* pz, float* py, float* px,
                         float tau, int3 shape, float3 spacing)
{
    long idx = blockDim.x * blockIdx.x + threadIdx.x;
    long plane = shape.y * shape.x;
    if ( idx >= plane * shape.z )
        return;

    long t = idx % plane;
    long z = idx / plane;
    long y = t / shape.x;
    long x = t % shape.x;

    float grad[3], q[3];
    grad[0] = 0;
    grad[1] = 0;
    grad[2] = 0;
    gradient(u, grad, idx, z, y, x, plane, shape, spacing);

    q[0] = pz[idx] + tau * grad[0];
    q[1] = py[idx] + tau * grad[1];
    q[2] = px[idx] + tau * grad[2];

    float n = q[0] * q[0] + q[1] * q[1] + q[2] * q[2];
    float norm = fmaxf(1.0f, sqrtf(fmaxf(0, n)));

    pz[idx] = q[0] / norm;
    py[idx] = q[1] / norm;
    px[idx] = q[2] / norm;
}

// Main function
void tvdenoising_gpu(const float* src, float* dst, float lambda,
                     float3 spacing, int3 shape, int maxIter, float eps, int gpu)
{
    // Init params
    long total = shape.x * shape.y * shape.z;
    long mem_size = sizeof(float) * total;

    // Init cuda memory
    int max_threads = initCuda(gpu);

    float *d_src, *d_u, *d_px, *d_py, *d_pz;

    // F
    hipMalloc(&d_src, mem_size);
    hipMemset(d_src, 0, mem_size);
    hipMemcpy(d_src, src, mem_size, hipMemcpyHostToDevice);
    cudaCheckErrors("Memory Malloc and Memset: SRC");

    // U
    hipMalloc(&d_u, mem_size);
    hipMemset(d_u, 0, mem_size);
    hipMemcpy(d_u, src, mem_size, hipMemcpyHostToDevice);
    cudaCheckErrors("Memory Malloc and Memset: U");

    // PX
    hipMalloc(&d_px, mem_size);
    hipMemset(d_px, 0, mem_size);
    cudaCheckErrors("Memory Malloc and Memset: PX");

    // PY
    hipMalloc(&d_py, mem_size);
    hipMemset(d_py, 0, mem_size);
    cudaCheckErrors("Memory Malloc and Memset: PY");

    // PZ
    hipMalloc(&d_pz, mem_size);
    hipMemset(d_pz, 0, mem_size);
    cudaCheckErrors("Memory Malloc and Memset: PZ");

    // bdim and gdim
    dim3 block(max_threads, 1, 1);
    dim3 grid((total+max_threads-1)/max_threads, 1, 1);

    float tau2, tau1;
    for ( int i = 0; i < maxIter; i++ ) {
        tau2 = 0.3f + 0.02f * i;
        tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+i)));

        hipLaunchKernelGGL(( update_u), dim3(grid), dim3(block), 0, 0,
            d_src, d_pz, d_py, d_px, d_u, tau1, 1.0f/lambda, shape, spacing);

        hipLaunchKernelGGL(( update_p), dim3(grid), dim3(block), 0, 0,
            d_u, d_pz, d_py, d_px, tau2, shape, spacing);
    }
    cudaCheckErrors("TV minimization");

    hipMemcpy(dst, d_u, mem_size, hipMemcpyDeviceToHost);
    cudaCheckErrors("Copy result back");

    hipFree(d_src);
    hipFree(d_u);
    hipFree(d_pz);
    hipFree(d_py);
    hipFree(d_px);

    hipDeviceReset();
}
5b7292543c3485e576d14c79bdbdee3c44c2120d.cu
// http://gpu4vision.icg.tugraz.at/papers/2010/knoll.pdf#pub47
#include "preprocess.cuh"

__device__ float divergence(const float* pz, const float* py, const float* px,
                            long idx, long z, long y, long x,
                            long size2d, int3 shape, float3 spacing)
{
    float _div = 0.0f;
    long _idx;
    if ( z - 1 >= 0 ) {
        _idx = (z - 1) * size2d + y * shape.x + x;
        _div += (pz[idx] - pz[_idx]);
    } else {
        _div += pz[idx];
    }
    if ( y - 1 >= 0 ) {
        _idx = z * size2d + (y - 1) * shape.x + x;
        _div += (py[idx] - py[_idx]);
    } else {
        _div += py[idx];
    }
    if ( x - 1 >= 0 ) {
        _idx = z * size2d + y * shape.x + (x - 1);
        _div += (px[idx] - px[_idx]);
    } else {
        _div += px[idx];
    }
    return _div;
}

__device__ void gradient(const float* u, float* grad,
                         long idx, long z, long y, long x,
                         long size2d, int3 shape, float3 spacing)
{
    float uidx = u[idx];
    if ( z + 1 < shape.z ) {
        grad[0] = (u[(z+1)*size2d + y*shape.x + x] - uidx);
    }
    if ( y + 1 < shape.y ) {
        grad[1] = (u[z*size2d + (y+1)*shape.x + x] - uidx);
    }
    if ( x + 1 < shape.x ) {
        grad[2] = (u[z*size2d + y*shape.x + (x+1)] - uidx);
    }
}

__global__ void update_u(const float* f, const float* pz, const float* py, const float* px,
                         float* u, float tau, float lambda, int3 shape, float3 spacing)
{
    long idx = blockDim.x * blockIdx.x + threadIdx.x;
    long plane = shape.y * shape.x;
    if ( idx >= plane * shape.z )
        return;

    long t = idx % plane;
    long z = idx / plane;
    long y = t / shape.x;
    long x = t % shape.x;

    float _div = divergence(pz, py, px, idx, z, y, x, plane, shape, spacing);
    float r = u[idx] * (1.0f - tau) + tau * (f[idx] + lambda * _div);
    u[idx] = r;
}

__global__ void update_p(const float* u, float* pz, float* py, float* px,
                         float tau, int3 shape, float3 spacing)
{
    long idx = blockDim.x * blockIdx.x + threadIdx.x;
    long plane = shape.y * shape.x;
    if ( idx >= plane * shape.z )
        return;

    long t = idx % plane;
    long z = idx / plane;
    long y = t / shape.x;
    long x = t % shape.x;

    float grad[3], q[3];
    grad[0] = 0;
    grad[1] = 0;
    grad[2] = 0;
    gradient(u, grad, idx, z, y, x, plane, shape, spacing);

    q[0] = pz[idx] + tau * grad[0];
    q[1] = py[idx] + tau * grad[1];
    q[2] = px[idx] + tau * grad[2];

    float n = q[0] * q[0] + q[1] * q[1] + q[2] * q[2];
    float norm = fmaxf(1.0f, sqrtf(fmaxf(0, n)));

    pz[idx] = q[0] / norm;
    py[idx] = q[1] / norm;
    px[idx] = q[2] / norm;
}

// Main function
void tvdenoising_gpu(const float* src, float* dst, float lambda,
                     float3 spacing, int3 shape, int maxIter, float eps, int gpu)
{
    // Init params
    long total = shape.x * shape.y * shape.z;
    long mem_size = sizeof(float) * total;

    // Init cuda memory
    int max_threads = initCuda(gpu);

    float *d_src, *d_u, *d_px, *d_py, *d_pz;

    // F
    cudaMalloc(&d_src, mem_size);
    cudaMemset(d_src, 0, mem_size);
    cudaMemcpy(d_src, src, mem_size, cudaMemcpyHostToDevice);
    cudaCheckErrors("Memory Malloc and Memset: SRC");

    // U
    cudaMalloc(&d_u, mem_size);
    cudaMemset(d_u, 0, mem_size);
    cudaMemcpy(d_u, src, mem_size, cudaMemcpyHostToDevice);
    cudaCheckErrors("Memory Malloc and Memset: U");

    // PX
    cudaMalloc(&d_px, mem_size);
    cudaMemset(d_px, 0, mem_size);
    cudaCheckErrors("Memory Malloc and Memset: PX");

    // PY
    cudaMalloc(&d_py, mem_size);
    cudaMemset(d_py, 0, mem_size);
    cudaCheckErrors("Memory Malloc and Memset: PY");

    // PZ
    cudaMalloc(&d_pz, mem_size);
    cudaMemset(d_pz, 0, mem_size);
    cudaCheckErrors("Memory Malloc and Memset: PZ");

    // bdim and gdim
    dim3 block(max_threads, 1, 1);
    dim3 grid((total+max_threads-1)/max_threads, 1, 1);

    float tau2, tau1;
    for ( int i = 0; i < maxIter; i++ ) {
        tau2 = 0.3f + 0.02f * i;
        tau1 = (1.f/tau2) * ((1.f/6.f) - (5.f/(15.f+i)));

        update_u<<<grid, block>>>(d_src, d_pz, d_py, d_px, d_u,
                                  tau1, 1.0f/lambda, shape, spacing);

        update_p<<<grid, block>>>(d_u, d_pz, d_py, d_px,
                                  tau2, shape, spacing);
    }
    cudaCheckErrors("TV minimization");

    cudaMemcpy(dst, d_u, mem_size, cudaMemcpyDeviceToHost);
    cudaCheckErrors("Copy result back");

    cudaFree(d_src);
    cudaFree(d_u);
    cudaFree(d_pz);
    cudaFree(d_py);
    cudaFree(d_px);

    cudaDeviceReset();
}
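For orientation, a minimal host-side sketch of how the tvdenoising_gpu entry point defined above might be called; the volume shape, spacing, lambda, iteration count, and the wrapper name denoise_volume_example are illustrative assumptions, not part of the original file pair.

#include <cuda_runtime.h>

// Hypothetical caller: src and dst are host buffers holding shape.x * shape.y * shape.z floats.
void denoise_volume_example(const float* src, float* dst)
{
    int3 shape     = make_int3(128, 128, 64);     // assumed volume extents (x, y, z)
    float3 spacing = make_float3(1.f, 1.f, 1.f);  // assumed isotropic voxel spacing
    float lambda   = 0.15f;                       // assumed regularisation weight
    int max_iter   = 100;                         // fixed number of update_u / update_p sweeps
    float eps      = 1e-4f;                       // tolerance argument (not checked inside the loop above)
    tvdenoising_gpu(src, dst, lambda, spacing, shape, max_iter, eps, /*gpu=*/0);
}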
1c84203bf98e975385753335922431d4ced572b3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C" {
}
const double TOLERANCE = 1.0e-10;

/* cgsolver with CUDA support
   solves the linear equation A*x = b where A is of size m x n */
__global__ void mvm_gpu(double *A_cuda, double *X_cuda, double *Y_cuda,
                        int *m_locals_cuda, int *A_all_pos_cuda, int n, int nthreads)
{
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    if (t < nthreads) {
        for (int i = A_all_pos_cuda[t]; i < A_all_pos_cuda[t] + m_locals_cuda[t]; ++i) {
            Y_cuda[i] = 0.;
            for (int j = 0; j < n; ++j)
                Y_cuda[i] += A_cuda[i * n + j] * X_cuda[j];
        }
    }
}
1c84203bf98e975385753335922431d4ced572b3.cu
#include "includes.h" extern "C" { } const double TOLERANCE = 1.0e-10; /* cgsolver with CUDA support solves the linear equation A*x = b where A is of size m x n */ __global__ void mvm_gpu(double *A_cuda, double *X_cuda, double *Y_cuda, int *m_locals_cuda, int *A_all_pos_cuda, int n, int nthreads){ int t = blockIdx.x * blockDim.x + threadIdx.x; if (t < nthreads){ for (int i=A_all_pos_cuda[t]; i<A_all_pos_cuda[t]+m_locals_cuda[t]; ++i) { Y_cuda[i] = 0.; for (int j=0; j<n; ++j) Y_cuda[i] += A_cuda[i * n + j] * X_cuda[j]; } } }
b22caef8b7cd6aff984b2207e4f9d1d07ff4655c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void AES_decrypt(const uint *ct, uint *pt, uint *rdk, uint Nr) { uint s0, s1, s2, s3, t0, t1, t2, t3; /* * map byte array block to cipher state * and add initial round key: */ s0 = ct[0] ^ rdk[0]; s1 = ct[1] ^ rdk[1]; s2 = ct[2] ^ rdk[2]; s3 = ct[3] ^ rdk[3]; /* round 1: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[ 4]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[ 5]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[ 6]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[ 7]; /* round 2: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[ 8]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[ 9]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[10]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[11]; /* round 3: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[12]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[13]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[14]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[15]; /* round 4: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[16]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[17]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[18]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[19]; /* round 5: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[20]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[21]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[22]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[23]; /* round 6: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[24]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[25]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[26]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[27]; /* round 7: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[28]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[29]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[30]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[31]; /* round 8: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[32]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[33]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ 
cTd3[t3 & 0xff] ^ rdk[34]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[35]; /* round 9: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[36]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[37]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[38]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[39]; if (Nr > 10) { /* round 10: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[40]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[41]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[42]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[43]; /* round 11: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[44]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[45]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[46]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[47]; if (Nr > 12) { /* round 12: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[48]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[49]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[50]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[51]; /* round 13: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[52]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[53]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[54]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[55]; } } rdk += Nr << 2; /* * apply last round and * map cipher state to byte array block: */ pt[0] = (cTd4[(t0 >> 24) ] & 0xff000000) ^ (cTd4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t1 ) & 0xff] & 0x000000ff) ^ rdk[0]; pt[1] = (cTd4[(t1 >> 24) ] & 0xff000000) ^ (cTd4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t2 ) & 0xff] & 0x000000ff) ^ rdk[1]; pt[2] = (cTd4[(t2 >> 24) ] & 0xff000000) ^ (cTd4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t3 ) & 0xff] & 0x000000ff) ^ rdk[2]; pt[3] = (cTd4[(t3 >> 24) ] & 0xff000000) ^ (cTd4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t0 ) & 0xff] & 0x000000ff) ^ rdk[3]; }
b22caef8b7cd6aff984b2207e4f9d1d07ff4655c.cu
__global__ void AES_decrypt(const uint *ct, uint *pt, uint *rdk, uint Nr) { uint s0, s1, s2, s3, t0, t1, t2, t3; /* * map byte array block to cipher state * and add initial round key: */ s0 = ct[0] ^ rdk[0]; s1 = ct[1] ^ rdk[1]; s2 = ct[2] ^ rdk[2]; s3 = ct[3] ^ rdk[3]; /* round 1: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[ 4]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[ 5]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[ 6]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[ 7]; /* round 2: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[ 8]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[ 9]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[10]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[11]; /* round 3: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[12]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[13]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[14]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[15]; /* round 4: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[16]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[17]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[18]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[19]; /* round 5: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[20]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[21]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[22]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[23]; /* round 6: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[24]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[25]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[26]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[27]; /* round 7: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[28]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[29]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[30]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[31]; /* round 8: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[32]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[33]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[34]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) 
& 0xff] ^ cTd3[t0 & 0xff] ^ rdk[35]; /* round 9: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[36]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[37]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[38]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[39]; if (Nr > 10) { /* round 10: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[40]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[41]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[42]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[43]; /* round 11: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[44]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[45]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[46]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[47]; if (Nr > 12) { /* round 12: */ s0 = cTd0[t0 >> 24] ^ cTd1[(t3 >> 16) & 0xff] ^ cTd2[(t2 >> 8) & 0xff] ^ cTd3[t1 & 0xff] ^ rdk[48]; s1 = cTd0[t1 >> 24] ^ cTd1[(t0 >> 16) & 0xff] ^ cTd2[(t3 >> 8) & 0xff] ^ cTd3[t2 & 0xff] ^ rdk[49]; s2 = cTd0[t2 >> 24] ^ cTd1[(t1 >> 16) & 0xff] ^ cTd2[(t0 >> 8) & 0xff] ^ cTd3[t3 & 0xff] ^ rdk[50]; s3 = cTd0[t3 >> 24] ^ cTd1[(t2 >> 16) & 0xff] ^ cTd2[(t1 >> 8) & 0xff] ^ cTd3[t0 & 0xff] ^ rdk[51]; /* round 13: */ t0 = cTd0[s0 >> 24] ^ cTd1[(s3 >> 16) & 0xff] ^ cTd2[(s2 >> 8) & 0xff] ^ cTd3[s1 & 0xff] ^ rdk[52]; t1 = cTd0[s1 >> 24] ^ cTd1[(s0 >> 16) & 0xff] ^ cTd2[(s3 >> 8) & 0xff] ^ cTd3[s2 & 0xff] ^ rdk[53]; t2 = cTd0[s2 >> 24] ^ cTd1[(s1 >> 16) & 0xff] ^ cTd2[(s0 >> 8) & 0xff] ^ cTd3[s3 & 0xff] ^ rdk[54]; t3 = cTd0[s3 >> 24] ^ cTd1[(s2 >> 16) & 0xff] ^ cTd2[(s1 >> 8) & 0xff] ^ cTd3[s0 & 0xff] ^ rdk[55]; } } rdk += Nr << 2; /* * apply last round and * map cipher state to byte array block: */ pt[0] = (cTd4[(t0 >> 24) ] & 0xff000000) ^ (cTd4[(t3 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t2 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t1 ) & 0xff] & 0x000000ff) ^ rdk[0]; pt[1] = (cTd4[(t1 >> 24) ] & 0xff000000) ^ (cTd4[(t0 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t3 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t2 ) & 0xff] & 0x000000ff) ^ rdk[1]; pt[2] = (cTd4[(t2 >> 24) ] & 0xff000000) ^ (cTd4[(t1 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t0 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t3 ) & 0xff] & 0x000000ff) ^ rdk[2]; pt[3] = (cTd4[(t3 >> 24) ] & 0xff000000) ^ (cTd4[(t2 >> 16) & 0xff] & 0x00ff0000) ^ (cTd4[(t1 >> 8) & 0xff] & 0x0000ff00) ^ (cTd4[(t0 ) & 0xff] & 0x000000ff) ^ rdk[3]; }
fea973355de779c742508d72f7a3d0cf944f1ae3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "vec_add_64.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *A = NULL; hipMalloc(&A, XSIZE*YSIZE); const double *B = NULL; hipMalloc(&B, XSIZE*YSIZE); double *C = NULL; hipMalloc(&C, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( vec_add_64), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( vec_add_64), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( vec_add_64), dim3(gridBlock),dim3(threadBlock), 0, 0, A,B,C,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
fea973355de779c742508d72f7a3d0cf944f1ae3.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "vec_add_64.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; const double *A = NULL; cudaMalloc(&A, XSIZE*YSIZE); const double *B = NULL; cudaMalloc(&B, XSIZE*YSIZE); double *C = NULL; cudaMalloc(&C, XSIZE*YSIZE); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); vec_add_64<<<gridBlock,threadBlock>>>(A,B,C,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { vec_add_64<<<gridBlock,threadBlock>>>(A,B,C,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { vec_add_64<<<gridBlock,threadBlock>>>(A,B,C,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
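The vec_add_64 pair above shows the mechanical renames hipify applies throughout this collection: runtime calls gain a hip prefix and triple-chevron launches become hipLaunchKernelGGL with explicit dim3 arguments. Below is a minimal side-by-side sketch of that mapping, using a hypothetical dummy_kernel that is not part of the files above (CUDA form shown, HIP equivalents in comments):

#include <cuda_runtime.h>

__global__ void dummy_kernel(float *out) {       // kernel bodies are left unchanged by hipify
    out[threadIdx.x] = (float)threadIdx.x;
}

int main() {
    float *d_out = nullptr;
    cudaMalloc(&d_out, 256 * sizeof(float));     // hip: hipMalloc(&d_out, 256 * sizeof(float));
    dummy_kernel<<<1, 256>>>(d_out);             // hip: hipLaunchKernelGGL(dummy_kernel, dim3(1), dim3(256), 0, 0, d_out);
    cudaDeviceSynchronize();                     // hip: hipDeviceSynchronize();
    cudaFree(d_out);                             // hip: hipFree(d_out);
    return 0;
}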
07f6368b38d680a5cb2fa59d024a4e6408cdc5d9.hip
// !!! This is a file automatically generated by hipify!!! #include <chrono> #include <iostream> #include <iomanip> #include <hip/hip_runtime.h> #include <cudnn.h> #include <cuda_device_runtime_api.h> void validate_call(const hipError_t& err) { if (err != hipSuccess) { std::cerr << "CUDA error occurred: " << err << " " << hipGetErrorString(err) << std::endl; std::exit(1); } } void validate_call(const cudnnStatus_t& err) { if (err != CUDNN_STATUS_SUCCESS) { std::cerr << "cuDNN error occurred: " << err << " " << cudnnGetErrorString(err) << std::endl; std::exit(1); } } void log(int verbose, std::ostream& ostream, std::string str) { if (verbose) ostream << str << std::endl; } template <typename T> __global__ void fill_with_constant(T *px, T k) { int tid = threadIdx.x + blockIdx.x * blockDim.x; px[tid] = k; } int main() { // Prerequisits int verbose = 2; // Data types and formats auto inputTensorFormat = CUDNN_TENSOR_NCHW_VECT_C; auto inputDataType = CUDNN_DATA_INT8x32; auto filterDataType = CUDNN_DATA_INT8x32; auto filterTensorFormat = CUDNN_TENSOR_NCHW_VECT_C; auto convAccumulatorDataType = CUDNN_DATA_INT32; auto outDataType = CUDNN_DATA_FLOAT; auto outputTensorFormat = CUDNN_TENSOR_NCHW; // Tensors auto numFilters = 32; auto filterH = 3; auto filterW = 3; auto B = 1; auto C = 32; auto H = 3; auto W = 3; size_t padH, padW; padH = padW = 0; size_t strideH, strideW, dilationH, dilationW; strideH = strideW = dilationH = dilationW = 1; const int8_t CONSTANT_DATA = 127; validate_call(hipSetDeviceFlags(hipDeviceScheduleBlockingSync)); // Create cudnn cudnnHandle_t cudnn; validate_call(cudnnCreate(&cudnn)); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Cudnn created"); // Create input tensor cudnnTensorDescriptor_t inputDescriptor; validate_call(cudnnCreateTensorDescriptor(&inputDescriptor)); validate_call(cudnnSetTensor4dDescriptor(inputDescriptor, inputTensorFormat, inputDataType, B, C, H, W)); int8_t *inputData; validate_call(hipMalloc(&inputData, B * C * H * W * sizeof(int8_t))); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Input tensor allocated"); // Create filter descriptor cudnnFilterDescriptor_t filterDescriptor; validate_call(cudnnCreateFilterDescriptor(&filterDescriptor)); validate_call(cudnnSetFilter4dDescriptor(filterDescriptor, filterDataType, filterTensorFormat, numFilters, C, filterH, filterW)); int8_t *filterData; validate_call(hipMalloc(&filterData, numFilters * C * filterH * filterW * sizeof(int8_t))); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Filter tensor allocated"); // Convolution descriptor cudnnConvolutionDescriptor_t convDescriptor; validate_call(cudnnCreateConvolutionDescriptor(&convDescriptor)); validate_call(cudnnSetConvolution2dDescriptor(convDescriptor, padH, padW, strideH, strideW, dilationH, dilationW, CUDNN_CONVOLUTION, convAccumulatorDataType)); validate_call(cudnnSetConvolutionMathType(convDescriptor, CUDNN_TENSOR_OP_MATH)); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Convolution descriptor created"); int outB, outC, outH, outW; validate_call(cudnnGetConvolution2dForwardOutputDim(convDescriptor, inputDescriptor, filterDescriptor, &outB, &outC, &outH, &outW)); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Computed convolution output shape"); log(verbose, std::clog, std::to_string(outB)+"x"+std::to_string(outC)+"x"+std::to_string(outH)+"x"+std::to_string(outW)); // Output tensor cudnnTensorDescriptor_t outDescriptor; 
validate_call(cudnnCreateTensorDescriptor(&outDescriptor)); validate_call(cudnnSetTensor4dDescriptor(outDescriptor, outputTensorFormat, outDataType, outB, outC, outH, outW)); int8_t *outData; validate_call(hipMalloc(&outData, outB * outC * outH * outW * sizeof(float))); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Output tensor allocated"); // Algorithm cudnnConvolutionFwdAlgoPerf_t convAlgo; int foundAlgo; validate_call(cudnnGetConvolutionForwardAlgorithm_v7( cudnn, inputDescriptor, filterDescriptor, convDescriptor, outDescriptor, 1, &foundAlgo, &convAlgo)); if (foundAlgo == 0 || convAlgo.determinism == CUDNN_NON_DETERMINISTIC || convAlgo.status != CUDNN_STATUS_SUCCESS) { log(verbose, std::clog, "Best algorithm is non deterministic or not found. Terminating."); throw std::runtime_error("Failed to find cudnn algorithm for convolution."); } validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Best algorithm is chosen " + std::to_string(convAlgo.algo) + " with math " + std::to_string(convAlgo.mathType)); if (convAlgo.mathType == CUDNN_TENSOR_OP_MATH) log(verbose, std::clog, "Using Tensor CORES!!!"); // Workspace size_t workspaceSize = convAlgo.memory; void *workspaceData; if (workspaceSize != 0){} validate_call(hipMalloc(&workspaceData, workspaceSize)); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Workspace is allocated"); // Performing convolution float alpha = 0.5f; float beta = 0.0f; // Dummy values hipLaunchKernelGGL(( ::fill_with_constant), dim3(numFilters*filterW * filterH), dim3(C), 0, 0, filterData, (int8_t)CONSTANT_DATA); hipLaunchKernelGGL(( ::fill_with_constant), dim3(W * H), dim3(B * C), 0, 0, inputData, (int8_t)CONSTANT_DATA); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Filled with dummy values"); validate_call(hipDeviceSynchronize()); auto begin = std::chrono::high_resolution_clock::now(); validate_call(cudnnConvolutionForward( cudnn, &alpha, inputDescriptor, inputData, filterDescriptor, filterData, convDescriptor, convAlgo.algo, workspaceData, workspaceSize, &beta, outDescriptor, outData)); validate_call(hipDeviceSynchronize()); auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin); auto cpuOutData = new float[outB * outC * outH * outW * sizeof(float)]; validate_call(hipMemcpy(cpuOutData, outData, outB * outC * outH * outW * sizeof(float), hipMemcpyDeviceToHost)); validate_call(hipDeviceSynchronize()); std::clog << "RESULT : " << std::endl; for (size_t b=0; b < outB; ++b) { for (size_t c=0; c < outC; ++c) { for (size_t h=0; h < outH; ++h) for (size_t w=0; w < outW; ++w) // std::clog << std::setw(4) << static_cast<int>(cpuOutData[b*(outH*outW*outC) + c*(outW*outH) + h*outW + w]) << " "; std::clog << std::setw(4) << cpuOutData[b*(outH*outW*outC) + c*(outW*outH) + h*outW + w] << " "; } std::clog << std::endl; } std::clog << "END RESULT" << std::endl; delete[] cpuOutData; log(verbose, std::clog, "Finalizing"); // Finalizing if (workspaceSize != 0) validate_call(hipFree(workspaceData)); validate_call(hipFree(outData)); validate_call(cudnnDestroyTensorDescriptor(outDescriptor)); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Out tensor destroyed"); validate_call(cudnnDestroyConvolutionDescriptor(convDescriptor)); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Conv descriptor destroyed"); validate_call(hipFree(filterData)); validate_call(cudnnDestroyFilterDescriptor(filterDescriptor)); 
validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Filter tensor destroyed"); validate_call(hipFree(inputData)); validate_call(cudnnDestroyTensorDescriptor(inputDescriptor)); log(verbose, std::clog, "Input tensor destroyed"); validate_call(cudnnDestroy(cudnn)); validate_call(hipDeviceSynchronize()); log(verbose, std::clog, "Cudnn destroyed"); log(verbose, std::clog, "Elapsed time is " + std::to_string(elapsed.count())); return EXIT_SUCCESS; }
07f6368b38d680a5cb2fa59d024a4e6408cdc5d9.cu
#include <chrono> #include <iostream> #include <iomanip> #include <cuda.h> #include <cudnn.h> #include <cuda_device_runtime_api.h> void validate_call(const cudaError_t& err) { if (err != cudaSuccess) { std::cerr << "CUDA error occurred: " << err << " " << cudaGetErrorString(err) << std::endl; std::exit(1); } } void validate_call(const cudnnStatus_t& err) { if (err != CUDNN_STATUS_SUCCESS) { std::cerr << "cuDNN error occurred: " << err << " " << cudnnGetErrorString(err) << std::endl; std::exit(1); } } void log(int verbose, std::ostream& ostream, std::string str) { if (verbose) ostream << str << std::endl; } template <typename T> __global__ void fill_with_constant(T *px, T k) { int tid = threadIdx.x + blockIdx.x * blockDim.x; px[tid] = k; } int main() { // Prerequisits int verbose = 2; // Data types and formats auto inputTensorFormat = CUDNN_TENSOR_NCHW_VECT_C; auto inputDataType = CUDNN_DATA_INT8x32; auto filterDataType = CUDNN_DATA_INT8x32; auto filterTensorFormat = CUDNN_TENSOR_NCHW_VECT_C; auto convAccumulatorDataType = CUDNN_DATA_INT32; auto outDataType = CUDNN_DATA_FLOAT; auto outputTensorFormat = CUDNN_TENSOR_NCHW; // Tensors auto numFilters = 32; auto filterH = 3; auto filterW = 3; auto B = 1; auto C = 32; auto H = 3; auto W = 3; size_t padH, padW; padH = padW = 0; size_t strideH, strideW, dilationH, dilationW; strideH = strideW = dilationH = dilationW = 1; const int8_t CONSTANT_DATA = 127; validate_call(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync)); // Create cudnn cudnnHandle_t cudnn; validate_call(cudnnCreate(&cudnn)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Cudnn created"); // Create input tensor cudnnTensorDescriptor_t inputDescriptor; validate_call(cudnnCreateTensorDescriptor(&inputDescriptor)); validate_call(cudnnSetTensor4dDescriptor(inputDescriptor, inputTensorFormat, inputDataType, B, C, H, W)); int8_t *inputData; validate_call(cudaMalloc(&inputData, B * C * H * W * sizeof(int8_t))); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Input tensor allocated"); // Create filter descriptor cudnnFilterDescriptor_t filterDescriptor; validate_call(cudnnCreateFilterDescriptor(&filterDescriptor)); validate_call(cudnnSetFilter4dDescriptor(filterDescriptor, filterDataType, filterTensorFormat, numFilters, C, filterH, filterW)); int8_t *filterData; validate_call(cudaMalloc(&filterData, numFilters * C * filterH * filterW * sizeof(int8_t))); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Filter tensor allocated"); // Convolution descriptor cudnnConvolutionDescriptor_t convDescriptor; validate_call(cudnnCreateConvolutionDescriptor(&convDescriptor)); validate_call(cudnnSetConvolution2dDescriptor(convDescriptor, padH, padW, strideH, strideW, dilationH, dilationW, CUDNN_CONVOLUTION, convAccumulatorDataType)); validate_call(cudnnSetConvolutionMathType(convDescriptor, CUDNN_TENSOR_OP_MATH)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Convolution descriptor created"); int outB, outC, outH, outW; validate_call(cudnnGetConvolution2dForwardOutputDim(convDescriptor, inputDescriptor, filterDescriptor, &outB, &outC, &outH, &outW)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Computed convolution output shape"); log(verbose, std::clog, std::to_string(outB)+"x"+std::to_string(outC)+"x"+std::to_string(outH)+"x"+std::to_string(outW)); // Output tensor cudnnTensorDescriptor_t outDescriptor; validate_call(cudnnCreateTensorDescriptor(&outDescriptor)); 
validate_call(cudnnSetTensor4dDescriptor(outDescriptor, outputTensorFormat, outDataType, outB, outC, outH, outW)); int8_t *outData; validate_call(cudaMalloc(&outData, outB * outC * outH * outW * sizeof(float))); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Output tensor allocated"); // Algorithm cudnnConvolutionFwdAlgoPerf_t convAlgo; int foundAlgo; validate_call(cudnnGetConvolutionForwardAlgorithm_v7( cudnn, inputDescriptor, filterDescriptor, convDescriptor, outDescriptor, 1, &foundAlgo, &convAlgo)); if (foundAlgo == 0 || convAlgo.determinism == CUDNN_NON_DETERMINISTIC || convAlgo.status != CUDNN_STATUS_SUCCESS) { log(verbose, std::clog, "Best algorithm is non deterministic or not found. Terminating."); throw std::runtime_error("Failed to find cudnn algorithm for convolution."); } validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Best algorithm is chosen " + std::to_string(convAlgo.algo) + " with math " + std::to_string(convAlgo.mathType)); if (convAlgo.mathType == CUDNN_TENSOR_OP_MATH) log(verbose, std::clog, "Using Tensor CORES!!!"); // Workspace size_t workspaceSize = convAlgo.memory; void *workspaceData; if (workspaceSize != 0){} validate_call(cudaMalloc(&workspaceData, workspaceSize)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Workspace is allocated"); // Performing convolution float alpha = 0.5f; float beta = 0.0f; // Dummy values ::fill_with_constant<<<numFilters*filterW * filterH, C>>>(filterData, (int8_t)CONSTANT_DATA); ::fill_with_constant<<<W * H, B * C>>>(inputData, (int8_t)CONSTANT_DATA); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Filled with dummy values"); validate_call(cudaDeviceSynchronize()); auto begin = std::chrono::high_resolution_clock::now(); validate_call(cudnnConvolutionForward( cudnn, &alpha, inputDescriptor, inputData, filterDescriptor, filterData, convDescriptor, convAlgo.algo, workspaceData, workspaceSize, &beta, outDescriptor, outData)); validate_call(cudaDeviceSynchronize()); auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::high_resolution_clock::now() - begin); auto cpuOutData = new float[outB * outC * outH * outW * sizeof(float)]; validate_call(cudaMemcpy(cpuOutData, outData, outB * outC * outH * outW * sizeof(float), cudaMemcpyDeviceToHost)); validate_call(cudaDeviceSynchronize()); std::clog << "RESULT : " << std::endl; for (size_t b=0; b < outB; ++b) { for (size_t c=0; c < outC; ++c) { for (size_t h=0; h < outH; ++h) for (size_t w=0; w < outW; ++w) // std::clog << std::setw(4) << static_cast<int>(cpuOutData[b*(outH*outW*outC) + c*(outW*outH) + h*outW + w]) << " "; std::clog << std::setw(4) << cpuOutData[b*(outH*outW*outC) + c*(outW*outH) + h*outW + w] << " "; } std::clog << std::endl; } std::clog << "END RESULT" << std::endl; delete[] cpuOutData; log(verbose, std::clog, "Finalizing"); // Finalizing if (workspaceSize != 0) validate_call(cudaFree(workspaceData)); validate_call(cudaFree(outData)); validate_call(cudnnDestroyTensorDescriptor(outDescriptor)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Out tensor destroyed"); validate_call(cudnnDestroyConvolutionDescriptor(convDescriptor)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Conv descriptor destroyed"); validate_call(cudaFree(filterData)); validate_call(cudnnDestroyFilterDescriptor(filterDescriptor)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Filter tensor destroyed"); validate_call(cudaFree(inputData)); 
validate_call(cudnnDestroyTensorDescriptor(inputDescriptor)); log(verbose, std::clog, "Input tensor destroyed"); validate_call(cudnnDestroy(cudnn)); validate_call(cudaDeviceSynchronize()); log(verbose, std::clog, "Cudnn destroyed"); log(verbose, std::clog, "Elapsed time is " + std::to_string(elapsed.count())); return EXIT_SUCCESS; }
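Both halves of the cuDNN convolution pair above route every cudaError_t/hipError_t and cudnnStatus_t through validate_call overloads that print the error string and exit. A minimal sketch of the same check-and-abort idiom, reduced to the CUDA runtime side only; the check_cuda and CHECK_CUDA names are illustrative, not taken from the files:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with file/line context when a runtime call fails, mirroring validate_call above.
static void check_cuda(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        std::fprintf(stderr, "%s:%d CUDA error %d: %s\n",
                     file, line, (int)err, cudaGetErrorString(err));
        std::exit(EXIT_FAILURE);
    }
}
#define CHECK_CUDA(call) check_cuda((call), __FILE__, __LINE__)

int main() {
    void *buf = nullptr;
    CHECK_CUDA(cudaMalloc(&buf, 1 << 20));   // aborts with location info if the allocation fails
    CHECK_CUDA(cudaFree(buf));
    return 0;
}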
5cca0a70ebf53defdfeea5b28ade40bd488635ec.hip
// !!! This is a file automatically generated by hipify!!! /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal z -> c d s */ #include <stdio.h> #include <hip/hip_runtime_api.h> #include <rocblas.h> // include before magma.h #include "magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m #define fetch_x_A(i) (((i)<m*m)?Aval[i]:0) #define fetch_x_B(i) (((i)<m*m)?B[i]:0) // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel32( int m, int n, int kblocks, magmaDouble_ptr *Avals, magmaDouble_ptr *Bval, magmaDouble_ptr *Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = __mul24( ty2, lda) + tx2 ; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j<64; j+=16){ for(int y=tx2; y<64; y+=16){ Abs[y][j] = fetch_x_A(trackA + y-tx2) ; } trackA += __mul24( 16, m); } for(int k=0; k<kblocks; k++){ B = Bval[k]; int trackB = tx2+ __mul24( ty2 * 16, ldb ); // Prefetch part of B #pragma unroll for(int y=0; y<4; y++){ Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ; } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<m-16; k1+=16) { trackB += 16; #pragma unroll for( int y=0; y<4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++){ Axs[y] = Abs[tx2+y*16][j1+k1] ; } #pragma unroll for( int y=0; y<4; y++){ Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! 
} // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + __mul24 (ty2 ,ldc); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0;y<4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0;y<4;y++, gy+=16) { int gx = tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n){ C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel64( int m, int n, int kblocks, magmaDouble_ptr *Avals, magmaDouble_ptr *Bval, magmaDouble_ptr *Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = __mul24( ty2, lda) + tx2 ; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j<64; j+=16){ for(int y=tx2; y<64; y+=16){ Abs[y][j] = fetch_x_A(trackA + y-tx2) ; } trackA += __mul24( 16, m); } for(int k=0; k<kblocks; k++){ B = Bval[k]; int trackB = tx2+ __mul24( ty2 * 4, ldb ); // Prefetch part of B #pragma unroll for(int y=0; y<4; y++){ Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ; } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<m-16; k1+=16) { trackB += 16; #pragma unroll for( int y=0; y<4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++){ Axs[y] = Abs[tx2+y*16][j1+k1] ; } #pragma unroll for( int y=0; y<4; y++){ Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! } // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + __mul24 (ty2 ,ldc); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0;y<4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0;y<4;y++, gy+=16) { int gx = tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n){ C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_brows magma_int_t number of block rows @param[in] kblocks magma_int_t number of blocks in row @param[in] dA magmaDoubleComplex** input blocks of matrix A @param[in] dB magmaDoubleComplex** input blocks of matrix B @param[in] dC magmaDoubleComplex** output blocks of matrix C @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrluegemm( magma_int_t size_b, magma_int_t num_brows, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaDoubleComplex_ptr *dB, magmaDoubleComplex_ptr *dC, magma_queue_t queue ) { #if defined(PRECISION_d) magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("error: magma_zbcsrluegemm needs a CUDA architecture" " with at least 48K shared memory (Fermi +).\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); } else { dim3 threads( 64, 4 ); dim3 grid(1, 1, num_brows); hipLaunchKernelGGL(( zbcsr_gemm_kernel64), dim3(grid), dim3(threads), 0, queue , size_b, size_b, kblocks, dA, dB, dC ); } #else printf("error: currently only supported for double precision.\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); #endif return MAGMA_SUCCESS; }
5cca0a70ebf53defdfeea5b28ade40bd488635ec.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal z -> c d s */ #include <stdio.h> #include <cuda_runtime_api.h> #include <cublas_v2.h> // include before magma.h #include "magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif #define PRECISION_z #define Ablockinfo(i,j) Ablockinfo[(i)*c_blocks + (j)] #define Bblockinfo(i,j) Bblockinfo[(i)*c_blocks + (j)] #define A(i,j) ((Ablockinfo(i,j)-1)*size_b*size_b) #define B(i,j) ((Bblockinfo(i,j)-1)*size_b*size_b) //============================================================ #define ldb m #define lda m #define ldc m #define fetch_x_A(i) (((i)<m*m)?Aval[i]:0) #define fetch_x_B(i) (((i)<m*m)?B[i]:0) // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel32( int m, int n, int kblocks, magmaDouble_ptr *Avals, magmaDouble_ptr *Bval, magmaDouble_ptr *Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = __mul24( ty2, lda) + tx2 ; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j<64; j+=16){ for(int y=tx2; y<64; y+=16){ Abs[y][j] = fetch_x_A(trackA + y-tx2) ; } trackA += __mul24( 16, m); } for(int k=0; k<kblocks; k++){ B = Bval[k]; int trackB = tx2+ __mul24( ty2 * 16, ldb ); // Prefetch part of B #pragma unroll for(int y=0; y<4; y++){ Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ; } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<m-16; k1+=16) { trackB += 16; #pragma unroll for( int y=0; y<4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++){ Axs[y] = Abs[tx2+y*16][j1+k1] ; } #pragma unroll for( int y=0; y<4; y++){ Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! 
} // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + __mul24 (ty2 ,ldc); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0;y<4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0;y<4;y++, gy+=16) { int gx = tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n){ C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } // every multiprocessor handles one BCSR-block __global__ void zbcsr_gemm_kernel64( int m, int n, int kblocks, magmaDouble_ptr *Avals, magmaDouble_ptr *Bval, magmaDouble_ptr *Cval) { #if (__CUDA_ARCH__ >= 200) #if defined(PRECISION_d) const int tx = threadIdx.x; const int ty = threadIdx.y; const int idt = ty * 64 + tx; const int tx2 = idt%16; const int ty2 = idt/16; double xxB[4]; magmaDouble_ptr B; int trackA = __mul24( ty2, lda) + tx2 ; magmaDouble_ptr Aval = Avals[blockIdx.z]; __shared__ double Abs[64][65]; __shared__ double Bb[16][65]; for(int j=ty2; j<64; j+=16){ for(int y=tx2; y<64; y+=16){ Abs[y][j] = fetch_x_A(trackA + y-tx2) ; } trackA += __mul24( 16, m); } for(int k=0; k<kblocks; k++){ B = Bval[k]; int trackB = tx2+ __mul24( ty2 * 4, ldb ); // Prefetch part of B #pragma unroll for(int y=0; y<4; y++){ Bb[tx2][ty2*4+y] = fetch_x_B( trackB + y * ldb) ; } __syncthreads(); // this is necessary!!! double Axs[4]; double Bxp[4]; double Cb[16] = {0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0}; int k1; for(k1=0; k1<m-16; k1+=16) { trackB += 16; #pragma unroll for( int y=0; y<4; y++) xxB[y] = fetch_x_B( trackB + y*ldb); #pragma unroll for( int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++){ Axs[y] = Abs[tx2+y*16][j1+k1] ; } #pragma unroll for( int y=0; y<4; y++){ Bxp[y]= Bb[j1][ty2+y*16]; } #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0; y<4; y++) { Cb[x*4+y] += Axs[x]*Bxp[y]; } } } __syncthreads(); #pragma unroll for(int y=0; y<4; y++) Bb[tx2][ty2*4 + y] = xxB[y]; __syncthreads(); // this is necessary!!! } // Prepare where to write the result magmaDouble_ptr C = Cval[blockIdx.z * kblocks + k]; C += tx2 + __mul24 (ty2 ,ldc); #pragma unroll for(int j1=0;j1<16;j1++) { #pragma unroll for( int y=0; y<4; y++) Axs[y] = Abs[tx2 + y*16][j1+k1] ; #pragma unroll for( int y=0; y<4; y++) Bxp[y]= Bb[j1][ty2 + y*16]; #pragma unroll for( int x=0; x<4; x++) { #pragma unroll for( int y=0;y<4; y++) { Cb[x*4 + y] += Axs[x]*Bxp[y]; } } } int gy = ty2; #pragma unroll for( int y=0;y<4;y++, gy+=16) { int gx = tx2; #pragma unroll for(int x=0;x<4;x++, gx+=16) { if (gx < m && gy < n){ C[x*16] -= Cb[y+x*4]; } } C += ldc*16; } } #endif #endif } /** Purpose ------- For a Block-CSR ILU factorization, this routine updates all blocks in the trailing matrix. Arguments --------- @param[in] size_b magma_int_t blocksize in BCSR @param[in] num_brows magma_int_t number of block rows @param[in] kblocks magma_int_t number of blocks in row @param[in] dA magmaDoubleComplex** input blocks of matrix A @param[in] dB magmaDoubleComplex** input blocks of matrix B @param[in] dC magmaDoubleComplex** output blocks of matrix C @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_zgegpuk ********************************************************************/ extern "C" magma_int_t magma_zbcsrluegemm( magma_int_t size_b, magma_int_t num_brows, magma_int_t kblocks, magmaDoubleComplex_ptr *dA, magmaDoubleComplex_ptr *dB, magmaDoubleComplex_ptr *dC, magma_queue_t queue ) { #if defined(PRECISION_d) magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { printf("error: magma_zbcsrluegemm needs a CUDA architecture" " with at least 48K shared memory (Fermi +).\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); } else { dim3 threads( 64, 4 ); dim3 grid(1, 1, num_brows); zbcsr_gemm_kernel64<<< grid, threads, 0, queue >>>( size_b, size_b, kblocks, dA, dB, dC ); } #else printf("error: currently only supported for double precision.\n" "Please run zbcsrlu.cpp using CUBLAS batched.\n"); #endif return MAGMA_SUCCESS; }
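The zbcsr_gemm kernels above flatten a 64x4 thread block into a 16x16 logical tile via idt = ty * 64 + tx, tx2 = idt % 16, ty2 = idt / 16. A small host-only sketch that just prints this remapping for a subset of threads, purely to illustrate the index math (not part of MAGMA):

#include <cstdio>

int main() {
    // Reproduce the index flattening used in zbcsr_gemm_kernel64:
    // a (64 x 4) thread block is re-viewed as a (16 x 16) logical tile.
    for (int ty = 0; ty < 4; ++ty) {
        for (int tx = 0; tx < 64; tx += 16) {   // print a subset to keep the output short
            int idt = ty * 64 + tx;
            int tx2 = idt % 16;
            int ty2 = idt / 16;
            std::printf("tx=%2d ty=%d -> idt=%3d -> (tx2=%2d, ty2=%2d)\n",
                        tx, ty, idt, tx2, ty2);
        }
    }
    return 0;
}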
ebeceda3240974584192b31e28b0b2dc0fc199ca.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "stdio.h" #include "stdlib.h" #include <string.h> #include "algorithmCudaNormal.h" #include "algorithmCudaNormalInternal.h" namespace AlgorithmCudaNormal { #if 0 } // indent guard #endif __forceinline__ __device__ void updateCell(int* matDst, int* matSrc, int globalIndex, int cnt) { if (matSrc[globalIndex] == 0) { if (cnt == 3) { // birth matDst[globalIndex] = 1; } else { // keep dead matDst[globalIndex] = 0; } } else { if (cnt <= 2 || cnt >= 5) { // die matDst[globalIndex] = 0; } else { // keep alive (age++) matDst[globalIndex] = matSrc[globalIndex] + 1; } } } /* * when block size is (32,32), calculation block is (30,30) * the 1st block calculates (1,1) - (30,30) (using matrix[(0,0) - (31,31)]) * the 2nd block calculates (31,1) - (60,30) (using matrix[(30,0) - (61,0)]) * Note: matrix memory size is (1 + width + 1, 1 + height + 1) (the real matrix is [(1,1) - (memWidth - 2, memHeight - 2)] */ __global__ void loop_2(int* matDst, int *matSrc, int width, int height, int memWidth, int memHeight) { __shared__ int tile[BLOCK_SIZE_H][BLOCK_SIZE_W]; /* this is position on memory */ int globalX = blockIdx.x * (blockDim.x - 2 * MEMORY_MARGIN) + threadIdx.x; // <- increase 30(block size - 2) per block int globalY = blockIdx.y * (blockDim.y - 2 * MEMORY_MARGIN) + threadIdx.y; // <- increase 30(block size - 2) per block int localX = threadIdx.x; int localY = threadIdx.y; if (globalX >= memWidth || globalY >= memHeight) return; /* copy data from global memory to shared memory [(0,0) - (31,31)] */ int thisCell = tile[localY][localX] = matSrc[memWidth * globalY + globalX]; __syncthreads(); if (globalX >= memWidth - 1 || globalY >= memHeight - 1 || localX == 0 || localX == blockDim.x - 1 || localY == 0 || localY == blockDim.y - 1) return; /* calculate if [(1,1) - (30,30)] */ int cnt; cnt = (tile[localY - 1][localX - 1] != 0) + (tile[localY - 1][localX - 0] != 0) + (tile[localY - 1][localX + 1] != 0) + (tile[localY - 0][localX - 1] != 0) + (thisCell != 0) + (tile[localY - 0][localX + 1] != 0) + (tile[localY + 1][localX - 1] != 0) + (tile[localY + 1][localX - 0] != 0) + (tile[localY + 1][localX + 1] != 0); updateCell(matDst, matSrc, memWidth * globalY + globalX, cnt); } __global__ void copyAliasRow2(int* devMat, int memWidth, int memHeight) { int devMatX = blockIdx.x * blockDim.x + threadIdx.x + 1; devMat[memWidth * 0 + devMatX] = devMat[memWidth * (memHeight - 2) + devMatX]; devMat[memWidth * (memHeight - 1) + devMatX] = devMat[memWidth * 1 + devMatX]; } __global__ void copyAliasCol2(int* devMat, int memWidth, int memHeight) { int devMatY = blockIdx.x * blockDim.x + threadIdx.x + 1; devMat[memWidth * devMatY + 0] = devMat[memWidth * devMatY + (memWidth - 2)]; devMat[memWidth * devMatY + memWidth - 1] = devMat[memWidth * devMatY + 1]; } /* The algorithm using alias area on 4 corners and edges so that main logic doen't need to consider border * with shared memory * Note: matrix memory size is (1 + width + 1, 1 + height + 1) (the real matrix is [(1,1) - (memWidth - 2, memHeight - 2)] */ void process_2(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height) { int memWidth = width + 2 * MEMORY_MARGIN; int memHeight = height + 2 * MEMORY_MARGIN; /* block size setting for main logic * do copy per 32(BLOCK_SIZE) * do calculation per 30(BLOCK_SIZE-2) * the number of block is ceil(width / 30) */ dim3 block(BLOCK_SIZE_W, BLOCK_SIZE_H, 1); dim3 grid((int)ceil(width / 
(double)(BLOCK_SIZE_W - 2 * MEMORY_MARGIN)), (int)ceil(height / (double)(BLOCK_SIZE_H - 2 * MEMORY_MARGIN)), 1); /* Create alias area in CPU at first, then copy all the memory area from host to device */ int *p = param->hostMatSrc; memcpy(p, p + (memHeight - 2) * memWidth, memWidth * sizeof(int)); memcpy(p + (memHeight - 1) * memWidth, p + (1) * memWidth, memWidth * sizeof(int)); for (int y = 1; y < memHeight - 1; y++) { p[memWidth * y + 0] = p[memWidth * y + memWidth - 2]; p[memWidth * y + memWidth - 1] = p[memWidth * y + 1]; } p[memWidth * 0 + 0] = p[memWidth * (memHeight - 2) + memWidth - 2]; p[memWidth * 0 + memWidth - 1] = p[memWidth * (memHeight - 2) + 1]; p[memWidth * (memHeight - 1) + 0] = p[memWidth * (1) + memWidth - 2]; p[memWidth * (memHeight - 1) + memWidth - 1] = p[memWidth * (1) + 1]; #if !defined(USE_ZEROCOPY_MEMORY) CHECK(hipMemcpy(param->devMatSrc, param->hostMatSrc, memWidth * memHeight * sizeof(int), hipMemcpyHostToDevice)); #endif /*** operate logic without border check ***/ loop_2 << < grid, block >> > (param->devMatDst, param->devMatSrc, width, height, memWidth, memHeight); CHECK(hipDeviceSynchronize()); #if !defined(USE_ZEROCOPY_MEMORY) CHECK(hipMemcpy(param->hostMatDst + (memWidth * 1) + MEMORY_MARGIN, param->devMatDst + (memWidth * 1) + MEMORY_MARGIN, memWidth * height * sizeof(int), hipMemcpyDeviceToHost)); #endif swapMat(param); // hostMatSrc is ready to be displayed } }
ebeceda3240974584192b31e28b0b2dc0fc199ca.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "stdio.h" #include "stdlib.h" #include <string.h> #include "algorithmCudaNormal.h" #include "algorithmCudaNormalInternal.h" namespace AlgorithmCudaNormal { #if 0 } // indent guard #endif __forceinline__ __device__ void updateCell(int* matDst, int* matSrc, int globalIndex, int cnt) { if (matSrc[globalIndex] == 0) { if (cnt == 3) { // birth matDst[globalIndex] = 1; } else { // keep dead matDst[globalIndex] = 0; } } else { if (cnt <= 2 || cnt >= 5) { // die matDst[globalIndex] = 0; } else { // keep alive (age++) matDst[globalIndex] = matSrc[globalIndex] + 1; } } } /* * when block size is (32,32), calculation block is (30,30) * the 1st block calculates (1,1) - (30,30) (using matrix[(0,0) - (31,31)]) * the 2nd block calculates (31,1) - (60,30) (using matrix[(30,0) - (61,0)]) * Note: matrix memory size is (1 + width + 1, 1 + height + 1) (the real matrix is [(1,1) - (memWidth - 2, memHeight - 2)] */ __global__ void loop_2(int* matDst, int *matSrc, int width, int height, int memWidth, int memHeight) { __shared__ int tile[BLOCK_SIZE_H][BLOCK_SIZE_W]; /* this is position on memory */ int globalX = blockIdx.x * (blockDim.x - 2 * MEMORY_MARGIN) + threadIdx.x; // <- increase 30(block size - 2) per block int globalY = blockIdx.y * (blockDim.y - 2 * MEMORY_MARGIN) + threadIdx.y; // <- increase 30(block size - 2) per block int localX = threadIdx.x; int localY = threadIdx.y; if (globalX >= memWidth || globalY >= memHeight) return; /* copy data from global memory to shared memory [(0,0) - (31,31)] */ int thisCell = tile[localY][localX] = matSrc[memWidth * globalY + globalX]; __syncthreads(); if (globalX >= memWidth - 1 || globalY >= memHeight - 1 || localX == 0 || localX == blockDim.x - 1 || localY == 0 || localY == blockDim.y - 1) return; /* calculate if [(1,1) - (30,30)] */ int cnt; cnt = (tile[localY - 1][localX - 1] != 0) + (tile[localY - 1][localX - 0] != 0) + (tile[localY - 1][localX + 1] != 0) + (tile[localY - 0][localX - 1] != 0) + (thisCell != 0) + (tile[localY - 0][localX + 1] != 0) + (tile[localY + 1][localX - 1] != 0) + (tile[localY + 1][localX - 0] != 0) + (tile[localY + 1][localX + 1] != 0); updateCell(matDst, matSrc, memWidth * globalY + globalX, cnt); } __global__ void copyAliasRow2(int* devMat, int memWidth, int memHeight) { int devMatX = blockIdx.x * blockDim.x + threadIdx.x + 1; devMat[memWidth * 0 + devMatX] = devMat[memWidth * (memHeight - 2) + devMatX]; devMat[memWidth * (memHeight - 1) + devMatX] = devMat[memWidth * 1 + devMatX]; } __global__ void copyAliasCol2(int* devMat, int memWidth, int memHeight) { int devMatY = blockIdx.x * blockDim.x + threadIdx.x + 1; devMat[memWidth * devMatY + 0] = devMat[memWidth * devMatY + (memWidth - 2)]; devMat[memWidth * devMatY + memWidth - 1] = devMat[memWidth * devMatY + 1]; } /* The algorithm using alias area on 4 corners and edges so that main logic doen't need to consider border * with shared memory * Note: matrix memory size is (1 + width + 1, 1 + height + 1) (the real matrix is [(1,1) - (memWidth - 2, memHeight - 2)] */ void process_2(ALGORITHM_CUDA_NORMAL_PARAM *param, int width, int height) { int memWidth = width + 2 * MEMORY_MARGIN; int memHeight = height + 2 * MEMORY_MARGIN; /* block size setting for main logic * do copy per 32(BLOCK_SIZE) * do calculation per 30(BLOCK_SIZE-2) * the number of block is ceil(width / 30) */ dim3 block(BLOCK_SIZE_W, BLOCK_SIZE_H, 1); dim3 grid((int)ceil(width / (double)(BLOCK_SIZE_W - 2 * MEMORY_MARGIN)), (int)ceil(height / 
(double)(BLOCK_SIZE_H - 2 * MEMORY_MARGIN)), 1); /* Create alias area in CPU at first, then copy all the memory area from host to device */ int *p = param->hostMatSrc; memcpy(p, p + (memHeight - 2) * memWidth, memWidth * sizeof(int)); memcpy(p + (memHeight - 1) * memWidth, p + (1) * memWidth, memWidth * sizeof(int)); for (int y = 1; y < memHeight - 1; y++) { p[memWidth * y + 0] = p[memWidth * y + memWidth - 2]; p[memWidth * y + memWidth - 1] = p[memWidth * y + 1]; } p[memWidth * 0 + 0] = p[memWidth * (memHeight - 2) + memWidth - 2]; p[memWidth * 0 + memWidth - 1] = p[memWidth * (memHeight - 2) + 1]; p[memWidth * (memHeight - 1) + 0] = p[memWidth * (1) + memWidth - 2]; p[memWidth * (memHeight - 1) + memWidth - 1] = p[memWidth * (1) + 1]; #if !defined(USE_ZEROCOPY_MEMORY) CHECK(cudaMemcpy(param->devMatSrc, param->hostMatSrc, memWidth * memHeight * sizeof(int), cudaMemcpyHostToDevice)); #endif /*** operate logic without border check ***/ loop_2 << < grid, block >> > (param->devMatDst, param->devMatSrc, width, height, memWidth, memHeight); CHECK(cudaDeviceSynchronize()); #if !defined(USE_ZEROCOPY_MEMORY) CHECK(cudaMemcpy(param->hostMatDst + (memWidth * 1) + MEMORY_MARGIN, param->devMatDst + (memWidth * 1) + MEMORY_MARGIN, memWidth * height * sizeof(int), cudaMemcpyDeviceToHost)); #endif swapMat(param); // hostMatSrc is ready to be displayed } }
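process_2 in the Game of Life pair above sizes its grid by the interior cells each tile actually produces, i.e. the block size minus a one-cell halo on each side. A minimal sketch of that launch-geometry calculation, with hypothetical constants standing in for BLOCK_SIZE_W, BLOCK_SIZE_H, and MEMORY_MARGIN:

#include <cstdio>

// Hypothetical stand-ins for the constants used in process_2.
constexpr int kBlockW = 32, kBlockH = 32, kMargin = 1;

int main() {
    int width = 1000, height = 700;              // logical field size (without the alias border)
    int tileW = kBlockW - 2 * kMargin;           // interior cells produced per block in x
    int tileH = kBlockH - 2 * kMargin;           // interior cells produced per block in y
    int gridX = (width  + tileW - 1) / tileW;    // integer form of ceil(width  / (double)tileW)
    int gridY = (height + tileH - 1) / tileH;    // integer form of ceil(height / (double)tileH)
    std::printf("grid = (%d, %d), block = (%d, %d)\n", gridX, gridY, kBlockW, kBlockH);
    return 0;
}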
10f92eb4ff614143b23edc24a7b61e1cf294751c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/random_choice.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/variable.hpp> #include <thrust/device_vector.h> #include <thrust/scan.h> namespace nbla { namespace random_choice_cuda { // CUDA kernel to uniformly draw samples from the cumulative summed weights // (per population) in w_sums. Each population has w_size elements. The number // of samples to draw (per population) is given by u_size, the u_vals pointer // is input with uniform random value [0..1). Each thread (subject to grid // striding) determines for one input element and all samples if the weight sum // is less than the uniform value and, if true, increses the mapping index for // that input value. After all threads have run, the index map points to the // values drawn from input x. template <typename T> __global__ void draw_samples(const size_t size, const size_t w_size, const size_t u_size, const T *w_sums, const float *u_vals, int *idxmap) { NBLA_CUDA_KERNEL_LOOP(i, size) { const auto b = i / w_size; // population index const auto scale = w_sums[(b + 1) * w_size - 1]; for (int j = 0; j < u_size; j++) { if (w_sums[i] < u_vals[b * u_size + j] * scale) { atomic_add(idxmap + b * u_size + j, 1); } } } } // Same kernel as above but draws one sample per round `r`. Needed for sampling // without replacement. template <typename T> __global__ void draw_sample(const size_t size, const size_t w_size, const size_t u_size, const T *w_sums, const float *u_vals, int *idxmap, const int r) { NBLA_CUDA_KERNEL_LOOP(i, size) { const auto b = i / w_size; // population index const auto scale = w_sums[(b + 1) * w_size - 1]; if (w_sums[i] < u_vals[b * u_size + r] * scale) { atomic_add(idxmap + b * u_size + r, 1); } } } // Kernel that sets choosen weigths to zero after each round `r`. Used for // sampling without replacement. template <typename T> __global__ void zero_weight(const size_t size, const size_t w_size, const size_t u_size, const int *idxmap, const int r, T *w_data) { NBLA_CUDA_KERNEL_LOOP(b, size) { w_data[b * w_size + idxmap[b * u_size + r]] = 0; } } // Copy choosen sample values from input `x`, using the index map. template <typename T> __global__ void copy_samples(const size_t size, const size_t w_size, const size_t u_size, const int *idxmap, const T *src, T *dst) { NBLA_CUDA_KERNEL_LOOP(i, size) { dst[i] = src[(i / u_size) * w_size + idxmap[i]]; } } // Backward kernel just adds gradient through index map determined at forward. 
template <typename T> __global__ void add_gradient(const size_t size, const size_t w_size, const size_t u_size, const int *idxmap, const T *src, T *dst) { NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(dst + (i / u_size) * w_size + idxmap[i], src[i]); } } } // namespace random_choice_cuda template <typename T> void RandomChoiceCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); if (this->replace_ == true) { this->sample_with_replacement(inputs, outputs); } else { this->sample_without_replace(inputs, outputs); } } template <typename T> void RandomChoiceCuda<T>::sample_with_replacement(const Variables &inputs, const Variables &outputs) { auto x = inputs[0], w = inputs[1], y = outputs[0]; Variable &idxbuf_ = this->idxbuf_; idxbuf_.data()->zero(); auto idxbuf = idxbuf_.cast_data_and_get_pointer<int>(this->ctx_, false); auto x_data = x->get_data_pointer<Tcu>(this->ctx_); auto w_data = w->get_data_pointer<Tcu>(this->ctx_); auto y_data = y->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto w_size = w->shape().back(); // size of each weight vector auto u_size = this->inner_loop_; // samples to draw per weight vector ArrayPtr tmp[] = { make_shared<CudaCachedArray>(x->size(), get_dtype<Tcu>(), this->ctx_), make_shared<CudaCachedArray>(y->size(), get_dtype<float>(), this->ctx_)}; auto w_sums = tmp[0]->pointer<Tcu>(); auto u_vals = tmp[1]->pointer<float>(); // Generate random choices for each output sample point. curand_generate_rand<float>(curand_generator_, 0, 1, u_vals, y->size()); // Build cumulative sum of weights per population. for (int i = 0; i < this->outer_loop_; i++) { auto w_data_ptr = thrust::device_pointer_cast(w_data + i * w_size); auto w_sums_ptr = thrust::device_pointer_cast(w_sums + i * w_size); thrust::inclusive_scan(w_data_ptr, w_data_ptr + w_size, w_sums_ptr); } // Indirectly draw samples by building an index map. NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::draw_samples, x->size(), w_size, u_size, w_sums, u_vals, idxbuf); // Copy input data values according to index map. NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::copy_samples, y->size(), w_size, u_size, idxbuf, x_data, y_data); } template <typename T> void RandomChoiceCuda<T>::sample_without_replace(const Variables &inputs, const Variables &outputs) { auto x = inputs[0], w = inputs[1], y = outputs[0]; Variable &idxbuf_ = this->idxbuf_; idxbuf_.data()->zero(); auto idxbuf = idxbuf_.cast_data_and_get_pointer<int>(this->ctx_, false); auto x_data = x->get_data_pointer<Tcu>(this->ctx_); auto y_data = y->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto w_size = w->shape().back(); // size of each weight vector auto u_size = this->inner_loop_; // samples to draw per weight vector auto b_size = this->outer_loop_; // batch size (left N-1 dims of x and w) ArrayPtr tmp[] = { make_shared<CudaCachedArray>(x->size(), get_dtype<Tcu>(), this->ctx_), make_shared<CudaCachedArray>(x->size(), get_dtype<Tcu>(), this->ctx_), make_shared<CudaCachedArray>(y->size(), get_dtype<float>(), this->ctx_)}; // Copy the weight data to writable memory where we can remove a // category (by nulling it's weight) after each round. auto w_data_ptr = w->get_data_pointer<Tcu>(this->ctx_); thrust::copy_n(thrust::device_pointer_cast(w_data_ptr), w->size(), thrust::device_pointer_cast(tmp[0]->pointer<Tcu>())); auto w_data = tmp[0]->pointer<Tcu>(); auto w_sums = tmp[1]->pointer<Tcu>(); auto u_vals = tmp[2]->pointer<float>(); // Generate random choices for each output sample point. 
curand_generate_rand<float>(curand_generator_, 0, 1, u_vals, y->size()); // We draw one sample per round (and population) and set the choosen weight // to zero, so each round decreases the number of non-zero weights. for (int r = 0; r < u_size; r++) { for (int i = 0; i < b_size; i++) { auto w_data_ptr = thrust::device_pointer_cast(w_data + i * w_size); auto w_sums_ptr = thrust::device_pointer_cast(w_sums + i * w_size); thrust::inclusive_scan(w_data_ptr, w_data_ptr + w_size, w_sums_ptr); } NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::draw_sample, x->size(), w_size, u_size, w_sums, u_vals, idxbuf, r); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::zero_weight, b_size, w_size, u_size, idxbuf, r, w_data) } // Copy input data values according to index map. NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::copy_samples, y->size(), w_size, u_size, idxbuf, x_data, y_data); } template <typename T> void RandomChoiceCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1])) { return; } cuda_set_device(this->device_); if ((propagate_down[0]) && (!accum[0])) inputs[0]->grad()->zero(); if ((propagate_down[1]) && (!accum[1])) inputs[1]->grad()->zero(); auto x = inputs[0], w = inputs[1], y = outputs[0]; Variable &idxbuf_ = this->idxbuf_; auto w_size = w->shape().back(); // size of each weight vector auto u_size = this->inner_loop_; // samples to draw per weight vector if (propagate_down[0]) { auto x_grad = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto y_grad = y->get_grad_pointer<Tcu>(this->ctx_); auto idxbuf = idxbuf_.get_data_pointer<int>(this->ctx_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::add_gradient, y->size(), w_size, u_size, idxbuf, y_grad, x_grad); } if (propagate_down[1]) { auto w_grad = w->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto y_grad = y->get_grad_pointer<Tcu>(this->ctx_); auto idxbuf = idxbuf_.get_data_pointer<int>(this->ctx_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::add_gradient, y->size(), w_size, u_size, idxbuf, y_grad, w_grad); } } }
10f92eb4ff614143b23edc24a7b61e1cf294751c.cu
// Copyright (c) 2017 Sony Corporation. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/array/cuda_array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/random_choice.hpp> #include <nbla/cuda/utils/atomic_add.cuh> #include <nbla/variable.hpp> #include <thrust/device_vector.h> #include <thrust/scan.h> namespace nbla { namespace random_choice_cuda { // CUDA kernel to uniformly draw samples from the cumulative summed weights // (per population) in w_sums. Each population has w_size elements. The number // of samples to draw (per population) is given by u_size, the u_vals pointer // is input with uniform random value [0..1). Each thread (subject to grid // striding) determines for one input element and all samples if the weight sum // is less than the uniform value and, if true, increses the mapping index for // that input value. After all threads have run, the index map points to the // values drawn from input x. template <typename T> __global__ void draw_samples(const size_t size, const size_t w_size, const size_t u_size, const T *w_sums, const float *u_vals, int *idxmap) { NBLA_CUDA_KERNEL_LOOP(i, size) { const auto b = i / w_size; // population index const auto scale = w_sums[(b + 1) * w_size - 1]; for (int j = 0; j < u_size; j++) { if (w_sums[i] < u_vals[b * u_size + j] * scale) { atomic_add(idxmap + b * u_size + j, 1); } } } } // Same kernel as above but draws one sample per round `r`. Needed for sampling // without replacement. template <typename T> __global__ void draw_sample(const size_t size, const size_t w_size, const size_t u_size, const T *w_sums, const float *u_vals, int *idxmap, const int r) { NBLA_CUDA_KERNEL_LOOP(i, size) { const auto b = i / w_size; // population index const auto scale = w_sums[(b + 1) * w_size - 1]; if (w_sums[i] < u_vals[b * u_size + r] * scale) { atomic_add(idxmap + b * u_size + r, 1); } } } // Kernel that sets choosen weigths to zero after each round `r`. Used for // sampling without replacement. template <typename T> __global__ void zero_weight(const size_t size, const size_t w_size, const size_t u_size, const int *idxmap, const int r, T *w_data) { NBLA_CUDA_KERNEL_LOOP(b, size) { w_data[b * w_size + idxmap[b * u_size + r]] = 0; } } // Copy choosen sample values from input `x`, using the index map. template <typename T> __global__ void copy_samples(const size_t size, const size_t w_size, const size_t u_size, const int *idxmap, const T *src, T *dst) { NBLA_CUDA_KERNEL_LOOP(i, size) { dst[i] = src[(i / u_size) * w_size + idxmap[i]]; } } // Backward kernel just adds gradient through index map determined at forward. 
template <typename T> __global__ void add_gradient(const size_t size, const size_t w_size, const size_t u_size, const int *idxmap, const T *src, T *dst) { NBLA_CUDA_KERNEL_LOOP(i, size) { atomic_add(dst + (i / u_size) * w_size + idxmap[i], src[i]); } } } // namespace random_choice_cuda template <typename T> void RandomChoiceCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(this->device_); if (this->replace_ == true) { this->sample_with_replacement(inputs, outputs); } else { this->sample_without_replace(inputs, outputs); } } template <typename T> void RandomChoiceCuda<T>::sample_with_replacement(const Variables &inputs, const Variables &outputs) { auto x = inputs[0], w = inputs[1], y = outputs[0]; Variable &idxbuf_ = this->idxbuf_; idxbuf_.data()->zero(); auto idxbuf = idxbuf_.cast_data_and_get_pointer<int>(this->ctx_, false); auto x_data = x->get_data_pointer<Tcu>(this->ctx_); auto w_data = w->get_data_pointer<Tcu>(this->ctx_); auto y_data = y->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto w_size = w->shape().back(); // size of each weight vector auto u_size = this->inner_loop_; // samples to draw per weight vector ArrayPtr tmp[] = { make_shared<CudaCachedArray>(x->size(), get_dtype<Tcu>(), this->ctx_), make_shared<CudaCachedArray>(y->size(), get_dtype<float>(), this->ctx_)}; auto w_sums = tmp[0]->pointer<Tcu>(); auto u_vals = tmp[1]->pointer<float>(); // Generate random choices for each output sample point. curand_generate_rand<float>(curand_generator_, 0, 1, u_vals, y->size()); // Build cumulative sum of weights per population. for (int i = 0; i < this->outer_loop_; i++) { auto w_data_ptr = thrust::device_pointer_cast(w_data + i * w_size); auto w_sums_ptr = thrust::device_pointer_cast(w_sums + i * w_size); thrust::inclusive_scan(w_data_ptr, w_data_ptr + w_size, w_sums_ptr); } // Indirectly draw samples by building an index map. NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::draw_samples, x->size(), w_size, u_size, w_sums, u_vals, idxbuf); // Copy input data values according to index map. NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::copy_samples, y->size(), w_size, u_size, idxbuf, x_data, y_data); } template <typename T> void RandomChoiceCuda<T>::sample_without_replace(const Variables &inputs, const Variables &outputs) { auto x = inputs[0], w = inputs[1], y = outputs[0]; Variable &idxbuf_ = this->idxbuf_; idxbuf_.data()->zero(); auto idxbuf = idxbuf_.cast_data_and_get_pointer<int>(this->ctx_, false); auto x_data = x->get_data_pointer<Tcu>(this->ctx_); auto y_data = y->cast_data_and_get_pointer<Tcu>(this->ctx_, true); auto w_size = w->shape().back(); // size of each weight vector auto u_size = this->inner_loop_; // samples to draw per weight vector auto b_size = this->outer_loop_; // batch size (left N-1 dims of x and w) ArrayPtr tmp[] = { make_shared<CudaCachedArray>(x->size(), get_dtype<Tcu>(), this->ctx_), make_shared<CudaCachedArray>(x->size(), get_dtype<Tcu>(), this->ctx_), make_shared<CudaCachedArray>(y->size(), get_dtype<float>(), this->ctx_)}; // Copy the weight data to writable memory where we can remove a // category (by nulling it's weight) after each round. auto w_data_ptr = w->get_data_pointer<Tcu>(this->ctx_); thrust::copy_n(thrust::device_pointer_cast(w_data_ptr), w->size(), thrust::device_pointer_cast(tmp[0]->pointer<Tcu>())); auto w_data = tmp[0]->pointer<Tcu>(); auto w_sums = tmp[1]->pointer<Tcu>(); auto u_vals = tmp[2]->pointer<float>(); // Generate random choices for each output sample point. 
curand_generate_rand<float>(curand_generator_, 0, 1, u_vals, y->size()); // We draw one sample per round (and population) and set the choosen weight // to zero, so each round decreases the number of non-zero weights. for (int r = 0; r < u_size; r++) { for (int i = 0; i < b_size; i++) { auto w_data_ptr = thrust::device_pointer_cast(w_data + i * w_size); auto w_sums_ptr = thrust::device_pointer_cast(w_sums + i * w_size); thrust::inclusive_scan(w_data_ptr, w_data_ptr + w_size, w_sums_ptr); } NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::draw_sample, x->size(), w_size, u_size, w_sums, u_vals, idxbuf, r); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::zero_weight, b_size, w_size, u_size, idxbuf, r, w_data) } // Copy input data values according to index map. NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::copy_samples, y->size(), w_size, u_size, idxbuf, x_data, y_data); } template <typename T> void RandomChoiceCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { if (!(propagate_down[0] || propagate_down[1])) { return; } cuda_set_device(this->device_); if ((propagate_down[0]) && (!accum[0])) inputs[0]->grad()->zero(); if ((propagate_down[1]) && (!accum[1])) inputs[1]->grad()->zero(); auto x = inputs[0], w = inputs[1], y = outputs[0]; Variable &idxbuf_ = this->idxbuf_; auto w_size = w->shape().back(); // size of each weight vector auto u_size = this->inner_loop_; // samples to draw per weight vector if (propagate_down[0]) { auto x_grad = x->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto y_grad = y->get_grad_pointer<Tcu>(this->ctx_); auto idxbuf = idxbuf_.get_data_pointer<int>(this->ctx_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::add_gradient, y->size(), w_size, u_size, idxbuf, y_grad, x_grad); } if (propagate_down[1]) { auto w_grad = w->cast_grad_and_get_pointer<Tcu>(this->ctx_, false); auto y_grad = y->get_grad_pointer<Tcu>(this->ctx_); auto idxbuf = idxbuf_.get_data_pointer<int>(this->ctx_); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(random_choice_cuda::add_gradient, y->size(), w_size, u_size, idxbuf, y_grad, w_grad); } } }
d991d4a74d2e3d2911711c850d561bcdbf4f5c84.hip
// !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <iostream>
#include <hip/hip_runtime_api.h>

int main()
{
    int deviceCount;
    hipGetDeviceCount(&deviceCount);
    for (int i = 0; i < deviceCount; i++)
    {
        hipDeviceProp_t devProp;
        hipGetDeviceProperties(&devProp, i);
        std::cout << "GPU device " << i << ": " << devProp.name << std::endl;
        std::cout << "Total global memory: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
        std::cout << "Number of SMs: " << devProp.multiProcessorCount << std::endl;
        std::cout << "Shared memory per block: " << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
        std::cout << "Max threads per block: " << devProp.maxThreadsPerBlock << std::endl;
        std::cout << "32-bit registers available per block: " << devProp.regsPerBlock << std::endl;
        std::cout << "Max threads per SM: " << devProp.maxThreadsPerMultiProcessor << std::endl;
        std::cout << "Max warps per SM: " << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
        std::cout << "Number of multiprocessors on the device: " << devProp.multiProcessorCount << std::endl;
        std::cout << "======================================================" << std::endl;
    }
    return 0;
}
d991d4a74d2e3d2911711c850d561bcdbf4f5c84.cu
#include "device_launch_parameters.h" #include <iostream> #include <cuda_runtime_api.h> int main() { int deviceCount; cudaGetDeviceCount(&deviceCount); for (int i = 0; i < deviceCount; i++) { cudaDeviceProp devProp; cudaGetDeviceProperties(&devProp, i); std::cout << "使用GPU device " << i << ": " << devProp.name << std::endl; std::cout << "设备全局内存总量: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl; std::cout << "SM的数量:" << devProp.multiProcessorCount << std::endl; std::cout << "每个线程块的共享内存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl; std::cout << "每个线程块的最大线程数:" << devProp.maxThreadsPerBlock << std::endl; std::cout << "设备上一个线程块(Block)种可用的32位寄存器数量: " << devProp.regsPerBlock << std::endl; std::cout << "每个EM的最大线程数:" << devProp.maxThreadsPerMultiProcessor << std::endl; std::cout << "每个EM的最大线程束数:" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl; std::cout << "设备上多处理器的数量: " << devProp.multiProcessorCount << std::endl; std::cout << "======================================================" << std::endl; } return 0; }
20469f9f361285d267627323344533246249f6b4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "helper_functions.h" #include "thrust\host_vector.h" #include "thrust\device_vector.h" #include "thrust\reduce.h" #include "EasyBMP.h" #include "PIDIC.cuh" #include "FFTCC.h" #include <stdio.h> #include <iostream> #include <fstream> //Parameters const int iMarginX = 10, iMarginY = 10; const int iGridX = 10, iGridY = 10; const int iSubsetX = 8, iSubsetY =8; const float fDeltaP = 0.001f; const int iIterationNum = 5; const int BLOCK_SIZE = 16; //CUDA RUNTIME Initialization void InitCuda() { hipFree(0); } __global__ void precomputation_kernel(float *d_InputIMGR, float *d_InputIMGT, const float* __restrict__ d_InputBiubicMatrix, float *d_OutputIMGR, float *d_OutputIMGT, float *d_OutputIMGRx, float *d_OutputIMGRy, float *d_OutputIMGTx, float *d_OutputIMGTy, float *d_OutputIMGTxy, float *d_OutputdtBicubic, int width, int height) { //The size of input images int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //Temp arrays float d_TaoT[16]; float d_AlphaT[16]; //The rows and cols of output matrix. if((row < height) && (col < width)){ d_OutputIMGR[row*width+col] = d_InputIMGR[(row+1)*(width+2)+col+1]; d_OutputIMGRx[row*width+col] = 0.5 * (d_InputIMGR[(row+1)*(width+2)+col+2] - d_InputIMGR[(row+1)*(width+2)+col]); d_OutputIMGRy[row*width+col] = 0.5 * (d_InputIMGR[(row+2)*(width+2)+col+1] - d_InputIMGR[(row)*(width+2)+col+1]); d_OutputIMGT[row*width+col] = d_InputIMGT[(row+1)*(width+2)+col+1]; d_OutputIMGTx[row*width+col] = 0.5 * (d_InputIMGT[(row+1)*(width+2)+col+2] -d_InputIMGT[(row+1)*(width+2)+col]); d_OutputIMGTy[row*width+col] = 0.5 * (d_InputIMGT[(row+2)*(width+2)+col+1] - d_InputIMGT[(row)*(width+2)+col+1]); d_OutputIMGTxy[row*width+col]= 0.25 * (d_InputIMGT[(row+2)*(width+2)+col+2] - d_InputIMGT[(row)*(width+2)+col+2] -d_InputIMGT[(row+2)*(width+2)+col] + d_InputIMGT[(row)*(width+2)+col]); } __syncthreads(); if((row < height-1) && (col < width-1)){ d_TaoT[0] = d_OutputIMGT[row*(width)+col]; d_TaoT[1] = d_OutputIMGT[row*(width)+col+1]; d_TaoT[2] = d_OutputIMGT[(row+1)*(width)+col]; d_TaoT[3] = d_OutputIMGT[(row+1)*(width)+col+1]; d_TaoT[4] = d_OutputIMGTx[row*(width)+col]; d_TaoT[5] = d_OutputIMGTx[row*(width)+col+1]; d_TaoT[6] = d_OutputIMGTx[(row+1)*(width)+col]; d_TaoT[7] = d_OutputIMGTx[(row+1)*(width)+col+1]; d_TaoT[8] = d_OutputIMGTy[row*(width)+col]; d_TaoT[9] = d_OutputIMGTy[row*(width)+col+1]; d_TaoT[10] = d_OutputIMGTy[(row+1)*(width)+col]; d_TaoT[11] = d_OutputIMGTy[(row+1)*(width)+col+1]; d_TaoT[12] = d_OutputIMGTxy[row*(width)+col]; d_TaoT[13] = d_OutputIMGTxy[row*(width)+col+1]; d_TaoT[14] = d_OutputIMGTxy[(row+1)*(width)+col]; d_TaoT[15] = d_OutputIMGTxy[(row+1)*(width)+col+1]; for(int k=0; k<16; k++){ d_AlphaT[k] = 0.0; for(int l=0; l<16; l++){ d_AlphaT[k] += (d_InputBiubicMatrix[k*16+l] * d_TaoT[l]); } } d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = d_AlphaT[0]; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = d_AlphaT[1]; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = d_AlphaT[2]; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = d_AlphaT[3]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = d_AlphaT[4]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = d_AlphaT[5]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = d_AlphaT[6]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = d_AlphaT[7]; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = d_AlphaT[8]; 
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = d_AlphaT[9]; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = d_AlphaT[10]; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = d_AlphaT[11]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = d_AlphaT[12]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = d_AlphaT[13]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = d_AlphaT[14]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = d_AlphaT[15]; } else if(((row >=height-1)&&(row < height)) && ((col >= width-1)&&(col<width))){ d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = 0.0; } } __global__ void ICGN_kernel(float* input_R, float* input_Rx, float* input_Ry, float* input_AveR, float* input_NormR, float fDeltaP, float *input_T, float* input_Bicubic, int* input_iU, int* input_iV, int iNumberY, int iNumberX, int iSubsetH, int iSubsetW, int width, int height, int iSubsetY, int iSubsetX, int iGridSpaceX, int iGridSpaceY, int iMarginX, int iMarginY, int iIterationNum, float* output_dP) /*BLOCK_SIZE: 2*(iSubsetW+1)+1, 2*(iSubsetH+1)+1 Grid_SIZE: iNumberX * iNumberY */ { int x = threadIdx.x, y = threadIdx.y; int offset = blockIdx.y * gridDim.x + blockIdx.x; //Shared variables of each ROI __shared__ float fPXY[2]; __shared__ float fT[19*19]; __shared__ float fBicubic[19*19*4*4]; __shared__ float fR[17*17]; __shared__ float fRx[17*17]; __shared__ float fRy[17*17]; __shared__ float fdP[6], fdDP[6], fdWarp[3][3], fHessian[6][6],fInvHessian[6][6],fNumerator[6]; __shared__ float fdU, fdV, fdUx, fdUy, fdVx, fdVy; __shared__ float fdDU, fdDUx, fdDUy, fdDV, fdDVx, fdDVy; __shared__ float fSubAveR, fSubNormR, fSubAveT, fSubNormT; __shared__ float fTemp; //Private variables for each subset point float fJacobian[2][6], fRDescent[6], fHessianXY[6][6]; float fSubsetR, fSubsetAveR, fSubsetT, fSubsetAveT; float fdError; float fWarpX, fWarpY; int iTemp, iTempX, iTempY; float fTempX, fTempY; //Load the auxiliary variables into shared memory of each block if(x==0 && y ==0){ fPXY[0] = float(iMarginX + iSubsetY + blockIdx.y * iGridSpaceY); fPXY[1] = float(iMarginY + iSubsetX + blockIdx.x * iGridSpaceX); fdU = float(input_iU[offset]); fdDU = 0.0f; fdV = float(input_iV[offset]); fdDV = 0.0f; fdUx = 0.0f; fdDUx = 0.0f; fdUy = 0.0f; fdDUy = 0.0f; fdVx = 0.0f; fdDVx = 0.0f; fdVy = 0.0f; fdDVy = 0.0f; fdP[0] = fdU; fdP[3] = fdV; fdP[1] = fdUx; fdP[4] = fdVx; fdP[2] = fdUy; fdP[5] = fdVy; fdP[0] = 0.0f; fdP[3] = 0.0f; fdP[1] = 0.0f; fdP[4] = 0.0f; fdP[2] = 0.0f; fdP[5] = 0.0f; fdWarp[0][0] = 1 + fdUx; fdWarp[0][1] = fdUy; fdWarp[0][2] = fdU; fdWarp[1][0] = fdVx; fdWarp[1][1] = 1 + fdVy; fdWarp[1][2] = fdV; fdWarp[2][0] = 0.0f; fdWarp[2][1] = 0.0f; fdWarp[2][2] = 1.0f; fNumerator[0] = 0.0f; fNumerator[1] = 0.0f; fNumerator[2] = 0.0f; 
fNumerator[3] = 0.0f; fNumerator[4] = 0.0f; fNumerator[5] = 0.0f; fdDP[0] = 0.0f; fdDP[1] = 0.0f; fdDP[2] = 0.0f; fdDP[3] = 0.0f; fdDP[4] = 0.0f; fdDP[5] = 0.0f; fSubAveR = input_AveR[offset]; fSubNormR = input_NormR[offset]; fSubsetAveT = 0.0f; fSubNormT = 0.0f; } __syncthreads(); if( x<6 && y<6){ if( x == y){ fInvHessian[y][x] = 1.0f; fHessian[y][x] = 0.0f; } else{ fInvHessian[y][x] = 0.0f; fHessian[y][x] = 0.0f; } } __syncthreads(); //Load PXY, Rx, Ry and R, T, Bicubic into shared_memory if( x<iSubsetW && y<iSubsetH ){ fR[y*iSubsetW+x] = input_R[int(fPXY[0] - iSubsetY + y)*width+int(fPXY[1] - iSubsetX + x)]; fRx[y*iSubsetW+x] = input_Rx[int(fPXY[0] - iSubsetY + y)*width+int(fPXY[1] - iSubsetX + x)]; fRy[y*iSubsetW+x] = input_Ry[int(fPXY[0] - iSubsetY + y)*width+int(fPXY[1] - iSubsetX + x)]; } __syncthreads(); //Load T, Bicubic with additional 1 pixel wider on each side fT[y*iSubsetW+x] = input_T[int(fPXY[0] - (iSubsetY+1) + y)*width+int(fPXY[1] - (iSubsetX+1) + x)]; for(int k=0; k<4; k++){ for(int n=0; n<4; n++){ fBicubic[((y*iSubsetW+x)*4+k)*4+n] = input_Bicubic[((int(fPXY[0] - (iSubsetY+1) + y)*width+int(fPXY[1] - (iSubsetX+1) + x))*4+k)*4+n]; } } __syncthreads(); //Start computing if( x<iSubsetW && y<iSubsetH){ // Evaluate the Jacbian dW/dp at (x, 0); fJacobian[0][0] = 1; fJacobian[0][1] = x - iSubsetX; fJacobian[0][2] = y - iSubsetY; fJacobian[0][3] = 0; fJacobian[0][4] = 0; fJacobian[0][5] = 0; fJacobian[1][0] = 0; fJacobian[1][1] = 0; fJacobian[1][2] = 0; fJacobian[1][3] = 1; fJacobian[1][4] = x - iSubsetX; fJacobian[1][5] = y - iSubsetY; for(unsigned int i=0; i<6; i++){ fRDescent[i] = fRx[y*iSubsetW+x] * fJacobian[0][i] + fRy[y*iSubsetW+x] * fJacobian[1][i]; } for(unsigned int i=0; i<6; i++){ for(unsigned int j=0; j<6; j++){ fHessianXY[i][j] = fRDescent[i] * fRDescent[j]; /*fHessian[i][j] += fHessianXY[i][j];*/ //This is bad code, cannot be all added to one index. atomicAdd(&fHessian[i][j], fHessianXY[i][j]); //Must use this instead. 
} } fSubsetAveR = fSubsetR - fSubAveR; } __syncthreads(); //Invert the Hessian matrix using the first 36 threads if(x ==0 && y ==0){ for(int l=0; l<6; l++){ iTemp = l; for(int m=l+1; m<6; m++){ if(fHessian[m][l] > fHessian[iTemp][l]){ iTemp = m; } } //Swap the row which has maximum lth column element if(iTemp != l){ for(int k=0; k<6; k++){ fTemp = fHessian[l][k]; fHessian[l][k] = fHessian[iTemp][k]; fHessian[iTemp][k] = fTemp; fTemp = fInvHessian[l][k]; fInvHessian[l][k] = fInvHessian[iTemp][k]; fInvHessian[iTemp][k] = fTemp; } } //Row oerpation to form required identity matrix for(int m=0; m<6; m++){ fTemp = fHessian[m][l]; if(m != l){ for(int n=0; n<6; n++){ fInvHessian[m][n] -= fInvHessian[l][n] * fTemp / fHessian[l][l]; fHessian[m][n] -= fHessian[l][n] * fTemp / fHessian[l][l]; } } else{ for(int n=0; n<6; n++){ fInvHessian[m][n] /= fTemp; fHessian[m][n] /= fTemp; } } } } } for(int it=0; it < iIterationNum; it++){ if( x==0 && y==0){ fSubsetAveT = 0.0f; fSubNormT = 0.0f; fNumerator[0] = 0.0f; fNumerator[1] = 0.0f; fNumerator[2] = 0.0f; fNumerator[3] = 0.0f; fNumerator[4] = 0.0f; fNumerator[5] = 0.0f; fNumerator[6] = 0.0f; } __syncthreads(); if( (x<iSubsetW) && (y<iSubsetH) ){ fWarpX = (iSubsetX+1) + fdWarp[0][0]*(x - iSubsetX) + fdWarp[0][1]*(y - iSubsetY) + fdWarp[0][2]; fWarpY = (iSubsetY+1) + fdWarp[1][0]*(x - iSubsetX) + fdWarp[1][1]*(y - iSubsetY) + fdWarp[1][2]; iTempX = int(fWarpX); iTempY = int(fWarpY); if( (iTempX>=0) && (iTempY>=0) && (iTempX<(iSubsetW+1)) && (iTempY<(iSubsetH+1)) ){ fTempX = fWarpX - float(iTempX); fTempY = fWarpY - float(iTempY); //if it is integer-pixel location ,feed the intensity of T into subset T if( (fTempX <= 0.000001) && (fTempY <= 0.000001) ){ fSubsetT = fT[iTempY*(iSubsetW+2)+iTempX]; } else{ fSubsetT = 0.0f; for(int k=0; k<4; k++){ for(int n=0; n<4; n++){ fSubsetT += fBicubic[((iTempY*(iSubsetW+2)+iTempX)*4+k)*4+n]*pow(fTempY,k)*pow(fTempX,n); } } } atomicAdd(&fSubAveT, fSubsetT/float(iSubsetH*iSubsetW)); fSubsetAveT = fSubsetT - fSubAveT; __syncthreads(); atomicAdd(&fSubNormT, pow(fSubAveT,2)); } } if( (x==0) && (y==0) ){ fSubNormT = sqrt(fSubNormT); } __syncthreads(); if( (x<iSubsetW) && (y<iSubsetH) ){ //Compute Error image fdError = (fSubNormR / fSubNormT) * fSubAveT - fSubsetAveR; } __syncthreads(); if( x==0 && y==0){ for(int i=0; i<6; i++){ atomicAdd(&(fNumerator[i]), (fRDescent[i]*fdError)); } for(int k=0; k<6; k++){ fdDP[k] = 0.0; for(int n=0; n<6; n++){ fdDP[k] += (fInvHessian[k][n] * fNumerator[n]); } } fdDU = fdDP[0]; fdDUx = fdDP[1]; fdDUy = fdDP[2]; fdDV = fdDP[3]; fdDVx = fdDP[4]; fdDVy = fdDP[5]; fTemp = (1+fdDUx) * (1+fdDVy) - fdDUy*fdDVx; fdWarp[0][0] = ((1 + fdUx) * (1 + fdDVy) - fdUy * fdDVx) / fTemp; fdWarp[0][1] = (fdUy * (1 + fdDUx) - (1 + fdUx) * fdDUy) / fTemp; fdWarp[0][2] = fdU + (fdUy * (fdDU * fdDVx - fdDV - fdDV * fdDUx) - (1 + fdUx) * (fdDU * fdDVy + fdDU - fdDUy * fdDV)) / fTemp; fdWarp[1][0] = (fdVx * (1 + fdDVy) - (1 + fdVy) * fdDVx) / fTemp; fdWarp[1][1] = ((1 + fdVy) * (1 + fdDUx) - fdVx * fdDUy) / fTemp; fdWarp[1][2] = fdV + ((1 + fdVy) * (fdDU * fdDVx - fdDV - fdDV * fdDUx) - fdVx * (fdDU * fdDVy + fdDU - fdDUy * fdDV)) / fTemp; fdWarp[2][0] = 0; fdWarp[2][1] = 0; fdWarp[2][2] = 1; // Update DeltaP fdP[0] = fdWarp[0][2]; fdP[1] = fdWarp[0][0] - 1; fdP[2] = fdWarp[0][1]; fdP[3] = fdWarp[1][2]; fdP[4] = fdWarp[1][0]; fdP[5] = fdWarp[1][1] - 1; fdU = fdP[0]; fdUx = fdP[1]; fdUy = fdP[2]; fdV = fdP[3]; fdVx = fdP[4]; fdVy = fdP[5]; } __syncthreads(); } //Pass back the values if( x==0 && y==0 ){ output_dP[offset*6+0] 
= fdP[0]; output_dP[offset*6+1] = fdP[1]; output_dP[offset*6+2] = fdP[2]; output_dP[offset*6+3] = fdP[3]; output_dP[offset*6+4] = fdP[4]; output_dP[offset*6+5] = fdP[5]; } } void computation_interface(const std::vector<float>& ImgR, const std::vector<float>& ImgT, int iWidth, int iHeight) { //Timers StopWatchWin WatchPrecompute, WatchICGN, WatchTotal; float fTimePrecopmute=0.0f, fTimeFFTCC=0.0f, fTimeICGN=0.0f, fTimeTotal=0.0f; //Parameters used in the computations. int width = iWidth - 2; int height = iHeight -2; int iNumberX = int(floor((width - iSubsetX*2 - iMarginX*2)/float(iGridX))) + 1; int iNumberY = int(floor((height - iSubsetY*2 - iMarginY*2)/float(iGridY))) + 1; int iSubsetW = iSubsetX*2+1; int iSubsetH = iSubsetY*2+1; int iFFTSubW = iSubsetX*2; int iFFTSubH = iSubsetY*2; /*--------------------------------------Parameters for CUDA kernel use---------------------------------------------*/ //Precomputation Parameters const static float h_InputBicubicCoeff[16*16] = { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , -3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0 , -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 9, -9, -9, 9, 6, 3, -6, -3, 6, -6, 3, -3, 4, 2, 2, 1 , -6, 6, 6, -6, -3, -3, 3, 3, -4, 4, -2, 2, -2, -2, -1, -1, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0 , -6, 6, 6, -6, -4, -2, 4, 2, -3, 3, -3, 3, -2, -1, -2, -1, 4, -4, -4, 4, 2, 2, -2, -2, 2, -2, 2, -2, 1, 1, 1, 1 }; float *d_InputIMGR, *d_InputIMGT,*d_InputBiubicCoeff; float *d_OutputIMGTx, *d_OutputIMGTy, *d_OutputIMGTxy,*d_OutputIMGR, *d_OutputIMGT, *d_OutputIMGRx, *d_OutputIMGRy, *d_OutputBicubic; //FFT-ZNCC Parameters float *hInput_dR, *hInput_dT, *fZNCC; int *iU, *iV; /*------------------------------Real computation starts here-------------------------------- Totally, there are three steps: 1. Precomputation of images' gradients matrix and bicubic interpolation matrix 2. Using FFT to transform the two images into frequency domain, and after per- forming ZNCC, transforming the results back. 3. A Gaussian Newton's optimization method is used to estimate the warped images. 
*/ WatchTotal.start(); //Precomputation Starts; WatchPrecompute.start(); checkCudaErrors(hipMalloc((void**)&d_InputIMGR, (width+2)*(height+2)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_InputIMGT, (width+2)*(height+2)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_InputBiubicCoeff, 16*16*sizeof(float))); checkCudaErrors(hipMemcpy(d_InputIMGR,&ImgR[0],(width+2)*(height+2)*sizeof(float),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_InputIMGT,&ImgT[0],(width+2)*(height+2)*sizeof(float),hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_InputBiubicCoeff,h_InputBicubicCoeff,16*16*sizeof(float),hipMemcpyHostToDevice)); checkCudaErrors(hipMalloc((void**)&d_OutputIMGR, (width*height)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_OutputIMGRx, (width*height)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_OutputIMGRy, (width*height)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_OutputIMGT, (width*height)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_OutputIMGTx, width*height*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_OutputIMGTy, width*height*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_OutputIMGTxy, width*height*sizeof(float))); checkCudaErrors(hipMalloc((void**)&d_OutputBicubic, (width*height*4*4)*sizeof(float))); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); dim3 dimGirds((width-1)/BLOCK_SIZE+1,(height-1)/BLOCK_SIZE+1,1); hipLaunchKernelGGL(( precomputation_kernel), dim3(dimGirds),dim3(dimBlock), 0, 0, d_InputIMGR,d_InputIMGT,d_InputBiubicCoeff, d_OutputIMGR,d_OutputIMGT,d_OutputIMGRx,d_OutputIMGRy, d_OutputIMGTx,d_OutputIMGTy,d_OutputIMGTxy,d_OutputBicubic, width,height); hipFree(d_OutputIMGTx); hipFree(d_OutputIMGTy); hipFree(d_OutputIMGTxy); hipFree(d_InputIMGR); hipFree(d_InputIMGT); hipFree(d_InputBiubicCoeff); WatchPrecompute.stop(); fTimePrecopmute = WatchPrecompute.getTime(); //FFT-ZNCC Begins hInput_dR = (float*)malloc(width*height*sizeof(float)); hInput_dT = (float*)malloc(width*height*sizeof(float)); fZNCC = (float*)malloc(iNumberX*iNumberY*sizeof(float)); iU = (int*)malloc(iNumberX*iNumberY*sizeof(int)); iV = (int*)malloc(iNumberX*iNumberY*sizeof(int)); float *fdPXY = (float*)malloc(iNumberX*iNumberY*2*sizeof(float)); for(int i=0; i<iNumberY; i++){ for(int j=0; j<iNumberX; j++){ fdPXY[(i*iNumberX+j)*2+0] = float(iMarginX + iSubsetY + i*iGridY); fdPXY[(i*iNumberX+j)*2+1] = float(iMarginY + iSubsetX + j*iGridX); } } checkCudaErrors(hipMemcpy(hInput_dR, d_OutputIMGR, width*height*sizeof(float), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(hInput_dT, d_OutputIMGT, width*height*sizeof(float), hipMemcpyDeviceToHost)); FFT_CC_interface(hInput_dR, hInput_dT, fdPXY, iNumberY, iNumberX, iFFTSubH, iFFTSubW, width, height, iSubsetY, iSubsetX, fZNCC, iU, iV, fTimeFFTCC); /*Befor ICGN starts, pass the average and norm value of image R in first, to simplify GPU's work.*/ thrust::host_vector<float> hAveR; thrust::host_vector<float> hNormR; float temp = 0.0f, temp1=0.0f, temp2 = 0.0f; for(int i=0; i<iNumberY; i++){ for(int j=0; j<iNumberX; j++){ temp = 0.0; for(int l=0; l<iSubsetH; l++){ for(int m=0; m<iSubsetW; m++){ temp += hInput_dR[int(fdPXY[(i*iNumberX+j)*2+0] - iSubsetY+l)*width + int(fdPXY[(i*iNumberX+j)*2+1] - iSubsetX+m)] / float(iSubsetW*iSubsetH); } } hAveR.push_back(temp); temp1 = 0.0f, temp2 = 0.0f; for(int l=0; l<iSubsetH; l++){ for(int m=0; m<iSubsetW; m++){ temp1 = hInput_dR[int(fdPXY[(i*iNumberX+j)*2+0] - iSubsetY+l)*width + int(fdPXY[(i*iNumberX+j)*2+1] - iSubsetX+m)] / float(iSubsetW*iSubsetH) - 
hAveR[i*iNumberX+j]; temp2 += pow(temp1,2); } } hNormR.push_back(sqrt(temp2)); } } thrust::device_vector<float> dAveR = hAveR; thrust::device_vector<float> dNormR = hNormR; float *dAveRaw = thrust::raw_pointer_cast(&dAveR[0]); float *dNormRaw = thrust::raw_pointer_cast(&dNormR[0]); //ICGN-Begins WatchICGN.start(); int *dInput_iU, *dInput_iV; float *dInput_fPXY, *dOutput_fDP; checkCudaErrors(hipMalloc((void**)&dInput_iU, (iNumberX*iNumberY)*sizeof(int))); checkCudaErrors(hipMalloc((void**)&dInput_iV, (iNumberX*iNumberY)*sizeof(int))); checkCudaErrors(hipMalloc((void**)&dInput_fPXY, (iNumberX*iNumberY)*2*sizeof(float))); checkCudaErrors(hipMalloc((void**)&dOutput_fDP, (iNumberX*iNumberY)*6*sizeof(float))); checkCudaErrors(hipMemcpy(dInput_iU, iU,(iNumberX*iNumberY)*sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dInput_iV, iV,(iNumberX*iNumberY)*sizeof(int), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(dInput_fPXY, fdPXY,(iNumberX*iNumberY)*2*sizeof(float), hipMemcpyHostToDevice)); dim3 dimB((iSubsetW+2),(iSubsetH+2),1); dim3 dimG(iNumberX,iNumberY,1); hipLaunchKernelGGL(( ICGN_kernel), dim3(dimG), dim3(dimB), 0, 0, d_OutputIMGR,d_OutputIMGRx,d_OutputIMGRy,dAveRaw, dNormRaw,fDeltaP,d_OutputIMGT,d_OutputBicubic,dInput_iU,dInput_iV, iNumberY, iNumberX, iSubsetH, iSubsetW, width, height, iSubsetY, iSubsetX, iGridX, iGridY, iMarginX, iMarginY, iIterationNum, dOutput_fDP); float *fdP = (float*)malloc(iNumberX*iNumberY*6*sizeof(float)); checkCudaErrors(hipMemcpy(fdP, dOutput_fDP, iNumberY*iNumberX*6*sizeof(float), hipMemcpyDeviceToHost)); WatchICGN.stop(); fTimeICGN = WatchICGN.getTime(); WatchTotal.stop(); fTimeTotal = WatchTotal.getTime(); checkCudaErrors(hipFree(d_OutputIMGR)); checkCudaErrors(hipFree(d_OutputIMGRx)); checkCudaErrors(hipFree(d_OutputIMGRy)); checkCudaErrors(hipFree(d_OutputIMGT)); checkCudaErrors(hipFree(d_OutputBicubic)); checkCudaErrors(hipFree(dInput_iU)); checkCudaErrors(hipFree(dInput_iV)); checkCudaErrors(hipFree(dInput_fPXY)); checkCudaErrors(hipFree(dOutput_fDP)); std::ofstream OutputFile; OutputFile.open("Results.txt"); for(int i =0; i<iNumberY; i++){ for(int j=0; j<iNumberX; j++){ OutputFile<<int(fdPXY[(i*iNumberX+j)*2+1])<<", "<<int(fdPXY[(i*iNumberX+j)*2+0])<<", "<<iU[i*iNumberX+j]<<", " <<fdP[(i*iNumberX+j)*6+0]<<", "<<fdP[(i*iNumberX+j)*6+1]<<", "<<fdP[(i*iNumberX+j)*6+2]<<", "<<fdP[(i*iNumberX+j)*6+3]<<", "<<iV[i*iNumberX+j]<<", "<<fdP[(i*iNumberX+j)*6+4]<<", "<<fdP[(i*iNumberX+j)*6+5]<<", " <<fZNCC[i*iNumberX+j]<<std::endl; } } OutputFile.close(); OutputFile.open("Time.txt"); OutputFile << "Interval (X-axis): " << iGridX << " [pixel]" << std::endl; OutputFile << "Interval (Y-axis): " << iGridY << " [pixel]" << std::endl; OutputFile << "Number of POI: " << iNumberY*iNumberX << " = " << iNumberX << " X " << iNumberY << std::endl; OutputFile << "Subset dimension: " << iSubsetW << "x" << iSubsetH << " pixels" << std::endl; OutputFile << "Time comsumed: " << fTimeTotal << " [millisec]" <<std::endl; OutputFile << "Time for Pre-computation: " << fTimePrecopmute << " [millisec]" << std::endl; OutputFile << "Time for integral-pixel registration: " << fTimeFFTCC / (iNumberY*iNumberX) << " [millisec]" << std::endl; OutputFile << "Time for sub-pixel registration: " << fTimeICGN / (iNumberY*iNumberX) << " [millisec]" << " for average iteration steps of " << float(iIterationNum) / (iNumberY*iNumberX) << std::endl; OutputFile << width << ", " << height << ", " << iGridX << ", " << iGridY << ", " << std::endl; OutputFile <<"Time for computing every 
FFT:"<<fTimeFFTCC<<"[miliseconds]"<<std::endl; OutputFile <<"Time for ICGN:"<<fTimeICGN<<std::endl; OutputFile.close(); free(hInput_dR); free(hInput_dT); free(fZNCC); free(fdP); free(fdPXY); free(iU); free(iV); }
20469f9f361285d267627323344533246249f6b4.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include "helper_functions.h" #include "thrust\host_vector.h" #include "thrust\device_vector.h" #include "thrust\reduce.h" #include "EasyBMP.h" #include "PIDIC.cuh" #include "FFTCC.h" #include <stdio.h> #include <iostream> #include <fstream> //Parameters const int iMarginX = 10, iMarginY = 10; const int iGridX = 10, iGridY = 10; const int iSubsetX = 8, iSubsetY =8; const float fDeltaP = 0.001f; const int iIterationNum = 5; const int BLOCK_SIZE = 16; //CUDA RUNTIME Initialization void InitCuda() { cudaFree(0); } __global__ void precomputation_kernel(float *d_InputIMGR, float *d_InputIMGT, const float* __restrict__ d_InputBiubicMatrix, float *d_OutputIMGR, float *d_OutputIMGT, float *d_OutputIMGRx, float *d_OutputIMGRy, float *d_OutputIMGTx, float *d_OutputIMGTy, float *d_OutputIMGTxy, float *d_OutputdtBicubic, int width, int height) { //The size of input images int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //Temp arrays float d_TaoT[16]; float d_AlphaT[16]; //The rows and cols of output matrix. if((row < height) && (col < width)){ d_OutputIMGR[row*width+col] = d_InputIMGR[(row+1)*(width+2)+col+1]; d_OutputIMGRx[row*width+col] = 0.5 * (d_InputIMGR[(row+1)*(width+2)+col+2] - d_InputIMGR[(row+1)*(width+2)+col]); d_OutputIMGRy[row*width+col] = 0.5 * (d_InputIMGR[(row+2)*(width+2)+col+1] - d_InputIMGR[(row)*(width+2)+col+1]); d_OutputIMGT[row*width+col] = d_InputIMGT[(row+1)*(width+2)+col+1]; d_OutputIMGTx[row*width+col] = 0.5 * (d_InputIMGT[(row+1)*(width+2)+col+2] -d_InputIMGT[(row+1)*(width+2)+col]); d_OutputIMGTy[row*width+col] = 0.5 * (d_InputIMGT[(row+2)*(width+2)+col+1] - d_InputIMGT[(row)*(width+2)+col+1]); d_OutputIMGTxy[row*width+col]= 0.25 * (d_InputIMGT[(row+2)*(width+2)+col+2] - d_InputIMGT[(row)*(width+2)+col+2] -d_InputIMGT[(row+2)*(width+2)+col] + d_InputIMGT[(row)*(width+2)+col]); } __syncthreads(); if((row < height-1) && (col < width-1)){ d_TaoT[0] = d_OutputIMGT[row*(width)+col]; d_TaoT[1] = d_OutputIMGT[row*(width)+col+1]; d_TaoT[2] = d_OutputIMGT[(row+1)*(width)+col]; d_TaoT[3] = d_OutputIMGT[(row+1)*(width)+col+1]; d_TaoT[4] = d_OutputIMGTx[row*(width)+col]; d_TaoT[5] = d_OutputIMGTx[row*(width)+col+1]; d_TaoT[6] = d_OutputIMGTx[(row+1)*(width)+col]; d_TaoT[7] = d_OutputIMGTx[(row+1)*(width)+col+1]; d_TaoT[8] = d_OutputIMGTy[row*(width)+col]; d_TaoT[9] = d_OutputIMGTy[row*(width)+col+1]; d_TaoT[10] = d_OutputIMGTy[(row+1)*(width)+col]; d_TaoT[11] = d_OutputIMGTy[(row+1)*(width)+col+1]; d_TaoT[12] = d_OutputIMGTxy[row*(width)+col]; d_TaoT[13] = d_OutputIMGTxy[row*(width)+col+1]; d_TaoT[14] = d_OutputIMGTxy[(row+1)*(width)+col]; d_TaoT[15] = d_OutputIMGTxy[(row+1)*(width)+col+1]; for(int k=0; k<16; k++){ d_AlphaT[k] = 0.0; for(int l=0; l<16; l++){ d_AlphaT[k] += (d_InputBiubicMatrix[k*16+l] * d_TaoT[l]); } } d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = d_AlphaT[0]; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = d_AlphaT[1]; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = d_AlphaT[2]; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = d_AlphaT[3]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = d_AlphaT[4]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = d_AlphaT[5]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = d_AlphaT[6]; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = d_AlphaT[7]; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = d_AlphaT[8]; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = d_AlphaT[9]; 
d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = d_AlphaT[10]; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = d_AlphaT[11]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = d_AlphaT[12]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = d_AlphaT[13]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = d_AlphaT[14]; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = d_AlphaT[15]; } else if(((row >=height-1)&&(row < height)) && ((col >= width-1)&&(col<width))){ d_OutputdtBicubic[((row*(width)+col)*4+0)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+0)*4+3] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+1)*4+3] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+2)*4+3] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+0] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+1] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+2] = 0.0; d_OutputdtBicubic[((row*(width)+col)*4+3)*4+3] = 0.0; } } __global__ void ICGN_kernel(float* input_R, float* input_Rx, float* input_Ry, float* input_AveR, float* input_NormR, float fDeltaP, float *input_T, float* input_Bicubic, int* input_iU, int* input_iV, int iNumberY, int iNumberX, int iSubsetH, int iSubsetW, int width, int height, int iSubsetY, int iSubsetX, int iGridSpaceX, int iGridSpaceY, int iMarginX, int iMarginY, int iIterationNum, float* output_dP) /*BLOCK_SIZE: 2*(iSubsetW+1)+1, 2*(iSubsetH+1)+1 Grid_SIZE: iNumberX * iNumberY */ { int x = threadIdx.x, y = threadIdx.y; int offset = blockIdx.y * gridDim.x + blockIdx.x; //Shared variables of each ROI __shared__ float fPXY[2]; __shared__ float fT[19*19]; __shared__ float fBicubic[19*19*4*4]; __shared__ float fR[17*17]; __shared__ float fRx[17*17]; __shared__ float fRy[17*17]; __shared__ float fdP[6], fdDP[6], fdWarp[3][3], fHessian[6][6],fInvHessian[6][6],fNumerator[6]; __shared__ float fdU, fdV, fdUx, fdUy, fdVx, fdVy; __shared__ float fdDU, fdDUx, fdDUy, fdDV, fdDVx, fdDVy; __shared__ float fSubAveR, fSubNormR, fSubAveT, fSubNormT; __shared__ float fTemp; //Private variables for each subset point float fJacobian[2][6], fRDescent[6], fHessianXY[6][6]; float fSubsetR, fSubsetAveR, fSubsetT, fSubsetAveT; float fdError; float fWarpX, fWarpY; int iTemp, iTempX, iTempY; float fTempX, fTempY; //Load the auxiliary variables into shared memory of each block if(x==0 && y ==0){ fPXY[0] = float(iMarginX + iSubsetY + blockIdx.y * iGridSpaceY); fPXY[1] = float(iMarginY + iSubsetX + blockIdx.x * iGridSpaceX); fdU = float(input_iU[offset]); fdDU = 0.0f; fdV = float(input_iV[offset]); fdDV = 0.0f; fdUx = 0.0f; fdDUx = 0.0f; fdUy = 0.0f; fdDUy = 0.0f; fdVx = 0.0f; fdDVx = 0.0f; fdVy = 0.0f; fdDVy = 0.0f; fdP[0] = fdU; fdP[3] = fdV; fdP[1] = fdUx; fdP[4] = fdVx; fdP[2] = fdUy; fdP[5] = fdVy; fdP[0] = 0.0f; fdP[3] = 0.0f; fdP[1] = 0.0f; fdP[4] = 0.0f; fdP[2] = 0.0f; fdP[5] = 0.0f; fdWarp[0][0] = 1 + fdUx; fdWarp[0][1] = fdUy; fdWarp[0][2] = fdU; fdWarp[1][0] = fdVx; fdWarp[1][1] = 1 + fdVy; fdWarp[1][2] = fdV; fdWarp[2][0] = 0.0f; fdWarp[2][1] = 0.0f; fdWarp[2][2] = 1.0f; fNumerator[0] = 0.0f; fNumerator[1] = 0.0f; fNumerator[2] = 0.0f; fNumerator[3] = 0.0f; fNumerator[4] = 0.0f; fNumerator[5] = 0.0f; 
fdDP[0] = 0.0f; fdDP[1] = 0.0f; fdDP[2] = 0.0f; fdDP[3] = 0.0f; fdDP[4] = 0.0f; fdDP[5] = 0.0f; fSubAveR = input_AveR[offset]; fSubNormR = input_NormR[offset]; fSubsetAveT = 0.0f; fSubNormT = 0.0f; } __syncthreads(); if( x<6 && y<6){ if( x == y){ fInvHessian[y][x] = 1.0f; fHessian[y][x] = 0.0f; } else{ fInvHessian[y][x] = 0.0f; fHessian[y][x] = 0.0f; } } __syncthreads(); //Load PXY, Rx, Ry and R, T, Bicubic into shared_memory if( x<iSubsetW && y<iSubsetH ){ fR[y*iSubsetW+x] = input_R[int(fPXY[0] - iSubsetY + y)*width+int(fPXY[1] - iSubsetX + x)]; fRx[y*iSubsetW+x] = input_Rx[int(fPXY[0] - iSubsetY + y)*width+int(fPXY[1] - iSubsetX + x)]; fRy[y*iSubsetW+x] = input_Ry[int(fPXY[0] - iSubsetY + y)*width+int(fPXY[1] - iSubsetX + x)]; } __syncthreads(); //Load T, Bicubic with additional 1 pixel wider on each side fT[y*iSubsetW+x] = input_T[int(fPXY[0] - (iSubsetY+1) + y)*width+int(fPXY[1] - (iSubsetX+1) + x)]; for(int k=0; k<4; k++){ for(int n=0; n<4; n++){ fBicubic[((y*iSubsetW+x)*4+k)*4+n] = input_Bicubic[((int(fPXY[0] - (iSubsetY+1) + y)*width+int(fPXY[1] - (iSubsetX+1) + x))*4+k)*4+n]; } } __syncthreads(); //Start computing if( x<iSubsetW && y<iSubsetH){ // Evaluate the Jacbian dW/dp at (x, 0); fJacobian[0][0] = 1; fJacobian[0][1] = x - iSubsetX; fJacobian[0][2] = y - iSubsetY; fJacobian[0][3] = 0; fJacobian[0][4] = 0; fJacobian[0][5] = 0; fJacobian[1][0] = 0; fJacobian[1][1] = 0; fJacobian[1][2] = 0; fJacobian[1][3] = 1; fJacobian[1][4] = x - iSubsetX; fJacobian[1][5] = y - iSubsetY; for(unsigned int i=0; i<6; i++){ fRDescent[i] = fRx[y*iSubsetW+x] * fJacobian[0][i] + fRy[y*iSubsetW+x] * fJacobian[1][i]; } for(unsigned int i=0; i<6; i++){ for(unsigned int j=0; j<6; j++){ fHessianXY[i][j] = fRDescent[i] * fRDescent[j]; /*fHessian[i][j] += fHessianXY[i][j];*/ //This is bad code, cannot be all added to one index. atomicAdd(&fHessian[i][j], fHessianXY[i][j]); //Must use this instead. 
} } fSubsetAveR = fSubsetR - fSubAveR; } __syncthreads(); //Invert the Hessian matrix using the first 36 threads if(x ==0 && y ==0){ for(int l=0; l<6; l++){ iTemp = l; for(int m=l+1; m<6; m++){ if(fHessian[m][l] > fHessian[iTemp][l]){ iTemp = m; } } //Swap the row which has maximum lth column element if(iTemp != l){ for(int k=0; k<6; k++){ fTemp = fHessian[l][k]; fHessian[l][k] = fHessian[iTemp][k]; fHessian[iTemp][k] = fTemp; fTemp = fInvHessian[l][k]; fInvHessian[l][k] = fInvHessian[iTemp][k]; fInvHessian[iTemp][k] = fTemp; } } //Row oerpation to form required identity matrix for(int m=0; m<6; m++){ fTemp = fHessian[m][l]; if(m != l){ for(int n=0; n<6; n++){ fInvHessian[m][n] -= fInvHessian[l][n] * fTemp / fHessian[l][l]; fHessian[m][n] -= fHessian[l][n] * fTemp / fHessian[l][l]; } } else{ for(int n=0; n<6; n++){ fInvHessian[m][n] /= fTemp; fHessian[m][n] /= fTemp; } } } } } for(int it=0; it < iIterationNum; it++){ if( x==0 && y==0){ fSubsetAveT = 0.0f; fSubNormT = 0.0f; fNumerator[0] = 0.0f; fNumerator[1] = 0.0f; fNumerator[2] = 0.0f; fNumerator[3] = 0.0f; fNumerator[4] = 0.0f; fNumerator[5] = 0.0f; fNumerator[6] = 0.0f; } __syncthreads(); if( (x<iSubsetW) && (y<iSubsetH) ){ fWarpX = (iSubsetX+1) + fdWarp[0][0]*(x - iSubsetX) + fdWarp[0][1]*(y - iSubsetY) + fdWarp[0][2]; fWarpY = (iSubsetY+1) + fdWarp[1][0]*(x - iSubsetX) + fdWarp[1][1]*(y - iSubsetY) + fdWarp[1][2]; iTempX = int(fWarpX); iTempY = int(fWarpY); if( (iTempX>=0) && (iTempY>=0) && (iTempX<(iSubsetW+1)) && (iTempY<(iSubsetH+1)) ){ fTempX = fWarpX - float(iTempX); fTempY = fWarpY - float(iTempY); //if it is integer-pixel location ,feed the intensity of T into subset T if( (fTempX <= 0.000001) && (fTempY <= 0.000001) ){ fSubsetT = fT[iTempY*(iSubsetW+2)+iTempX]; } else{ fSubsetT = 0.0f; for(int k=0; k<4; k++){ for(int n=0; n<4; n++){ fSubsetT += fBicubic[((iTempY*(iSubsetW+2)+iTempX)*4+k)*4+n]*pow(fTempY,k)*pow(fTempX,n); } } } atomicAdd(&fSubAveT, fSubsetT/float(iSubsetH*iSubsetW)); fSubsetAveT = fSubsetT - fSubAveT; __syncthreads(); atomicAdd(&fSubNormT, pow(fSubAveT,2)); } } if( (x==0) && (y==0) ){ fSubNormT = sqrt(fSubNormT); } __syncthreads(); if( (x<iSubsetW) && (y<iSubsetH) ){ //Compute Error image fdError = (fSubNormR / fSubNormT) * fSubAveT - fSubsetAveR; } __syncthreads(); if( x==0 && y==0){ for(int i=0; i<6; i++){ atomicAdd(&(fNumerator[i]), (fRDescent[i]*fdError)); } for(int k=0; k<6; k++){ fdDP[k] = 0.0; for(int n=0; n<6; n++){ fdDP[k] += (fInvHessian[k][n] * fNumerator[n]); } } fdDU = fdDP[0]; fdDUx = fdDP[1]; fdDUy = fdDP[2]; fdDV = fdDP[3]; fdDVx = fdDP[4]; fdDVy = fdDP[5]; fTemp = (1+fdDUx) * (1+fdDVy) - fdDUy*fdDVx; fdWarp[0][0] = ((1 + fdUx) * (1 + fdDVy) - fdUy * fdDVx) / fTemp; fdWarp[0][1] = (fdUy * (1 + fdDUx) - (1 + fdUx) * fdDUy) / fTemp; fdWarp[0][2] = fdU + (fdUy * (fdDU * fdDVx - fdDV - fdDV * fdDUx) - (1 + fdUx) * (fdDU * fdDVy + fdDU - fdDUy * fdDV)) / fTemp; fdWarp[1][0] = (fdVx * (1 + fdDVy) - (1 + fdVy) * fdDVx) / fTemp; fdWarp[1][1] = ((1 + fdVy) * (1 + fdDUx) - fdVx * fdDUy) / fTemp; fdWarp[1][2] = fdV + ((1 + fdVy) * (fdDU * fdDVx - fdDV - fdDV * fdDUx) - fdVx * (fdDU * fdDVy + fdDU - fdDUy * fdDV)) / fTemp; fdWarp[2][0] = 0; fdWarp[2][1] = 0; fdWarp[2][2] = 1; // Update DeltaP fdP[0] = fdWarp[0][2]; fdP[1] = fdWarp[0][0] - 1; fdP[2] = fdWarp[0][1]; fdP[3] = fdWarp[1][2]; fdP[4] = fdWarp[1][0]; fdP[5] = fdWarp[1][1] - 1; fdU = fdP[0]; fdUx = fdP[1]; fdUy = fdP[2]; fdV = fdP[3]; fdVx = fdP[4]; fdVy = fdP[5]; } __syncthreads(); } //Pass back the values if( x==0 && y==0 ){ output_dP[offset*6+0] 
= fdP[0]; output_dP[offset*6+1] = fdP[1]; output_dP[offset*6+2] = fdP[2]; output_dP[offset*6+3] = fdP[3]; output_dP[offset*6+4] = fdP[4]; output_dP[offset*6+5] = fdP[5]; } } void computation_interface(const std::vector<float>& ImgR, const std::vector<float>& ImgT, int iWidth, int iHeight) { //Timers StopWatchWin WatchPrecompute, WatchICGN, WatchTotal; float fTimePrecopmute=0.0f, fTimeFFTCC=0.0f, fTimeICGN=0.0f, fTimeTotal=0.0f; //Parameters used in the computations. int width = iWidth - 2; int height = iHeight -2; int iNumberX = int(floor((width - iSubsetX*2 - iMarginX*2)/float(iGridX))) + 1; int iNumberY = int(floor((height - iSubsetY*2 - iMarginY*2)/float(iGridY))) + 1; int iSubsetW = iSubsetX*2+1; int iSubsetH = iSubsetY*2+1; int iFFTSubW = iSubsetX*2; int iFFTSubH = iSubsetY*2; /*--------------------------------------Parameters for CUDA kernel use---------------------------------------------*/ //Precomputation Parameters const static float h_InputBicubicCoeff[16*16] = { 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 , -3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 , 0, 0, 0, 0, 0, 0, 0, 0, -3, 3, 0, 0, -2, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, -2, 0, 0, 1, 1, 0, 0 , -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3, 0, 3, 0, 0, 0, 0, 0, -2, 0, -1, 0, 9, -9, -9, 9, 6, 3, -6, -3, 6, -6, 3, -3, 4, 2, 2, 1 , -6, 6, 6, -6, -3, -3, 3, 3, -4, 4, -2, 2, -2, -2, -1, -1, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0 , 0, 0, 0, 0, 2, 0, -2, 0, 0, 0, 0, 0, 1, 0, 1, 0 , -6, 6, 6, -6, -4, -2, 4, 2, -3, 3, -3, 3, -2, -1, -2, -1, 4, -4, -4, 4, 2, 2, -2, -2, 2, -2, 2, -2, 1, 1, 1, 1 }; float *d_InputIMGR, *d_InputIMGT,*d_InputBiubicCoeff; float *d_OutputIMGTx, *d_OutputIMGTy, *d_OutputIMGTxy,*d_OutputIMGR, *d_OutputIMGT, *d_OutputIMGRx, *d_OutputIMGRy, *d_OutputBicubic; //FFT-ZNCC Parameters float *hInput_dR, *hInput_dT, *fZNCC; int *iU, *iV; /*------------------------------Real computation starts here-------------------------------- Totally, there are three steps: 1. Precomputation of images' gradients matrix and bicubic interpolation matrix 2. Using FFT to transform the two images into frequency domain, and after per- forming ZNCC, transforming the results back. 3. A Gaussian Newton's optimization method is used to estimate the warped images. 
*/ WatchTotal.start(); //Precomputation Starts; WatchPrecompute.start(); checkCudaErrors(cudaMalloc((void**)&d_InputIMGR, (width+2)*(height+2)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_InputIMGT, (width+2)*(height+2)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_InputBiubicCoeff, 16*16*sizeof(float))); checkCudaErrors(cudaMemcpy(d_InputIMGR,&ImgR[0],(width+2)*(height+2)*sizeof(float),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_InputIMGT,&ImgT[0],(width+2)*(height+2)*sizeof(float),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_InputBiubicCoeff,h_InputBicubicCoeff,16*16*sizeof(float),cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void**)&d_OutputIMGR, (width*height)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_OutputIMGRx, (width*height)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_OutputIMGRy, (width*height)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_OutputIMGT, (width*height)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_OutputIMGTx, width*height*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_OutputIMGTy, width*height*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_OutputIMGTxy, width*height*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&d_OutputBicubic, (width*height*4*4)*sizeof(float))); dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1); dim3 dimGirds((width-1)/BLOCK_SIZE+1,(height-1)/BLOCK_SIZE+1,1); precomputation_kernel<<<dimGirds,dimBlock>>>(d_InputIMGR,d_InputIMGT,d_InputBiubicCoeff, d_OutputIMGR,d_OutputIMGT,d_OutputIMGRx,d_OutputIMGRy, d_OutputIMGTx,d_OutputIMGTy,d_OutputIMGTxy,d_OutputBicubic, width,height); cudaFree(d_OutputIMGTx); cudaFree(d_OutputIMGTy); cudaFree(d_OutputIMGTxy); cudaFree(d_InputIMGR); cudaFree(d_InputIMGT); cudaFree(d_InputBiubicCoeff); WatchPrecompute.stop(); fTimePrecopmute = WatchPrecompute.getTime(); //FFT-ZNCC Begins hInput_dR = (float*)malloc(width*height*sizeof(float)); hInput_dT = (float*)malloc(width*height*sizeof(float)); fZNCC = (float*)malloc(iNumberX*iNumberY*sizeof(float)); iU = (int*)malloc(iNumberX*iNumberY*sizeof(int)); iV = (int*)malloc(iNumberX*iNumberY*sizeof(int)); float *fdPXY = (float*)malloc(iNumberX*iNumberY*2*sizeof(float)); for(int i=0; i<iNumberY; i++){ for(int j=0; j<iNumberX; j++){ fdPXY[(i*iNumberX+j)*2+0] = float(iMarginX + iSubsetY + i*iGridY); fdPXY[(i*iNumberX+j)*2+1] = float(iMarginY + iSubsetX + j*iGridX); } } checkCudaErrors(cudaMemcpy(hInput_dR, d_OutputIMGR, width*height*sizeof(float), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(hInput_dT, d_OutputIMGT, width*height*sizeof(float), cudaMemcpyDeviceToHost)); FFT_CC_interface(hInput_dR, hInput_dT, fdPXY, iNumberY, iNumberX, iFFTSubH, iFFTSubW, width, height, iSubsetY, iSubsetX, fZNCC, iU, iV, fTimeFFTCC); /*Befor ICGN starts, pass the average and norm value of image R in first, to simplify GPU's work.*/ thrust::host_vector<float> hAveR; thrust::host_vector<float> hNormR; float temp = 0.0f, temp1=0.0f, temp2 = 0.0f; for(int i=0; i<iNumberY; i++){ for(int j=0; j<iNumberX; j++){ temp = 0.0; for(int l=0; l<iSubsetH; l++){ for(int m=0; m<iSubsetW; m++){ temp += hInput_dR[int(fdPXY[(i*iNumberX+j)*2+0] - iSubsetY+l)*width + int(fdPXY[(i*iNumberX+j)*2+1] - iSubsetX+m)] / float(iSubsetW*iSubsetH); } } hAveR.push_back(temp); temp1 = 0.0f, temp2 = 0.0f; for(int l=0; l<iSubsetH; l++){ for(int m=0; m<iSubsetW; m++){ temp1 = hInput_dR[int(fdPXY[(i*iNumberX+j)*2+0] - iSubsetY+l)*width + int(fdPXY[(i*iNumberX+j)*2+1] - iSubsetX+m)] / float(iSubsetW*iSubsetH) - hAveR[i*iNumberX+j]; 
temp2 += pow(temp1,2); } } hNormR.push_back(sqrt(temp2)); } } thrust::device_vector<float> dAveR = hAveR; thrust::device_vector<float> dNormR = hNormR; float *dAveRaw = thrust::raw_pointer_cast(&dAveR[0]); float *dNormRaw = thrust::raw_pointer_cast(&dNormR[0]); //ICGN-Begins WatchICGN.start(); int *dInput_iU, *dInput_iV; float *dInput_fPXY, *dOutput_fDP; checkCudaErrors(cudaMalloc((void**)&dInput_iU, (iNumberX*iNumberY)*sizeof(int))); checkCudaErrors(cudaMalloc((void**)&dInput_iV, (iNumberX*iNumberY)*sizeof(int))); checkCudaErrors(cudaMalloc((void**)&dInput_fPXY, (iNumberX*iNumberY)*2*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&dOutput_fDP, (iNumberX*iNumberY)*6*sizeof(float))); checkCudaErrors(cudaMemcpy(dInput_iU, iU,(iNumberX*iNumberY)*sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dInput_iV, iV,(iNumberX*iNumberY)*sizeof(int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(dInput_fPXY, fdPXY,(iNumberX*iNumberY)*2*sizeof(float), cudaMemcpyHostToDevice)); dim3 dimB((iSubsetW+2),(iSubsetH+2),1); dim3 dimG(iNumberX,iNumberY,1); ICGN_kernel<<<dimG, dimB>>>(d_OutputIMGR,d_OutputIMGRx,d_OutputIMGRy,dAveRaw, dNormRaw,fDeltaP,d_OutputIMGT,d_OutputBicubic,dInput_iU,dInput_iV, iNumberY, iNumberX, iSubsetH, iSubsetW, width, height, iSubsetY, iSubsetX, iGridX, iGridY, iMarginX, iMarginY, iIterationNum, dOutput_fDP); float *fdP = (float*)malloc(iNumberX*iNumberY*6*sizeof(float)); checkCudaErrors(cudaMemcpy(fdP, dOutput_fDP, iNumberY*iNumberX*6*sizeof(float), cudaMemcpyDeviceToHost)); WatchICGN.stop(); fTimeICGN = WatchICGN.getTime(); WatchTotal.stop(); fTimeTotal = WatchTotal.getTime(); checkCudaErrors(cudaFree(d_OutputIMGR)); checkCudaErrors(cudaFree(d_OutputIMGRx)); checkCudaErrors(cudaFree(d_OutputIMGRy)); checkCudaErrors(cudaFree(d_OutputIMGT)); checkCudaErrors(cudaFree(d_OutputBicubic)); checkCudaErrors(cudaFree(dInput_iU)); checkCudaErrors(cudaFree(dInput_iV)); checkCudaErrors(cudaFree(dInput_fPXY)); checkCudaErrors(cudaFree(dOutput_fDP)); std::ofstream OutputFile; OutputFile.open("Results.txt"); for(int i =0; i<iNumberY; i++){ for(int j=0; j<iNumberX; j++){ OutputFile<<int(fdPXY[(i*iNumberX+j)*2+1])<<", "<<int(fdPXY[(i*iNumberX+j)*2+0])<<", "<<iU[i*iNumberX+j]<<", " <<fdP[(i*iNumberX+j)*6+0]<<", "<<fdP[(i*iNumberX+j)*6+1]<<", "<<fdP[(i*iNumberX+j)*6+2]<<", "<<fdP[(i*iNumberX+j)*6+3]<<", "<<iV[i*iNumberX+j]<<", "<<fdP[(i*iNumberX+j)*6+4]<<", "<<fdP[(i*iNumberX+j)*6+5]<<", " <<fZNCC[i*iNumberX+j]<<std::endl; } } OutputFile.close(); OutputFile.open("Time.txt"); OutputFile << "Interval (X-axis): " << iGridX << " [pixel]" << std::endl; OutputFile << "Interval (Y-axis): " << iGridY << " [pixel]" << std::endl; OutputFile << "Number of POI: " << iNumberY*iNumberX << " = " << iNumberX << " X " << iNumberY << std::endl; OutputFile << "Subset dimension: " << iSubsetW << "x" << iSubsetH << " pixels" << std::endl; OutputFile << "Time comsumed: " << fTimeTotal << " [millisec]" <<std::endl; OutputFile << "Time for Pre-computation: " << fTimePrecopmute << " [millisec]" << std::endl; OutputFile << "Time for integral-pixel registration: " << fTimeFFTCC / (iNumberY*iNumberX) << " [millisec]" << std::endl; OutputFile << "Time for sub-pixel registration: " << fTimeICGN / (iNumberY*iNumberX) << " [millisec]" << " for average iteration steps of " << float(iIterationNum) / (iNumberY*iNumberX) << std::endl; OutputFile << width << ", " << height << ", " << iGridX << ", " << iGridY << ", " << std::endl; OutputFile <<"Time for computing every 
FFT:"<<fTimeFFTCC<<"[miliseconds]"<<std::endl; OutputFile <<"Time for ICGN:"<<fTimeICGN<<std::endl; OutputFile.close(); free(hInput_dR); free(hInput_dT); free(fZNCC); free(fdP); free(fdPXY); free(iU); free(iV); }
cccfda88f41e7ca8ba0ff4ed7d624adcb82fa095.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utils.h" /* recursive call cuda kernel ----- |A B| |C D| ----- */ __global__ void min_plus(float* A, float* B, float* C, int a_height, int a_width, int b_width, int total_width, int a_startx, int a_starty, int b_startx, int b_starty, int c_startx, int c_starty, bool add); __global__ void single_floyd(float*A, int m_size, int start_x, int start_y, int total_width); __global__ void gpu_floyd_d(float* A, int m_size, int k); void gpu_floyd(float* A, int m_size); void recursive_apsp(float* in_dist_d, int m_size, int start_x, int start_y, int total_width); void cuda_apsp(float* in_dist_d, int m_size) { //gpu_floyd(in_dist_d, m_size); recursive_apsp(in_dist_d, m_size, 0, 0, m_size); } void gpu_floyd(float* A, int m_size) { int grid_size = ceil(float(m_size)/float(BLOCKSIZE)); dim3 blocks(grid_size, grid_size); dim3 threads(BLOCKSIZE, BLOCKSIZE); for (int k=0; k<m_size; k++) hipLaunchKernelGGL(( gpu_floyd_d), dim3(blocks), dim3(threads), 0, 0, A, m_size, k); return; } __global__ void gpu_floyd_d(float* A, int m_size, int k) { int dx = blockIdx.x; int dy = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = dy*BLOCKSIZE + ty; int Col = dx*BLOCKSIZE + tx; if (Row<m_size && Col<m_size){ A[Row*m_size + Col] = fminf(A[Row*m_size + k] + A[k*m_size+ Col], A[Row*m_size + Col]); } } /* do in-place operation on in_dist_d */ void recursive_apsp(float* in_dist_d, int m_size, int start_x, int start_y, int total_width) { if (m_size<=BLOCKSIZE){ // for matrix small than blocksize we use floyd-warshall using one block // fast for small graph dim3 blocks(1, 1); dim3 threads(BLOCKSIZE, BLOCKSIZE); hipLaunchKernelGGL(( single_floyd) , dim3(blocks), dim3(threads), 0, 0, in_dist_d, m_size, start_x, start_y, total_width); return; } else{ int new_size = ceil(m_size/2); // split matrix into A,B,C,D int a_startx = start_x, a_starty = start_y; int b_startx = start_x, b_starty = start_y+new_size; int c_startx = start_x+new_size, c_starty = start_y; int d_startx = start_x+new_size, d_starty = start_y+new_size; int a_height = new_size, a_width = new_size; int b_height = new_size; int b_width = m_size - new_size; int c_height = m_size - new_size; int c_width = new_size; int d_height = m_size - new_size, d_width = m_size - new_size; int grid_size = ceil(float(new_size)/float(BLOCKSIZE)); dim3 blocks(grid_size, grid_size); dim3 threads(BLOCKSIZE, BLOCKSIZE); // A = A* //minplus(B,A,B,add=0); // B = AB //minplus(C,C,A,add=0); // C = CA //minplus(D,C,B,add=1); // D = D+CB recursive_apsp(in_dist_d, new_size, start_x, start_y, total_width); hipLaunchKernelGGL(( min_plus) , dim3(blocks), dim3(threads), 0, 0, in_dist_d,in_dist_d,in_dist_d, b_height, b_width, a_width, total_width, b_startx, b_starty, a_startx, a_starty, b_startx, b_starty,false); hipLaunchKernelGGL(( min_plus) , dim3(blocks), dim3(threads), 0, 0, in_dist_d,in_dist_d,in_dist_d, c_height, c_width, c_width, total_width, c_startx, c_starty, c_startx, c_starty, a_startx, a_starty,false); hipLaunchKernelGGL(( min_plus) , dim3(blocks), dim3(threads), 0, 0, in_dist_d,in_dist_d,in_dist_d, d_height, d_width, c_width, total_width, d_startx, d_starty, c_startx, c_starty, b_startx, b_starty,true); // D = D* //minplus(B,B,D,add=0); //B = BD //minplus(C,D,C,add=0); //C = DC //minplus(A,B,C,add=1); //A = A+BC recursive_apsp(in_dist_d, m_size-new_size, start_x+new_size, start_y+new_size, total_width); hipLaunchKernelGGL(( min_plus) , dim3(blocks), dim3(threads), 0, 0, 
in_dist_d,in_dist_d,in_dist_d, b_height, b_width, b_width, total_width, b_startx, b_starty, b_startx, b_starty, d_startx, d_starty,false); hipLaunchKernelGGL(( min_plus) , dim3(blocks), dim3(threads), 0, 0, in_dist_d,in_dist_d,in_dist_d, c_height, c_width, d_width, total_width, c_startx, c_starty, d_startx, d_starty, c_startx, c_starty,false); hipLaunchKernelGGL(( min_plus) , dim3(blocks), dim3(threads), 0, 0, in_dist_d,in_dist_d,in_dist_d, a_height, a_width, b_width, total_width, a_startx, a_starty, b_startx, b_starty, c_startx, c_starty,true); return; } } __global__ void single_floyd(float*A, int m_size, int start_x, int start_y, int total_width) { __shared__ float As[BLOCKSIZE][BLOCKSIZE]; // only one block will be launched int tx = threadIdx.x; int ty = threadIdx.y; int Row = ty + start_x; int Col = tx + start_y; // load A to shared memory if (tx<m_size && ty<m_size){ As[ty][tx] = A[Row*total_width + Col]; } else As[ty][tx] = finf; __syncthreads(); //floyd for (int k=0; k<m_size; k++) { As[ty][tx] = fminf(As[ty][k]+As[k][tx], As[ty][tx]); __syncthreads(); } //save to the original A if (tx<m_size && ty<m_size){ A[Row*total_width + Col] = As[ty][tx]; } } __global__ void min_plus_global(float* A, float* B, float* C, int a_height, int a_width, int b_width, int total_width, int a_startx, int a_starty, int b_startx, int b_starty, int c_startx, int c_starty, bool add) { // do we need __syncthreads() ? int x = blockIdx.x * blockDim.y + threadIdx.x; int y = blockIdx.y * blockDim.x + threadIdx.y; if (x<a_height && y<a_width){ int a_index = (a_startx+x)*total_width + a_starty+y; float min_value = finf; //////////////////////// int b_index, c_index; for(int i=0; i<b_width; i++) { b_index = (x+b_startx)*total_width + b_starty + i; c_index = (i+c_startx)*total_width + c_starty + y; min_value = fminf(min_value, B[b_index] + C[c_index]); } if(add){ min_value = fminf(min_value, A[a_index]); } #ifdef debug printf("%f ", min_value); #endif A[a_index] = min_value; } } __global__ void min_plus(float* A, float* B, float* C, int a_height, int a_width, int b_width, int total_width, int a_startx, int a_starty, int b_startx, int b_starty, int c_startx, int c_starty, bool add) { __shared__ float Bs[BLOCKSIZE][BLOCKSIZE]; __shared__ float Cs[BLOCKSIZE][BLOCKSIZE]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by*BLOCKSIZE + ty; // Row first to make it more cache friendly int Col = bx*BLOCKSIZE + tx; float min_value = finf; //init with finf (not zero...) int out_loop = ceilf( float(b_width)/float(BLOCKSIZE) ); for (int m=0; m<out_loop; m++){ int Bs_index = (Row+b_startx)*total_width + m*BLOCKSIZE+tx+b_starty; int Cs_index = (ty+m*BLOCKSIZE+c_startx)*total_width + Col+c_starty; if (Row<a_height && (m*BLOCKSIZE+tx)<b_width ) Bs[ty][tx] = B[Bs_index]; else Bs[ty][tx] = finf; if ( (ty+m*BLOCKSIZE)<b_width && Col<a_width ) Cs[ty][tx] = C[Cs_index]; else Cs[ty][tx] = finf; __syncthreads(); for (int k=0; k<BLOCKSIZE; k++) min_value = fminf(Bs[ty][k]+Cs[k][tx], min_value); __syncthreads(); } if (Row<a_height && Col<a_width){ int A_index = (a_startx+Row)*total_width + a_starty+Col; if (add) min_value = fminf(A[A_index], min_value); A[A_index] = min_value; } }
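// ---------------------------------------------------------------------------
// [Hedged sketch added by the editor; not part of the original sources.] The
// recursive APSP above splits the distance matrix into blocks A, B, C, D and
// closes them with min-plus products; on small graphs the usual sanity check
// is a plain CPU Floyd-Warshall like the one below. Unreachable pairs are
// expected to hold a large sentinel (the role `finf` plays in the kernels
// above); the function name is illustrative.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <vector>

static void cpu_floyd_warshall(std::vector<float> &dist, int n) {
  // dist is a dense n*n row-major matrix: dist[i*n + j] holds the current
  // shortest known distance from vertex i to vertex j.
  for (int k = 0; k < n; k++)
    for (int i = 0; i < n; i++)
      for (int j = 0; j < n; j++)
        dist[i * n + j] =
            std::min(dist[i * n + j], dist[i * n + k] + dist[k * n + j]);
}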
cccfda88f41e7ca8ba0ff4ed7d624adcb82fa095.cu
#include "utils.h" /* recursive call cuda kernel ----- |A B| |C D| ----- */ __global__ void min_plus(float* A, float* B, float* C, int a_height, int a_width, int b_width, int total_width, int a_startx, int a_starty, int b_startx, int b_starty, int c_startx, int c_starty, bool add); __global__ void single_floyd(float*A, int m_size, int start_x, int start_y, int total_width); __global__ void gpu_floyd_d(float* A, int m_size, int k); void gpu_floyd(float* A, int m_size); void recursive_apsp(float* in_dist_d, int m_size, int start_x, int start_y, int total_width); void cuda_apsp(float* in_dist_d, int m_size) { //gpu_floyd(in_dist_d, m_size); recursive_apsp(in_dist_d, m_size, 0, 0, m_size); } void gpu_floyd(float* A, int m_size) { int grid_size = ceil(float(m_size)/float(BLOCKSIZE)); dim3 blocks(grid_size, grid_size); dim3 threads(BLOCKSIZE, BLOCKSIZE); for (int k=0; k<m_size; k++) gpu_floyd_d<<<blocks, threads>>> (A, m_size, k); return; } __global__ void gpu_floyd_d(float* A, int m_size, int k) { int dx = blockIdx.x; int dy = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = dy*BLOCKSIZE + ty; int Col = dx*BLOCKSIZE + tx; if (Row<m_size && Col<m_size){ A[Row*m_size + Col] = fminf(A[Row*m_size + k] + A[k*m_size+ Col], A[Row*m_size + Col]); } } /* do in-place operation on in_dist_d */ void recursive_apsp(float* in_dist_d, int m_size, int start_x, int start_y, int total_width) { if (m_size<=BLOCKSIZE){ // for matrix small than blocksize we use floyd-warshall using one block // fast for small graph dim3 blocks(1, 1); dim3 threads(BLOCKSIZE, BLOCKSIZE); single_floyd <<<blocks, threads>>> (in_dist_d, m_size, start_x, start_y, total_width); return; } else{ int new_size = ceil(m_size/2); // split matrix into A,B,C,D int a_startx = start_x, a_starty = start_y; int b_startx = start_x, b_starty = start_y+new_size; int c_startx = start_x+new_size, c_starty = start_y; int d_startx = start_x+new_size, d_starty = start_y+new_size; int a_height = new_size, a_width = new_size; int b_height = new_size; int b_width = m_size - new_size; int c_height = m_size - new_size; int c_width = new_size; int d_height = m_size - new_size, d_width = m_size - new_size; int grid_size = ceil(float(new_size)/float(BLOCKSIZE)); dim3 blocks(grid_size, grid_size); dim3 threads(BLOCKSIZE, BLOCKSIZE); // A = A* //minplus(B,A,B,add=0); // B = AB //minplus(C,C,A,add=0); // C = CA //minplus(D,C,B,add=1); // D = D+CB recursive_apsp(in_dist_d, new_size, start_x, start_y, total_width); min_plus <<<blocks, threads>>> (in_dist_d,in_dist_d,in_dist_d, b_height, b_width, a_width, total_width, b_startx, b_starty, a_startx, a_starty, b_startx, b_starty,false); min_plus <<<blocks, threads>>> (in_dist_d,in_dist_d,in_dist_d, c_height, c_width, c_width, total_width, c_startx, c_starty, c_startx, c_starty, a_startx, a_starty,false); min_plus <<<blocks, threads>>> (in_dist_d,in_dist_d,in_dist_d, d_height, d_width, c_width, total_width, d_startx, d_starty, c_startx, c_starty, b_startx, b_starty,true); // D = D* //minplus(B,B,D,add=0); //B = BD //minplus(C,D,C,add=0); //C = DC //minplus(A,B,C,add=1); //A = A+BC recursive_apsp(in_dist_d, m_size-new_size, start_x+new_size, start_y+new_size, total_width); min_plus <<<blocks, threads>>> (in_dist_d,in_dist_d,in_dist_d, b_height, b_width, b_width, total_width, b_startx, b_starty, b_startx, b_starty, d_startx, d_starty,false); min_plus <<<blocks, threads>>> (in_dist_d,in_dist_d,in_dist_d, c_height, c_width, d_width, total_width, c_startx, c_starty, d_startx, d_starty, c_startx, c_starty,false); 
min_plus <<<blocks, threads>>> (in_dist_d,in_dist_d,in_dist_d, a_height, a_width, b_width, total_width, a_startx, a_starty, b_startx, b_starty, c_startx, c_starty,true); return; } } __global__ void single_floyd(float*A, int m_size, int start_x, int start_y, int total_width) { __shared__ float As[BLOCKSIZE][BLOCKSIZE]; // only one block will be launched int tx = threadIdx.x; int ty = threadIdx.y; int Row = ty + start_x; int Col = tx + start_y; // load A to shared memory if (tx<m_size && ty<m_size){ As[ty][tx] = A[Row*total_width + Col]; } else As[ty][tx] = finf; __syncthreads(); //floyd for (int k=0; k<m_size; k++) { As[ty][tx] = fminf(As[ty][k]+As[k][tx], As[ty][tx]); __syncthreads(); } //save to the original A if (tx<m_size && ty<m_size){ A[Row*total_width + Col] = As[ty][tx]; } } __global__ void min_plus_global(float* A, float* B, float* C, int a_height, int a_width, int b_width, int total_width, int a_startx, int a_starty, int b_startx, int b_starty, int c_startx, int c_starty, bool add) { // do we need __syncthreads() ? int x = blockIdx.x * blockDim.y + threadIdx.x; int y = blockIdx.y * blockDim.x + threadIdx.y; if (x<a_height && y<a_width){ int a_index = (a_startx+x)*total_width + a_starty+y; float min_value = finf; //////////////////////// int b_index, c_index; for(int i=0; i<b_width; i++) { b_index = (x+b_startx)*total_width + b_starty + i; c_index = (i+c_startx)*total_width + c_starty + y; min_value = fminf(min_value, B[b_index] + C[c_index]); } if(add){ min_value = fminf(min_value, A[a_index]); } #ifdef debug printf("%f ", min_value); #endif A[a_index] = min_value; } } __global__ void min_plus(float* A, float* B, float* C, int a_height, int a_width, int b_width, int total_width, int a_startx, int a_starty, int b_startx, int b_starty, int c_startx, int c_starty, bool add) { __shared__ float Bs[BLOCKSIZE][BLOCKSIZE]; __shared__ float Cs[BLOCKSIZE][BLOCKSIZE]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x; int ty = threadIdx.y; int Row = by*BLOCKSIZE + ty; // Row first to make it more cache friendly int Col = bx*BLOCKSIZE + tx; float min_value = finf; //init with finf (not zero...) int out_loop = ceilf( float(b_width)/float(BLOCKSIZE) ); for (int m=0; m<out_loop; m++){ int Bs_index = (Row+b_startx)*total_width + m*BLOCKSIZE+tx+b_starty; int Cs_index = (ty+m*BLOCKSIZE+c_startx)*total_width + Col+c_starty; if (Row<a_height && (m*BLOCKSIZE+tx)<b_width ) Bs[ty][tx] = B[Bs_index]; else Bs[ty][tx] = finf; if ( (ty+m*BLOCKSIZE)<b_width && Col<a_width ) Cs[ty][tx] = C[Cs_index]; else Cs[ty][tx] = finf; __syncthreads(); for (int k=0; k<BLOCKSIZE; k++) min_value = fminf(Bs[ty][k]+Cs[k][tx], min_value); __syncthreads(); } if (Row<a_height && Col<a_width){ int A_index = (a_startx+Row)*total_width + a_starty+Col; if (add) min_value = fminf(A[A_index], min_value); A[A_index] = min_value; } }
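The pair above computes all-pairs shortest paths by recursively splitting the distance matrix into A/B/C/D blocks and combining them with a blocked min-plus product. Below is a minimal verification harness, offered only as a sketch: it assumes cuda_apsp(float*, int) from the file above is linked in, and it encodes missing edges as a large finite float (the role the finf constant from utils.h presumably plays on the device). It compares the GPU result against a plain CPU Floyd-Warshall on a small random directed graph.

// apsp_check.cu -- hypothetical harness, compiled and linked together with the APSP file above.
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime.h>

void cuda_apsp(float* in_dist_d, int m_size);   // defined in the file above

int main()
{
    const int n = 96;                 // kept a multiple of typical BLOCKSIZE values
    const float INF = 1e30f;          // assumed encoding for "no edge"
    float* h   = (float*)malloc(n * n * sizeof(float));
    float* ref = (float*)malloc(n * n * sizeof(float));
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
            h[i * n + j] = (i == j) ? 0.0f
                         : ((rand() % 4) ? (float)(rand() % 100 + 1) : INF);

    // CPU reference: classic O(n^3) Floyd-Warshall.
    for (int i = 0; i < n * n; ++i) ref[i] = h[i];
    for (int k = 0; k < n; ++k)
        for (int i = 0; i < n; ++i)
            for (int j = 0; j < n; ++j)
                ref[i * n + j] = fminf(ref[i * n + j], ref[i * n + k] + ref[k * n + j]);

    // GPU: copy the matrix over, run the recursive driver in place, copy it back.
    float* d = 0;
    cudaMalloc((void**)&d, n * n * sizeof(float));
    cudaMemcpy(d, h, n * n * sizeof(float), cudaMemcpyHostToDevice);
    cuda_apsp(d, n);
    cudaMemcpy(h, d, n * n * sizeof(float), cudaMemcpyDeviceToHost);

    int bad = 0;
    for (int i = 0; i < n * n; ++i)
        if (fabsf(h[i] - ref[i]) > 1e-2f * fmaxf(1.0f, fabsf(ref[i]))) ++bad;
    printf("mismatching entries: %d\n", bad);

    cudaFree(d);
    free(h);
    free(ref);
    return bad != 0;
}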
11f49b8e4b262f1d4d3b23f7d18362aa0aacaaf5.hip
// !!! This is a file automatically generated by hipify!!! #include <sys/time.h> #include <stdio.h> #include <math.h> //TODO for writing to file, will be deleted #include <stdlib.h> //TODO: could include later //#include <device_launch_parameters.h> #include <hip/hip_runtime.h> //#include "../inc/helper_cuda.h" #define GRID_YDIM 65535 // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } void initDataA(float* data, int nx, int ny){ int i,j; for(i = 0; i < nx; i++){ for(j = 0; j < ny; j++){ data[i*ny + j] = (float) (i+j)/3.0; } } } void initDataB(float* data, int nx, int ny){ int i,j; for(i = 0; i < nx; i++){ for(j = 0; j < ny; j++){ data[i*ny + j] = (float)3.14*(i+j); } } } void debugPrint(float* data, int nx, int ny){ int i,j; for(i = 0; i < nx; i++){ for(j = 0; j < ny; j++){ printf("%f ",data[i*ny + j]); } printf("\n"); } printf("\n"); } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ int i; for(i = 0; i < nx*ny; i++){ C[i] = A[i] + B[i]; } } // device-side matrix addition //__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // // kernel code might look something like this // // but you may want to pad the matrices and index into them accordingly // int ix = threadIdx.x + blockIdx.x*blockDim.x ; // int iy = threadIdx.y + blockIdx.y*blockDim.y ; // int idx = iy*nx + ix ; // if( (ix<nx) && (iy<ny) ) // C[idx] = A[idx] + B[idx] ; //} __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x*blockDim.x ; int iy = threadIdx.y + blockIdx.y*blockDim.y ; int idx = iy*blockDim.x*gridDim.x + ix ; if(idx<nx*ny) C[idx] = A[idx] + B[idx] ; } int main( int argc, char *argv[] ) { // get program arguments if( argc != 3) { printf("Error: wrong number of args\n") ; exit(1) ; } int nx = atoi( argv[1] ) ; // should check validity int ny = atoi( argv[2] ) ; // should check validity int noElems = nx*ny ; int bytes = noElems * sizeof(float) ; // but you may want to pad the matrices // alloc memory host-side float *h_A = (float *) malloc( bytes ) ; float *h_B = (float *) malloc( bytes ) ; float *h_hC = (float *) malloc( bytes ) ; // host result float *h_dC = (float *) malloc( bytes ) ; // gpu result // init matrices with random data //initData( h_A, noElems ) ; initData( h_B, noElems ) ; initDataA(h_A, nx, ny); initDataB(h_B, nx, ny); // alloc memory dev-side float *d_A, *d_B, *d_C ; hipMalloc( (void **) &d_A, bytes ) ; hipMalloc( (void **) &d_B, bytes ) ; hipMalloc( (void **) &d_C, bytes ) ; // invoke Kernel dim3 block( 32, 32 ) ; // you will want to configure this //int block = 64; //int grid = (noElems + block-1)/block; int gy = (int)sqrt(noElems); int gx = (noElems+gy-1)/gy; //printf("prev gx %d and gy %d\n",gx,gy); if(gy > GRID_YDIM){ gx = (gx*gy+GRID_YDIM-1)/GRID_YDIM; gy = GRID_YDIM; } //printf("gx %d and gy %d\n",gx,gy); gx = (gx+block.x-1)/block.x; gy = (gy+block.y-1)/block.y; dim3 grid( gx, gy ) ; //hipDeviceProp_t GPUprop; //hipGetDeviceProperties(&GPUprop,0); //printf("maxgridsize x is %d\n",GPUprop.maxGridSize[0]); printf("noelems is %d\n",noElems); //printf("gridx is %d\n",grid); //printf("gridx is %d and grid y is %d\n",grid.x,grid.y); double timeStampA = getTimeStamp() ; //transfer data to dev hipMemcpy( d_A, h_A, bytes, hipMemcpyHostToDevice ) ; hipMemcpy( d_B, 
h_B, bytes, hipMemcpyHostToDevice ) ; // note that the transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp() ; hipLaunchKernelGGL(( f_addmat), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nx, ny ) ; hipDeviceSynchronize() ; double timeStampC = getTimeStamp() ; //copy data back hipMemcpy( h_dC, d_C, bytes, hipMemcpyDeviceToHost ) ; double timeStampD = getTimeStamp() ; // free GPU resources hipFree( d_A ) ; hipFree( d_B ) ; hipFree( d_C ) ; hipDeviceReset() ; // check result h_addmat( h_A, h_B, h_hC, nx, ny ) ; // print out results if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){ //debugPrint(h_hC, nx, ny); //debugPrint(h_dC, nx, ny); FILE* fptr; fptr = fopen("time.log","a"); fprintf(fptr,"%dX%d %.6f %.6f %.6f %.6f\n", nx, ny, timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC); fclose(fptr); printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC); }else{ printf("Error: function failed.\n"); } }
11f49b8e4b262f1d4d3b23f7d18362aa0aacaaf5.cu
#include <sys/time.h> #include <stdio.h> #include <math.h> //TODO for writing to file, will be deleted #include <stdlib.h> //TODO: could include later //#include <device_launch_parameters.h> #include <cuda_runtime.h> //#include "../inc/helper_cuda.h" #define GRID_YDIM 65535 // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } void initDataA(float* data, int nx, int ny){ int i,j; for(i = 0; i < nx; i++){ for(j = 0; j < ny; j++){ data[i*ny + j] = (float) (i+j)/3.0; } } } void initDataB(float* data, int nx, int ny){ int i,j; for(i = 0; i < nx; i++){ for(j = 0; j < ny; j++){ data[i*ny + j] = (float)3.14*(i+j); } } } void debugPrint(float* data, int nx, int ny){ int i,j; for(i = 0; i < nx; i++){ for(j = 0; j < ny; j++){ printf("%f ",data[i*ny + j]); } printf("\n"); } printf("\n"); } // host side matrix addition void h_addmat(float *A, float *B, float *C, int nx, int ny){ int i; for(i = 0; i < nx*ny; i++){ C[i] = A[i] + B[i]; } } // device-side matrix addition //__global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // // kernel code might look something like this // // but you may want to pad the matrices and index into them accordingly // int ix = threadIdx.x + blockIdx.x*blockDim.x ; // int iy = threadIdx.y + blockIdx.y*blockDim.y ; // int idx = iy*nx + ix ; // if( (ix<nx) && (iy<ny) ) // C[idx] = A[idx] + B[idx] ; //} __global__ void f_addmat( float *A, float *B, float *C, int nx, int ny ){ // kernel code might look something like this // but you may want to pad the matrices and index into them accordingly int ix = threadIdx.x + blockIdx.x*blockDim.x ; int iy = threadIdx.y + blockIdx.y*blockDim.y ; int idx = iy*blockDim.x*gridDim.x + ix ; if(idx<nx*ny) C[idx] = A[idx] + B[idx] ; } int main( int argc, char *argv[] ) { // get program arguments if( argc != 3) { printf("Error: wrong number of args\n") ; exit(1) ; } int nx = atoi( argv[1] ) ; // should check validity int ny = atoi( argv[2] ) ; // should check validity int noElems = nx*ny ; int bytes = noElems * sizeof(float) ; // but you may want to pad the matrices… // alloc memory host-side float *h_A = (float *) malloc( bytes ) ; float *h_B = (float *) malloc( bytes ) ; float *h_hC = (float *) malloc( bytes ) ; // host result float *h_dC = (float *) malloc( bytes ) ; // gpu result // init matrices with random data //initData( h_A, noElems ) ; initData( h_B, noElems ) ; initDataA(h_A, nx, ny); initDataB(h_B, nx, ny); // alloc memory dev-side float *d_A, *d_B, *d_C ; cudaMalloc( (void **) &d_A, bytes ) ; cudaMalloc( (void **) &d_B, bytes ) ; cudaMalloc( (void **) &d_C, bytes ) ; // invoke Kernel dim3 block( 32, 32 ) ; // you will want to configure this //int block = 64; //int grid = (noElems + block-1)/block; int gy = (int)sqrt(noElems); int gx = (noElems+gy-1)/gy; //printf("prev gx %d and gy %d\n",gx,gy); if(gy > GRID_YDIM){ gx = (gx*gy+GRID_YDIM-1)/GRID_YDIM; gy = GRID_YDIM; } //printf("gx %d and gy %d\n",gx,gy); gx = (gx+block.x-1)/block.x; gy = (gy+block.y-1)/block.y; dim3 grid( gx, gy ) ; //cudaDeviceProp GPUprop; //cudaGetDeviceProperties(&GPUprop,0); //printf("maxgridsize x is %d\n",GPUprop.maxGridSize[0]); printf("noelems is %d\n",noElems); //printf("gridx is %d\n",grid); //printf("gridx is %d and grid y is %d\n",grid.x,grid.y); double timeStampA = getTimeStamp() ; //transfer data to dev cudaMemcpy( d_A, h_A, bytes, cudaMemcpyHostToDevice ) ; cudaMemcpy( d_B, h_B, bytes, cudaMemcpyHostToDevice ) ; // note that the 
transfers would be twice as fast if h_A and h_B // matrices are pinned double timeStampB = getTimeStamp() ; f_addmat<<<grid, block>>>( d_A, d_B, d_C, nx, ny ) ; cudaDeviceSynchronize() ; double timeStampC = getTimeStamp() ; //copy data back cudaMemcpy( h_dC, d_C, bytes, cudaMemcpyDeviceToHost ) ; double timeStampD = getTimeStamp() ; // free GPU resources cudaFree( d_A ) ; cudaFree( d_B ) ; cudaFree( d_C ) ; cudaDeviceReset() ; // check result h_addmat( h_A, h_B, h_hC, nx, ny ) ; // print out results if(!memcmp(h_hC,h_dC,nx*ny*sizeof(float))){ //debugPrint(h_hC, nx, ny); //debugPrint(h_dC, nx, ny); FILE* fptr; fptr = fopen("time.log","a"); fprintf(fptr,"%dX%d %.6f %.6f %.6f %.6f\n", nx, ny, timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC); fclose(fptr); printf("%.6f %.6f %.6f %.6f\n", timeStampD-timeStampA, timeStampB-timeStampA, timeStampC-timeStampB, timeStampD-timeStampC); }else{ printf("Error: function failed.\n"); } }
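The comment in the file above notes that the two host-to-device copies would be roughly twice as fast if h_A and h_B were pinned. The sketch below (names and sizes are illustrative, not taken from the assignment) shows that variant: page-locked buffers via cudaMallocHost and transfer timing via CUDA events instead of gettimeofday.

// pinned_copy.cu -- hypothetical stand-alone sketch of the pinned-memory variant.
#include <cstdio>
#include <cuda_runtime.h>

int main()
{
    const int noElems = 1 << 22;
    const size_t bytes = noElems * sizeof(float);

    float *h_A = 0, *h_B = 0;                     // page-locked host buffers
    cudaMallocHost((void**)&h_A, bytes);
    cudaMallocHost((void**)&h_B, bytes);
    for (int i = 0; i < noElems; ++i) { h_A[i] = i * 0.5f; h_B[i] = i * 0.25f; }

    float *d_A = 0, *d_B = 0;
    cudaMalloc((void**)&d_A, bytes);
    cudaMalloc((void**)&d_B, bytes);

    // Time only the transfers, with events recorded on the default stream.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("H2D copies from pinned memory: %.3f ms (%.2f GB/s)\n",
           ms, 2.0 * bytes / (ms * 1.0e6));

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A); cudaFree(d_B);
    cudaFreeHost(h_A); cudaFreeHost(h_B);
    return 0;
}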
a3f8607375232dc2533431613e92bde82984d395.hip
// !!! This is a file automatically generated by hipify!!! // includes, cuda #include <hip/hip_runtime.h> #include <cudaDefs.h> #include <imageManager.h> #include "imageKernels.cuh" #define BLOCK_DIM 8 hipError_t error = hipSuccess; hipDeviceProp_t deviceProp = hipDeviceProp_t(); texture<float,2,hipReadModeElementType> texRef; // declared texture reference must be at file-scope !!! hipChannelFormatDesc texChannelDesc; unsigned char *dImageData = 0; unsigned int imageWidth; unsigned int imageHeight; unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit unsigned int imagePitch; size_t texPitch; float *dLinearPitchTextureData = 0; hipArray *dArrayTextureData = 0; KernelSetting ks; float *dOutputData = 0; void loadSourceImage(const char* imageFileName) { FreeImage_Initialise(); FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0); imageWidth = FreeImage_GetWidth(tmp); imageHeight = FreeImage_GetHeight(tmp); imageBPP = FreeImage_GetBPP(tmp); imagePitch = FreeImage_GetPitch(tmp); // FREEIMAGE align row data ... You have to use pitch instead of width hipMalloc((void**)&dImageData, imagePitch * imageHeight * imageBPP/8); hipMemcpy(dImageData, FreeImage_GetBits(tmp), imagePitch * imageHeight * imageBPP/8, hipMemcpyHostToDevice); checkHostMatrix<unsigned char>(FreeImage_GetBits(tmp), imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text"); checkDeviceMatrix<unsigned char>(dImageData, imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text"); FreeImage_Unload(tmp); FreeImage_DeInitialise(); } void createTextureFromLinearPitchMemory() { // TODO: Allocate dLinearPitchTextureData variable memory hipMallocPitch((void **)&dLinearPitchTextureData, &texPitch, imageWidth * sizeof(float), imageHeight); switch(imageBPP) { //TODO: Here call your kernel to convert image into linearPitch memory case 8: hipLaunchKernelGGL(( colorToFloat<8>) , dim3(ks.dimGrid), dim3(ks.dimBlock) , 0, 0, dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; case 16: hipLaunchKernelGGL(( colorToFloat<16>) , dim3(ks.dimGrid), dim3(ks.dimBlock) , 0, 0, dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; case 24: hipLaunchKernelGGL(( colorToFloat<24>) , dim3(ks.dimGrid), dim3(ks.dimBlock) , 0, 0, dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; case 32: hipLaunchKernelGGL(( colorToFloat<32>) , dim3(ks.dimGrid), dim3(ks.dimBlock) , 0, 0, dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; default: break; } checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "%6.1f ", "Result of Linear Pitch Text"); //TODO: Define texture (texRef) parameters texChannelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat); //TODO: Define texture channel descriptor (texChannelDesc) texRef.normalized = false; texRef.filterMode = hipFilterModePoint; texRef.addressMode[0] = hipAddressModeClamp; texRef.addressMode[1] = hipAddressModeClamp; //TODO: Bind texture hipBindTexture2D(0, &texRef, dLinearPitchTextureData, &texChannelDesc, imageWidth, imageHeight, texPitch); } void createTextureFrom2DArray() { //TODO: Define texture (texRef) parameters //TODO: Define texture channel descriptor (texChannelDesc) //texChannelDesc = ... 
//Converts custom image data to float and stores result in the float_linear_data float *dLinearTextureData = 0; hipMalloc((void**)&dLinearTextureData, imageWidth * imageHeight * sizeof(float)); switch(imageBPP) { //TODO: Here call your kernel to convert image into linear memory (no pitch!!!) } hipMallocArray(&dArrayTextureData, &texChannelDesc, imageWidth, imageHeight); //TODO: copy data into cuda array (dArrayTextureData) //hipMemcpyToArray(...); //TODO: Bind texture //cudaBind... hipFree(dLinearTextureData); } void releaseMemory() { hipUnbindTexture(texRef); if (dImageData!=0) hipFree(dImageData); if (dLinearPitchTextureData!=0) hipFree(dLinearPitchTextureData); if (dArrayTextureData) hipFreeArray(dArrayTextureData); if (dOutputData) hipFree(dOutputData); } __global__ void texKernel(const unsigned int texWidth, const unsigned int texHeight, float* dst) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //TODO some kernel } int main(int argc, char *argv[]) { initializeCUDA(deviceProp); loadSourceImage("C:/GIT/VSB-PA2/template/TemplateProject2017/TemplateProject2017/run6/terrain10x10.tif"); hipMalloc((void**)&dOutputData, imageWidth * imageHeight * sizeof(float)); ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1); ks.blockSize = BLOCK_DIM * BLOCK_DIM; ks.dimGrid = dim3((imageWidth + BLOCK_DIM-1)/BLOCK_DIM, (imageHeight + BLOCK_DIM-1)/BLOCK_DIM, 1); //Test 1 - texture stored in linear pitch memory createTextureFromLinearPitchMemory(); hipLaunchKernelGGL(( texKernel), dim3(ks.dimGrid), dim3(ks.dimBlock), 0, 0, imageWidth, imageHeight, dOutputData); checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData"); //Test 2 - texture stored in 2D array createTextureFrom2DArray(); hipLaunchKernelGGL(( texKernel), dim3(ks.dimGrid), dim3(ks.dimBlock), 0, 0, imageWidth, imageHeight, dOutputData); checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData"); releaseMemory(); }
a3f8607375232dc2533431613e92bde82984d395.cu
// includes, cuda #include <cuda_runtime.h> #include <cudaDefs.h> #include <imageManager.h> #include "imageKernels.cuh" #define BLOCK_DIM 8 cudaError_t error = cudaSuccess; cudaDeviceProp deviceProp = cudaDeviceProp(); texture<float,2,cudaReadModeElementType> texRef; // declared texture reference must be at file-scope !!! cudaChannelFormatDesc texChannelDesc; unsigned char *dImageData = 0; unsigned int imageWidth; unsigned int imageHeight; unsigned int imageBPP; //Bits Per Pixel = 8, 16, 24, or 32 bit unsigned int imagePitch; size_t texPitch; float *dLinearPitchTextureData = 0; cudaArray *dArrayTextureData = 0; KernelSetting ks; float *dOutputData = 0; void loadSourceImage(const char* imageFileName) { FreeImage_Initialise(); FIBITMAP *tmp = ImageManager::GenericLoader(imageFileName, 0); imageWidth = FreeImage_GetWidth(tmp); imageHeight = FreeImage_GetHeight(tmp); imageBPP = FreeImage_GetBPP(tmp); imagePitch = FreeImage_GetPitch(tmp); // FREEIMAGE align row data ... You have to use pitch instead of width cudaMalloc((void**)&dImageData, imagePitch * imageHeight * imageBPP/8); cudaMemcpy(dImageData, FreeImage_GetBits(tmp), imagePitch * imageHeight * imageBPP/8, cudaMemcpyHostToDevice); checkHostMatrix<unsigned char>(FreeImage_GetBits(tmp), imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text"); checkDeviceMatrix<unsigned char>(dImageData, imagePitch, imageHeight, imageWidth, "%hhu ", "Result of Linear Pitch Text"); FreeImage_Unload(tmp); FreeImage_DeInitialise(); } void createTextureFromLinearPitchMemory() { // TODO: Allocate dLinearPitchTextureData variable memory cudaMallocPitch((void **)&dLinearPitchTextureData, &texPitch, imageWidth * sizeof(float), imageHeight); switch(imageBPP) { //TODO: Here call your kernel to convert image into linearPitch memory case 8: colorToFloat<8> <<< ks.dimGrid, ks.dimBlock >>> (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; case 16: colorToFloat<16> <<< ks.dimGrid, ks.dimBlock >>> (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; case 24: colorToFloat<24> <<< ks.dimGrid, ks.dimBlock >>> (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; case 32: colorToFloat<32> <<< ks.dimGrid, ks.dimBlock >>> (dImageData, imageWidth, imageHeight, imagePitch, texPitch / sizeof(float), dLinearPitchTextureData); break; default: break; } checkDeviceMatrix<float>(dLinearPitchTextureData, texPitch, imageHeight, imageWidth, "%6.1f ", "Result of Linear Pitch Text"); //TODO: Define texture (texRef) parameters texChannelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat); //TODO: Define texture channel descriptor (texChannelDesc) texRef.normalized = false; texRef.filterMode = cudaFilterModePoint; texRef.addressMode[0] = cudaAddressModeClamp; texRef.addressMode[1] = cudaAddressModeClamp; //TODO: Bind texture cudaBindTexture2D(0, &texRef, dLinearPitchTextureData, &texChannelDesc, imageWidth, imageHeight, texPitch); } void createTextureFrom2DArray() { //TODO: Define texture (texRef) parameters //TODO: Define texture channel descriptor (texChannelDesc) //texChannelDesc = ... //Converts custom image data to float and stores result in the float_linear_data float *dLinearTextureData = 0; cudaMalloc((void**)&dLinearTextureData, imageWidth * imageHeight * sizeof(float)); switch(imageBPP) { //TODO: Here call your kernel to convert image into linear memory (no pitch!!!) 
} cudaMallocArray(&dArrayTextureData, &texChannelDesc, imageWidth, imageHeight); //TODO: copy data into cuda array (dArrayTextureData) //cudaMemcpyToArray(...); //TODO: Bind texture //cudaBind... cudaFree(dLinearTextureData); } void releaseMemory() { cudaUnbindTexture(texRef); if (dImageData!=0) cudaFree(dImageData); if (dLinearPitchTextureData!=0) cudaFree(dLinearPitchTextureData); if (dArrayTextureData) cudaFreeArray(dArrayTextureData); if (dOutputData) cudaFree(dOutputData); } __global__ void texKernel(const unsigned int texWidth, const unsigned int texHeight, float* dst) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //TODO some kernel } int main(int argc, char *argv[]) { initializeCUDA(deviceProp); loadSourceImage("C:/GIT/VSB-PA2/template/TemplateProject2017/TemplateProject2017/run6/terrain10x10.tif"); cudaMalloc((void**)&dOutputData, imageWidth * imageHeight * sizeof(float)); ks.dimBlock = dim3(BLOCK_DIM, BLOCK_DIM, 1); ks.blockSize = BLOCK_DIM * BLOCK_DIM; ks.dimGrid = dim3((imageWidth + BLOCK_DIM-1)/BLOCK_DIM, (imageHeight + BLOCK_DIM-1)/BLOCK_DIM, 1); //Test 1 - texture stored in linear pitch memory createTextureFromLinearPitchMemory(); texKernel<<<ks.dimGrid, ks.dimBlock>>>(imageWidth, imageHeight, dOutputData); checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData"); //Test 2 - texture stored in 2D array createTextureFrom2DArray(); texKernel<<<ks.dimGrid, ks.dimBlock>>>(imageWidth, imageHeight, dOutputData); checkDeviceMatrix<float>(dOutputData, imageWidth * sizeof(float), imageHeight, imageWidth, "%6.1f ", "dOutputData"); releaseMemory(); }
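createTextureFrom2DArray above is left as TODOs. One way to complete that path, written here as an assumption rather than the author's intended solution, is the texture-object API: copy the float data into a cudaArray, describe it with cudaResourceDesc/cudaTextureDesc, and sample it with tex2D<float> inside a kernel. The helper and kernel names below are invented for illustration.

// texobj_sketch.cu -- hypothetical texture-object alternative to the texRef TODOs.
#include <cuda_runtime.h>

__global__ void sampleTex(cudaTextureObject_t tex, int w, int h, float* dst)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < w && y < h)
        dst[y * w + x] = tex2D<float>(tex, x + 0.5f, y + 0.5f);   // unnormalized coords
}

// Wraps an already-converted, pitched float image (device memory) in a texture object.
cudaTextureObject_t makeTexFromPitched(const float* dSrc, size_t srcPitchBytes,
                                       int width, int height, cudaArray_t* outArray)
{
    cudaChannelFormatDesc ch = cudaCreateChannelDesc<float>();
    cudaMallocArray(outArray, &ch, width, height);
    cudaMemcpy2DToArray(*outArray, 0, 0, dSrc, srcPitchBytes,
                        width * sizeof(float), height, cudaMemcpyDeviceToDevice);

    cudaResourceDesc resDesc = {};
    resDesc.resType = cudaResourceTypeArray;
    resDesc.res.array.array = *outArray;

    cudaTextureDesc texDesc = {};
    texDesc.addressMode[0]   = cudaAddressModeClamp;
    texDesc.addressMode[1]   = cudaAddressModeClamp;
    texDesc.filterMode       = cudaFilterModePoint;
    texDesc.readMode         = cudaReadModeElementType;
    texDesc.normalizedCoords = 0;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}

A caller would pass dLinearPitchTextureData and texPitch from the file above, launch sampleTex with the same ks.dimGrid/ks.dimBlock, and release the resources with cudaDestroyTextureObject plus cudaFreeArray.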
b1a69fd9d20741c0a45909a4b3d64b5b915a8a89.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <paddle/fluid/platform/device_context.h> #include <algorithm> #include <cstdint> #include <type_traits> #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace paddle { namespace operators { template <typename DeviceContext, typename T> class EmbeddingEltWiseLayerNormKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { using Tensor = phi::DenseTensor; auto &device_ctx = context.template device_context<DeviceContext>(); auto ids = context.MultiInput<phi::DenseTensor>("Ids"); auto embs = context.MultiInput<phi::DenseTensor>("Embs"); int input_num = static_cast<int>(ids.size()); phi::DenseTensor in_ids_( framework::TransToPhiDataType(framework::proto::VarType::INT64)), in_embs_( framework::TransToPhiDataType(framework::proto::VarType::INT64)); framework::DDim in_dim{input_num}; int device_id; #ifdef PADDLE_WITH_HIP hipGetDevice(&device_id); #else hipGetDevice(&device_id); #endif auto &dev_ctx = context.template device_context<phi::GPUContext>(); in_ids_.Resize(in_dim); in_embs_.Resize(in_dim); int64_t *in_ids_d = dev_ctx.template Alloc<int64_t>( &in_ids_, in_ids_.numel() * sizeof(int64_t)); int64_t *in_embs_d = dev_ctx.template Alloc<int64_t>( &in_embs_, in_embs_.numel() * sizeof(int64_t)); std::vector<int64_t> in1s, in2s; for (int i = 0; i < input_num; ++i) { in1s.push_back(reinterpret_cast<uintptr_t>(ids[i]->data<int64_t>())); in2s.push_back(reinterpret_cast<uintptr_t>(embs[i]->data<T>())); } #ifdef PADDLE_WITH_HIP hipMemcpyAsync(in_ids_d, in1s.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, device_ctx.stream()); hipMemcpyAsync(in_embs_d, in2s.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, device_ctx.stream()); #else hipMemcpyAsync(in_ids_d, in1s.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, device_ctx.stream()); hipMemcpyAsync(in_embs_d, in2s.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, device_ctx.stream()); #endif auto *bias = context.Input<phi::DenseTensor>("Bias"); auto *scale = context.Input<phi::DenseTensor>("Scale"); auto *out = context.Output<phi::DenseTensor>("Out"); // should be (B * S * hidden) auto id0_dims = ids[0]->dims(); auto emb0_dims = embs[0]->dims(); int batch = id0_dims[0]; int seq_len = id0_dims[1]; int hidden = emb0_dims[1]; auto *bias_d = bias->data<T>(); auto *scale_d = scale->data<T>(); auto *output_d = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T)); float eps = context.Attr<float>("epsilon"); if (std::is_same<T, paddle::platform::float16>::value) { const half *scale_new = 
reinterpret_cast<const half *>(scale_d); const half *bias_new = reinterpret_cast<const half *>(bias_d); half *output_new = reinterpret_cast<half *>(output_d); math::EmbEltwiseLayerNormFunctor<half> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden, in_ids_d, scale_new, bias_new, in_embs_d, output_new, eps, input_num, device_ctx.stream()); } else { math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden, in_ids_d, scale_d, bias_d, in_embs_d, output_d, eps, input_num, device_ctx.stream()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; #if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 10000 REGISTER_OP_CUDA_KERNEL( fused_embedding_eltwise_layernorm, ops::EmbeddingEltWiseLayerNormKernel<phi::GPUContext, float>, ops::EmbeddingEltWiseLayerNormKernel<phi::GPUContext, paddle::platform::float16>); #else REGISTER_OP_CUDA_KERNEL( fused_embedding_eltwise_layernorm, ops::EmbeddingEltWiseLayerNormKernel<phi::GPUContext, float>); #endif
b1a69fd9d20741c0a45909a4b3d64b5b915a8a89.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <paddle/fluid/platform/device_context.h> #include <algorithm> #include <cstdint> #include <type_traits> #include "paddle/fluid/framework/convert_utils.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/memory/malloc.h" #include "paddle/fluid/operators/math/bert_encoder_functor.h" #include "paddle/fluid/platform/float16.h" #include "paddle/phi/common/data_type.h" #include "paddle/phi/kernels/funcs/blas/blas.h" namespace paddle { namespace operators { template <typename DeviceContext, typename T> class EmbeddingEltWiseLayerNormKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &context) const override { using Tensor = phi::DenseTensor; auto &device_ctx = context.template device_context<DeviceContext>(); auto ids = context.MultiInput<phi::DenseTensor>("Ids"); auto embs = context.MultiInput<phi::DenseTensor>("Embs"); int input_num = static_cast<int>(ids.size()); phi::DenseTensor in_ids_( framework::TransToPhiDataType(framework::proto::VarType::INT64)), in_embs_( framework::TransToPhiDataType(framework::proto::VarType::INT64)); framework::DDim in_dim{input_num}; int device_id; #ifdef PADDLE_WITH_HIP hipGetDevice(&device_id); #else cudaGetDevice(&device_id); #endif auto &dev_ctx = context.template device_context<phi::GPUContext>(); in_ids_.Resize(in_dim); in_embs_.Resize(in_dim); int64_t *in_ids_d = dev_ctx.template Alloc<int64_t>( &in_ids_, in_ids_.numel() * sizeof(int64_t)); int64_t *in_embs_d = dev_ctx.template Alloc<int64_t>( &in_embs_, in_embs_.numel() * sizeof(int64_t)); std::vector<int64_t> in1s, in2s; for (int i = 0; i < input_num; ++i) { in1s.push_back(reinterpret_cast<uintptr_t>(ids[i]->data<int64_t>())); in2s.push_back(reinterpret_cast<uintptr_t>(embs[i]->data<T>())); } #ifdef PADDLE_WITH_HIP hipMemcpyAsync(in_ids_d, in1s.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, device_ctx.stream()); hipMemcpyAsync(in_embs_d, in2s.data(), sizeof(int64_t) * input_num, hipMemcpyHostToDevice, device_ctx.stream()); #else cudaMemcpyAsync(in_ids_d, in1s.data(), sizeof(int64_t) * input_num, cudaMemcpyHostToDevice, device_ctx.stream()); cudaMemcpyAsync(in_embs_d, in2s.data(), sizeof(int64_t) * input_num, cudaMemcpyHostToDevice, device_ctx.stream()); #endif auto *bias = context.Input<phi::DenseTensor>("Bias"); auto *scale = context.Input<phi::DenseTensor>("Scale"); auto *out = context.Output<phi::DenseTensor>("Out"); // should be (B * S * hidden) auto id0_dims = ids[0]->dims(); auto emb0_dims = embs[0]->dims(); int batch = id0_dims[0]; int seq_len = id0_dims[1]; int hidden = emb0_dims[1]; auto *bias_d = bias->data<T>(); auto *scale_d = scale->data<T>(); auto *output_d = dev_ctx.template Alloc<T>(out, out->numel() * sizeof(T)); float eps = context.Attr<float>("epsilon"); if (std::is_same<T, paddle::platform::float16>::value) { const half *scale_new = reinterpret_cast<const half *>(scale_d); const half 
*bias_new = reinterpret_cast<const half *>(bias_d); half *output_new = reinterpret_cast<half *>(output_d); math::EmbEltwiseLayerNormFunctor<half> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden, in_ids_d, scale_new, bias_new, in_embs_d, output_new, eps, input_num, device_ctx.stream()); } else { math::EmbEltwiseLayerNormFunctor<T> emb_eltwise_layernorm_func; emb_eltwise_layernorm_func(batch, seq_len, hidden, in_ids_d, scale_d, bias_d, in_embs_d, output_d, eps, input_num, device_ctx.stream()); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; #if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000 REGISTER_OP_CUDA_KERNEL( fused_embedding_eltwise_layernorm, ops::EmbeddingEltWiseLayerNormKernel<phi::GPUContext, float>, ops::EmbeddingEltWiseLayerNormKernel<phi::GPUContext, paddle::platform::float16>); #else REGISTER_OP_CUDA_KERNEL( fused_embedding_eltwise_layernorm, ops::EmbeddingEltWiseLayerNormKernel<phi::GPUContext, float>); #endif
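The kernel class above packs the device addresses of every Ids and Embs tensor into host vectors of int64_t and copies that table to the GPU, so one functor can walk a variable number of inputs. The toy program below, which is not Paddle code, isolates just that pointer-table trick.

// ptr_table.cu -- hypothetical, minimal illustration of the device pointer table.
#include <cstdio>
#include <cstdint>
#include <vector>
#include <cuda_runtime.h>

__global__ void sumFirstElements(const int64_t* table, int num, float* out)
{
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        float s = 0.f;
        for (int i = 0; i < num; ++i)
            s += *reinterpret_cast<const float*>(table[i]);  // entries are device addresses
        *out = s;
    }
}

int main()
{
    const int num = 3;
    std::vector<float*> bufs(num);
    std::vector<int64_t> addrs(num);          // same role as in1s/in2s above
    for (int i = 0; i < num; ++i) {
        cudaMalloc((void**)&bufs[i], sizeof(float));
        float v = float(i + 1);
        cudaMemcpy(bufs[i], &v, sizeof(float), cudaMemcpyHostToDevice);
        addrs[i] = static_cast<int64_t>(reinterpret_cast<uintptr_t>(bufs[i]));
    }

    int64_t* dTable = 0;
    float* dOut = 0;
    cudaMalloc((void**)&dTable, num * sizeof(int64_t));
    cudaMalloc((void**)&dOut, sizeof(float));
    cudaMemcpy(dTable, addrs.data(), num * sizeof(int64_t), cudaMemcpyHostToDevice);

    sumFirstElements<<<1, 32>>>(dTable, num, dOut);
    float out = 0.f;
    cudaMemcpy(&out, dOut, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum of first elements = %.1f (expected 6.0)\n", out);

    for (int i = 0; i < num; ++i) cudaFree(bufs[i]);
    cudaFree(dTable);
    cudaFree(dOut);
    return 0;
}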
bebf0ce7f1a16ce27a701d32e3bedaa50829394b.hip
// !!! This is a file automatically generated by hipify!!! //============================================================================ // Name : convertToGreyScale.cu // Author : Rashi Goyal // Copyright : Your copyright notice // Description : Color to Grayscale using CUDA & C++, // To Run : nvcc convertToGreyScale.cu -lcublas -o convertToGreyScale.out // Note : Please see report to understand how to run the code to get // different outputs //============================================================================ #include <stdio.h> #include <math.h> #include <fstream> #include <iostream> #include <string> #include <time.h> using namespace std; // CUDA runtime #include <hip/hip_runtime.h> #include <rocblas.h> const int WIDTH =255; #define TILE_WIDTH 8 //kernel implementation for Matrix Multiplication Naive (Non Shared) __global__ void Convert_to_Grey_2d( int *d_gpu_matrix_in , int *d_gpu_matrix_out , const int WIDTH ){ // calculate row & col values for current thread unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ; int index=row*WIDTH + col; int rgbvalue=d_gpu_matrix_in[index]; int blue = rgbvalue % 1000; int green = ((rgbvalue % 1000000)-blue)/1000; int red = ((rgbvalue / 1000000)-1000); d_gpu_matrix_out[index]=int((red*.299f) + (green*.587f) + (blue*.114f)); } int main() { int in_img_data[WIDTH][WIDTH]; // used to store Original Image Data at host int out_img_data[WIDTH][WIDTH]; // used to store Converted Image Data at host int *d_gpu_matrix_in; // used to store Original Image Data at device int *d_gpu_matrix_out; // used to store Converted Image Data at device //variables for performance calculations clock_t start; clock_t end; double dSeconds =0.0; double gflops =0.0; double dNumOps =0.0; cout<<endl<<endl<<endl<<"################### High Performance Computing Project!! (Colored to Greyscale Conversion) ###################"<<endl<<endl; cout<<" Image Size (Height,Width) : "<<WIDTH<<","<<WIDTH<<endl; cout<<" Number of Pixels : "<<WIDTH*WIDTH<<endl; cout<<" Image Format : PPM"<<endl<<endl; /* Starting to create a Random Color Image based on matrix size */ std::ofstream img("picture.ppm"); img << "P3" <<endl; img << WIDTH << " " << WIDTH << endl; img << "255" << endl; // cout<<" Creating Image with random Pixels colors "<<endl; /* Allocating pixel colors randomly */ for (int y=0;y<WIDTH;y++){ for(int x=0;x<WIDTH;x++){ // int index=y*WIDTH + x; int red=x % 255; int green=y % 255; int blue=y * x % 255; img << red << " " << green << " " << blue << endl; int rgbValue= 1000000000; rgbValue=rgbValue+(red *1000000); rgbValue=rgbValue+(green *1000); rgbValue=rgbValue+(blue); in_img_data[y][x]=rgbValue; } } img.close(); // cout<<"Colored Image created "<<endl; start=clock(); dNumOps = WIDTH * WIDTH * 4; //create device array hipMalloc ( (void **)&array_name, sizeofmatrixinbytes) ; hipMalloc((void **) &d_gpu_matrix_in , WIDTH*WIDTH*sizeof(int) ) ; hipMalloc((void **) &d_gpu_matrix_out , WIDTH*WIDTH*sizeof(int)) ; //copy host array to device array hipMemcpy ( d_gpu_matrix_in , in_img_data , WIDTH*WIDTH*sizeof(int) , hipMemcpyHostToDevice ) ; dim3 dimGrid ( WIDTH/TILE_WIDTH+1 , WIDTH/TILE_WIDTH+1 ,1 ) ; dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ; //Calling kernel 1 D cout<<" Kernel Config.... 
: "<<endl; cout<<" Grid (x,y,z) : ("<<WIDTH/TILE_WIDTH+1 <<","<<WIDTH/TILE_WIDTH+1<<",1)"<<endl; cout<<" Block (x,y,z) : ("<<TILE_WIDTH <<","<<TILE_WIDTH<<",1)"<<endl<<endl; hipLaunchKernelGGL(( Convert_to_Grey_2d) , dim3(dimGrid),dim3(dimBlock), 0, 0, d_gpu_matrix_in ,d_gpu_matrix_out , WIDTH) ; cout<<" Kernel running.... : "<<endl<<endl; hipMemcpy(out_img_data , d_gpu_matrix_out , WIDTH*WIDTH*sizeof(int) ,hipMemcpyDeviceToHost) ; end=clock(); //Measuring Performance dSeconds = (end-start)/1000.0; gflops = 1.0e-9 * dNumOps/dSeconds; cout<<" Number of Operations : "<<dNumOps<<endl; cout<<" Total time taken : "<<dSeconds*1000<<endl; cout<<" GFlop per second : "<<gflops<<endl<<endl; /* Create GreyScale Image */ std::ofstream new_img("new_picture.ppm"); new_img << "P3" <<endl; new_img << WIDTH << " " << WIDTH << endl; new_img << "255" << endl; for (int y=0;y<WIDTH;y++){ for(int x=0;x<WIDTH;x++){ // cout<<"("<<y<<","<<x<<") -> "<<out_img_data[y][x]<<endl; new_img << out_img_data[y][x] << " " << out_img_data[y][x] << " " << out_img_data[y][x] << endl; } } new_img.close(); cout<<endl<<endl<<endl<<" ################## Execution Completed ################## "<<endl; }
bebf0ce7f1a16ce27a701d32e3bedaa50829394b.cu
//============================================================================ // Name : convertToGreyScale.cu // Author : Rashi Goyal // Copyright : Your copyright notice // Description : Color to Grayscale using CUDA & C++, // To Run : nvcc convertToGreyScale.cu -lcublas -o convertToGreyScale.out // Note : Please see report to understand how to run the code to get // different outputs //============================================================================ #include <stdio.h> #include <math.h> #include <fstream> #include <iostream> #include <string> #include <time.h> using namespace std; // CUDA runtime #include <cuda_runtime.h> #include <cublas_v2.h> const int WIDTH =255; #define TILE_WIDTH 8 //kernel implementation for Matrix Multiplication Naive (Non Shared) __global__ void Convert_to_Grey_2d( int *d_gpu_matrix_in , int *d_gpu_matrix_out , const int WIDTH ){ // calculate row & col values for current thread unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x ; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y ; int index=row*WIDTH + col; int rgbvalue=d_gpu_matrix_in[index]; int blue = rgbvalue % 1000; int green = ((rgbvalue % 1000000)-blue)/1000; int red = ((rgbvalue / 1000000)-1000); d_gpu_matrix_out[index]=int((red*.299f) + (green*.587f) + (blue*.114f)); } int main() { int in_img_data[WIDTH][WIDTH]; // used to store Original Image Data at host int out_img_data[WIDTH][WIDTH]; // used to store Converted Image Data at host int *d_gpu_matrix_in; // used to store Original Image Data at device int *d_gpu_matrix_out; // used to store Converted Image Data at device //variables for performance calculations clock_t start; clock_t end; double dSeconds =0.0; double gflops =0.0; double dNumOps =0.0; cout<<endl<<endl<<endl<<"################### High Performance Computing Project!! (Colored to Greyscale Conversion) ###################"<<endl<<endl; cout<<" Image Size (Height,Width) : "<<WIDTH<<","<<WIDTH<<endl; cout<<" Number of Pixels : "<<WIDTH*WIDTH<<endl; cout<<" Image Format : PPM"<<endl<<endl; /* Starting to create a Random Color Image based on matrix size */ std::ofstream img("picture.ppm"); img << "P3" <<endl; img << WIDTH << " " << WIDTH << endl; img << "255" << endl; // cout<<" Creating Image with random Pixels colors "<<endl; /* Allocating pixel colors randomly */ for (int y=0;y<WIDTH;y++){ for(int x=0;x<WIDTH;x++){ // int index=y*WIDTH + x; int red=x % 255; int green=y % 255; int blue=y * x % 255; img << red << " " << green << " " << blue << endl; int rgbValue= 1000000000; rgbValue=rgbValue+(red *1000000); rgbValue=rgbValue+(green *1000); rgbValue=rgbValue+(blue); in_img_data[y][x]=rgbValue; } } img.close(); // cout<<"Colored Image created "<<endl; start=clock(); dNumOps = WIDTH * WIDTH * 4; //create device array cudaMalloc ( (void **)&array_name, sizeofmatrixinbytes) ; cudaMalloc((void **) &d_gpu_matrix_in , WIDTH*WIDTH*sizeof(int) ) ; cudaMalloc((void **) &d_gpu_matrix_out , WIDTH*WIDTH*sizeof(int)) ; //copy host array to device array cudaMemcpy ( d_gpu_matrix_in , in_img_data , WIDTH*WIDTH*sizeof(int) , cudaMemcpyHostToDevice ) ; dim3 dimGrid ( WIDTH/TILE_WIDTH+1 , WIDTH/TILE_WIDTH+1 ,1 ) ; dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ) ; //Calling kernel 1 D cout<<" Kernel Config.... : "<<endl; cout<<" Grid (x,y,z) : ("<<WIDTH/TILE_WIDTH+1 <<","<<WIDTH/TILE_WIDTH+1<<",1)"<<endl; cout<<" Block (x,y,z) : ("<<TILE_WIDTH <<","<<TILE_WIDTH<<",1)"<<endl<<endl; Convert_to_Grey_2d <<<dimGrid,dimBlock>>> ( d_gpu_matrix_in ,d_gpu_matrix_out , WIDTH) ; cout<<" Kernel running.... 
: "<<endl<<endl; cudaMemcpy(out_img_data , d_gpu_matrix_out , WIDTH*WIDTH*sizeof(int) ,cudaMemcpyDeviceToHost) ; end=clock(); //Measuring Performance dSeconds = (end-start)/1000.0; gflops = 1.0e-9 * dNumOps/dSeconds; cout<<" Number of Operations : "<<dNumOps<<endl; cout<<" Total time taken : "<<dSeconds*1000<<endl; cout<<" GFlop per second : "<<gflops<<endl<<endl; /* Create GreyScale Image */ std::ofstream new_img("new_picture.ppm"); new_img << "P3" <<endl; new_img << WIDTH << " " << WIDTH << endl; new_img << "255" << endl; for (int y=0;y<WIDTH;y++){ for(int x=0;x<WIDTH;x++){ // cout<<"("<<y<<","<<x<<") -> "<<out_img_data[y][x]<<endl; new_img << out_img_data[y][x] << " " << out_img_data[y][x] << " " << out_img_data[y][x] << endl; } } new_img.close(); cout<<endl<<endl<<endl<<" ################## Execution Completed ################## "<<endl; }
d656bc4d0f1af63042e110bf260f87bbbcb24e6e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_L1_NORM_LAYER_INSTANTIATE #include "lbann/layers/loss/l1_norm_impl.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <El::Int block_size, typename TensorDataType> __global__ void fp_kernel(El::Int local_height, El::Int local_width, const TensorDataType* __restrict__ input, El::Int input_ldim, TensorDataType* __restrict__ contribution) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int nthreadsx = blockDim.x * gridDim.x; // Compute local contribution for each matrix column for (El::Int col = bidy; col < local_width; col += gridDim.y) { // Compute contributions for each thread TensorDataType private_contribution = 0; for (El::Int row = gidx; row < local_height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; private_contribution += gpu_lib::abs(x); } // Shared memory reduction to get contribution for each block /// @todo unroll loops __shared__ TensorDataType shared_contribution[block_size]; shared_contribution[tid] = private_contribution; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_contribution[tid] += shared_contribution[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&contribution[col], shared_contribution[0]); } } } template <typename TensorDataType> void local_fp_gpu(const El::AbstractMatrix<TensorDataType>& local_input, El::AbstractMatrix<TensorDataType>& local_contribution) { El::Zero(local_contribution); if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_contribution), gpu::get_sync_info(local_input)); const auto& local_height = local_input.Height(); const auto& local_width = local_input.Width(); const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(fp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_contribution.Buffer()); } } template <El::Int block_size, typename TensorDataType> __global__ void bp_kernel(El::Int 
local_height, El::Int local_width, const TensorDataType* __restrict__ input, El::Int input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, TensorDataType* __restrict__ gradient_wrt_input, El::Int gradient_wrt_input_ldim) { const TensorDataType zero = 0.; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int nthreadsx = blockDim.x * gridDim.x; for (El::Int col = bidy; col < local_width; col += gridDim.y) { const auto& dy = gradient_wrt_output[col]; for (El::Int row = gidx; row < local_height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; if (x > zero) { dx = dy; } else if (x < zero) { dx = -dy; } else { dx = zero; } } } } template <typename TensorDataType> void local_bp_gpu( const El::AbstractMatrix<TensorDataType>& local_input, const El::AbstractMatrix<TensorDataType>& local_gradient_wrt_output, El::AbstractMatrix<TensorDataType>& local_gradient_wrt_input) { if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_input)); const auto& local_height = local_input.Height(); const auto& local_width = local_input.Width(); const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(bp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } } // namespace template <typename TensorDataType, data_layout T_layout, El::Device Dev> void l1_norm_layer<TensorDataType, T_layout, Dev>::local_fp_compute() { local_fp_gpu(this->get_local_prev_activations(), this->m_workspace->Matrix()); } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void l1_norm_layer<TensorDataType, T_layout, Dev>::local_bp_compute() { local_bp_gpu(this->get_local_prev_activations(), this->m_workspace->LockedMatrix(), this->get_local_error_signals()); } #define PROTO(T) \ template class l1_norm_layer<T, \ data_layout::DATA_PARALLEL, \ El::Device::GPU>; \ template class l1_norm_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
d656bc4d0f1af63042e110bf260f87bbbcb24e6e.cu
//////////////////////////////////////////////////////////////////////////////// // Copyright (c) 2014-2023, Lawrence Livermore National Security, LLC. // Produced at the Lawrence Livermore National Laboratory. // Written by the LBANN Research Team (B. Van Essen, et al.) listed in // the CONTRIBUTORS file. <[email protected]> // // LLNL-CODE-697807. // All rights reserved. // // This file is part of LBANN: Livermore Big Artificial Neural Network // Toolkit. For details, see http://software.llnl.gov/LBANN or // https://github.com/LLNL/LBANN. // // Licensed under the Apache License, Version 2.0 (the "Licensee"); you // may not use this file except in compliance with the License. You may // obtain a copy of the License at: // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the license. //////////////////////////////////////////////////////////////////////////////// #define LBANN_L1_NORM_LAYER_INSTANTIATE #include "lbann/layers/loss/l1_norm_impl.hpp" #include "lbann/utils/gpu/helpers.hpp" namespace lbann { namespace { template <El::Int block_size, typename TensorDataType> __global__ void fp_kernel(El::Int local_height, El::Int local_width, const TensorDataType* __restrict__ input, El::Int input_ldim, TensorDataType* __restrict__ contribution) { // Indices const El::Int tid = threadIdx.x; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int nthreadsx = blockDim.x * gridDim.x; // Compute local contribution for each matrix column for (El::Int col = bidy; col < local_width; col += gridDim.y) { // Compute contributions for each thread TensorDataType private_contribution = 0; for (El::Int row = gidx; row < local_height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; private_contribution += gpu_lib::abs(x); } // Shared memory reduction to get contribution for each block /// @todo unroll loops __shared__ TensorDataType shared_contribution[block_size]; shared_contribution[tid] = private_contribution; for (El::Int stride = block_size / 2; stride > 0; stride /= 2) { __syncthreads(); if (tid < stride) { shared_contribution[tid] += shared_contribution[tid + stride]; } } if (tid == 0) { gpu_lib::atomic_add(&contribution[col], shared_contribution[0]); } } } template <typename TensorDataType> void local_fp_gpu(const El::AbstractMatrix<TensorDataType>& local_input, El::AbstractMatrix<TensorDataType>& local_contribution) { El::Zero(local_contribution); if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_contribution), gpu::get_sync_info(local_input)); const auto& local_height = local_input.Height(); const auto& local_width = local_input.Width(); const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(fp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_contribution.Buffer()); } } template <El::Int block_size, typename TensorDataType> __global__ void bp_kernel(El::Int local_height, El::Int local_width, const TensorDataType* __restrict__ input, El::Int 
input_ldim, const TensorDataType* __restrict__ gradient_wrt_output, TensorDataType* __restrict__ gradient_wrt_input, El::Int gradient_wrt_input_ldim) { const TensorDataType zero = 0.; const El::Int gidx = threadIdx.x + blockIdx.x * blockDim.x; const El::Int bidy = blockIdx.y; const El::Int nthreadsx = blockDim.x * gridDim.x; for (El::Int col = bidy; col < local_width; col += gridDim.y) { const auto& dy = gradient_wrt_output[col]; for (El::Int row = gidx; row < local_height; row += nthreadsx) { const auto& x = input[row + col * input_ldim]; auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim]; if (x > zero) { dx = dy; } else if (x < zero) { dx = -dy; } else { dx = zero; } } } } template <typename TensorDataType> void local_bp_gpu( const El::AbstractMatrix<TensorDataType>& local_input, const El::AbstractMatrix<TensorDataType>& local_gradient_wrt_output, El::AbstractMatrix<TensorDataType>& local_gradient_wrt_input) { if (!local_input.IsEmpty()) { auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_gradient_wrt_input), gpu::get_sync_info(local_gradient_wrt_output), gpu::get_sync_info(local_input)); const auto& local_height = local_input.Height(); const auto& local_width = local_input.Width(); const El::Int block_size = 256; dim3 block_dims, grid_dims; block_dims.x = block_size; grid_dims.x = (local_height + block_size - 1) / block_size; grid_dims.y = local_width; hydrogen::gpu::LaunchKernel(bp_kernel<block_size, TensorDataType>, grid_dims, block_dims, 0, multisync, local_height, local_width, local_input.LockedBuffer(), local_input.LDim(), local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim()); } } } // namespace template <typename TensorDataType, data_layout T_layout, El::Device Dev> void l1_norm_layer<TensorDataType, T_layout, Dev>::local_fp_compute() { local_fp_gpu(this->get_local_prev_activations(), this->m_workspace->Matrix()); } template <typename TensorDataType, data_layout T_layout, El::Device Dev> void l1_norm_layer<TensorDataType, T_layout, Dev>::local_bp_compute() { local_bp_gpu(this->get_local_prev_activations(), this->m_workspace->LockedMatrix(), this->get_local_error_signals()); } #define PROTO(T) \ template class l1_norm_layer<T, \ data_layout::DATA_PARALLEL, \ El::Device::GPU>; \ template class l1_norm_layer<T, data_layout::MODEL_PARALLEL, El::Device::GPU> #define LBANN_INSTANTIATE_GPU_HALF #include "lbann/macros/instantiate.hpp" } // namespace lbann
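fp_kernel above follows the standard reduction recipe: per-thread strided accumulation of |x|, a shared-memory tree that collapses the block, and an atomicAdd of the block's partial sum into the output. The stand-alone miniature below reproduces that recipe for the L1 norm of one vector; it is an assumed simplification and does not use LBANN's tensor types or the hydrogen LaunchKernel wrapper.

// l1_mini.cu -- hypothetical miniature of the block-reduction + atomicAdd pattern.
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

template <int BLOCK>
__global__ void l1_norm_kernel(const float* x, int n, float* result)
{
    __shared__ float partial[BLOCK];
    float acc = 0.f;
    for (int i = blockIdx.x * BLOCK + threadIdx.x; i < n; i += BLOCK * gridDim.x)
        acc += fabsf(x[i]);                       // per-thread strided accumulation
    partial[threadIdx.x] = acc;
    for (int stride = BLOCK / 2; stride > 0; stride /= 2) {
        __syncthreads();
        if (threadIdx.x < stride)
            partial[threadIdx.x] += partial[threadIdx.x + stride];
    }
    if (threadIdx.x == 0)
        atomicAdd(result, partial[0]);            // one atomic per block
}

int main()
{
    const int n = 1 << 20;
    float* h = new float[n];
    double ref = 0.0;
    for (int i = 0; i < n; ++i) { h[i] = float(i % 7) - 3.0f; ref += fabs((double)h[i]); }

    float *dX = 0, *dSum = 0;
    cudaMalloc((void**)&dX, n * sizeof(float));
    cudaMalloc((void**)&dSum, sizeof(float));
    cudaMemcpy(dX, h, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(dSum, 0, sizeof(float));

    l1_norm_kernel<256><<<(n + 255) / 256, 256>>>(dX, n, dSum);

    float sum = 0.f;
    cudaMemcpy(&sum, dSum, sizeof(float), cudaMemcpyDeviceToHost);
    printf("gpu %.1f  cpu %.1f\n", sum, ref);

    cudaFree(dX); cudaFree(dSum);
    delete[] h;
    return 0;
}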
860dc635eda168e529461012608e6b3a12a5c78a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated d Tue Dec 17 13:18:45 2013 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void dsymmetrize_tiles_lower( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = (*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void dsymmetrize_tiles_upper( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dA = (*dAT); // lower := upper dA += ldda; dAT += 1; } } } extern "C" void magmablas_dsymmetrize_tiles( char uplo, magma_int_t m, double *dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { /* Purpose ======= DSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix dA that is valid on input. = 'U': Upper triangular part = 'L': Lower triangular part M (input) INTEGER The number of rows of the matrix dA. M >= 0. dA (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by m matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). ===================================================================== */ if ( m == 0 || ntile == 0 ) return; assert( m >= 0 ); assert( ldda >= m ); assert( ldda >= (ntile - 1)*mstride + m ); assert( ntile >= 0 ); assert( mstride >= 0 ); assert( nstride >= 0 ); assert( mstride >= m || nstride >= m ); // prevent tile overlap dim3 threads( NB ); dim3 grid( ntile, (m + NB - 1)/NB ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( (uplo == 'U') || (uplo == 'u') ) { hipLaunchKernelGGL(( dsymmetrize_tiles_upper), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride ); } else if ( (uplo == 'L') || (uplo == 'l') ) { hipLaunchKernelGGL(( dsymmetrize_tiles_lower), dim3(grid), dim3(threads), 0, magma_stream , m, dA, ldda, mstride, nstride ); } else { printf( "uplo has illegal value\n" ); exit(1); } }
860dc635eda168e529461012608e6b3a12a5c78a.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated d Tue Dec 17 13:18:45 2013 @author Mark Gates */ #include "common_magma.h" #include <assert.h> #define NB 64 /* Symmetrizes ntile tiles at a time, e.g., all diagonal tiles of a matrix. Grid is ntile x ceil(m/NB). Each tile is m x m, and is divided into block rows, each NB x m. Each block has NB threads. Each thread copies one row, iterating across all columns below diagonal. The bottom block of rows may be partially outside the matrix; if so, rows outside the matrix (i >= m) are disabled. */ __global__ void dsymmetrize_tiles_lower( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dAT = (*dA); // upper := lower dA += ldda; dAT += 1; } } } // only difference with _lower version is direction dA=dAT instead of dAT=dA. __global__ void dsymmetrize_tiles_upper( int m, double *dA, int ldda, int mstride, int nstride ) { // shift dA to tile's top-left corner dA += blockIdx.x*(mstride + nstride*ldda); // dA iterates across row i and dAT iterates down column i. int i = blockIdx.y*NB + threadIdx.x; double *dAT = dA; if ( i < m ) { dA += i; dAT += i*ldda; double *dAend = dA + i*ldda; while( dA < dAend ) { *dA = (*dAT); // lower := upper dA += ldda; dAT += 1; } } } extern "C" void magmablas_dsymmetrize_tiles( char uplo, magma_int_t m, double *dA, magma_int_t ldda, magma_int_t ntile, magma_int_t mstride, magma_int_t nstride ) { /* Purpose ======= DSYMMETRIZE copies lower triangle to upper triangle, or vice-versa, to make dA a general representation of a symmetric matrix. Arguments ========= UPLO (input) CHARACTER*1 Specifies the part of the matrix dA that is valid on input. = 'U': Upper triangular part = 'L': Lower triangular part M (input) INTEGER The number of rows of the matrix dA. M >= 0. dA (input/output) COMPLEX DOUBLE PRECISION array, dimension (LDDA,N) The m by m matrix dA. LDDA (input) INTEGER The leading dimension of the array dA. LDDA >= max(1,M). ===================================================================== */ if ( m == 0 || ntile == 0 ) return; assert( m >= 0 ); assert( ldda >= m ); assert( ldda >= (ntile - 1)*mstride + m ); assert( ntile >= 0 ); assert( mstride >= 0 ); assert( nstride >= 0 ); assert( mstride >= m || nstride >= m ); // prevent tile overlap dim3 threads( NB ); dim3 grid( ntile, (m + NB - 1)/NB ); //printf( "m %d, grid %d x %d, threads %d\n", m, grid.x, grid.y, threads.x ); if ( (uplo == 'U') || (uplo == 'u') ) { dsymmetrize_tiles_upper<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride ); } else if ( (uplo == 'L') || (uplo == 'l') ) { dsymmetrize_tiles_lower<<< grid, threads, 0, magma_stream >>>( m, dA, ldda, mstride, nstride ); } else { printf( "uplo has illegal value\n" ); exit(1); } }
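// Illustrative sketch (not part of the MAGMA pair above): each thread owns one row of a
// tile and mirrors the entries below the diagonal across it. This standalone single-matrix
// version (hypothetical name symmetrize_lower) copies the lower triangle of a column-major
// m-by-m matrix into the upper triangle, i.e. the ntile == 1 case of the scheme above,
// with the pointer walk rewritten as explicit (row, col) indexing.
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

constexpr int kNB = 64;   // rows handled per thread block, as in the kernels above

__global__ void symmetrize_lower(int m, double* dA, int ldda) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;    // row handled by this thread
  if (i >= m) return;
  // Mirror row i into column i: A(j, i) := A(i, j) for j < i (upper := lower).
  for (int j = 0; j < i; ++j) {
    dA[j + i * ldda] = dA[i + j * ldda];
  }
}

int main() {
  const int m = 100, ldda = m;
  std::vector<double> hA(static_cast<size_t>(ldda) * m);
  // Fill the lower triangle (and diagonal) with recognizable values, the upper with -1.
  for (int j = 0; j < m; ++j)
    for (int i = 0; i < m; ++i)
      hA[i + j * ldda] = (i >= j) ? i * 1000.0 + j : -1.0;
  double* dA = nullptr;
  cudaMalloc(&dA, hA.size() * sizeof(double));
  cudaMemcpy(dA, hA.data(), hA.size() * sizeof(double), cudaMemcpyHostToDevice);
  symmetrize_lower<<<(m + kNB - 1) / kNB, kNB>>>(m, dA, ldda);
  cudaMemcpy(hA.data(), dA, hA.size() * sizeof(double), cudaMemcpyDeviceToHost);
  printf("A(2,7) = %g, A(7,2) = %g (both should be 7002)\n",
         hA[2 + 7 * ldda], hA[7 + 2 * ldda]);
  cudaFree(dA);
  return 0;
}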
f5c0a7742f941fc0a17b44928a67f1d2cd0438ed.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } void __global__ kernScanUpSweep (const int N, const int powerv1, const int powerv2, int *dev_oDataArray) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; if (index % powerv1 != powerv1 - 1) return; dev_oDataArray[index] += dev_oDataArray[index - powerv2]; // After UpSweep is over the last value in the array is given value 0 which will be useful in the DownSweep step if (index == N - 1) { dev_oDataArray[index] = 0; } } void __global__ kernScanDownSweep (const int N, const int powerv1, const int powerv2, int *dev_oDataArray) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; if (index % powerv1 != powerv1 - 1) return; int temp = dev_oDataArray[index - powerv2]; dev_oDataArray[index - powerv2] = dev_oDataArray[index]; dev_oDataArray[index] += temp; } void __global__ kernPaddArrayWithZero(const int N, const int paddedArrayLength, int *dev_oDataArray) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N || index >= paddedArrayLength) return; dev_oDataArray[index] = 0; } void scanExcusivePrefixSum(int N, int dimension, dim3 fullBlocksPerGrid, dim3 threadsPerBlock, int *dev_oDataArray) { // Up Sweep Scan int powerv1, powerv2; for (int d = 0; d < dimension; ++d) { powerv1 = 1 << (d + 1); powerv2 = 1 << d; kernScanUpSweep << <fullBlocksPerGrid, threadsPerBlock >> > (N, powerv1, powerv2, dev_oDataArray); } // Down Sweep Scans for (int i = dimension - 1; i >= 0; --i) { powerv1 = 1 << (i + 1); powerv2 = 1 << i; kernScanDownSweep << <fullBlocksPerGrid, threadsPerBlock >> > (N, powerv1, powerv2, dev_oDataArray); } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // Defining the configuration of the kernel int dimension = ilog2ceil(n); int paddedArrayLength = 1 << (dimension); int paddedArraySize = paddedArrayLength * sizeof(int); dim3 fullBlocksPerGrid((paddedArrayLength + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int size = n * sizeof(int); // Creating array buffers on the device memory int *dev_oDataArray; hipMalloc((void**)&dev_oDataArray, paddedArraySize); checkCUDAError("hipMalloc for dev_oDataArray failed!"); // Copying array buffers (Host to Device) hipMemcpy(dev_oDataArray, idata, size, hipMemcpyHostToDevice); checkCUDAError("hipMemcpy into dev_oDataArray failed!"); // For the extra space in the padded array fill it with 0 hipLaunchKernelGGL(( kernPaddArrayWithZero) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, paddedArrayLength, dev_oDataArray); timer().startGpuTimer(); // TODO scanExcusivePrefixSum(paddedArrayLength, dimension, fullBlocksPerGrid, threadsPerBlock, dev_oDataArray); checkCUDAError("scanExcusivePrefixSum failed!"); timer().endGpuTimer(); //Copying array buffers (Device to Host) hipMemcpy(odata, dev_oDataArray, size, hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy into odata failed!"); // Freeing the data buffer stored in device memory hipFree(dev_oDataArray); checkCUDAError("hipFree on dev_oDataArray failed!"); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. 
* @param odata The array into which to store elements. * @param idata The array of elements to compact. * @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { // Defining the configuration of the kernel int dimension = ilog2ceil(n); int paddedArrayLength = 1 << (dimension); int paddedArraySize = paddedArrayLength * sizeof(int); dim3 fullBlocksPerGridPadded((paddedArrayLength + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int size = n * sizeof(int); int fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Creating buffers on the device memory int *dev_oScanDataArray; hipMalloc((void**)&dev_oScanDataArray, paddedArraySize); checkCUDAError("hipMalloc for dev_oScanDataArray failed!"); int *dev_oData; hipMalloc((void**)&dev_oData, size); checkCUDAError("hipMalloc for dev_oData failed!"); int *dev_iData; hipMalloc((void**)&dev_iData, size); checkCUDAError("hipMalloc for dev_iData failed!"); int *dev_boolIndexArray; hipMalloc((void**)&dev_boolIndexArray, size); checkCUDAError("hipMalloc for dev_boolIndexArray failed!"); // Copying array buffers idata to dev_iData (Host to Device) hipMemcpy(dev_iData, idata, size, hipMemcpyHostToDevice); checkCUDAError("hipMemcpy into dev_iData failed!"); // Initialize the bool array: For each index fill 1 in dev_boolIndexArray[index] if corrosponding value in dev_iData is non-zero otherwise fill 0 hipLaunchKernelGGL(( StreamCompaction::Common::kernMapToBoolean) , dim3(fullBlocksPerGrid), dim3(threadsPerBlock), 0, 0, n, dev_boolIndexArray, dev_iData); checkCUDAError("kernMapToBoolean failed!"); // Copy buffer dev_boolIndexArray to buffer dev_oScanDataArray (Device to Device) hipMemcpy(dev_oScanDataArray, dev_boolIndexArray, size, hipMemcpyDeviceToDevice); checkCUDAError("hipMemcpy into dev_oScanDataArray failed!"); // Padd the dev_oScanDataArray with zero's at the end kernPaddArrayWithZero << <fullBlocksPerGridPadded, threadsPerBlock >> > (n, paddedArrayLength, dev_oScanDataArray); checkCUDAError("kernPaddArrayWithZero failed!"); timer().startGpuTimer(); // TODO scanExcusivePrefixSum(paddedArrayLength, dimension, fullBlocksPerGridPadded, threadsPerBlock, dev_oScanDataArray); checkCUDAError("scanExcusivePrefixSum failed!"); StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, threadsPerBlock >> > (n, dev_oData, dev_iData, dev_boolIndexArray, dev_oScanDataArray); checkCUDAError("kernScatter failed!"); timer().endGpuTimer(); // Copying the data from dev_oData to odata (Device To Host) hipMemcpy(odata, dev_oData, size, hipMemcpyDeviceToHost); checkCUDAError("hipMemcpy into odata failed!"); // Getting the size of the number of elements that were filled (Device To Host) int valueAtIndexArrayEnd, valueAtBoolArrayEnd, totalSize; hipMemcpy(&valueAtIndexArrayEnd, dev_oScanDataArray + n - 1, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&valueAtBoolArrayEnd, dev_boolIndexArray + n - 1, sizeof(int), hipMemcpyDeviceToHost); totalSize = valueAtBoolArrayEnd + valueAtIndexArrayEnd; // Freeing Cuda Memory hipFree(dev_boolIndexArray); hipFree(dev_iData); hipFree(dev_oData); hipFree(dev_oScanDataArray); checkCUDAError("hipFree failed!"); return totalSize; } } }
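// Illustrative sketch (not part of the files above or below): compact() builds a 0/1 flag
// per element (kernMapToBoolean), exclusive-scans the flags so each surviving element gets
// its output slot, and then scatters (kernScatter). This host loop shows the same three
// steps on a tiny array so the index bookkeeping is easy to follow; the surviving count is
// scan.back() + flags.back(), exactly as computed at the end of compact().
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> in = {0, 5, 0, 0, 3, 8, 0, 2};
  std::vector<int> flags(in.size()), scan(in.size());
  for (size_t i = 0; i < in.size(); ++i) flags[i] = (in[i] != 0);   // map to boolean
  int running = 0;
  for (size_t i = 0; i < in.size(); ++i) {                          // exclusive scan
    scan[i] = running;
    running += flags[i];
  }
  std::vector<int> out(scan.back() + flags.back());                 // number of survivors
  for (size_t i = 0; i < in.size(); ++i)
    if (flags[i]) out[scan[i]] = in[i];                             // scatter
  for (int v : out) printf("%d ", v);                               // prints: 5 3 8 2
  printf("\n");
  return 0;
}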
f5c0a7742f941fc0a17b44928a67f1d2cd0438ed.cu
#include <cuda.h> #include <cuda_runtime.h> #include "common.h" #include "efficient.h" namespace StreamCompaction { namespace Efficient { using StreamCompaction::Common::PerformanceTimer; PerformanceTimer& timer() { static PerformanceTimer timer; return timer; } void __global__ kernScanUpSweep (const int N, const int powerv1, const int powerv2, int *dev_oDataArray) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; if (index % powerv1 != powerv1 - 1) return; dev_oDataArray[index] += dev_oDataArray[index - powerv2]; // After UpSweep is over the last value in the array is given value 0 which will be useful in the DownSweep step if (index == N - 1) { dev_oDataArray[index] = 0; } } void __global__ kernScanDownSweep (const int N, const int powerv1, const int powerv2, int *dev_oDataArray) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index >= N) return; if (index % powerv1 != powerv1 - 1) return; int temp = dev_oDataArray[index - powerv2]; dev_oDataArray[index - powerv2] = dev_oDataArray[index]; dev_oDataArray[index] += temp; } void __global__ kernPaddArrayWithZero(const int N, const int paddedArrayLength, int *dev_oDataArray) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < N || index >= paddedArrayLength) return; dev_oDataArray[index] = 0; } void scanExcusivePrefixSum(int N, int dimension, dim3 fullBlocksPerGrid, dim3 threadsPerBlock, int *dev_oDataArray) { // Up Sweep Scan int powerv1, powerv2; for (int d = 0; d < dimension; ++d) { powerv1 = 1 << (d + 1); powerv2 = 1 << d; kernScanUpSweep << <fullBlocksPerGrid, threadsPerBlock >> > (N, powerv1, powerv2, dev_oDataArray); } // Down Sweep Scans for (int i = dimension - 1; i >= 0; --i) { powerv1 = 1 << (i + 1); powerv2 = 1 << i; kernScanDownSweep << <fullBlocksPerGrid, threadsPerBlock >> > (N, powerv1, powerv2, dev_oDataArray); } } /** * Performs prefix-sum (aka scan) on idata, storing the result into odata. */ void scan(int n, int *odata, const int *idata) { // Defining the configuration of the kernel int dimension = ilog2ceil(n); int paddedArrayLength = 1 << (dimension); int paddedArraySize = paddedArrayLength * sizeof(int); dim3 fullBlocksPerGrid((paddedArrayLength + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int size = n * sizeof(int); // Creating array buffers on the device memory int *dev_oDataArray; cudaMalloc((void**)&dev_oDataArray, paddedArraySize); checkCUDAError("cudaMalloc for dev_oDataArray failed!"); // Copying array buffers (Host to Device) cudaMemcpy(dev_oDataArray, idata, size, cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy into dev_oDataArray failed!"); // For the extra space in the padded array fill it with 0 kernPaddArrayWithZero <<<fullBlocksPerGrid, threadsPerBlock>>> (n, paddedArrayLength, dev_oDataArray); timer().startGpuTimer(); // TODO scanExcusivePrefixSum(paddedArrayLength, dimension, fullBlocksPerGrid, threadsPerBlock, dev_oDataArray); checkCUDAError("scanExcusivePrefixSum failed!"); timer().endGpuTimer(); //Copying array buffers (Device to Host) cudaMemcpy(odata, dev_oDataArray, size, cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy into odata failed!"); // Freeing the data buffer stored in device memory cudaFree(dev_oDataArray); checkCUDAError("cudaFree on dev_oDataArray failed!"); } /** * Performs stream compaction on idata, storing the result into odata. * All zeroes are discarded. * * @param n The number of elements in idata. * @param odata The array into which to store elements. * @param idata The array of elements to compact. 
* @returns The number of elements remaining after compaction. */ int compact(int n, int *odata, const int *idata) { // Defining the configuration of the kernel int dimension = ilog2ceil(n); int paddedArrayLength = 1 << (dimension); int paddedArraySize = paddedArrayLength * sizeof(int); dim3 fullBlocksPerGridPadded((paddedArrayLength + blockSize - 1) / blockSize); dim3 threadsPerBlock(blockSize); int size = n * sizeof(int); int fullBlocksPerGrid((n + blockSize - 1) / blockSize); // Creating buffers on the device memory int *dev_oScanDataArray; cudaMalloc((void**)&dev_oScanDataArray, paddedArraySize); checkCUDAError("cudaMalloc for dev_oScanDataArray failed!"); int *dev_oData; cudaMalloc((void**)&dev_oData, size); checkCUDAError("cudaMalloc for dev_oData failed!"); int *dev_iData; cudaMalloc((void**)&dev_iData, size); checkCUDAError("cudaMalloc for dev_iData failed!"); int *dev_boolIndexArray; cudaMalloc((void**)&dev_boolIndexArray, size); checkCUDAError("cudaMalloc for dev_boolIndexArray failed!"); // Copying array buffers idata to dev_iData (Host to Device) cudaMemcpy(dev_iData, idata, size, cudaMemcpyHostToDevice); checkCUDAError("cudaMemcpy into dev_iData failed!"); // Initialize the bool array: For each index fill 1 in dev_boolIndexArray[index] if corrosponding value in dev_iData is non-zero otherwise fill 0 StreamCompaction::Common::kernMapToBoolean <<<fullBlocksPerGrid, threadsPerBlock>>> (n, dev_boolIndexArray, dev_iData); checkCUDAError("kernMapToBoolean failed!"); // Copy buffer dev_boolIndexArray to buffer dev_oScanDataArray (Device to Device) cudaMemcpy(dev_oScanDataArray, dev_boolIndexArray, size, cudaMemcpyDeviceToDevice); checkCUDAError("cudaMemcpy into dev_oScanDataArray failed!"); // Padd the dev_oScanDataArray with zero's at the end kernPaddArrayWithZero << <fullBlocksPerGridPadded, threadsPerBlock >> > (n, paddedArrayLength, dev_oScanDataArray); checkCUDAError("kernPaddArrayWithZero failed!"); timer().startGpuTimer(); // TODO scanExcusivePrefixSum(paddedArrayLength, dimension, fullBlocksPerGridPadded, threadsPerBlock, dev_oScanDataArray); checkCUDAError("scanExcusivePrefixSum failed!"); StreamCompaction::Common::kernScatter << <fullBlocksPerGrid, threadsPerBlock >> > (n, dev_oData, dev_iData, dev_boolIndexArray, dev_oScanDataArray); checkCUDAError("kernScatter failed!"); timer().endGpuTimer(); // Copying the data from dev_oData to odata (Device To Host) cudaMemcpy(odata, dev_oData, size, cudaMemcpyDeviceToHost); checkCUDAError("cudaMemcpy into odata failed!"); // Getting the size of the number of elements that were filled (Device To Host) int valueAtIndexArrayEnd, valueAtBoolArrayEnd, totalSize; cudaMemcpy(&valueAtIndexArrayEnd, dev_oScanDataArray + n - 1, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&valueAtBoolArrayEnd, dev_boolIndexArray + n - 1, sizeof(int), cudaMemcpyDeviceToHost); totalSize = valueAtBoolArrayEnd + valueAtIndexArrayEnd; // Freeing Cuda Memory cudaFree(dev_boolIndexArray); cudaFree(dev_iData); cudaFree(dev_oData); cudaFree(dev_oScanDataArray); checkCUDAError("cudaFree failed!"); return totalSize; } } }
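// Illustrative sketch (not part of the files above): the same up-sweep/down-sweep
// (Blelloch) exclusive-scan schedule as kernScanUpSweep/kernScanDownSweep, written as a
// plain host loop so the index math is easy to trace. Each level here corresponds to one
// kernel launch in scanExcusivePrefixSum; only indices with
// index % powerv1 == powerv1 - 1 do work. The device code folds the final zeroing of the
// last element into kernScanUpSweep's last thread; here it is a separate step for clarity.
// Assumes the array length is a power of two, matching the zero-padded device array.
#include <cstdio>
#include <vector>

void blelloch_exclusive_scan_host(std::vector<int>& a) {
  const int n = static_cast<int>(a.size());      // must be a power of two
  int levels = 0;
  while ((1 << levels) < n) ++levels;
  // Up-sweep: build partial sums; the root (last element) ends up holding the total.
  for (int d = 0; d < levels; ++d) {
    const int powerv1 = 1 << (d + 1), powerv2 = 1 << d;
    for (int index = powerv1 - 1; index < n; index += powerv1) {
      a[index] += a[index - powerv2];
    }
  }
  a[n - 1] = 0;                                  // clear the root before the down-sweep
  // Down-sweep: push prefix sums back down the implicit tree.
  for (int d = levels - 1; d >= 0; --d) {
    const int powerv1 = 1 << (d + 1), powerv2 = 1 << d;
    for (int index = powerv1 - 1; index < n; index += powerv1) {
      const int left = a[index - powerv2];
      a[index - powerv2] = a[index];
      a[index] += left;
    }
  }
}

int main() {
  std::vector<int> a = {3, 1, 7, 0, 4, 1, 6, 3};
  blelloch_exclusive_scan_host(a);
  for (int v : a) printf("%d ", v);              // expected: 0 3 4 11 11 15 16 22
  printf("\n");
  return 0;
}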
53b85b2682571dee9b3aa9b12b7423e87a4c5fa5.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2020 by XGBoost Contributors */ #include <thrust/unique.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/binary_search.h> #include <thrust/transform_scan.h> #include <thrust/execution_policy.h> #include <memory> #include <utility> #include "xgboost/span.h" #include "quantile.h" #include "quantile.cuh" #include "hist_util.h" #include "device_helpers_hip.cuh" #include "categorical.h" #include "common.h" namespace xgboost { namespace common { using WQSketch = HostSketchContainer::WQSketch; using SketchEntry = WQSketch::Entry; // Algorithm 4 in XGBoost's paper, using binary search to find i. template <typename EntryIter> __device__ SketchEntry BinarySearchQuery(EntryIter beg, EntryIter end, float rank) { assert(end - beg >= 2); rank *= 2; auto front = *beg; if (rank < front.rmin + front.rmax) { return *beg; } auto back = *(end - 1); if (rank >= back.rmin + back.rmax) { return back; } auto search_begin = dh::MakeTransformIterator<float>( beg, [=] __device__(SketchEntry const &entry) { return entry.rmin + entry.rmax; }); auto search_end = search_begin + (end - beg); auto i = thrust::upper_bound(thrust::seq, search_begin + 1, search_end - 1, rank) - search_begin - 1; if (rank < (*(beg + i)).RMinNext() + (*(beg + i + 1)).RMaxPrev()) { return *(beg + i); } else { return *(beg + i + 1); } } template <typename InEntry, typename ToSketchEntry> void PruneImpl(int device, common::Span<SketchContainer::OffsetT const> cuts_ptr, Span<InEntry const> sorted_data, Span<size_t const> columns_ptr_in, // could be ptr for data or cuts Span<FeatureType const> feature_types, Span<SketchEntry> out_cuts, ToSketchEntry to_sketch_entry) { dh::LaunchN(device, out_cuts.size(), [=] __device__(size_t idx) { size_t column_id = dh::SegmentId(cuts_ptr, idx); auto out_column = out_cuts.subspan( cuts_ptr[column_id], cuts_ptr[column_id + 1] - cuts_ptr[column_id]); auto in_column = sorted_data.subspan(columns_ptr_in[column_id], columns_ptr_in[column_id + 1] - columns_ptr_in[column_id]); auto to = cuts_ptr[column_id + 1] - cuts_ptr[column_id]; idx -= cuts_ptr[column_id]; auto front = to_sketch_entry(0ul, in_column, column_id); auto back = to_sketch_entry(in_column.size() - 1, in_column, column_id); auto is_cat = IsCat(feature_types, column_id); if (in_column.size() <= to || is_cat) { // cut idx equals sample idx out_column[idx] = to_sketch_entry(idx, in_column, column_id); return; } // 1 thread for each output. See A.4 for detail. auto d_out = out_column; if (idx == 0) { d_out.front() = front; return; } if (idx == to - 1) { d_out.back() = back; return; } float w = back.rmin - front.rmax; assert(w != 0); auto budget = static_cast<float>(d_out.size()); assert(budget != 0); auto q = ((static_cast<float>(idx) * w) / (static_cast<float>(to) - 1.0f) + front.rmax); auto it = dh::MakeTransformIterator<SketchEntry>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { auto e = to_sketch_entry(idx, in_column, column_id); return e; }); d_out[idx] = BinarySearchQuery(it, it + in_column.size(), q); }); } template <typename T> void CopyTo(Span<T> out, Span<T const> src) { CHECK_EQ(out.size(), src.size()); dh::safe_cuda(hipMemcpyAsync(out.data(), src.data(), out.size_bytes(), hipMemcpyDefault)); } // Compute the merge path. 
common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath( Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { auto x_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(x_ptr, idx); }), d_x.data())); auto y_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(y_ptr, idx); }), d_y.data())); using Tuple = thrust::tuple<uint64_t, uint64_t>; thrust::constant_iterator<uint64_t> a_ind_iter(0ul); thrust::constant_iterator<uint64_t> b_ind_iter(1ul); auto place_holder = thrust::make_constant_iterator<uint64_t>(0u); auto x_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(a_ind_iter, place_holder)); auto y_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder)); dh::XGBCachingDeviceAllocator<Tuple> alloc; static_assert(sizeof(Tuple) == sizeof(SketchEntry), ""); // We reuse the memory for storing merge path. common::Span<Tuple> merge_path{reinterpret_cast<Tuple *>(out.data()), out.size()}; // Determine the merge path, 0 if element is from x, 1 if it's from y. thrust::merge_by_key( thrust::hip::par(alloc), x_merge_key_it, x_merge_key_it + d_x.size(), y_merge_key_it, y_merge_key_it + d_y.size(), x_merge_val_it, y_merge_val_it, thrust::make_discard_iterator(), merge_path.data(), [=] __device__(auto const &l, auto const &r) -> bool { auto l_column_id = thrust::get<0>(l); auto r_column_id = thrust::get<0>(r); if (l_column_id == r_column_id) { return thrust::get<1>(l).value < thrust::get<1>(r).value; } return l_column_id < r_column_id; }); // Compute output ptr auto transform_it = thrust::make_zip_iterator(thrust::make_tuple(x_ptr.data(), y_ptr.data())); thrust::transform( thrust::hip::par(alloc), transform_it, transform_it + x_ptr.size(), out_ptr.data(), [] __device__(auto const& t) { return thrust::get<0>(t) + thrust::get<1>(t); }); // 0^th is the indicator, 1^th is placeholder auto get_ind = []XGBOOST_DEVICE(Tuple const& t) { return thrust::get<0>(t); }; // 0^th is the counter for x, 1^th for y. auto get_x = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<0>(t); }; auto get_y = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<1>(t); }; auto scan_key_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(out_ptr, idx); }); auto scan_val_it = dh::MakeTransformIterator<Tuple>( merge_path.data(), [=] __device__(Tuple const &t) -> Tuple { auto ind = get_ind(t); // == 0 if element is from x // x_counter, y_counter return thrust::make_tuple<uint64_t, uint64_t>(!ind, ind); }); // Compute the index for both x and y (which of the element in a and b are used in each // comparison) by scaning the binary merge path. Take output [(x_0, y_0), (x_0, y_1), // ...] as an example, the comparison between (x_0, y_0) adds 1 step in the merge path. // Asumming y_0 is less than x_0 so this step is torward the end of y. After the // comparison, index of y is incremented by 1 from y_0 to y_1, and at the same time, y_0 // is landed into output as the first element in merge result. The scan result is the // subscript of x and y. 
thrust::exclusive_scan_by_key( thrust::hip::par(alloc), scan_key_it, scan_key_it + merge_path.size(), scan_val_it, merge_path.data(), thrust::make_tuple<uint64_t, uint64_t>(0ul, 0ul), thrust::equal_to<size_t>{}, [=] __device__(Tuple const &l, Tuple const &r) -> Tuple { return thrust::make_tuple(get_x(l) + get_x(r), get_y(l) + get_y(r)); }); return merge_path; } // Merge d_x and d_y into out. Because the final output depends on predicate (which // summary does the output element come from) result by definition of merged rank. So we // run it in 2 passes to obtain the merge path and then customize the standard merge // algorithm. void MergeImpl(int32_t device, Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { dh::safe_cuda(hipSetDevice(device)); CHECK_EQ(d_x.size() + d_y.size(), out.size()); CHECK_EQ(x_ptr.size(), out_ptr.size()); CHECK_EQ(y_ptr.size(), out_ptr.size()); auto d_merge_path = MergePath(d_x, x_ptr, d_y, y_ptr, out, out_ptr); auto d_out = out; dh::LaunchN(device, d_out.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(out_ptr, idx); idx -= out_ptr[column_id]; auto d_x_column = d_x.subspan(x_ptr[column_id], x_ptr[column_id + 1] - x_ptr[column_id]); auto d_y_column = d_y.subspan(y_ptr[column_id], y_ptr[column_id + 1] - y_ptr[column_id]); auto d_out_column = d_out.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); auto d_path_column = d_merge_path.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); uint64_t a_ind, b_ind; thrust::tie(a_ind, b_ind) = d_path_column[idx]; // Handle empty column. If both columns are empty, we should not get this column_id // as result of binary search. assert((d_x_column.size() != 0) || (d_y_column.size() != 0)); if (d_x_column.size() == 0) { d_out_column[idx] = d_y_column[b_ind]; return; } if (d_y_column.size() == 0) { d_out_column[idx] = d_x_column[a_ind]; return; } // Handle trailing elements. assert(a_ind <= d_x_column.size()); if (a_ind == d_x_column.size()) { // Trailing elements are from y because there's no more x to land. auto y_elem = d_y_column[b_ind]; d_out_column[idx] = SketchEntry(y_elem.rmin + d_x_column.back().RMinNext(), y_elem.rmax + d_x_column.back().rmax, y_elem.wmin, y_elem.value); return; } auto x_elem = d_x_column[a_ind]; assert(b_ind <= d_y_column.size()); if (b_ind == d_y_column.size()) { d_out_column[idx] = SketchEntry(x_elem.rmin + d_y_column.back().RMinNext(), x_elem.rmax + d_y_column.back().rmax, x_elem.wmin, x_elem.value); return; } auto y_elem = d_y_column[b_ind]; /* Merge procedure. See A.3 merge operation eq (26) ~ (28). The trick to interpret it is rewriting the symbols on both side of equality. Take eq (26) as an example: Expand it according to definition of extended rank then rewrite it into: If $k_i$ is the $i$ element in output and \textbf{comes from $D_1$}: r_\bar{D}(k_i) = r_{\bar{D_1}}(k_i) + w_{\bar{{D_1}}}(k_i) + [r_{\bar{D_2}}(x_i) + w_{\bar{D_2}}(x_i)] Where $x_i$ is the largest element in $D_2$ that's less than $k_i$. $k_i$ can be used in $D_1$ as it's since $k_i \in D_1$. Other 2 equations can be applied similarly with $k_i$ comes from different $D$. just use different symbol on different source of summary. 
*/ assert(idx < d_out_column.size()); if (x_elem.value == y_elem.value) { d_out_column[idx] = SketchEntry{x_elem.rmin + y_elem.rmin, x_elem.rmax + y_elem.rmax, x_elem.wmin + y_elem.wmin, x_elem.value}; } else if (x_elem.value < y_elem.value) { // elem from x is landed. yprev_min is the element in D_2 that's 1 rank less than // x_elem if we put x_elem in D_2. float yprev_min = b_ind == 0 ? 0.0f : d_y_column[b_ind - 1].RMinNext(); // rmin should be equal to x_elem.rmin + x_elem.wmin + yprev_min. But for // implementation, the weight is stored in a separated field and we compute the // extended definition on the fly when needed. d_out_column[idx] = SketchEntry{x_elem.rmin + yprev_min, x_elem.rmax + y_elem.RMaxPrev(), x_elem.wmin, x_elem.value}; } else { // elem from y is landed. float xprev_min = a_ind == 0 ? 0.0f : d_x_column[a_ind - 1].RMinNext(); d_out_column[idx] = SketchEntry{xprev_min + y_elem.rmin, x_elem.RMaxPrev() + y_elem.rmax, y_elem.wmin, y_elem.value}; } }); } void SketchContainer::Push(Span<Entry const> entries, Span<size_t> columns_ptr, common::Span<OffsetT const> cuts_ptr, size_t total_cuts, Span<float> weights) { Span<SketchEntry> out; dh::caching_device_vector<SketchEntry> cuts; bool first_window = this->Current().empty(); if (!first_window) { cuts.resize(total_cuts); out = dh::ToSpan(cuts); } else { this->Current().resize(total_cuts); out = dh::ToSpan(this->Current()); } auto ft = this->feature_types_.ConstDeviceSpan(); if (weights.empty()) { auto to_sketch_entry = [] __device__(size_t sample_idx, Span<Entry const> const &column, size_t) { float rmin = sample_idx; float rmax = sample_idx + 1; return SketchEntry{rmin, rmax, 1, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } else { auto to_sketch_entry = [weights, columns_ptr] __device__( size_t sample_idx, Span<Entry const> const &column, size_t column_id) { Span<float const> column_weights_scan = weights.subspan(columns_ptr[column_id], column.size()); float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f; float rmax = column_weights_scan[sample_idx]; float wmin = rmax - rmin; wmin = wmin < 0 ? kRtEps : wmin; // GPU scan can generate floating error. 
return SketchEntry{rmin, rmax, wmin, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } if (!first_window) { CHECK_EQ(this->columns_ptr_.Size(), cuts_ptr.size()); this->Merge(cuts_ptr, out); this->FixError(); } else { this->columns_ptr_.SetDevice(device_); this->columns_ptr_.Resize(cuts_ptr.size()); auto d_cuts_ptr = this->columns_ptr_.DeviceSpan(); CopyTo(d_cuts_ptr, cuts_ptr); } } size_t SketchContainer::Unique() { timer_.Start(__func__); dh::safe_cuda(hipSetDevice(device_)); this->columns_ptr_.SetDevice(device_); Span<OffsetT> d_column_scan = this->columns_ptr_.DeviceSpan(); CHECK_EQ(d_column_scan.size(), num_columns_ + 1); Span<SketchEntry> entries = dh::ToSpan(this->Current()); HostDeviceVector<OffsetT> scan_out(d_column_scan.size()); scan_out.SetDevice(device_); auto d_scan_out = scan_out.DeviceSpan(); d_column_scan = this->columns_ptr_.DeviceSpan(); size_t n_uniques = dh::SegmentedUnique( d_column_scan.data(), d_column_scan.data() + d_column_scan.size(), entries.data(), entries.data() + entries.size(), scan_out.DevicePointer(), entries.data(), detail::SketchUnique{}); this->columns_ptr_.Copy(scan_out); CHECK(!this->columns_ptr_.HostCanRead()); this->Current().resize(n_uniques); timer_.Stop(__func__); return n_uniques; } void SketchContainer::Prune(size_t to) { timer_.Start(__func__); dh::safe_cuda(hipSetDevice(device_)); this->Unique(); OffsetT to_total = 0; auto& h_columns_ptr = columns_ptr_b_.HostVector(); h_columns_ptr[0] = to_total; auto const& h_feature_types = feature_types_.ConstHostSpan(); for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t length = this->Column(i).size(); length = ::min(length, to); if (IsCat(h_feature_types, i)) { length = this->Column(i).size(); } to_total += length; h_columns_ptr[i+1] = to_total; } this->Other().resize(to_total); auto d_columns_ptr_in = this->columns_ptr_.ConstDeviceSpan(); auto d_columns_ptr_out = columns_ptr_b_.ConstDeviceSpan(); auto out = dh::ToSpan(this->Other()); auto in = dh::ToSpan(this->Current()); auto no_op = [] __device__(size_t sample_idx, Span<SketchEntry const> const &entries, size_t) { return entries[sample_idx]; }; // NOLINT auto ft = this->feature_types_.ConstDeviceSpan(); PruneImpl<SketchEntry>(device_, d_columns_ptr_out, in, d_columns_ptr_in, ft, out, no_op); this->columns_ptr_.Copy(columns_ptr_b_); this->Alternate(); timer_.Stop(__func__); } void SketchContainer::Merge(Span<OffsetT const> d_that_columns_ptr, Span<SketchEntry const> that) { dh::safe_cuda(hipSetDevice(device_)); timer_.Start(__func__); if (this->Current().size() == 0) { CHECK_EQ(this->columns_ptr_.HostVector().back(), 0); CHECK_EQ(this->columns_ptr_.HostVector().size(), d_that_columns_ptr.size()); CHECK_EQ(columns_ptr_.Size(), num_columns_ + 1); thrust::copy(thrust::device, d_that_columns_ptr.data(), d_that_columns_ptr.data() + d_that_columns_ptr.size(), this->columns_ptr_.DevicePointer()); auto total = this->columns_ptr_.HostVector().back(); this->Current().resize(total); CopyTo(dh::ToSpan(this->Current()), that); timer_.Stop(__func__); return; } this->Other().resize(this->Current().size() + that.size()); CHECK_EQ(d_that_columns_ptr.size(), this->columns_ptr_.Size()); MergeImpl(device_, this->Data(), this->ColumnsPtr(), that, d_that_columns_ptr, dh::ToSpan(this->Other()), columns_ptr_b_.DeviceSpan()); this->columns_ptr_.Copy(columns_ptr_b_); CHECK_EQ(this->columns_ptr_.Size(), num_columns_ + 1); this->Alternate(); timer_.Stop(__func__); } void 
SketchContainer::FixError() { dh::safe_cuda(hipSetDevice(device_)); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); auto in = dh::ToSpan(this->Current()); dh::LaunchN(device_, in.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_columns_ptr, idx); auto in_column = in.subspan(d_columns_ptr[column_id], d_columns_ptr[column_id + 1] - d_columns_ptr[column_id]); idx -= d_columns_ptr[column_id]; float prev_rmin = idx == 0 ? 0.0f : in_column[idx-1].rmin; if (in_column[idx].rmin < prev_rmin) { in_column[idx].rmin = prev_rmin; } float prev_rmax = idx == 0 ? 0.0f : in_column[idx-1].rmax; if (in_column[idx].rmax < prev_rmax) { in_column[idx].rmax = prev_rmax; } float rmin_next = in_column[idx].RMinNext(); if (in_column[idx].rmax < rmin_next) { in_column[idx].rmax = rmin_next; } }); } void SketchContainer::AllReduce() { dh::safe_cuda(hipSetDevice(device_)); auto world = rabit::GetWorldSize(); if (world == 1) { return; } timer_.Start(__func__); if (!reducer_) { reducer_ = std::make_unique<dh::AllReducer>(); reducer_->Init(device_); } // Reduce the overhead on syncing. size_t global_sum_rows = num_rows_; rabit::Allreduce<rabit::op::Sum>(&global_sum_rows, 1); size_t intermediate_num_cuts = ::min(global_sum_rows, static_cast<size_t>(num_bins_ * kFactor)); this->Prune(intermediate_num_cuts); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); CHECK_EQ(d_columns_ptr.size(), num_columns_ + 1); size_t n = d_columns_ptr.size(); rabit::Allreduce<rabit::op::Max>(&n, 1); CHECK_EQ(n, d_columns_ptr.size()) << "Number of columns differs across workers"; // Get the columns ptr from all workers dh::device_vector<SketchContainer::OffsetT> gathered_ptrs; gathered_ptrs.resize(d_columns_ptr.size() * world, 0); size_t rank = rabit::GetRank(); auto offset = rank * d_columns_ptr.size(); thrust::copy(thrust::device, d_columns_ptr.data(), d_columns_ptr.data() + d_columns_ptr.size(), gathered_ptrs.begin() + offset); reducer_->AllReduceSum(gathered_ptrs.data().get(), gathered_ptrs.data().get(), gathered_ptrs.size()); // Get the data from all workers. std::vector<size_t> recv_lengths; dh::caching_device_vector<char> recvbuf; reducer_->AllGather(this->Current().data().get(), dh::ToSpan(this->Current()).size_bytes(), &recv_lengths, &recvbuf); reducer_->Synchronize(); // Segment the received data. auto s_recvbuf = dh::ToSpan(recvbuf); std::vector<Span<SketchEntry>> allworkers; offset = 0; for (int32_t i = 0; i < world; ++i) { size_t length_as_bytes = recv_lengths.at(i); auto raw = s_recvbuf.subspan(offset, length_as_bytes); auto sketch = Span<SketchEntry>(reinterpret_cast<SketchEntry *>(raw.data()), length_as_bytes / sizeof(SketchEntry)); allworkers.emplace_back(sketch); offset += length_as_bytes; } // Merge them into a new sketch. SketchContainer new_sketch(this->feature_types_, num_bins_, this->num_columns_, global_sum_rows, this->device_); for (size_t i = 0; i < allworkers.size(); ++i) { auto worker = allworkers[i]; auto worker_ptr = dh::ToSpan(gathered_ptrs) .subspan(i * d_columns_ptr.size(), d_columns_ptr.size()); new_sketch.Merge(worker_ptr, worker); new_sketch.FixError(); } *this = std::move(new_sketch); timer_.Stop(__func__); } void SketchContainer::MakeCuts(HistogramCuts* p_cuts) { timer_.Start(__func__); dh::safe_cuda(hipSetDevice(device_)); p_cuts->min_vals_.Resize(num_columns_); // Sync between workers. this->AllReduce(); // Prune to final number of bins. 
this->Prune(num_bins_ + 1); this->Unique(); this->FixError(); // Set up inputs auto d_in_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); p_cuts->min_vals_.SetDevice(device_); auto d_min_values = p_cuts->min_vals_.DeviceSpan(); auto in_cut_values = dh::ToSpan(this->Current()); // Set up output ptr p_cuts->cut_ptrs_.SetDevice(device_); auto& h_out_columns_ptr = p_cuts->cut_ptrs_.HostVector(); h_out_columns_ptr.clear(); h_out_columns_ptr.push_back(0); auto const& h_feature_types = this->feature_types_.ConstHostSpan(); for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t column_size = ::max(static_cast<size_t>(1ul), this->Column(i).size()); if (IsCat(h_feature_types, i)) { h_out_columns_ptr.push_back(static_cast<size_t>(column_size)); } else { h_out_columns_ptr.push_back(::min(static_cast<size_t>(column_size), static_cast<size_t>(num_bins_))); } } std::partial_sum(h_out_columns_ptr.begin(), h_out_columns_ptr.end(), h_out_columns_ptr.begin()); auto d_out_columns_ptr = p_cuts->cut_ptrs_.ConstDeviceSpan(); // Set up output cuts size_t total_bins = h_out_columns_ptr.back(); p_cuts->cut_values_.SetDevice(device_); p_cuts->cut_values_.Resize(total_bins); auto out_cut_values = p_cuts->cut_values_.DeviceSpan(); auto d_ft = feature_types_.ConstDeviceSpan(); dh::LaunchN(0, total_bins, [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_out_columns_ptr, idx); auto in_column = in_cut_values.subspan(d_in_columns_ptr[column_id], d_in_columns_ptr[column_id + 1] - d_in_columns_ptr[column_id]); auto out_column = out_cut_values.subspan(d_out_columns_ptr[column_id], d_out_columns_ptr[column_id + 1] - d_out_columns_ptr[column_id]); idx -= d_out_columns_ptr[column_id]; if (in_column.size() == 0) { // If the column is empty, we push a dummy value. It won't affect training as the // column is empty, trees cannot split on it. This is just to be consistent with // rest of the library. if (idx == 0) { d_min_values[column_id] = kRtEps; out_column[0] = kRtEps; assert(out_column.size() == 1); } return; } if (idx == 0 && !IsCat(d_ft, column_id)) { auto mval = in_column[idx].value; d_min_values[column_id] = mval - (fabs(mval) + 1e-5); } if (IsCat(d_ft, column_id)) { assert(out_column.size() == in_column.size()); out_column[idx] = in_column[idx].value; return; } // Last thread is responsible for setting a value that's greater than other cuts. if (idx == out_column.size() - 1) { const bst_float cpt = in_column.back().value; // this must be bigger than last value in a scale const bst_float last = cpt + (fabs(cpt) + 1e-5); out_column[idx] = last; return; } assert(idx+1 < in_column.size()); out_column[idx] = in_column[idx+1].value; }); timer_.Stop(__func__); } } // namespace common } // namespace xgboost
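// Illustrative sketch (not part of the XGBoost pair above or below): MergePath stores, for
// every merged output slot, an indicator saying whether that slot came from x (0) or from
// y (1); the exclusive scan of (!ind, ind) then yields, per slot, how many elements of x
// and of y have already been consumed, i.e. the a_ind/b_ind subscripts read in MergeImpl.
// This host loop reproduces that bookkeeping for one column on hand-picked data so the
// idea can be traced by hand; the real code does it segment-wise with thrust.
#include <cstdio>
#include <vector>
#include <utility>

int main() {
  // Indicator path for merging x = {1, 4, 9} and y = {2, 3, 7, 10}: 0 means "took from x".
  std::vector<int> path = {0, 1, 1, 0, 1, 0, 1};
  std::vector<std::pair<int, int>> idx(path.size());  // (a_ind, b_ind) per output slot
  int consumed_x = 0, consumed_y = 0;                 // exclusive scan of (!ind, ind)
  for (size_t i = 0; i < path.size(); ++i) {
    idx[i] = {consumed_x, consumed_y};
    consumed_x += (path[i] == 0);
    consumed_y += (path[i] == 1);
  }
  for (size_t i = 0; i < path.size(); ++i) {
    printf("out[%zu] reads %s[%d]\n", i, path[i] == 0 ? "x" : "y",
           path[i] == 0 ? idx[i].first : idx[i].second);
  }
  // Prints x[0], y[0], y[1], x[1], y[2], x[2], y[3] -> merged {1, 2, 3, 4, 7, 9, 10}.
  return 0;
}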
53b85b2682571dee9b3aa9b12b7423e87a4c5fa5.cu
/*! * Copyright 2020 by XGBoost Contributors */ #include <thrust/unique.h> #include <thrust/iterator/discard_iterator.h> #include <thrust/binary_search.h> #include <thrust/transform_scan.h> #include <thrust/execution_policy.h> #include <memory> #include <utility> #include "xgboost/span.h" #include "quantile.h" #include "quantile.cuh" #include "hist_util.h" #include "device_helpers.cuh" #include "categorical.h" #include "common.h" namespace xgboost { namespace common { using WQSketch = HostSketchContainer::WQSketch; using SketchEntry = WQSketch::Entry; // Algorithm 4 in XGBoost's paper, using binary search to find i. template <typename EntryIter> __device__ SketchEntry BinarySearchQuery(EntryIter beg, EntryIter end, float rank) { assert(end - beg >= 2); rank *= 2; auto front = *beg; if (rank < front.rmin + front.rmax) { return *beg; } auto back = *(end - 1); if (rank >= back.rmin + back.rmax) { return back; } auto search_begin = dh::MakeTransformIterator<float>( beg, [=] __device__(SketchEntry const &entry) { return entry.rmin + entry.rmax; }); auto search_end = search_begin + (end - beg); auto i = thrust::upper_bound(thrust::seq, search_begin + 1, search_end - 1, rank) - search_begin - 1; if (rank < (*(beg + i)).RMinNext() + (*(beg + i + 1)).RMaxPrev()) { return *(beg + i); } else { return *(beg + i + 1); } } template <typename InEntry, typename ToSketchEntry> void PruneImpl(int device, common::Span<SketchContainer::OffsetT const> cuts_ptr, Span<InEntry const> sorted_data, Span<size_t const> columns_ptr_in, // could be ptr for data or cuts Span<FeatureType const> feature_types, Span<SketchEntry> out_cuts, ToSketchEntry to_sketch_entry) { dh::LaunchN(device, out_cuts.size(), [=] __device__(size_t idx) { size_t column_id = dh::SegmentId(cuts_ptr, idx); auto out_column = out_cuts.subspan( cuts_ptr[column_id], cuts_ptr[column_id + 1] - cuts_ptr[column_id]); auto in_column = sorted_data.subspan(columns_ptr_in[column_id], columns_ptr_in[column_id + 1] - columns_ptr_in[column_id]); auto to = cuts_ptr[column_id + 1] - cuts_ptr[column_id]; idx -= cuts_ptr[column_id]; auto front = to_sketch_entry(0ul, in_column, column_id); auto back = to_sketch_entry(in_column.size() - 1, in_column, column_id); auto is_cat = IsCat(feature_types, column_id); if (in_column.size() <= to || is_cat) { // cut idx equals sample idx out_column[idx] = to_sketch_entry(idx, in_column, column_id); return; } // 1 thread for each output. See A.4 for detail. auto d_out = out_column; if (idx == 0) { d_out.front() = front; return; } if (idx == to - 1) { d_out.back() = back; return; } float w = back.rmin - front.rmax; assert(w != 0); auto budget = static_cast<float>(d_out.size()); assert(budget != 0); auto q = ((static_cast<float>(idx) * w) / (static_cast<float>(to) - 1.0f) + front.rmax); auto it = dh::MakeTransformIterator<SketchEntry>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { auto e = to_sketch_entry(idx, in_column, column_id); return e; }); d_out[idx] = BinarySearchQuery(it, it + in_column.size(), q); }); } template <typename T> void CopyTo(Span<T> out, Span<T const> src) { CHECK_EQ(out.size(), src.size()); dh::safe_cuda(cudaMemcpyAsync(out.data(), src.data(), out.size_bytes(), cudaMemcpyDefault)); } // Compute the merge path. 
common::Span<thrust::tuple<uint64_t, uint64_t>> MergePath( Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { auto x_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(x_ptr, idx); }), d_x.data())); auto y_merge_key_it = thrust::make_zip_iterator(thrust::make_tuple( dh::MakeTransformIterator<bst_row_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(y_ptr, idx); }), d_y.data())); using Tuple = thrust::tuple<uint64_t, uint64_t>; thrust::constant_iterator<uint64_t> a_ind_iter(0ul); thrust::constant_iterator<uint64_t> b_ind_iter(1ul); auto place_holder = thrust::make_constant_iterator<uint64_t>(0u); auto x_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(a_ind_iter, place_holder)); auto y_merge_val_it = thrust::make_zip_iterator(thrust::make_tuple(b_ind_iter, place_holder)); dh::XGBCachingDeviceAllocator<Tuple> alloc; static_assert(sizeof(Tuple) == sizeof(SketchEntry), ""); // We reuse the memory for storing merge path. common::Span<Tuple> merge_path{reinterpret_cast<Tuple *>(out.data()), out.size()}; // Determine the merge path, 0 if element is from x, 1 if it's from y. thrust::merge_by_key( thrust::cuda::par(alloc), x_merge_key_it, x_merge_key_it + d_x.size(), y_merge_key_it, y_merge_key_it + d_y.size(), x_merge_val_it, y_merge_val_it, thrust::make_discard_iterator(), merge_path.data(), [=] __device__(auto const &l, auto const &r) -> bool { auto l_column_id = thrust::get<0>(l); auto r_column_id = thrust::get<0>(r); if (l_column_id == r_column_id) { return thrust::get<1>(l).value < thrust::get<1>(r).value; } return l_column_id < r_column_id; }); // Compute output ptr auto transform_it = thrust::make_zip_iterator(thrust::make_tuple(x_ptr.data(), y_ptr.data())); thrust::transform( thrust::cuda::par(alloc), transform_it, transform_it + x_ptr.size(), out_ptr.data(), [] __device__(auto const& t) { return thrust::get<0>(t) + thrust::get<1>(t); }); // 0^th is the indicator, 1^th is placeholder auto get_ind = []XGBOOST_DEVICE(Tuple const& t) { return thrust::get<0>(t); }; // 0^th is the counter for x, 1^th for y. auto get_x = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<0>(t); }; auto get_y = []XGBOOST_DEVICE(Tuple const &t) { return thrust::get<1>(t); }; auto scan_key_it = dh::MakeTransformIterator<size_t>( thrust::make_counting_iterator(0ul), [=] __device__(size_t idx) { return dh::SegmentId(out_ptr, idx); }); auto scan_val_it = dh::MakeTransformIterator<Tuple>( merge_path.data(), [=] __device__(Tuple const &t) -> Tuple { auto ind = get_ind(t); // == 0 if element is from x // x_counter, y_counter return thrust::make_tuple<uint64_t, uint64_t>(!ind, ind); }); // Compute the index for both x and y (which of the element in a and b are used in each // comparison) by scaning the binary merge path. Take output [(x_0, y_0), (x_0, y_1), // ...] as an example, the comparison between (x_0, y_0) adds 1 step in the merge path. // Asumming y_0 is less than x_0 so this step is torward the end of y. After the // comparison, index of y is incremented by 1 from y_0 to y_1, and at the same time, y_0 // is landed into output as the first element in merge result. The scan result is the // subscript of x and y. 
thrust::exclusive_scan_by_key( thrust::cuda::par(alloc), scan_key_it, scan_key_it + merge_path.size(), scan_val_it, merge_path.data(), thrust::make_tuple<uint64_t, uint64_t>(0ul, 0ul), thrust::equal_to<size_t>{}, [=] __device__(Tuple const &l, Tuple const &r) -> Tuple { return thrust::make_tuple(get_x(l) + get_x(r), get_y(l) + get_y(r)); }); return merge_path; } // Merge d_x and d_y into out. Because the final output depends on predicate (which // summary does the output element come from) result by definition of merged rank. So we // run it in 2 passes to obtain the merge path and then customize the standard merge // algorithm. void MergeImpl(int32_t device, Span<SketchEntry const> const &d_x, Span<bst_row_t const> const &x_ptr, Span<SketchEntry const> const &d_y, Span<bst_row_t const> const &y_ptr, Span<SketchEntry> out, Span<bst_row_t> out_ptr) { dh::safe_cuda(cudaSetDevice(device)); CHECK_EQ(d_x.size() + d_y.size(), out.size()); CHECK_EQ(x_ptr.size(), out_ptr.size()); CHECK_EQ(y_ptr.size(), out_ptr.size()); auto d_merge_path = MergePath(d_x, x_ptr, d_y, y_ptr, out, out_ptr); auto d_out = out; dh::LaunchN(device, d_out.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(out_ptr, idx); idx -= out_ptr[column_id]; auto d_x_column = d_x.subspan(x_ptr[column_id], x_ptr[column_id + 1] - x_ptr[column_id]); auto d_y_column = d_y.subspan(y_ptr[column_id], y_ptr[column_id + 1] - y_ptr[column_id]); auto d_out_column = d_out.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); auto d_path_column = d_merge_path.subspan( out_ptr[column_id], out_ptr[column_id + 1] - out_ptr[column_id]); uint64_t a_ind, b_ind; thrust::tie(a_ind, b_ind) = d_path_column[idx]; // Handle empty column. If both columns are empty, we should not get this column_id // as result of binary search. assert((d_x_column.size() != 0) || (d_y_column.size() != 0)); if (d_x_column.size() == 0) { d_out_column[idx] = d_y_column[b_ind]; return; } if (d_y_column.size() == 0) { d_out_column[idx] = d_x_column[a_ind]; return; } // Handle trailing elements. assert(a_ind <= d_x_column.size()); if (a_ind == d_x_column.size()) { // Trailing elements are from y because there's no more x to land. auto y_elem = d_y_column[b_ind]; d_out_column[idx] = SketchEntry(y_elem.rmin + d_x_column.back().RMinNext(), y_elem.rmax + d_x_column.back().rmax, y_elem.wmin, y_elem.value); return; } auto x_elem = d_x_column[a_ind]; assert(b_ind <= d_y_column.size()); if (b_ind == d_y_column.size()) { d_out_column[idx] = SketchEntry(x_elem.rmin + d_y_column.back().RMinNext(), x_elem.rmax + d_y_column.back().rmax, x_elem.wmin, x_elem.value); return; } auto y_elem = d_y_column[b_ind]; /* Merge procedure. See A.3 merge operation eq (26) ~ (28). The trick to interpret it is rewriting the symbols on both side of equality. Take eq (26) as an example: Expand it according to definition of extended rank then rewrite it into: If $k_i$ is the $i$ element in output and \textbf{comes from $D_1$}: r_\bar{D}(k_i) = r_{\bar{D_1}}(k_i) + w_{\bar{{D_1}}}(k_i) + [r_{\bar{D_2}}(x_i) + w_{\bar{D_2}}(x_i)] Where $x_i$ is the largest element in $D_2$ that's less than $k_i$. $k_i$ can be used in $D_1$ as it's since $k_i \in D_1$. Other 2 equations can be applied similarly with $k_i$ comes from different $D$. just use different symbol on different source of summary. 
*/ assert(idx < d_out_column.size()); if (x_elem.value == y_elem.value) { d_out_column[idx] = SketchEntry{x_elem.rmin + y_elem.rmin, x_elem.rmax + y_elem.rmax, x_elem.wmin + y_elem.wmin, x_elem.value}; } else if (x_elem.value < y_elem.value) { // elem from x is landed. yprev_min is the element in D_2 that's 1 rank less than // x_elem if we put x_elem in D_2. float yprev_min = b_ind == 0 ? 0.0f : d_y_column[b_ind - 1].RMinNext(); // rmin should be equal to x_elem.rmin + x_elem.wmin + yprev_min. But for // implementation, the weight is stored in a separated field and we compute the // extended definition on the fly when needed. d_out_column[idx] = SketchEntry{x_elem.rmin + yprev_min, x_elem.rmax + y_elem.RMaxPrev(), x_elem.wmin, x_elem.value}; } else { // elem from y is landed. float xprev_min = a_ind == 0 ? 0.0f : d_x_column[a_ind - 1].RMinNext(); d_out_column[idx] = SketchEntry{xprev_min + y_elem.rmin, x_elem.RMaxPrev() + y_elem.rmax, y_elem.wmin, y_elem.value}; } }); } void SketchContainer::Push(Span<Entry const> entries, Span<size_t> columns_ptr, common::Span<OffsetT const> cuts_ptr, size_t total_cuts, Span<float> weights) { Span<SketchEntry> out; dh::caching_device_vector<SketchEntry> cuts; bool first_window = this->Current().empty(); if (!first_window) { cuts.resize(total_cuts); out = dh::ToSpan(cuts); } else { this->Current().resize(total_cuts); out = dh::ToSpan(this->Current()); } auto ft = this->feature_types_.ConstDeviceSpan(); if (weights.empty()) { auto to_sketch_entry = [] __device__(size_t sample_idx, Span<Entry const> const &column, size_t) { float rmin = sample_idx; float rmax = sample_idx + 1; return SketchEntry{rmin, rmax, 1, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } else { auto to_sketch_entry = [weights, columns_ptr] __device__( size_t sample_idx, Span<Entry const> const &column, size_t column_id) { Span<float const> column_weights_scan = weights.subspan(columns_ptr[column_id], column.size()); float rmin = sample_idx > 0 ? column_weights_scan[sample_idx - 1] : 0.0f; float rmax = column_weights_scan[sample_idx]; float wmin = rmax - rmin; wmin = wmin < 0 ? kRtEps : wmin; // GPU scan can generate floating error. 
return SketchEntry{rmin, rmax, wmin, column[sample_idx].fvalue}; }; // NOLINT PruneImpl<Entry>(device_, cuts_ptr, entries, columns_ptr, ft, out, to_sketch_entry); } if (!first_window) { CHECK_EQ(this->columns_ptr_.Size(), cuts_ptr.size()); this->Merge(cuts_ptr, out); this->FixError(); } else { this->columns_ptr_.SetDevice(device_); this->columns_ptr_.Resize(cuts_ptr.size()); auto d_cuts_ptr = this->columns_ptr_.DeviceSpan(); CopyTo(d_cuts_ptr, cuts_ptr); } } size_t SketchContainer::Unique() { timer_.Start(__func__); dh::safe_cuda(cudaSetDevice(device_)); this->columns_ptr_.SetDevice(device_); Span<OffsetT> d_column_scan = this->columns_ptr_.DeviceSpan(); CHECK_EQ(d_column_scan.size(), num_columns_ + 1); Span<SketchEntry> entries = dh::ToSpan(this->Current()); HostDeviceVector<OffsetT> scan_out(d_column_scan.size()); scan_out.SetDevice(device_); auto d_scan_out = scan_out.DeviceSpan(); d_column_scan = this->columns_ptr_.DeviceSpan(); size_t n_uniques = dh::SegmentedUnique( d_column_scan.data(), d_column_scan.data() + d_column_scan.size(), entries.data(), entries.data() + entries.size(), scan_out.DevicePointer(), entries.data(), detail::SketchUnique{}); this->columns_ptr_.Copy(scan_out); CHECK(!this->columns_ptr_.HostCanRead()); this->Current().resize(n_uniques); timer_.Stop(__func__); return n_uniques; } void SketchContainer::Prune(size_t to) { timer_.Start(__func__); dh::safe_cuda(cudaSetDevice(device_)); this->Unique(); OffsetT to_total = 0; auto& h_columns_ptr = columns_ptr_b_.HostVector(); h_columns_ptr[0] = to_total; auto const& h_feature_types = feature_types_.ConstHostSpan(); for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t length = this->Column(i).size(); length = std::min(length, to); if (IsCat(h_feature_types, i)) { length = this->Column(i).size(); } to_total += length; h_columns_ptr[i+1] = to_total; } this->Other().resize(to_total); auto d_columns_ptr_in = this->columns_ptr_.ConstDeviceSpan(); auto d_columns_ptr_out = columns_ptr_b_.ConstDeviceSpan(); auto out = dh::ToSpan(this->Other()); auto in = dh::ToSpan(this->Current()); auto no_op = [] __device__(size_t sample_idx, Span<SketchEntry const> const &entries, size_t) { return entries[sample_idx]; }; // NOLINT auto ft = this->feature_types_.ConstDeviceSpan(); PruneImpl<SketchEntry>(device_, d_columns_ptr_out, in, d_columns_ptr_in, ft, out, no_op); this->columns_ptr_.Copy(columns_ptr_b_); this->Alternate(); timer_.Stop(__func__); } void SketchContainer::Merge(Span<OffsetT const> d_that_columns_ptr, Span<SketchEntry const> that) { dh::safe_cuda(cudaSetDevice(device_)); timer_.Start(__func__); if (this->Current().size() == 0) { CHECK_EQ(this->columns_ptr_.HostVector().back(), 0); CHECK_EQ(this->columns_ptr_.HostVector().size(), d_that_columns_ptr.size()); CHECK_EQ(columns_ptr_.Size(), num_columns_ + 1); thrust::copy(thrust::device, d_that_columns_ptr.data(), d_that_columns_ptr.data() + d_that_columns_ptr.size(), this->columns_ptr_.DevicePointer()); auto total = this->columns_ptr_.HostVector().back(); this->Current().resize(total); CopyTo(dh::ToSpan(this->Current()), that); timer_.Stop(__func__); return; } this->Other().resize(this->Current().size() + that.size()); CHECK_EQ(d_that_columns_ptr.size(), this->columns_ptr_.Size()); MergeImpl(device_, this->Data(), this->ColumnsPtr(), that, d_that_columns_ptr, dh::ToSpan(this->Other()), columns_ptr_b_.DeviceSpan()); this->columns_ptr_.Copy(columns_ptr_b_); CHECK_EQ(this->columns_ptr_.Size(), num_columns_ + 1); this->Alternate(); timer_.Stop(__func__); } void 
SketchContainer::FixError() { dh::safe_cuda(cudaSetDevice(device_)); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); auto in = dh::ToSpan(this->Current()); dh::LaunchN(device_, in.size(), [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_columns_ptr, idx); auto in_column = in.subspan(d_columns_ptr[column_id], d_columns_ptr[column_id + 1] - d_columns_ptr[column_id]); idx -= d_columns_ptr[column_id]; float prev_rmin = idx == 0 ? 0.0f : in_column[idx-1].rmin; if (in_column[idx].rmin < prev_rmin) { in_column[idx].rmin = prev_rmin; } float prev_rmax = idx == 0 ? 0.0f : in_column[idx-1].rmax; if (in_column[idx].rmax < prev_rmax) { in_column[idx].rmax = prev_rmax; } float rmin_next = in_column[idx].RMinNext(); if (in_column[idx].rmax < rmin_next) { in_column[idx].rmax = rmin_next; } }); } void SketchContainer::AllReduce() { dh::safe_cuda(cudaSetDevice(device_)); auto world = rabit::GetWorldSize(); if (world == 1) { return; } timer_.Start(__func__); if (!reducer_) { reducer_ = std::make_unique<dh::AllReducer>(); reducer_->Init(device_); } // Reduce the overhead on syncing. size_t global_sum_rows = num_rows_; rabit::Allreduce<rabit::op::Sum>(&global_sum_rows, 1); size_t intermediate_num_cuts = std::min(global_sum_rows, static_cast<size_t>(num_bins_ * kFactor)); this->Prune(intermediate_num_cuts); auto d_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); CHECK_EQ(d_columns_ptr.size(), num_columns_ + 1); size_t n = d_columns_ptr.size(); rabit::Allreduce<rabit::op::Max>(&n, 1); CHECK_EQ(n, d_columns_ptr.size()) << "Number of columns differs across workers"; // Get the columns ptr from all workers dh::device_vector<SketchContainer::OffsetT> gathered_ptrs; gathered_ptrs.resize(d_columns_ptr.size() * world, 0); size_t rank = rabit::GetRank(); auto offset = rank * d_columns_ptr.size(); thrust::copy(thrust::device, d_columns_ptr.data(), d_columns_ptr.data() + d_columns_ptr.size(), gathered_ptrs.begin() + offset); reducer_->AllReduceSum(gathered_ptrs.data().get(), gathered_ptrs.data().get(), gathered_ptrs.size()); // Get the data from all workers. std::vector<size_t> recv_lengths; dh::caching_device_vector<char> recvbuf; reducer_->AllGather(this->Current().data().get(), dh::ToSpan(this->Current()).size_bytes(), &recv_lengths, &recvbuf); reducer_->Synchronize(); // Segment the received data. auto s_recvbuf = dh::ToSpan(recvbuf); std::vector<Span<SketchEntry>> allworkers; offset = 0; for (int32_t i = 0; i < world; ++i) { size_t length_as_bytes = recv_lengths.at(i); auto raw = s_recvbuf.subspan(offset, length_as_bytes); auto sketch = Span<SketchEntry>(reinterpret_cast<SketchEntry *>(raw.data()), length_as_bytes / sizeof(SketchEntry)); allworkers.emplace_back(sketch); offset += length_as_bytes; } // Merge them into a new sketch. SketchContainer new_sketch(this->feature_types_, num_bins_, this->num_columns_, global_sum_rows, this->device_); for (size_t i = 0; i < allworkers.size(); ++i) { auto worker = allworkers[i]; auto worker_ptr = dh::ToSpan(gathered_ptrs) .subspan(i * d_columns_ptr.size(), d_columns_ptr.size()); new_sketch.Merge(worker_ptr, worker); new_sketch.FixError(); } *this = std::move(new_sketch); timer_.Stop(__func__); } void SketchContainer::MakeCuts(HistogramCuts* p_cuts) { timer_.Start(__func__); dh::safe_cuda(cudaSetDevice(device_)); p_cuts->min_vals_.Resize(num_columns_); // Sync between workers. this->AllReduce(); // Prune to final number of bins. 
this->Prune(num_bins_ + 1); this->Unique(); this->FixError(); // Set up inputs auto d_in_columns_ptr = this->columns_ptr_.ConstDeviceSpan(); p_cuts->min_vals_.SetDevice(device_); auto d_min_values = p_cuts->min_vals_.DeviceSpan(); auto in_cut_values = dh::ToSpan(this->Current()); // Set up output ptr p_cuts->cut_ptrs_.SetDevice(device_); auto& h_out_columns_ptr = p_cuts->cut_ptrs_.HostVector(); h_out_columns_ptr.clear(); h_out_columns_ptr.push_back(0); auto const& h_feature_types = this->feature_types_.ConstHostSpan(); for (bst_feature_t i = 0; i < num_columns_; ++i) { size_t column_size = std::max(static_cast<size_t>(1ul), this->Column(i).size()); if (IsCat(h_feature_types, i)) { h_out_columns_ptr.push_back(static_cast<size_t>(column_size)); } else { h_out_columns_ptr.push_back(std::min(static_cast<size_t>(column_size), static_cast<size_t>(num_bins_))); } } std::partial_sum(h_out_columns_ptr.begin(), h_out_columns_ptr.end(), h_out_columns_ptr.begin()); auto d_out_columns_ptr = p_cuts->cut_ptrs_.ConstDeviceSpan(); // Set up output cuts size_t total_bins = h_out_columns_ptr.back(); p_cuts->cut_values_.SetDevice(device_); p_cuts->cut_values_.Resize(total_bins); auto out_cut_values = p_cuts->cut_values_.DeviceSpan(); auto d_ft = feature_types_.ConstDeviceSpan(); dh::LaunchN(0, total_bins, [=] __device__(size_t idx) { auto column_id = dh::SegmentId(d_out_columns_ptr, idx); auto in_column = in_cut_values.subspan(d_in_columns_ptr[column_id], d_in_columns_ptr[column_id + 1] - d_in_columns_ptr[column_id]); auto out_column = out_cut_values.subspan(d_out_columns_ptr[column_id], d_out_columns_ptr[column_id + 1] - d_out_columns_ptr[column_id]); idx -= d_out_columns_ptr[column_id]; if (in_column.size() == 0) { // If the column is empty, we push a dummy value. It won't affect training as the // column is empty, trees cannot split on it. This is just to be consistent with // rest of the library. if (idx == 0) { d_min_values[column_id] = kRtEps; out_column[0] = kRtEps; assert(out_column.size() == 1); } return; } if (idx == 0 && !IsCat(d_ft, column_id)) { auto mval = in_column[idx].value; d_min_values[column_id] = mval - (fabs(mval) + 1e-5); } if (IsCat(d_ft, column_id)) { assert(out_column.size() == in_column.size()); out_column[idx] = in_column[idx].value; return; } // Last thread is responsible for setting a value that's greater than other cuts. if (idx == out_column.size() - 1) { const bst_float cpt = in_column.back().value; // this must be bigger than last value in a scale const bst_float last = cpt + (fabs(cpt) + 1e-5); out_column[idx] = last; return; } assert(idx+1 < in_column.size()); out_column[idx] = in_column[idx+1].value; }); timer_.Stop(__func__); } } // namespace common } // namespace xgboost
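The quantile-sketch code above leans on a CSR-style offsets array: columns_ptr_ stores per-column boundaries and dh::SegmentId recovers the owning column of a flat element index, which is what lets FixError and MakeCuts launch one thread per element yet reason per column. A minimal, hedged sketch of just that lookup pattern follows (a binary search over the offsets); it is not XGBoost's implementation, and segment_id_of / tag_segments are placeholder names invented for this example.

#include <cstdio>
#include <cuda_runtime.h>

// Map a flat index to the segment (column) that contains it, given
// CSR-style offsets: offsets[c] <= idx < offsets[c + 1]  =>  segment c.
__device__ int segment_id_of(const int* offsets, int n_offsets, int idx) {
  int lo = 0, hi = n_offsets - 1;           // candidate segments are [lo, hi)
  while (lo + 1 < hi) {
    int mid = (lo + hi) / 2;
    if (offsets[mid] <= idx) lo = mid; else hi = mid;
  }
  return lo;
}

__global__ void tag_segments(const int* offsets, int n_offsets,
                             int n_elems, int* out_segment) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n_elems) out_segment[i] = segment_id_of(offsets, n_offsets, i);
}

int main() {
  const int h_offsets[] = {0, 3, 3, 7};     // three columns, the middle one empty
  const int n_elems = 7;
  int *d_offsets, *d_out;
  cudaMalloc(&d_offsets, sizeof(h_offsets));
  cudaMalloc(&d_out, n_elems * sizeof(int));
  cudaMemcpy(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);
  tag_segments<<<1, 32>>>(d_offsets, 4, n_elems, d_out);
  int h_out[n_elems];
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n_elems; ++i) printf("%d ", h_out[i]);  // 0 0 0 2 2 2 2
  printf("\n");
  cudaFree(d_offsets); cudaFree(d_out);
  return 0;
}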
045710001bce4cd92ac626fad55e19d2b5fc896c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright (c) 2019 by Contributors * \file array/cuda/array_scatter.cu * \brief Array scatter GPU implementation */ #include <dgl/array.h> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { template <typename DType, typename IdType> __global__ void _ScatterKernel(const IdType* index, const DType* value, int64_t length, DType* out) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[index[tx]] = value[tx]; tx += stride_x; } } template <DLDeviceType XPU, typename DType, typename IdType> void Scatter_(IdArray index, NDArray value, NDArray out) { const int64_t len = index->shape[0]; const IdType* idx = index.Ptr<IdType>(); const DType* val = value.Ptr<DType>(); DType* outd = out.Ptr<DType>(); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; hipLaunchKernelGGL(( _ScatterKernel), dim3(nb), dim3(nt), 0, thr_entry->stream, idx, val, len, outd); } template void Scatter_<kDLGPU, int32_t, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, int64_t, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, float, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, double, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, int32_t, int64_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, int64_t, int64_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, float, int64_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, double, int64_t>(IdArray, NDArray, NDArray); }; // namespace impl }; // namespace aten }; // namespace dgl
045710001bce4cd92ac626fad55e19d2b5fc896c.cu
/*! * Copyright (c) 2019 by Contributors * \file array/cuda/array_scatter.cu * \brief Array scatter GPU implementation */ #include <dgl/array.h> #include "../../runtime/cuda/cuda_common.h" #include "./utils.h" namespace dgl { using runtime::NDArray; namespace aten { namespace impl { template <typename DType, typename IdType> __global__ void _ScatterKernel(const IdType* index, const DType* value, int64_t length, DType* out) { int tx = blockIdx.x * blockDim.x + threadIdx.x; int stride_x = gridDim.x * blockDim.x; while (tx < length) { out[index[tx]] = value[tx]; tx += stride_x; } } template <DLDeviceType XPU, typename DType, typename IdType> void Scatter_(IdArray index, NDArray value, NDArray out) { const int64_t len = index->shape[0]; const IdType* idx = index.Ptr<IdType>(); const DType* val = value.Ptr<DType>(); DType* outd = out.Ptr<DType>(); auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal(); const int nt = cuda::FindNumThreads(len); const int nb = (len + nt - 1) / nt; _ScatterKernel<<<nb, nt, 0, thr_entry->stream>>>(idx, val, len, outd); } template void Scatter_<kDLGPU, int32_t, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, int64_t, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, float, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, double, int32_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, int32_t, int64_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, int64_t, int64_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, float, int64_t>(IdArray, NDArray, NDArray); template void Scatter_<kDLGPU, double, int64_t>(IdArray, NDArray, NDArray); }; // namespace impl }; // namespace aten }; // namespace dgl
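The scatter pair above writes out[index[i]] = value[i] with a grid-stride loop; if index contains duplicate entries, the surviving value for that slot is simply whichever thread writes last. A minimal standalone sketch of the same pattern on raw pointers follows; the names, the fixed 256-thread block (instead of DGL's FindNumThreads), and the use of the default stream are assumptions for this example, not DGL's NDArray-based entry point.

#include <cstdio>
#include <cuda_runtime.h>

// Same scatter pattern as above, on raw pointers: out[index[i]] = value[i].
__global__ void ScatterKernel(const int* index, const float* value,
                              int64_t length, float* out) {
  int64_t tx = blockIdx.x * blockDim.x + threadIdx.x;
  int64_t stride = (int64_t)gridDim.x * blockDim.x;
  for (; tx < length; tx += stride) out[index[tx]] = value[tx];
}

int main() {
  const int64_t len = 4, out_len = 8;
  int h_idx[] = {7, 0, 3, 5};
  float h_val[] = {1.f, 2.f, 3.f, 4.f};
  int* d_idx; float *d_val, *d_out;
  cudaMalloc(&d_idx, len * sizeof(int));
  cudaMalloc(&d_val, len * sizeof(float));
  cudaMalloc(&d_out, out_len * sizeof(float));
  cudaMemset(d_out, 0, out_len * sizeof(float));
  cudaMemcpy(d_idx, h_idx, len * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_val, h_val, len * sizeof(float), cudaMemcpyHostToDevice);
  int nt = 256, nb = (int)((len + nt - 1) / nt);
  ScatterKernel<<<nb, nt>>>(d_idx, d_val, len, d_out);
  float h_out[out_len];
  cudaMemcpy(h_out, d_out, out_len * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < out_len; ++i) printf("%g ", h_out[i]);  // 2 0 0 3 0 4 0 1
  printf("\n");
  cudaFree(d_idx); cudaFree(d_val); cudaFree(d_out);
  return 0;
}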
768b6cddd2cdd5399861f4332fdbdd6ae696917b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2012 The Trustees of Indiana University. All rights reserved. CGL MapReduce Framework on GPUs and CPUs Code Name: Panda File: PandaUtils.cu First Version: 2012-07-01 V0.1 Current Version: 2012-09-01 V0.3 Last Updates: 2012-09-02 Developer: Hui Li ([email protected]) This is the source code for Panda, a MapReduce runtime on GPUs and CPUs. */ #include "Panda.h" #include "UserAPI.h" #ifdef _WIN32 #include <windows.h> #include <time.h> #elif MACOS #include <sys/param.h> #include <sys/sysctl.h> #elif __linux #include <unistd.h> #include <sys/time.h> #endif #ifndef __PANDAUTILS_CU__ #define __PANDAUTILS_CU__ int getGPUCoresNum() { //assert(tid<total); int arch_cores_sm[3] = {1, 8, 32 }; hipDeviceProp_t gpu_dev; int tid = 0; hipGetDeviceProperties(&gpu_dev, tid); int sm_per_multiproc = 1; if (gpu_dev.major == 9999 && gpu_dev.minor == 9999) sm_per_multiproc = 1; else if (gpu_dev.major <=2) sm_per_multiproc = arch_cores_sm[gpu_dev.major]; else sm_per_multiproc = arch_cores_sm[2]; return ((gpu_dev.multiProcessorCount)*(sm_per_multiproc)); //ShowLog("Configure Device ID:%d: Device Name:%s MultProcessorCount:%d sm_per_multiproc:%d", i, gpu_dev.name,gpu_dev.multiProcessorCount,sm_per_multiproc); } int getCPUCoresNum() { #ifdef WIN32 SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; #elif MACOS int nm[2]; size_t len = 4; uint32_t count; nm[0] = CTL_HW; nm[1] = HW_AVAILCPU; sysctl(nm, 2, &count, &len, NULL, 0); if(count < 1) { nm[1] = HW_NCPU; sysctl(nm, 2, &count, &len, NULL, 0); if(count < 1) { count = 1; } } return count; #elif __linux return sysconf(_SC_NPROCESSORS_ONLN); #endif } void DoDiskLog(const char *str){ FILE *fptr; char file_name[128]; sprintf(file_name,"%s","panda.log"); fptr = fopen(file_name,"a"); fprintf(fptr,"[PandaDiskLog]\t\t:%s\n",str); //fprintf(fptr,"%s",__VA_ARGS__); fclose(fptr); //printf("\n"); }//void double PandaTimer(){ #ifndef _WIN32 static struct timeval tv; gettimeofday(&tv,NULL); double curTime = tv.tv_sec + tv.tv_usec/1000000.0; //ShowLog("\t Panda CurTime:%f", curTime); return curTime; #else //newtime = localtime( &long_time2 ); double curTime = GetTickCount(); //ShowLog("\t Panda CurTime:%f", curTime); curTime /=1000.0; return curTime; #endif } void __checkCudaErrors(hipError_t err, const char *file, const int line ) { if(hipSuccess != err) { fprintf(stderr, "[PandaError][%s][%i]: CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) ); exit((int)err); } } __global__ void printData(gpu_context d_g_state ){ //printf("-----------printData TID:%d\n",TID); int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x); int block_start_idx = num_records_per_thread*blockIdx.x*blockDim.x; int thread_start_row_idx = block_start_idx + (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE + (threadIdx.x%STRIDE); int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE; if(thread_end_idx>d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; int begin, end, val_pos, key_pos; char *val_p,*key_p; for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){ begin=0; end=0; for (int i=0;i<map_task_idx;i++){ // begin += (d_g_state.d_intermediate_keyval_arr_arr[i].arr_len); }//for //end = begin + (d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr_len); //printf("copyData:%d begin:%d, end:%d\n",TID,begin,end); 
for(int i=begin;i<end;i++){ //keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]); val_pos = d_g_state.d_intermediate_keyval_pos_arr[i].valPos; key_pos = d_g_state.d_intermediate_keyval_pos_arr[i].keyPos; val_p = (char*)(d_g_state.d_intermediate_vals_shared_buff)+val_pos; key_p = (char*)(d_g_state.d_intermediate_keys_shared_buff)+key_pos; //keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr[i-begin]); //memcpy(key_p,p2->key,p2->keySize); //memcpy(val_p,p2->val,p2->valSize); printf("printData: TID:%d key: %s val:%d\n",TID,key_p,*(int *)val_p); }//for //if (index*recordsPerTask >= recordNum) return; }//for }//printData #ifdef DEV_MODE __global__ void printData2(gpu_context d_g_state ){ //printf("-----------printData TID:%d\n",TID); //if(TID>=d_g_state.num_input_record)return; //printf("printData2------------------------------%d\n",d_g_state.d_intermediate_keyval_arr_arr[TID].arr_len); if (TID==0){ int keyPos = (d_g_state.d_input_keyval_pos_arr[0]).keyPos; int valPos = (d_g_state.d_input_keyval_pos_arr[0]).valPos; char *keyBuf = (char *)(d_g_state.d_input_keys_shared_buff)+keyPos; MM_KEY_T *key = (MM_KEY_T*)keyBuf; printf("Key2 =====================:%d\n",key->test); for (int i=0;i<10;i++) printf("%f ",key->matrix1[i]); printf("\n"); for (int i=0;i<10;i++) printf("%f ",key->matrix2[i]); printf("\n"); for (int i=0;i<10;i++) printf("%f ",key->matrix3[i]); printf("\n"); } //keyval_t * p1 = &(d_g_state.d_input_keyval_arr[TID]); //int len = p1->valSize -1; //((char *)(p1->val))[len] = '\0'; //printf("printData TID:%d keySize:%d key %d val:%s\n",TID,p1->keySize, *(int*)(p1->key), p1->val); }//printData #endif __global__ void printData3(gpu_context d_g_state){ ShowWarn("))))))))))))))))))))))))))))))"); __syncthreads(); ShowWarn("sorted_key_arr_len:%d d_sorted_keyvals_arr_len:%d", d_g_state.d_reduced_keyval_arr_len,d_g_state.d_sorted_keyvals_arr_len); __syncthreads(); ShowWarn(" +++++++++>d_pos_arr_4_sorted_keyval_pos_arr[0]:%d", d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[0]); __syncthreads(); }//printData //-------------------------------------------------------- //start_row_id a timer // //param : start_row_id_tv //-------------------------------------------------------- /* void start_row_idTimer(TimeVal_t *start_row_id_tv) { //gettimeofday((struct timeval*)start_row_id_tv, NULL); } */ //-------------------------------------------------------- //end a timer, and print out a message // //param : msg message to print out //param : start_row_id_tv //-------------------------------------------------------- /* void endTimer(char *msg, TimeVal_t *start_row_id_tv) { hipDeviceSynchronize(); struct timeval end_tv; gettimeofday(&end_tv, NULL); time_t sec = end_tv.tv_sec - start_row_id_tv->tv_sec; time_t ms = end_tv.tv_usec - start_row_id_tv->tv_usec; time_t diff = sec * 1000000 + ms; //printf("%10s:\t\t%fms\n", msg, (double)((double)diff/1000.0)); }//void */ //---------------------------------------------------------- //print output records // //param: spec //param: num -- maximum number of output records to print //param: printFunc -- a function pointer // void printFunc(void* key, void* val, int keySize, int valSize) //---------------------------------------------------------- void PrintOutputRecords(Spec_t* spec, int num, PrintFunc_t printFunc) { /* int maxNum = num; if (maxNum > spec->outputRecordCount || maxNum < 0) maxNum = spec->outputRecordCount; for (int i = 0; i < maxNum; ++i) { int4 index = spec->outputOffsetSizes[i]; printFunc((char*)spec->outputKeys + index.x, 
(char*)spec->outputVals + index.z, index.y, index.w); } */ }//void #endif //__PANDAUTILS_CU__
768b6cddd2cdd5399861f4332fdbdd6ae696917b.cu
/* Copyright 2012 The Trustees of Indiana University. All rights reserved. CGL MapReduce Framework on GPUs and CPUs Code Name: Panda File: PandaUtils.cu First Version: 2012-07-01 V0.1 Current Version: 2012-09-01 V0.3 Last Updates: 2012-09-02 Developer: Hui Li ([email protected]) This is the source code for Panda, a MapReduce runtime on GPUs and CPUs. */ #include "Panda.h" #include "UserAPI.h" #ifdef _WIN32 #include <windows.h> #include <time.h> #elif MACOS #include <sys/param.h> #include <sys/sysctl.h> #elif __linux #include <unistd.h> #include <sys/time.h> #endif #ifndef __PANDAUTILS_CU__ #define __PANDAUTILS_CU__ int getGPUCoresNum() { //assert(tid<total); int arch_cores_sm[3] = {1, 8, 32 }; cudaDeviceProp gpu_dev; int tid = 0; cudaGetDeviceProperties(&gpu_dev, tid); int sm_per_multiproc = 1; if (gpu_dev.major == 9999 && gpu_dev.minor == 9999) sm_per_multiproc = 1; else if (gpu_dev.major <=2) sm_per_multiproc = arch_cores_sm[gpu_dev.major]; else sm_per_multiproc = arch_cores_sm[2]; return ((gpu_dev.multiProcessorCount)*(sm_per_multiproc)); //ShowLog("Configure Device ID:%d: Device Name:%s MultProcessorCount:%d sm_per_multiproc:%d", i, gpu_dev.name,gpu_dev.multiProcessorCount,sm_per_multiproc); } int getCPUCoresNum() { #ifdef WIN32 SYSTEM_INFO sysinfo; GetSystemInfo(&sysinfo); return sysinfo.dwNumberOfProcessors; #elif MACOS int nm[2]; size_t len = 4; uint32_t count; nm[0] = CTL_HW; nm[1] = HW_AVAILCPU; sysctl(nm, 2, &count, &len, NULL, 0); if(count < 1) { nm[1] = HW_NCPU; sysctl(nm, 2, &count, &len, NULL, 0); if(count < 1) { count = 1; } } return count; #elif __linux return sysconf(_SC_NPROCESSORS_ONLN); #endif } void DoDiskLog(const char *str){ FILE *fptr; char file_name[128]; sprintf(file_name,"%s","panda.log"); fptr = fopen(file_name,"a"); fprintf(fptr,"[PandaDiskLog]\t\t:%s\n",str); //fprintf(fptr,"%s",__VA_ARGS__); fclose(fptr); //printf("\n"); }//void double PandaTimer(){ #ifndef _WIN32 static struct timeval tv; gettimeofday(&tv,NULL); double curTime = tv.tv_sec + tv.tv_usec/1000000.0; //ShowLog("\t Panda CurTime:%f", curTime); return curTime; #else //newtime = localtime( &long_time2 ); double curTime = GetTickCount(); //ShowLog("\t Panda CurTime:%f", curTime); curTime /=1000.0; return curTime; #endif } void __checkCudaErrors(cudaError err, const char *file, const int line ) { if(cudaSuccess != err) { fprintf(stderr, "[PandaError][%s][%i]: CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) ); exit((int)err); } } __global__ void printData(gpu_context d_g_state ){ //printf("-----------printData TID:%d\n",TID); int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x); int block_start_idx = num_records_per_thread*blockIdx.x*blockDim.x; int thread_start_row_idx = block_start_idx + (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE + (threadIdx.x%STRIDE); int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE; if(thread_end_idx>d_g_state.num_input_record) thread_end_idx = d_g_state.num_input_record; int begin, end, val_pos, key_pos; char *val_p,*key_p; for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){ begin=0; end=0; for (int i=0;i<map_task_idx;i++){ // begin += (d_g_state.d_intermediate_keyval_arr_arr[i].arr_len); }//for //end = begin + (d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr_len); //printf("copyData:%d begin:%d, end:%d\n",TID,begin,end); for(int i=begin;i<end;i++){ //keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]); 
val_pos = d_g_state.d_intermediate_keyval_pos_arr[i].valPos; key_pos = d_g_state.d_intermediate_keyval_pos_arr[i].keyPos; val_p = (char*)(d_g_state.d_intermediate_vals_shared_buff)+val_pos; key_p = (char*)(d_g_state.d_intermediate_keys_shared_buff)+key_pos; //keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr[i-begin]); //memcpy(key_p,p2->key,p2->keySize); //memcpy(val_p,p2->val,p2->valSize); printf("printData: TID:%d key: %s val:%d\n",TID,key_p,*(int *)val_p); }//for //if (index*recordsPerTask >= recordNum) return; }//for }//printData #ifdef DEV_MODE __global__ void printData2(gpu_context d_g_state ){ //printf("-----------printData TID:%d\n",TID); //if(TID>=d_g_state.num_input_record)return; //printf("printData2------------------------------%d\n",d_g_state.d_intermediate_keyval_arr_arr[TID].arr_len); if (TID==0){ int keyPos = (d_g_state.d_input_keyval_pos_arr[0]).keyPos; int valPos = (d_g_state.d_input_keyval_pos_arr[0]).valPos; char *keyBuf = (char *)(d_g_state.d_input_keys_shared_buff)+keyPos; MM_KEY_T *key = (MM_KEY_T*)keyBuf; printf("Key2 =====================:%d\n",key->test); for (int i=0;i<10;i++) printf("%f ",key->matrix1[i]); printf("\n"); for (int i=0;i<10;i++) printf("%f ",key->matrix2[i]); printf("\n"); for (int i=0;i<10;i++) printf("%f ",key->matrix3[i]); printf("\n"); } //keyval_t * p1 = &(d_g_state.d_input_keyval_arr[TID]); //int len = p1->valSize -1; //((char *)(p1->val))[len] = '\0'; //printf("printData TID:%d keySize:%d key %d val:%s\n",TID,p1->keySize, *(int*)(p1->key), p1->val); }//printData #endif __global__ void printData3(gpu_context d_g_state){ ShowWarn("))))))))))))))))))))))))))))))"); __syncthreads(); ShowWarn("sorted_key_arr_len:%d d_sorted_keyvals_arr_len:%d", d_g_state.d_reduced_keyval_arr_len,d_g_state.d_sorted_keyvals_arr_len); __syncthreads(); ShowWarn(" +++++++++>d_pos_arr_4_sorted_keyval_pos_arr[0]:%d", d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[0]); __syncthreads(); }//printData //-------------------------------------------------------- //start_row_id a timer // //param : start_row_id_tv //-------------------------------------------------------- /* void start_row_idTimer(TimeVal_t *start_row_id_tv) { //gettimeofday((struct timeval*)start_row_id_tv, NULL); } */ //-------------------------------------------------------- //end a timer, and print out a message // //param : msg message to print out //param : start_row_id_tv //-------------------------------------------------------- /* void endTimer(char *msg, TimeVal_t *start_row_id_tv) { cudaThreadSynchronize(); struct timeval end_tv; gettimeofday(&end_tv, NULL); time_t sec = end_tv.tv_sec - start_row_id_tv->tv_sec; time_t ms = end_tv.tv_usec - start_row_id_tv->tv_usec; time_t diff = sec * 1000000 + ms; //printf("%10s:\t\t%fms\n", msg, (double)((double)diff/1000.0)); }//void */ //---------------------------------------------------------- //print output records // //param: spec //param: num -- maximum number of output records to print //param: printFunc -- a function pointer // void printFunc(void* key, void* val, int keySize, int valSize) //---------------------------------------------------------- void PrintOutputRecords(Spec_t* spec, int num, PrintFunc_t printFunc) { /* int maxNum = num; if (maxNum > spec->outputRecordCount || maxNum < 0) maxNum = spec->outputRecordCount; for (int i = 0; i < maxNum; ++i) { int4 index = spec->outputOffsetSizes[i]; printFunc((char*)spec->outputKeys + index.x, (char*)spec->outputVals + index.z, index.y, index.w); } */ }//void #endif 
//__PANDAUTILS_CU__
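Two notes on the PandaUtils pair above: the three-entry arch_cores_sm table only distinguishes compute capabilities up to 2.x, so every newer device falls into the last bucket (32 cores per SM), and PandaTimer measures host wall-clock time around launches that are asynchronous on the GPU. A common alternative for timing device work is CUDA events; the minimal sketch below is only an illustration of that approach (busy_kernel is a placeholder, not part of Panda).

#include <cstdio>
#include <cuda_runtime.h>

__global__ void busy_kernel(float* x, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main() {
  const int n = 1 << 20;
  float* d_x;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMemset(d_x, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);                          // enqueue start marker
  busy_kernel<<<(n + 255) / 256, 256>>>(d_x, n);
  cudaEventRecord(stop);                           // enqueue stop marker
  cudaEventSynchronize(stop);                      // wait until stop is reached

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);          // elapsed GPU time in ms
  printf("kernel took %f ms\n", ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  return 0;
}

Because the events are recorded in the same stream as the kernel, the measured interval covers only the device work between them, independent of when the host happens to synchronize.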
bd9744642270358b27d0269812f7a3e6ff3aa4fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <chrono> #include "nbody.h" #include "../configuration.h" #include "../rendering.h" namespace nbody { // Host side pointer. AllocatorHandle<AllocatorT>* allocator_handle; __device__ AllocatorT* device_allocator; // Helper variable for checksum computation. __device__ double device_checksum; // Helper variables for drawing. __device__ int draw_counter = 0; __device__ float Body_pos_x[kNumBodies]; __device__ float Body_pos_y[kNumBodies]; __device__ float Body_mass[kNumBodies]; __DEV__ Body::Body(float pos_x, float pos_y, float vel_x, float vel_y, float mass) : pos_x_(pos_x), pos_y_(pos_y), vel_x_(vel_x), vel_y_(vel_y), mass_(mass) {} __DEV__ void Body::compute_force() { force_x_ = 0.0f; force_y_ = 0.0f; device_allocator->template device_do<Body>(&Body::apply_force, this); } __DEV__ void Body::apply_force(Body* other) { // Update `other`. if (other != this) { float dx = pos_x_ - other->pos_x_; float dy = pos_y_ - other->pos_y_; float dist = sqrt(dx*dx + dy*dy); float F = kGravityConstant * mass_ * other->mass_ / (dist * dist + kDampeningFactor); other->force_x_ += F*dx / dist; other->force_y_ += F*dy / dist; } } __DEV__ void Body::update() { vel_x_ += force_x_*kDt / mass_; vel_y_ += force_y_*kDt / mass_; pos_x_ += vel_x_*kDt; pos_y_ += vel_y_*kDt; if (pos_x_ < -1 || pos_x_ > 1) { vel_x_ = -vel_x_; } if (pos_y_ < -1 || pos_y_ > 1) { vel_y_ = -vel_y_; } } __DEV__ void Body::add_checksum() { device_checksum += pos_x_ + pos_y_*2 + vel_x_*3 + vel_y_*4; } __DEV__ void Body::add_to_draw_array() { int idx = atomicAdd(&draw_counter, 1); Body_pos_x[idx] = pos_x_; Body_pos_y[idx] = pos_y_; Body_mass[idx] = mass_; } __global__ void kernel_compute_checksum() { device_checksum = 0.0f; device_allocator->device_do<Body>(&Body::add_checksum); } __global__ void kernel_initialize_bodies() { int tid = threadIdx.x + blockDim.x * blockIdx.x; hiprandState_t rand_state; hiprand_init(kSeed, tid, 0, &rand_state); for (int i = tid; i < kNumBodies; i += blockDim.x * gridDim.x) { new(device_allocator) Body( /*pos_x=*/ 2 * hiprand_uniform(&rand_state) - 1, /*pos_y=*/ 2 * hiprand_uniform(&rand_state) - 1, /*vel_x=*/ (hiprand_uniform(&rand_state) - 0.5) / 1000, /*vel_y=*/ (hiprand_uniform(&rand_state) - 0.5) / 1000, /*mass=*/ (hiprand_uniform(&rand_state)/2 + 0.5) * kMaxMass); } } __global__ void kernel_reset_draw_counters() { draw_counter = 0; } int main(int /*argc*/, char** /*argv*/) { if (kOptionRender) { init_renderer(); } // Host-side variables for rendering. float host_Body_pos_x[kNumBodies]; float host_Body_pos_y[kNumBodies]; float host_Body_mass[kNumBodies]; // Create new allocator. 
allocator_handle = new AllocatorHandle<AllocatorT>(); AllocatorT* dev_ptr = allocator_handle->device_pointer(); hipMemcpyToSymbol(device_allocator, &dev_ptr, sizeof(AllocatorT*), 0, hipMemcpyHostToDevice); auto time_start = std::chrono::system_clock::now(); hipLaunchKernelGGL(( kernel_initialize_bodies), dim3(128), dim3(128), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); for (int i = 0; i < kNumIterations; ++i) { allocator_handle->parallel_do<Body, &Body::compute_force>(); allocator_handle->parallel_do<Body, &Body::update>(); if (kOptionRender) { hipLaunchKernelGGL(( kernel_reset_draw_counters), dim3(1), dim3(1), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); allocator_handle->parallel_do<Body, &Body::add_to_draw_array>(); gpuErrchk(hipDeviceSynchronize()); hipMemcpyFromSymbol(host_Body_pos_x, Body_pos_x, sizeof(float)*kNumBodies, 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(host_Body_pos_y, Body_pos_y, sizeof(float)*kNumBodies, 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol(host_Body_mass, Body_mass, sizeof(float)*kNumBodies, 0, hipMemcpyDeviceToHost); draw(host_Body_pos_x, host_Body_pos_y, host_Body_mass); } } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed) .count(); printf("%lu,%lu\n", millis, allocator_handle->DBG_get_enumeration_time()); #ifndef NDEBUG hipLaunchKernelGGL(( kernel_compute_checksum), dim3(1), dim3(1), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); double checksum; hipMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0, hipMemcpyDeviceToHost); printf("Checksum: %f\n", checksum); #endif // NDEBUG if (kOptionRender) { close_renderer(); } return 0; } } int main(int argc, char** argv) { return nbody::main(argc, argv); }
bd9744642270358b27d0269812f7a3e6ff3aa4fc.cu
#include <chrono> #include "nbody.h" #include "../configuration.h" #include "../rendering.h" namespace nbody { // Host side pointer. AllocatorHandle<AllocatorT>* allocator_handle; __device__ AllocatorT* device_allocator; // Helper variable for checksum computation. __device__ double device_checksum; // Helper variables for drawing. __device__ int draw_counter = 0; __device__ float Body_pos_x[kNumBodies]; __device__ float Body_pos_y[kNumBodies]; __device__ float Body_mass[kNumBodies]; __DEV__ Body::Body(float pos_x, float pos_y, float vel_x, float vel_y, float mass) : pos_x_(pos_x), pos_y_(pos_y), vel_x_(vel_x), vel_y_(vel_y), mass_(mass) {} __DEV__ void Body::compute_force() { force_x_ = 0.0f; force_y_ = 0.0f; device_allocator->template device_do<Body>(&Body::apply_force, this); } __DEV__ void Body::apply_force(Body* other) { // Update `other`. if (other != this) { float dx = pos_x_ - other->pos_x_; float dy = pos_y_ - other->pos_y_; float dist = sqrt(dx*dx + dy*dy); float F = kGravityConstant * mass_ * other->mass_ / (dist * dist + kDampeningFactor); other->force_x_ += F*dx / dist; other->force_y_ += F*dy / dist; } } __DEV__ void Body::update() { vel_x_ += force_x_*kDt / mass_; vel_y_ += force_y_*kDt / mass_; pos_x_ += vel_x_*kDt; pos_y_ += vel_y_*kDt; if (pos_x_ < -1 || pos_x_ > 1) { vel_x_ = -vel_x_; } if (pos_y_ < -1 || pos_y_ > 1) { vel_y_ = -vel_y_; } } __DEV__ void Body::add_checksum() { device_checksum += pos_x_ + pos_y_*2 + vel_x_*3 + vel_y_*4; } __DEV__ void Body::add_to_draw_array() { int idx = atomicAdd(&draw_counter, 1); Body_pos_x[idx] = pos_x_; Body_pos_y[idx] = pos_y_; Body_mass[idx] = mass_; } __global__ void kernel_compute_checksum() { device_checksum = 0.0f; device_allocator->device_do<Body>(&Body::add_checksum); } __global__ void kernel_initialize_bodies() { int tid = threadIdx.x + blockDim.x * blockIdx.x; curandState rand_state; curand_init(kSeed, tid, 0, &rand_state); for (int i = tid; i < kNumBodies; i += blockDim.x * gridDim.x) { new(device_allocator) Body( /*pos_x=*/ 2 * curand_uniform(&rand_state) - 1, /*pos_y=*/ 2 * curand_uniform(&rand_state) - 1, /*vel_x=*/ (curand_uniform(&rand_state) - 0.5) / 1000, /*vel_y=*/ (curand_uniform(&rand_state) - 0.5) / 1000, /*mass=*/ (curand_uniform(&rand_state)/2 + 0.5) * kMaxMass); } } __global__ void kernel_reset_draw_counters() { draw_counter = 0; } int main(int /*argc*/, char** /*argv*/) { if (kOptionRender) { init_renderer(); } // Host-side variables for rendering. float host_Body_pos_x[kNumBodies]; float host_Body_pos_y[kNumBodies]; float host_Body_mass[kNumBodies]; // Create new allocator. 
allocator_handle = new AllocatorHandle<AllocatorT>(); AllocatorT* dev_ptr = allocator_handle->device_pointer(); cudaMemcpyToSymbol(device_allocator, &dev_ptr, sizeof(AllocatorT*), 0, cudaMemcpyHostToDevice); auto time_start = std::chrono::system_clock::now(); kernel_initialize_bodies<<<128, 128>>>(); gpuErrchk(cudaDeviceSynchronize()); for (int i = 0; i < kNumIterations; ++i) { allocator_handle->parallel_do<Body, &Body::compute_force>(); allocator_handle->parallel_do<Body, &Body::update>(); if (kOptionRender) { kernel_reset_draw_counters<<<1, 1>>>(); gpuErrchk(cudaDeviceSynchronize()); allocator_handle->parallel_do<Body, &Body::add_to_draw_array>(); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpyFromSymbol(host_Body_pos_x, Body_pos_x, sizeof(float)*kNumBodies, 0, cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(host_Body_pos_y, Body_pos_y, sizeof(float)*kNumBodies, 0, cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol(host_Body_mass, Body_mass, sizeof(float)*kNumBodies, 0, cudaMemcpyDeviceToHost); draw(host_Body_pos_x, host_Body_pos_y, host_Body_mass); } } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto millis = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed) .count(); printf("%lu,%lu\n", millis, allocator_handle->DBG_get_enumeration_time()); #ifndef NDEBUG kernel_compute_checksum<<<1, 1>>>(); gpuErrchk(cudaDeviceSynchronize()); double checksum; cudaMemcpyFromSymbol(&checksum, device_checksum, sizeof(device_checksum), 0, cudaMemcpyDeviceToHost); printf("Checksum: %f\n", checksum); #endif // NDEBUG if (kOptionRender) { close_renderer(); } return 0; } } int main(int argc, char** argv) { return nbody::main(argc, argv); }
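kernel_initialize_bodies above uses the per-thread cuRAND device API: one curandState per thread, seeded with a shared seed and the thread id as the subsequence, then reused across a grid-stride loop. The following minimal sketch isolates that initialization pattern on plain arrays instead of the DynaSOAr-style allocator; init_positions and the toy launch configuration are assumptions for this example only.

#include <cstdio>
#include <cuda_runtime.h>
#include <curand_kernel.h>

// One RNG state per thread, seeded with (seed, tid); each thread fills the
// bodies it owns in a grid-stride loop, as in kernel_initialize_bodies above.
__global__ void init_positions(float* pos_x, float* pos_y, int n,
                               unsigned long long seed) {
  int tid = threadIdx.x + blockDim.x * blockIdx.x;
  curandState rand_state;
  curand_init(seed, tid, 0, &rand_state);
  for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
    pos_x[i] = 2.0f * curand_uniform(&rand_state) - 1.0f;  // roughly uniform in (-1, 1]
    pos_y[i] = 2.0f * curand_uniform(&rand_state) - 1.0f;
  }
}

int main() {
  const int n = 8;
  float *d_x, *d_y;
  cudaMalloc(&d_x, n * sizeof(float));
  cudaMalloc(&d_y, n * sizeof(float));
  init_positions<<<2, 4>>>(d_x, d_y, n, 42ull);
  float h_x[n], h_y[n];
  cudaMemcpy(h_x, d_x, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(h_y, d_y, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) printf("body %d: (%f, %f)\n", i, h_x[i], h_y[i]);
  cudaFree(d_x); cudaFree(d_y);
  return 0;
}

Using the thread id as the subsequence keeps the streams of different threads statistically independent even though they share one seed.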
ad30839fa0be1201dcf52355df261257ecce0ef2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "vadd.hh" #include <iostream> #include "../app/mode.hh" #include "../app/timer.hh" namespace gpu { namespace { constexpr std::size_t BLOCK_SIZE = 512; __global__ void sum0(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i]; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE && i + s < len; s *= 2) { if (i % (2 * s) == 0) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum1(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i]; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE; s *= 2) { std::size_t index = 2 * s * i; if (index + s < len) partial[index] += partial[index + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum2(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i]; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 0; s >>= 1) { if (i + s < len) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum3(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i] + x[i + BLOCK_SIZE]; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 0; s >>= 1) { if (i + s < len) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum4(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; //partial[i] = x[i] + x[i + BLOCK_SIZE]; partial[i] = i < len ? x[i] : 0; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1) { if (i < s) partial[i] += partial[i + s]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? volatile float* vpartial = partial; if (i < 32) { vpartial[i] += vpartial[i + 32]; vpartial[i] += vpartial[i + 16]; vpartial[i] += vpartial[i + 8]; vpartial[i] += vpartial[i + 4]; vpartial[i] += vpartial[i + 2]; vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } template <std::size_t BlockSize> __global__ void sum5(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; //partial[i] = x[i] + x[i + BLOCK_SIZE]; partial[i] = i < len ? x[i] : 0; __syncthreads(); if (BlockSize >= 512) { if (i < 256) partial[i] += partial[i + 256]; __syncthreads(); } if (BlockSize >= 256) { if (i < 128) partial[i] += partial[i + 128]; __syncthreads(); } if (BlockSize >= 128) { if (i < 64) partial[i] += partial[i + 64]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? 
volatile float* vpartial = partial; if (i < 32) { if (BlockSize >= 64) vpartial[i] += vpartial[i + 32]; if (BlockSize >= 32) vpartial[i] += vpartial[i + 16]; if (BlockSize >= 16) vpartial[i] += vpartial[i + 8]; if (BlockSize >= 8) vpartial[i] += vpartial[i + 4]; if (BlockSize >= 4) vpartial[i] += vpartial[i + 2]; if (BlockSize >= 2) vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } __global__ void full_sum0(const float* x, float* y, std::size_t len) //8ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE && i + s < len; s *= 2) { if (i % (2 * s) == 0) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void full_sum1(const float* x, float* y, std::size_t len) //11ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE; s *= 2) { std::size_t index = 2 * s * i; if (index + s < len) partial[index] += partial[index + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void full_sum2(const float* x, float* y, std::size_t len) //11ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 0; s >>= 1) { if (i + s < len) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void full_sum4(const float* x, float* y, std::size_t len) //8ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1) { if (i < s) partial[i] += partial[i + s]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? volatile float* vpartial = partial; if (i < 32) { vpartial[i] += vpartial[i + 32]; vpartial[i] += vpartial[i + 16]; vpartial[i] += vpartial[i + 8]; vpartial[i] += vpartial[i + 4]; vpartial[i] += vpartial[i + 2]; vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } template <std::size_t BlockSize> __global__ void full_sum5(const float* x, float* y, std::size_t len) // { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); if (BlockSize >= 512) { if (i < 256) partial[i] += partial[i + 256]; __syncthreads(); } if (BlockSize >= 256) { if (i < 128) partial[i] += partial[i + 128]; __syncthreads(); } if (BlockSize >= 128) { if (i < 64) partial[i] += partial[i + 64]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? 
volatile float* vpartial = partial; if (i < 32) { if (BlockSize >= 64) vpartial[i] += vpartial[i + 32]; if (BlockSize >= 32) vpartial[i] += vpartial[i + 16]; if (BlockSize >= 16) vpartial[i] += vpartial[i + 8]; if (BlockSize >= 8) vpartial[i] += vpartial[i + 4]; if (BlockSize >= 4) vpartial[i] += vpartial[i + 2]; if (BlockSize >= 2) vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } } void op_sum(const float* a, float* out, std::size_t len) { auto start = timer::now(); hipLaunchKernelGGL(( full_sum4), dim3(1), dim3(BLOCK_SIZE), 0, 0, a, out, len); //full_sum5<BLOCK_SIZE><<<1, BLOCK_SIZE>>>(a, out, len); hipDeviceSynchronize(); auto time = timer::now() - start; logs << "[GPU_SUM]: " << time << "ms.\n"; } }
ad30839fa0be1201dcf52355df261257ecce0ef2.cu
#include "vadd.hh" #include <iostream> #include "../app/mode.hh" #include "../app/timer.hh" namespace gpu { namespace { constexpr std::size_t BLOCK_SIZE = 512; __global__ void sum0(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i]; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE && i + s < len; s *= 2) { if (i % (2 * s) == 0) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum1(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i]; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE; s *= 2) { std::size_t index = 2 * s * i; if (index + s < len) partial[index] += partial[index + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum2(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i]; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 0; s >>= 1) { if (i + s < len) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum3(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; if (i >= len) return; partial[i] = x[i] + x[i + BLOCK_SIZE]; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 0; s >>= 1) { if (i + s < len) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void sum4(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; //partial[i] = x[i] + x[i + BLOCK_SIZE]; partial[i] = i < len ? x[i] : 0; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1) { if (i < s) partial[i] += partial[i + s]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? volatile float* vpartial = partial; if (i < 32) { vpartial[i] += vpartial[i + 32]; vpartial[i] += vpartial[i + 16]; vpartial[i] += vpartial[i + 8]; vpartial[i] += vpartial[i + 4]; vpartial[i] += vpartial[i + 2]; vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } template <std::size_t BlockSize> __global__ void sum5(const float* x, float* y, std::size_t len) { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; //partial[i] = x[i] + x[i + BLOCK_SIZE]; partial[i] = i < len ? x[i] : 0; __syncthreads(); if (BlockSize >= 512) { if (i < 256) partial[i] += partial[i + 256]; __syncthreads(); } if (BlockSize >= 256) { if (i < 128) partial[i] += partial[i + 128]; __syncthreads(); } if (BlockSize >= 128) { if (i < 64) partial[i] += partial[i + 64]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? 
volatile float* vpartial = partial; if (i < 32) { if (BlockSize >= 64) vpartial[i] += vpartial[i + 32]; if (BlockSize >= 32) vpartial[i] += vpartial[i + 16]; if (BlockSize >= 16) vpartial[i] += vpartial[i + 8]; if (BlockSize >= 8) vpartial[i] += vpartial[i + 4]; if (BlockSize >= 4) vpartial[i] += vpartial[i + 2]; if (BlockSize >= 2) vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } __global__ void full_sum0(const float* x, float* y, std::size_t len) //8ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE && i + s < len; s *= 2) { if (i % (2 * s) == 0) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void full_sum1(const float* x, float* y, std::size_t len) //11ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = 1; s < BLOCK_SIZE; s *= 2) { std::size_t index = 2 * s * i; if (index + s < len) partial[index] += partial[index + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void full_sum2(const float* x, float* y, std::size_t len) //11ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 0; s >>= 1) { if (i + s < len) partial[i] += partial[i + s]; __syncthreads(); } if (i == 0) y[i] = partial[0]; } __global__ void full_sum4(const float* x, float* y, std::size_t len) //8ms { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); for (std::size_t s = BLOCK_SIZE / 2; s > 32; s >>= 1) { if (i < s) partial[i] += partial[i + s]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? volatile float* vpartial = partial; if (i < 32) { vpartial[i] += vpartial[i + 32]; vpartial[i] += vpartial[i + 16]; vpartial[i] += vpartial[i + 8]; vpartial[i] += vpartial[i + 4]; vpartial[i] += vpartial[i + 2]; vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } template <std::size_t BlockSize> __global__ void full_sum5(const float* x, float* y, std::size_t len) // { __shared__ float partial[2 * BLOCK_SIZE]; //load all elements of the array in shared memory auto i = threadIdx.x; std::size_t step = BLOCK_SIZE; float init = 0; for (std::size_t j = i; j < len; j += step) init += x[j]; partial[i] = init; __syncthreads(); if (BlockSize >= 512) { if (i < 256) partial[i] += partial[i + 256]; __syncthreads(); } if (BlockSize >= 256) { if (i < 128) partial[i] += partial[i + 128]; __syncthreads(); } if (BlockSize >= 128) { if (i < 64) partial[i] += partial[i + 64]; __syncthreads(); } //if not volatile, must use __synctthreads again, why ? 
volatile float* vpartial = partial; if (i < 32) { if (BlockSize >= 64) vpartial[i] += vpartial[i + 32]; if (BlockSize >= 32) vpartial[i] += vpartial[i + 16]; if (BlockSize >= 16) vpartial[i] += vpartial[i + 8]; if (BlockSize >= 8) vpartial[i] += vpartial[i + 4]; if (BlockSize >= 4) vpartial[i] += vpartial[i + 2]; if (BlockSize >= 2) vpartial[i] += vpartial[i + 1]; } if (i == 0) y[i] = partial[0]; } } void op_sum(const float* a, float* out, std::size_t len) { auto start = timer::now(); full_sum4<<<1, BLOCK_SIZE>>>(a, out, len); //full_sum5<BLOCK_SIZE><<<1, BLOCK_SIZE>>>(a, out, len); cudaDeviceSynchronize(); auto time = timer::now() - start; logs << "[GPU_SUM]: " << time << "ms.\n"; } }
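The sum4/sum5 and full_sum* kernels above finish with the classic volatile last-warp unrolling; the recurring "why volatile?" comment comes down to the compiler otherwise being free to keep partial[] values in registers, so lanes could read stale data, and on Volta and newer GPUs the warp is additionally not guaranteed to execute in lock-step without explicit synchronization. A hedged alternative sketch, not one of the kernels above: finish the reduction with warp shuffles, which need neither shared memory nor volatile for the last 32 lanes. Like the full_sum* variants, it runs a single block that strides over the whole array and leaves the result in y[0].

#include <cstddef>
#include <cstdio>
#include <cuda_runtime.h>

constexpr int BLOCK = 256;

__global__ void block_sum(const float* x, float* y, std::size_t len) {
  __shared__ float warp_partial[BLOCK / 32];     // one partial sum per warp
  float v = 0.0f;
  for (std::size_t j = threadIdx.x; j < len; j += BLOCK)
    v += x[j];                                   // grid-stride load, one block

  // Reduce inside each warp with register shuffles (all 32 lanes participate).
  for (int offset = 16; offset > 0; offset >>= 1)
    v += __shfl_down_sync(0xffffffffu, v, offset);
  if ((threadIdx.x & 31) == 0)
    warp_partial[threadIdx.x >> 5] = v;          // lane 0 stores its warp's sum
  __syncthreads();

  // First warp reduces the BLOCK/32 per-warp partials.
  if (threadIdx.x < 32) {
    v = threadIdx.x < BLOCK / 32 ? warp_partial[threadIdx.x] : 0.0f;
    for (int offset = 16; offset > 0; offset >>= 1)
      v += __shfl_down_sync(0xffffffffu, v, offset);
    if (threadIdx.x == 0) y[0] = v;
  }
}

int main() {
  const std::size_t len = 1 << 20;               // sum of 2^20 ones == 1048576
  float* h = new float[len];
  for (std::size_t i = 0; i < len; ++i) h[i] = 1.0f;
  float *d_x, *d_y;
  cudaMalloc(&d_x, len * sizeof(float));
  cudaMalloc(&d_y, sizeof(float));
  cudaMemcpy(d_x, h, len * sizeof(float), cudaMemcpyHostToDevice);
  block_sum<<<1, BLOCK>>>(d_x, d_y, len);
  float result = 0.0f;
  cudaMemcpy(&result, d_y, sizeof(float), cudaMemcpyDeviceToHost);
  printf("sum = %f (expected %zu)\n", result, len);
  delete[] h;
  cudaFree(d_x); cudaFree(d_y);
  return 0;
}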
2fa656b723c949177ef41367d18ed1f8c1c593b5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // X = H , y = W #include<iostream> #include<stdio.h> #include<cuda.h> #include<ctime> #include<cstdlib> #include<hip/hip_runtime_api.h> using namespace std; #define TILE_WIDTH 32 // tile witdt // serially intializing tensor of the image void tensor_init(int * image, int N, int H, int W, int C){ /* Initialise the tensor for the convolution operation. Runs on the CPU N : Batch Size of the image H : Height of the image W : Width of the image C : channels for the kernels */ srand(time(0)); int tot = N*H*W*C; for(int i = 0; i< tot;i++){ image[i] = rand()%256; //random initializing of the image tensor// for simulating it as an image } } //serially intialising the kernel with given dimensions void kernel_init(int *krnl, int d, int h, int w,int c){ /* Initialise the kernel(s) for the convolution operation. Runs on the CPU d : Number of kernel h : Height of the kernel w : Width of the kernel c : channels for the kernels */ int tot = d*h*w*c; for(int i = 0; i< tot;i++){ if(i%2 ==0){ krnl[i] = rand()%10; } else{ krnl[i] = -rand()%10; //random initializing of the image tensor // cout<<krnl[i]<<endl; } } } // intialising the mask for checking sparsity of the block void mask_init(int *mask,int N,int H,int W,int sparsity_perc){ /* Initialise the tensor for the convolution operation. Runs on the CPU N : Batch Size of the image H : Height of the image W : Width of the image */ int tot = N*H*W; for(int i = 0; i< tot;i++){ if(rand()%100<=sparsity_perc){ mask[i] = 0; } else{ mask[i] = 1; } //random initializing of the image tensor // cout<<mask[i]<<endl; } } // ************************ device kernels **************** to be optimizzed *************************************** __device__ bool checksparse(int *d_mask,int cx,int cy,int H, int W, int C,int h,int w,int S,int n){// may be i can have some more conditions /* device function to check for sparsity (device int *) d_mask : pointer to the mask of the image (int) n: number of the image (int) h: height of the kernels (int) w: Weight of the kernels (int) c_x: x coordinate of the center (int) c_y: y coordinate of the center */ int x = 0; int y = 0; for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){ for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){ x = cx + l; y = cy + p; if( d_mask[n*H*W + W*y + x ] == 1 ){ return false; } } } return true; } __global__ void gather(int *d_mask, int *d_tensor, int *d_mat,unsigned int *row_address, int * d_row_map, int N , int H , int W , int h, int w, int C , int S ){ /* Gather kernel from the paper to check for sparse and non sparse parts of image for convolution (device int *) d_mask : pointer to the mask of the image (device int *) d_tensor : pointer to the tensor containing the all the images (device int *) d_mat : pointer with memmory alloc to store every non sparse part of thhe images (device int *) row_address : pointer to single integer containing the number of non sparse part of the image (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image (int) h: height of the kernels (int) w: Weight of the kernels */ int id2 = blockIdx.x*blockDim.x + threadIdx.x; int in = blockIdx.y; int x_dim = id2%W;// along the height of the image int y_dim = id2/W;// along the length oh the image if(x_dim > 0 && x_dim/S + h < H/S){// condition considering s = 1 for now if(y_dim > 0 && y_dim/S +w < W/S){ int cen_x = x_dim + (h-1)/2; int cen_y = y_dim + (w-1)/2; // 
printf("%d,%d,%d\n",checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in),cen_x,cen_y); if(!checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in)){ unsigned int val = atomicInc(row_address,1000000); int col_index = 0; for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){ for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){ for( int q=0; q < C; q++){ d_mat[val*h*w*C+col_index] = d_mask[in*(H/S)*(W/S)+((int)((cen_x+l)/S))*(W/S)+((int)((cen_y+p)/S))]?d_tensor[in*H*W*C+(cen_x+l)*W*C+(cen_y+p)*C+q]:0; col_index += 1; } } } d_row_map[val*3+0] = x_dim; /* Store the original x-coordinate corresponding to a row into a map */ d_row_map[val*3+1] = y_dim; /* Store the original y-coordinate corresponding to a row into a map */ d_row_map[val*3+2] = in; /* Store the image corresponding to a row in a map */ // printf("%d\n",val); } } } } __global__ void convolution(int *d_mat,int *d_kernel,unsigned int number_rows ,int d,int *output_mat,int h,int w,int C){ /* The most basic implementation of the cuda kernel; (int *)d_mat : pointer to the conovoluted results for all the non scarse part of the original image (int *)d_kernel : kernel for the coonvoltion(d kernels) (int *)output_mat : pointer for finally storing the output of the matrix (unsigned int): int containing the number of non sparse convolution block (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image (int) h: height of the kernels (int) w: Weight of the kernels (int) d : number of kernels */ int t_idy = blockDim.x*blockIdx.x + threadIdx.x;// for the number of the element being changed int t_idx = blockDim.y*blockIdx.y + threadIdx.y;// for the number of kernels output_mat[t_idx*d + t_idy] = 0; int offset = h*w*C; if(t_idx < number_rows && t_idy < d){ // now the convolution part for(int i = 0; i < h*w*C; i++ ){ output_mat[t_idx*d + t_idy] += d_kernel[t_idy*h*w + i]*d_mat[offset*t_idx + i]; // printf("%d,%d,\n",d_kernel[t_idy*d +i],d_mat[offset*t_idx + i]); } // printf("%d,%d,%d\n",t_idx,t_idy,output_mat[t_idx*d + t_idy]); } } //////// another convolution function ////////// ------------------ using shared memory __global__ void convolution_opti(int *d_mat,int *d_kernel,unsigned int *number_rows,int d,int *output_mat,int h,int w,int C){ /* Kernel to perform matrix multiplication. We utilize shared memory and tiling to perform the matrix multiplication. */ int tx = threadIdx.x; /*Thread-ID in the x-direction */ int ty = threadIdx.y; /*Thread-ID in the y-direction */ __shared__ int image_mat_s[TILE_WIDTH][TILE_WIDTH]; /* Shared memory to be used by threads in a block */ __shared__ int kernel_mat_s[TILE_WIDTH][TILE_WIDTH]; /// Since we have multiple kernels to taken into account at the ame time int row = blockIdx.y*blockDim.y + ty; /* row in the output matrix */ int col = blockIdx.x*blockDim.x + tx; /* column in the output matrix */ int pSum = 0; for(int m=0;m<(w*h*C+TILE_WIDTH-1)/TILE_WIDTH;m++){ if(row<*number_rows && (m*TILE_WIDTH+tx)<(w*h*C) ) image_mat_s[ty][tx] = d_mat[row*w*h*C+m*TILE_WIDTH+tx]; if( (ty+m*TILE_WIDTH)<(w*h*C) && col<d ) kernel_mat_s[ty][tx] = d_kernel[(ty+m*TILE_WIDTH)*d+col]; /* This is assuming that the tile is a sqaure */ __syncthreads(); for(int i=0;i<TILE_WIDTH;i++){ pSum += image_mat_s[ty][i]*kernel_mat_s[i][tx]; } __syncthreads(); image_mat_s[ty][tx] = 0; /* Setting the elements in the shared memory back to 0. 
This takes care of the corner cases where junk values are stored */ kernel_mat_s[ty][tx] = 0; } if(row<*number_rows && col<d) output_mat[row*d+col] = pSum; /* Load the result into the output matrix */ } __global__ void scatter(int *output_mat, int *d_row_map, unsigned int *number_rows, int *output,int H,int W,int d,int h,int w){ /* Putting the peices back together in the final image(restoring the final output part of the kernel (int *)output_mat : pointer to the conovoluted results for all the non scarse part of the original image (int *)d_row_map : pointer to the center positions non sparse part of the image (int *)output : pointer to the final image after convolutions (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image (int) h: height of the kernels (int) w: Weight of the kernels (int) d : number of kernels */ int image_size = (H - h + 1)*(W-w+1); // image size after the convolution happens int t_idy = blockIdx.x*blockDim.x + threadIdx.x;// The number of convs in the output matrux int t_idx = blockDim.y*blockIdx.y + threadIdx.y;// The number of output kernels // printf("%d,%d,%d \n",t_idx,t_idy, 0); if(t_idx<*number_rows && t_idy <d){ int c_x = d_row_map[t_idx*3] - (h-1)/2; // convert the center to convoluted positions int c_y = d_row_map[t_idx*3 + 1] - (w-1)/2; int N = d_row_map[t_idx*3 + 2]; output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x] = output_mat[t_idx*d + t_idy ]; //printf("%d,%d,%d\n",output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x],output_mat[t_idx*d + t_idy ],N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x); } } int main(){ // taking input of the image(tensor) dimnsions int BLOCK_SIZE = 32; int N,H,W,C; /* (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image */ cout<<"Gimme Image Block Dimensions"<<endl; N = 4; H = 256; W = 256; C = 3; int *tensor = (int *)malloc(N*H*W*C*sizeof(int)); tensor_init(tensor,N,H,W,C); int h,w,d; /* (int) h: height of the kernels (int) w: Weight of the kernels (int) d : number of kernels */ int c = C; cout<<"Gimme krnl Block Dimension"<<endl; d = 1; h = 4; w = 4; int *kernel = (int *)malloc(sizeof(int)*h*w*c*d); kernel_init(kernel,d,h,w,C); // space for d kernels int per_sp; cout<<"Gimme Percent Sparcity of the block"<<endl; per_sp =70; int S = 1;// assuming the mask dimension to be 1 for now int *mask = (int * )malloc(sizeof(int)*N*H*W*C); mask_init(mask,N,H,W,per_sp); int num_images = 2; int n_streams = N/2; // memory allocation for tensor kernel and the mask on the device int *d_tensor; int *d_kernel; int *d_mask; hipMalloc(&d_tensor,sizeof(int)*N*H*W*C);// 4-D tensor containing images for the convolution operation hipMalloc(&d_kernel,sizeof(int)*d*h*w*c);// for the kernels to stored in the matrix hipMalloc(&d_mask,sizeof(int)*N*H*W); //mask for checking the sparsity of blocks for the kernel // memory copying to the device hipMemcpy( d_kernel, kernel, sizeof(int)*d*h*w*c, hipMemcpyHostToDevice ); hipMemcpy( d_mask, mask, sizeof(int)*N*H*W, hipMemcpyHostToDevice ); hipMemcpy( d_tensor, tensor, sizeof(int)*N*H*W*C, hipMemcpyHostToDevice ); // gatther kernel to fill on the device int * d_mat;// int * d_row_map; unsigned int *row_address; hipMalloc(&d_mat,sizeof(int)*h*w*C*(H-h+1)*(W-w+1)*N); // considering that all the parts will be in the imsge hipMalloc(&row_address,n_streams*sizeof( unsigned int)); hipMemset(&row_address, 0, 
n_streams*sizeof(unsigned int) ); hipMalloc(&d_row_map,sizeof(int)*(H-h+1)*(W-w+1)*N*3); // create streams: // it can roughly handle about 1000 images at once hipStream_t streams[1000]; /* Declaring a set of CUDA streams */ for( int i=0; i<n_streams; i++ ) hipStreamCreate(&streams[i]); /* Initializing a set of streams to work on a set of each image */ // creating memory for the intermediate kernels int * output_mat; /// for the putput of the gather kernel hipMalloc(&output_mat,sizeof(int)*(H-h+1)*(W-w+1)*d*N); // final output matrix we all know its dimensionb already int * output; hipMalloc(&output,sizeof(int)*N*(H-h+1)*(W-w+1)*d); // profiling features ----------------------- hipEvent_t start,stop; /* CUDA events to time the program */ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); // blocks and threads for diffrent kernel plaunches ----------- // for the gather kernle dim3 Block(H*W/BLOCK_SIZE,num_images,1); dim3 Thread(BLOCK_SIZE,1,1); //convolution kernel dim3 Block_c(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1); dim3 Thread_c(BLOCK_SIZE,1,1); // offset for diffrent arrays ------------- int offset; // tensor fooffset int mask_offset; // mask offset int mat_offset; // d_mat offsaet int map_offset; // d_row_map offset int o_offset; //output_mat offset int om_offset; // output offset unsigned int *number_rows = (unsigned int *)malloc(sizeof(int)*n_streams ); // Allocating memory for the output tensor int * h_output = (int *)malloc(sizeof(int)*N*(H-h+1)*(W-w+1)*d); //scatter kernel --- dim3 Block_s(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1); dim3 Thread_s(BLOCK_SIZE,1,1); //lanching the diffrent streams for(int j=0; j<n_streams; j++){ /* Initialize a set of off-sets for each stream */ offset = j*H*W*C*num_images; // tensor offset will be needed mask_offset = j*(H)*(W)*num_images; /// mask offset will be needed mat_offset = h*w*C*(H-h+1)*(W-w+1)*j*num_images;//matrix offset for the value to be in the matrix map_offset = 3*(H-h+1)*(W-w+1)*j*num_images;//offset for d_row_map o_offset = (H-h+1)*(W-w+1)*d*j*num_images;//offset for convolution output om_offset = d*(H-h+1)*(W-w+1)*j*num_images;//final output offset // now the kernels.............. 
// gether kernel hipLaunchKernelGGL(( gather), dim3(Block), dim3(Thread), 0, streams[j], &d_mask[mask_offset], &d_tensor[offset], &d_mat[mat_offset],&row_address[j], &d_row_map[map_offset],N , H , W , h, w, C , S); // hipMemcpyAsync(&number_rows[j], &row_address[j], sizeof(unsigned int), hipMemcpyDeviceToHost,streams[j]); //convolution kernel hipLaunchKernelGGL(( convolution_opti), dim3(Block_c), dim3(Thread_c),0, streams[j], &d_mat[mat_offset], d_kernel, &row_address[j], d, &output_mat[om_offset],h,w,C); // cout<<"kernel went through"<<endl; // convert the kernel back to its original form hipLaunchKernelGGL(( scatter), dim3(Block_s),dim3(Thread_s), 0 , streams[j], &output_mat[om_offset], &d_row_map[map_offset], &row_address[j], &output[o_offset],H, W, d, h, w); } hipMemcpy(h_output,output,sizeof(int)*(H-h+1)*(W-w+1)*d*N,hipMemcpyDeviceToHost); hipEventRecord(stop); hipEventSynchronize(stop); float run_time = 0.0; hipEventElapsedTime(&run_time,start,stop); cout<<run_time<<endl; // for(int k = 0;k<N;k++){ // for(int p = 0; p<d;p++){ // cout<<"image"<<" "<<k<<" "<<"kernel"<<" "<<p<<endl; // for(int i = 0; i<(H-h+1);i++){ // for(int j = 0; j<(W-w+1);j++){ // cout<<h_output[k*(H-h+1)*(W-w+1)*d + p*(H-h+1)*(W-w+1) + i*(W-w+1)+ j ]<<" "; // } // cout<<endl; // } // cout<<endl; // } // cout<<endl; // } // Destroying all the streams in rthe for( int i=0; i<n_streams; i++ ) hipStreamDestroy(streams[i]); return 0; }
2fa656b723c949177ef41367d18ed1f8c1c593b5.cu
// X = H , y = W #include<iostream> #include<stdio.h> #include<cuda.h> #include<ctime> #include<cstdlib> #include<cuda_profiler_api.h> using namespace std; #define TILE_WIDTH 32 // tile witdt // serially intializing tensor of the image void tensor_init(int * image, int N, int H, int W, int C){ /* Initialise the tensor for the convolution operation. Runs on the CPU N : Batch Size of the image H : Height of the image W : Width of the image C : channels for the kernels */ srand(time(0)); int tot = N*H*W*C; for(int i = 0; i< tot;i++){ image[i] = rand()%256; //random initializing of the image tensor// for simulating it as an image } } //serially intialising the kernel with given dimensions void kernel_init(int *krnl, int d, int h, int w,int c){ /* Initialise the kernel(s) for the convolution operation. Runs on the CPU d : Number of kernel h : Height of the kernel w : Width of the kernel c : channels for the kernels */ int tot = d*h*w*c; for(int i = 0; i< tot;i++){ if(i%2 ==0){ krnl[i] = rand()%10; } else{ krnl[i] = -rand()%10; //random initializing of the image tensor // cout<<krnl[i]<<endl; } } } // intialising the mask for checking sparsity of the block void mask_init(int *mask,int N,int H,int W,int sparsity_perc){ /* Initialise the tensor for the convolution operation. Runs on the CPU N : Batch Size of the image H : Height of the image W : Width of the image */ int tot = N*H*W; for(int i = 0; i< tot;i++){ if(rand()%100<=sparsity_perc){ mask[i] = 0; } else{ mask[i] = 1; } //random initializing of the image tensor // cout<<mask[i]<<endl; } } // ************************ device kernels **************** to be optimizzed *************************************** __device__ bool checksparse(int *d_mask,int cx,int cy,int H, int W, int C,int h,int w,int S,int n){// may be i can have some more conditions /* device function to check for sparsity (device int *) d_mask : pointer to the mask of the image (int) n: number of the image (int) h: height of the kernels (int) w: Weight of the kernels (int) c_x: x coordinate of the center (int) c_y: y coordinate of the center */ int x = 0; int y = 0; for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){ for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){ x = cx + l; y = cy + p; if( d_mask[n*H*W + W*y + x ] == 1 ){ return false; } } } return true; } __global__ void gather(int *d_mask, int *d_tensor, int *d_mat,unsigned int *row_address, int * d_row_map, int N , int H , int W , int h, int w, int C , int S ){ /* Gather kernel from the paper to check for sparse and non sparse parts of image for convolution (device int *) d_mask : pointer to the mask of the image (device int *) d_tensor : pointer to the tensor containing the all the images (device int *) d_mat : pointer with memmory alloc to store every non sparse part of thhe images (device int *) row_address : pointer to single integer containing the number of non sparse part of the image (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image (int) h: height of the kernels (int) w: Weight of the kernels */ int id2 = blockIdx.x*blockDim.x + threadIdx.x; int in = blockIdx.y; int x_dim = id2%W;// along the height of the image int y_dim = id2/W;// along the length oh the image if(x_dim > 0 && x_dim/S + h < H/S){// condition considering s = 1 for now if(y_dim > 0 && y_dim/S +w < W/S){ int cen_x = x_dim + (h-1)/2; int cen_y = y_dim + (w-1)/2; // printf("%d,%d,%d\n",checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in),cen_x,cen_y); 
if(!checksparse(d_mask,x_dim,y_dim,H,W,C,h,w,S,in)){ unsigned int val = atomicInc(row_address,1000000); int col_index = 0; for( int l=-(h-1)/2; l <= (h-1)/2; l++ ){ for( int p=-(w-1)/2; p <= (w-1)/2; p++ ){ for( int q=0; q < C; q++){ d_mat[val*h*w*C+col_index] = d_mask[in*(H/S)*(W/S)+((int)((cen_x+l)/S))*(W/S)+((int)((cen_y+p)/S))]?d_tensor[in*H*W*C+(cen_x+l)*W*C+(cen_y+p)*C+q]:0; col_index += 1; } } } d_row_map[val*3+0] = x_dim; /* Store the original x-coordinate corresponding to a row into a map */ d_row_map[val*3+1] = y_dim; /* Store the original y-coordinate corresponding to a row into a map */ d_row_map[val*3+2] = in; /* Store the image corresponding to a row in a map */ // printf("%d\n",val); } } } } __global__ void convolution(int *d_mat,int *d_kernel,unsigned int number_rows ,int d,int *output_mat,int h,int w,int C){ /* The most basic implementation of the cuda kernel; (int *)d_mat : pointer to the conovoluted results for all the non scarse part of the original image (int *)d_kernel : kernel for the coonvoltion(d kernels) (int *)output_mat : pointer for finally storing the output of the matrix (unsigned int): int containing the number of non sparse convolution block (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image (int) h: height of the kernels (int) w: Weight of the kernels (int) d : number of kernels */ int t_idy = blockDim.x*blockIdx.x + threadIdx.x;// for the number of the element being changed int t_idx = blockDim.y*blockIdx.y + threadIdx.y;// for the number of kernels output_mat[t_idx*d + t_idy] = 0; int offset = h*w*C; if(t_idx < number_rows && t_idy < d){ // now the convolution part for(int i = 0; i < h*w*C; i++ ){ output_mat[t_idx*d + t_idy] += d_kernel[t_idy*h*w + i]*d_mat[offset*t_idx + i]; // printf("%d,%d,\n",d_kernel[t_idy*d +i],d_mat[offset*t_idx + i]); } // printf("%d,%d,%d\n",t_idx,t_idy,output_mat[t_idx*d + t_idy]); } } //////// another convolution function ////////// ------------------ using shared memory __global__ void convolution_opti(int *d_mat,int *d_kernel,unsigned int *number_rows,int d,int *output_mat,int h,int w,int C){ /* Kernel to perform matrix multiplication. We utilize shared memory and tiling to perform the matrix multiplication. */ int tx = threadIdx.x; /*Thread-ID in the x-direction */ int ty = threadIdx.y; /*Thread-ID in the y-direction */ __shared__ int image_mat_s[TILE_WIDTH][TILE_WIDTH]; /* Shared memory to be used by threads in a block */ __shared__ int kernel_mat_s[TILE_WIDTH][TILE_WIDTH]; /// Since we have multiple kernels to taken into account at the ame time int row = blockIdx.y*blockDim.y + ty; /* row in the output matrix */ int col = blockIdx.x*blockDim.x + tx; /* column in the output matrix */ int pSum = 0; for(int m=0;m<(w*h*C+TILE_WIDTH-1)/TILE_WIDTH;m++){ if(row<*number_rows && (m*TILE_WIDTH+tx)<(w*h*C) ) image_mat_s[ty][tx] = d_mat[row*w*h*C+m*TILE_WIDTH+tx]; if( (ty+m*TILE_WIDTH)<(w*h*C) && col<d ) kernel_mat_s[ty][tx] = d_kernel[(ty+m*TILE_WIDTH)*d+col]; /* This is assuming that the tile is a sqaure */ __syncthreads(); for(int i=0;i<TILE_WIDTH;i++){ pSum += image_mat_s[ty][i]*kernel_mat_s[i][tx]; } __syncthreads(); image_mat_s[ty][tx] = 0; /* Setting the elements in the shared memory back to 0. 
This takes care of the corner cases where junk values are stored */ kernel_mat_s[ty][tx] = 0; } if(row<*number_rows && col<d) output_mat[row*d+col] = pSum; /* Load the result into the output matrix */ } __global__ void scatter(int *output_mat, int *d_row_map, unsigned int *number_rows, int *output,int H,int W,int d,int h,int w){ /* Putting the peices back together in the final image(restoring the final output part of the kernel (int *)output_mat : pointer to the conovoluted results for all the non scarse part of the original image (int *)d_row_map : pointer to the center positions non sparse part of the image (int *)output : pointer to the final image after convolutions (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image (int) h: height of the kernels (int) w: Weight of the kernels (int) d : number of kernels */ int image_size = (H - h + 1)*(W-w+1); // image size after the convolution happens int t_idy = blockIdx.x*blockDim.x + threadIdx.x;// The number of convs in the output matrux int t_idx = blockDim.y*blockIdx.y + threadIdx.y;// The number of output kernels // printf("%d,%d,%d \n",t_idx,t_idy, 0); if(t_idx<*number_rows && t_idy <d){ int c_x = d_row_map[t_idx*3] - (h-1)/2; // convert the center to convoluted positions int c_y = d_row_map[t_idx*3 + 1] - (w-1)/2; int N = d_row_map[t_idx*3 + 2]; output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x] = output_mat[t_idx*d + t_idy ]; //printf("%d,%d,%d\n",output[N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x],output_mat[t_idx*d + t_idy ],N*(image_size*d) + t_idy*(image_size) + W*(c_y) + c_x); } } int main(){ // taking input of the image(tensor) dimnsions int BLOCK_SIZE = 32; int N,H,W,C; /* (int) N: number of the images in the given tensor (int) H: Height of the image (int) W: Weight of the image (int) C: Channels of the image */ cout<<"Gimme Image Block Dimensions"<<endl; N = 4; H = 256; W = 256; C = 3; int *tensor = (int *)malloc(N*H*W*C*sizeof(int)); tensor_init(tensor,N,H,W,C); int h,w,d; /* (int) h: height of the kernels (int) w: Weight of the kernels (int) d : number of kernels */ int c = C; cout<<"Gimme krnl Block Dimension"<<endl; d = 1; h = 4; w = 4; int *kernel = (int *)malloc(sizeof(int)*h*w*c*d); kernel_init(kernel,d,h,w,C); // space for d kernels int per_sp; cout<<"Gimme Percent Sparcity of the block"<<endl; per_sp =70; int S = 1;// assuming the mask dimension to be 1 for now int *mask = (int * )malloc(sizeof(int)*N*H*W*C); mask_init(mask,N,H,W,per_sp); int num_images = 2; int n_streams = N/2; // memory allocation for tensor kernel and the mask on the device int *d_tensor; int *d_kernel; int *d_mask; cudaMalloc(&d_tensor,sizeof(int)*N*H*W*C);// 4-D tensor containing images for the convolution operation cudaMalloc(&d_kernel,sizeof(int)*d*h*w*c);// for the kernels to stored in the matrix cudaMalloc(&d_mask,sizeof(int)*N*H*W); //mask for checking the sparsity of blocks for the kernel // memory copying to the device cudaMemcpy( d_kernel, kernel, sizeof(int)*d*h*w*c, cudaMemcpyHostToDevice ); cudaMemcpy( d_mask, mask, sizeof(int)*N*H*W, cudaMemcpyHostToDevice ); cudaMemcpy( d_tensor, tensor, sizeof(int)*N*H*W*C, cudaMemcpyHostToDevice ); // gatther kernel to fill on the device int * d_mat;// int * d_row_map; unsigned int *row_address; cudaMalloc(&d_mat,sizeof(int)*h*w*C*(H-h+1)*(W-w+1)*N); // considering that all the parts will be in the imsge cudaMalloc(&row_address,n_streams*sizeof( unsigned int)); cudaMemset(&row_address, 0, 
n_streams*sizeof(unsigned int) ); cudaMalloc(&d_row_map,sizeof(int)*(H-h+1)*(W-w+1)*N*3); // create streams: // it can roughly handle about 1000 images at once cudaStream_t streams[1000]; /* Declaring a set of CUDA streams */ for( int i=0; i<n_streams; i++ ) cudaStreamCreate(&streams[i]); /* Initializing a set of streams to work on a set of each image */ // creating memory for the intermediate kernels int * output_mat; /// for the putput of the gather kernel cudaMalloc(&output_mat,sizeof(int)*(H-h+1)*(W-w+1)*d*N); // final output matrix we all know its dimensionb already int * output; cudaMalloc(&output,sizeof(int)*N*(H-h+1)*(W-w+1)*d); // profiling features ----------------------- cudaEvent_t start,stop; /* CUDA events to time the program */ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); // blocks and threads for diffrent kernel plaunches ----------- // for the gather kernle dim3 Block(H*W/BLOCK_SIZE,num_images,1); dim3 Thread(BLOCK_SIZE,1,1); //convolution kernel dim3 Block_c(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1); dim3 Thread_c(BLOCK_SIZE,1,1); // offset for diffrent arrays ------------- int offset; // tensor fooffset int mask_offset; // mask offset int mat_offset; // d_mat offsaet int map_offset; // d_row_map offset int o_offset; //output_mat offset int om_offset; // output offset unsigned int *number_rows = (unsigned int *)malloc(sizeof(int)*n_streams ); // Allocating memory for the output tensor int * h_output = (int *)malloc(sizeof(int)*N*(H-h+1)*(W-w+1)*d); //scatter kernel --- dim3 Block_s(((H-h+1)*(W-w+1)*num_images)/BLOCK_SIZE,d,1); dim3 Thread_s(BLOCK_SIZE,1,1); //lanching the diffrent streams for(int j=0; j<n_streams; j++){ /* Initialize a set of off-sets for each stream */ offset = j*H*W*C*num_images; // tensor offset will be needed mask_offset = j*(H)*(W)*num_images; /// mask offset will be needed mat_offset = h*w*C*(H-h+1)*(W-w+1)*j*num_images;//matrix offset for the value to be in the matrix map_offset = 3*(H-h+1)*(W-w+1)*j*num_images;//offset for d_row_map o_offset = (H-h+1)*(W-w+1)*d*j*num_images;//offset for convolution output om_offset = d*(H-h+1)*(W-w+1)*j*num_images;//final output offset // now the kernels.............. 
// gether kernel gather<<<Block, Thread, 0, streams[j]>>>(&d_mask[mask_offset], &d_tensor[offset], &d_mat[mat_offset],&row_address[j], &d_row_map[map_offset],N , H , W , h, w, C , S); // cudaMemcpyAsync(&number_rows[j], &row_address[j], sizeof(unsigned int), cudaMemcpyDeviceToHost,streams[j]); //convolution kernel convolution_opti<<<Block_c, Thread_c,0, streams[j]>>>(&d_mat[mat_offset], d_kernel, &row_address[j], d, &output_mat[om_offset],h,w,C); // cout<<"kernel went through"<<endl; // convert the kernel back to its original form scatter<<<Block_s,Thread_s, 0 , streams[j]>>>(&output_mat[om_offset], &d_row_map[map_offset], &row_address[j], &output[o_offset],H, W, d, h, w); } cudaMemcpy(h_output,output,sizeof(int)*(H-h+1)*(W-w+1)*d*N,cudaMemcpyDeviceToHost); cudaEventRecord(stop); cudaEventSynchronize(stop); float run_time = 0.0; cudaEventElapsedTime(&run_time,start,stop); cout<<run_time<<endl; // for(int k = 0;k<N;k++){ // for(int p = 0; p<d;p++){ // cout<<"image"<<" "<<k<<" "<<"kernel"<<" "<<p<<endl; // for(int i = 0; i<(H-h+1);i++){ // for(int j = 0; j<(W-w+1);j++){ // cout<<h_output[k*(H-h+1)*(W-w+1)*d + p*(H-h+1)*(W-w+1) + i*(W-w+1)+ j ]<<" "; // } // cout<<endl; // } // cout<<endl; // } // cout<<endl; // } // Destroying all the streams in rthe for( int i=0; i<n_streams; i++ ) cudaStreamDestroy(streams[i]); return 0; }
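// Editor's hedged sketch for the .cu file above: cudaMemset is called there as
// cudaMemset(&row_address, 0, ...), i.e. on the address of the host pointer
// variable rather than on the device buffer it points to, so the per-stream row
// counters are most likely never zeroed on the device (the same pattern appears in
// the hipified twin of this file). The fragment below shows the intended
// allocate/zero/read-back pattern for those counters; only the names row_address,
// number_rows and n_streams come from the original, everything else is an
// illustrative assumption, not the author's code.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

static void zero_and_read_counters(int n_streams)
{
    unsigned int *row_address = NULL;                          // device-side counters, one per stream
    unsigned int *number_rows =
        (unsigned int *)malloc(n_streams * sizeof(unsigned int));

    cudaMalloc(&row_address, n_streams * sizeof(unsigned int));
    cudaMemset(row_address, 0, n_streams * sizeof(unsigned int)); // zero the device buffer itself, not &row_address

    // ... per-stream gather / convolution / scatter launches would go here ...

    cudaMemcpy(number_rows, row_address,                       // read back how many rows each stream gathered
               n_streams * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    for (int j = 0; j < n_streams; ++j)
        printf("stream %d gathered %u rows\n", j, number_rows[j]);

    cudaFree(row_address);
    free(number_rows);
}
// Usage: zero_and_read_counters(2);  // matches n_streams = N/2 with N = 4 above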
31ebf90f21f2850100817923e876a6ba9fe01ecd.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "printThreadIDs.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( printThreadIDs), dim3(gridBlock),dim3(threadBlock), 0, 0, ); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( printThreadIDs), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( printThreadIDs), dim3(gridBlock),dim3(threadBlock), 0, 0, ); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
31ebf90f21f2850100817923e876a6ba9fe01ecd.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "printThreadIDs.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); printThreadIDs<<<gridBlock,threadBlock>>>(); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { printThreadIDs<<<gridBlock,threadBlock>>>(); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { printThreadIDs<<<gridBlock,threadBlock>>>(); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
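// Hedged note on the benchmark harness above: the steady_clock interval is taken
// without a device synchronization after the 1000 launches, and kernel launches are
// asynchronous, so the measurement can reflect little more than launch/queueing
// overhead. A minimal event-based timing sketch for a repeated launch follows; the
// kernel name dummyKernel and the launch shape are assumptions, the CUDA event API
// calls are standard.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummyKernel() {}                      // stand-in for printThreadIDs

int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        dummyKernel<<<64, 256>>>();                   // same repeated-launch pattern as above
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);                       // wait until all queued work has actually finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);           // elapsed GPU time in milliseconds
    printf("1000 launches took %f ms (avg %f us per launch)\n", ms, ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}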
01110a87cb70bbe2befd0d82312496c0a09790e5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <algorithm> #include <cuml/common/cuml_allocator.hpp> #include <iostream> #include <metrics/entropy.cuh> #include <raft/cuda_utils.cuh> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { struct entropyParam { int nElements; int lowerLabelRange; int upperLabelRange; double tolerance; }; //test fixture class template <typename T> class entropyTest : public ::testing::TestWithParam<entropyParam> { protected: //the constructor void SetUp() override { //getting the parameters params = ::testing::TestWithParam<entropyParam>::GetParam(); nElements = params.nElements; lowerLabelRange = params.lowerLabelRange; upperLabelRange = params.upperLabelRange; //generating random value test input std::vector<int> arr1(nElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); }); //generating the golden output int numUniqueClasses = upperLabelRange - lowerLabelRange + 1; int *p = (int *)malloc(numUniqueClasses * sizeof(int)); memset(p, 0, numUniqueClasses * sizeof(int)); //calculating the bincount array for (int i = 0; i < nElements; ++i) { ++p[arr1[i] - lowerLabelRange]; } //calculating the aggregate entropy for (int i = 0; i < numUniqueClasses; ++i) { if (p[i]) truthEntropy += -1 * (double(p[i]) / double(nElements)) * (log(double(p[i])) - log(double(nElements))); } //allocating and initializing memory to the GPU CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(clusterArray, nElements, true); raft::update_device(clusterArray, &arr1[0], (int)nElements, stream); std::shared_ptr<MLCommon::deviceAllocator> allocator( new raft::mr::device::default_allocator); CUDA_CHECK(hipStreamSynchronize(stream)); //calling the entropy CUDA implementation computedEntropy = MLCommon::Metrics::entropy(clusterArray, nElements, lowerLabelRange, upperLabelRange, allocator, stream); } //the destructor void TearDown() override { CUDA_CHECK(hipFree(clusterArray)); CUDA_CHECK(hipStreamDestroy(stream)); } //declaring the data values entropyParam params; T lowerLabelRange, upperLabelRange; T *clusterArray = nullptr; int nElements = 0; double truthEntropy = 0; double computedEntropy = 0; hipStream_t stream; }; //setting test parameter values const std::vector<entropyParam> inputs = { {199, 1, 10, 0.000001}, {200, 15, 100, 0.000001}, {100, 1, 20, 0.000001}, {10, 1, 10, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001}, {199, 1, 10, 0.000001}, {200, 15, 100, 0.000001}, {100, 1, 20, 0.000001}, {10, 1, 10, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001}}; //writing the test suite typedef entropyTest<int> entropyTestClass; TEST_P(entropyTestClass, Result) { ASSERT_NEAR(computedEntropy, truthEntropy, 
params.tolerance); } INSTANTIATE_TEST_CASE_P(entropy, entropyTestClass, ::testing::ValuesIn(inputs)); } //end namespace Metrics } //end namespace MLCommon
01110a87cb70bbe2befd0d82312496c0a09790e5.cu
/* * Copyright (c) 2019-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <algorithm> #include <cuml/common/cuml_allocator.hpp> #include <iostream> #include <metrics/entropy.cuh> #include <raft/cuda_utils.cuh> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { struct entropyParam { int nElements; int lowerLabelRange; int upperLabelRange; double tolerance; }; //test fixture class template <typename T> class entropyTest : public ::testing::TestWithParam<entropyParam> { protected: //the constructor void SetUp() override { //getting the parameters params = ::testing::TestWithParam<entropyParam>::GetParam(); nElements = params.nElements; lowerLabelRange = params.lowerLabelRange; upperLabelRange = params.upperLabelRange; //generating random value test input std::vector<int> arr1(nElements, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(lowerLabelRange, upperLabelRange); std::generate(arr1.begin(), arr1.end(), [&]() { return intGenerator(dre); }); //generating the golden output int numUniqueClasses = upperLabelRange - lowerLabelRange + 1; int *p = (int *)malloc(numUniqueClasses * sizeof(int)); memset(p, 0, numUniqueClasses * sizeof(int)); //calculating the bincount array for (int i = 0; i < nElements; ++i) { ++p[arr1[i] - lowerLabelRange]; } //calculating the aggregate entropy for (int i = 0; i < numUniqueClasses; ++i) { if (p[i]) truthEntropy += -1 * (double(p[i]) / double(nElements)) * (log(double(p[i])) - log(double(nElements))); } //allocating and initializing memory to the GPU CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(clusterArray, nElements, true); raft::update_device(clusterArray, &arr1[0], (int)nElements, stream); std::shared_ptr<MLCommon::deviceAllocator> allocator( new raft::mr::device::default_allocator); CUDA_CHECK(cudaStreamSynchronize(stream)); //calling the entropy CUDA implementation computedEntropy = MLCommon::Metrics::entropy(clusterArray, nElements, lowerLabelRange, upperLabelRange, allocator, stream); } //the destructor void TearDown() override { CUDA_CHECK(cudaFree(clusterArray)); CUDA_CHECK(cudaStreamDestroy(stream)); } //declaring the data values entropyParam params; T lowerLabelRange, upperLabelRange; T *clusterArray = nullptr; int nElements = 0; double truthEntropy = 0; double computedEntropy = 0; cudaStream_t stream; }; //setting test parameter values const std::vector<entropyParam> inputs = { {199, 1, 10, 0.000001}, {200, 15, 100, 0.000001}, {100, 1, 20, 0.000001}, {10, 1, 10, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001}, {199, 1, 10, 0.000001}, {200, 15, 100, 0.000001}, {100, 1, 20, 0.000001}, {10, 1, 10, 0.000001}, {198, 1, 100, 0.000001}, {300, 3, 99, 0.000001}}; //writing the test suite typedef entropyTest<int> entropyTestClass; TEST_P(entropyTestClass, Result) { ASSERT_NEAR(computedEntropy, truthEntropy, params.tolerance); } INSTANTIATE_TEST_CASE_P(entropy, 
entropyTestClass, ::testing::ValuesIn(inputs)); } //end namespace Metrics } //end namespace MLCommon
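// Hedged sketch of the host-side "golden" computation used by the entropy test
// above: for a label histogram p[] over n samples it accumulates
// -(p_i/n) * (ln(p_i) - ln(n)), i.e. Shannon entropy in nats. The stand-alone
// version below mirrors that reference; the function and variable names are
// illustrative only, not part of the cuML API.
#include <cmath>
#include <cstdio>
#include <vector>

static double reference_entropy(const std::vector<int> &labels, int lower, int upper)
{
    int n = (int)labels.size();
    std::vector<int> p(upper - lower + 1, 0);
    for (int v : labels) ++p[v - lower];              // bincount, as in SetUp()

    double h = 0.0;
    for (int c : p)
        if (c) h += -1.0 * (double(c) / n) * (std::log(double(c)) - std::log(double(n)));
    return h;                                         // entropy in nats
}

int main()
{
    std::vector<int> labels = {1, 1, 2, 3, 3, 3};
    printf("entropy = %f nats\n", reference_entropy(labels, 1, 3));
    return 0;
}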
a18781b38bb93aa312d9976a6c5ed16765300f2f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <stdint.h> #include <malloc.h> #include <stdlib.h> #include "cuPrintf.hip" using namespace std; struct row_cols { int rows, cols; }; typedef struct row_cols rc; __global__ void conv_operate(int * d_arr1, int * d_arr2, int * d_arr3, int * d_kernel, int * d_conv_arr1, int * d_conv_arr2, int * d_conv_arr3, rc arr_size, rc kernel_size, rc conv_size) { int index = blockDim.x * blockIdx.x + threadIdx.x; int row_num = index/blockDim.x; int col_num = index%blockDim.x; int min_row = row_num; int max_row = row_num + kernel_size.rows-1; int min_col = col_num; int max_col = col_num + kernel_size.cols-1; int conv_val1=0, conv_val2=0, conv_val3=0; for (int i = min_row, conv_row=0; i <= max_row; i++, conv_row++) { for (int j = min_col, conv_col=0; j <= max_col; ++j, conv_col++) { conv_val1 += d_arr1[i*arr_size.cols + j] * d_kernel[conv_row*kernel_size.cols + conv_col]; conv_val2 += d_arr2[i*arr_size.cols + j] * d_kernel[conv_row*kernel_size.cols + conv_col]; conv_val3 += d_arr3[i*arr_size.cols + j] * d_kernel[conv_row*kernel_size.cols + conv_col]; } } __syncthreads(); atomicAdd((int*)&d_conv_arr1[index], (int)conv_val1/9); atomicAdd((int*)&d_conv_arr2[index], (int)conv_val2/9); atomicAdd((int*)&d_conv_arr3[index], (int)conv_val3/9); } int main(int argc, char *argv[]) { // Run the python script which converts image to seperate channels system("python img2txt.py"); //Load the image which was seperated by python into different channels, each in its respective file. rc arr_size, kernel_size; fstream ch1("ch1.txt", std::ios_base::in); fstream ch2("ch2.txt", std::ios_base::in); fstream ch3("ch3.txt", std::ios_base::in); ch1 >> arr_size.rows >> arr_size.cols; int* channels[3]; channels[0] = (int*) malloc(sizeof(int) * arr_size.rows * arr_size.cols); channels[1] = (int*) malloc(sizeof(int) * arr_size.rows * arr_size.cols); channels[2] = (int*) malloc(sizeof(int) * arr_size.rows * arr_size.cols); for (int i = 0; i < arr_size.rows * arr_size.cols; ++i) { ch1 >> channels[0][i]; ch2 >> channels[1][i]; ch3 >> channels[2][i]; } cout << "Image loaded\n"; //Load the kernel fstream kern("kernel_edge.txt", std::ios_base::in); kern >> kernel_size.rows; kern >> kernel_size.cols; int* kernel = (int*)malloc(sizeof(int) * (kernel_size.rows) * (kernel_size.cols)); for (int i = 0; i < kernel_size.rows * kernel_size.cols; ++i) { kern >> kernel[i]; } cout << "Kernel loaded\n"; rc conv_size; conv_size.rows = (arr_size.rows) - (kernel_size.rows)+1; conv_size.cols = (arr_size.cols) - (kernel_size.cols)+1; // setup variables used in device int *d_arr1; int *d_arr2; int *d_arr3; int *d_kernel; int *d_conv_arr1; int *d_conv_arr2; int *d_conv_arr3; hipMalloc((void **)&d_arr1, sizeof(int) * (arr_size.rows) * (arr_size.cols)); hipMalloc((void **)&d_arr2, sizeof(int) * (arr_size.rows) * (arr_size.cols)); hipMalloc((void **)&d_arr3, sizeof(int) * (arr_size.rows) * (arr_size.cols)); hipMalloc((void **)&d_kernel, sizeof(int) * (kernel_size.rows) * (kernel_size.cols)); hipMalloc((void **)&d_conv_arr1, sizeof(int) * (conv_size.rows) * (conv_size.cols)); hipMalloc((void **)&d_conv_arr2, sizeof(int) * (conv_size.rows) * (conv_size.cols)); hipMalloc((void **)&d_conv_arr3, sizeof(int) * (conv_size.rows) * (conv_size.cols)); hipMemcpy(d_arr1, channels[0], sizeof(int) * (arr_size.rows) * (arr_size.cols), hipMemcpyHostToDevice); hipMemcpy(d_arr2, channels[1], sizeof(int) * (arr_size.rows) * (arr_size.cols), 
hipMemcpyHostToDevice); hipMemcpy(d_arr3, channels[2], sizeof(int) * (arr_size.rows) * (arr_size.cols), hipMemcpyHostToDevice); hipMemcpy(d_kernel, kernel, sizeof(int) * (kernel_size.rows) * (kernel_size.cols), hipMemcpyHostToDevice); hipMemset(d_conv_arr1, 0, sizeof(int) * (conv_size.rows) * (conv_size.cols)); hipMemset(d_conv_arr2, 0, sizeof(int) * (conv_size.rows) * (conv_size.cols)); hipMemset(d_conv_arr3, 0, sizeof(int) * (conv_size.rows) * (conv_size.cols)); // // printf("mem copied\n"); cout << "Processing\n"; cudaPrintfInit(); hipLaunchKernelGGL(( conv_operate), dim3(conv_size.rows),dim3(conv_size.cols), 0, 0, d_arr1, d_arr2, d_arr3, d_kernel, d_conv_arr1, d_conv_arr2, d_conv_arr3, arr_size, kernel_size, conv_size); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); free(channels[0]); free(channels[1]); free(channels[2]); free(kernel); //get the computed data back channels[0] = (int*) malloc(sizeof(int) * conv_size.rows*conv_size.cols); channels[1] = (int*) malloc(sizeof(int) * conv_size.rows*conv_size.cols); channels[2] = (int*) malloc(sizeof(int) * conv_size.rows*conv_size.cols); hipMemcpy(channels[0], d_conv_arr1, sizeof(int) * (conv_size.rows) * (conv_size.cols), hipMemcpyDeviceToHost); hipMemcpy(channels[1], d_conv_arr2, sizeof(int) * (conv_size.rows) * (conv_size.cols), hipMemcpyDeviceToHost); hipMemcpy(channels[2], d_conv_arr3, sizeof(int) * (conv_size.rows) * (conv_size.cols), hipMemcpyDeviceToHost); //write the data into seperate text files for each channel ofstream ch1_out("ch1_out.txt"); ofstream ch2_out("ch2_out.txt"); ofstream ch3_out("ch3_out.txt"); int i=0; ch1_out << conv_size.rows << "\n" << conv_size.cols << "\n"; for (; i < conv_size.rows * conv_size.cols - 1; ++i) { ch1_out << channels[0][i] << "\n"; ch2_out << channels[1][i] << "\n"; ch3_out << channels[2][i] << "\n"; } ch1_out << channels[0][i]; ch2_out << channels[1][i]; ch3_out << channels[2][i]; ch1_out.close(); ch2_out.close(); ch3_out.close(); //Run python code which converts the seperated channels back to image system("python txt2img.py"); cout << "Image Saved as out.jpg\n"; return 0; }
a18781b38bb93aa312d9976a6c5ed16765300f2f.cu
#include <iostream> #include <fstream> #include <stdint.h> #include <malloc.h> #include <stdlib.h> #include "cuPrintf.cu" using namespace std; struct row_cols { int rows, cols; }; typedef struct row_cols rc; __global__ void conv_operate(int * d_arr1, int * d_arr2, int * d_arr3, int * d_kernel, int * d_conv_arr1, int * d_conv_arr2, int * d_conv_arr3, rc arr_size, rc kernel_size, rc conv_size) { int index = blockDim.x * blockIdx.x + threadIdx.x; int row_num = index/blockDim.x; int col_num = index%blockDim.x; int min_row = row_num; int max_row = row_num + kernel_size.rows-1; int min_col = col_num; int max_col = col_num + kernel_size.cols-1; int conv_val1=0, conv_val2=0, conv_val3=0; for (int i = min_row, conv_row=0; i <= max_row; i++, conv_row++) { for (int j = min_col, conv_col=0; j <= max_col; ++j, conv_col++) { conv_val1 += d_arr1[i*arr_size.cols + j] * d_kernel[conv_row*kernel_size.cols + conv_col]; conv_val2 += d_arr2[i*arr_size.cols + j] * d_kernel[conv_row*kernel_size.cols + conv_col]; conv_val3 += d_arr3[i*arr_size.cols + j] * d_kernel[conv_row*kernel_size.cols + conv_col]; } } __syncthreads(); atomicAdd((int*)&d_conv_arr1[index], (int)conv_val1/9); atomicAdd((int*)&d_conv_arr2[index], (int)conv_val2/9); atomicAdd((int*)&d_conv_arr3[index], (int)conv_val3/9); } int main(int argc, char *argv[]) { // Run the python script which converts image to seperate channels system("python img2txt.py"); //Load the image which was seperated by python into different channels, each in its respective file. rc arr_size, kernel_size; fstream ch1("ch1.txt", std::ios_base::in); fstream ch2("ch2.txt", std::ios_base::in); fstream ch3("ch3.txt", std::ios_base::in); ch1 >> arr_size.rows >> arr_size.cols; int* channels[3]; channels[0] = (int*) malloc(sizeof(int) * arr_size.rows * arr_size.cols); channels[1] = (int*) malloc(sizeof(int) * arr_size.rows * arr_size.cols); channels[2] = (int*) malloc(sizeof(int) * arr_size.rows * arr_size.cols); for (int i = 0; i < arr_size.rows * arr_size.cols; ++i) { ch1 >> channels[0][i]; ch2 >> channels[1][i]; ch3 >> channels[2][i]; } cout << "Image loaded\n"; //Load the kernel fstream kern("kernel_edge.txt", std::ios_base::in); kern >> kernel_size.rows; kern >> kernel_size.cols; int* kernel = (int*)malloc(sizeof(int) * (kernel_size.rows) * (kernel_size.cols)); for (int i = 0; i < kernel_size.rows * kernel_size.cols; ++i) { kern >> kernel[i]; } cout << "Kernel loaded\n"; rc conv_size; conv_size.rows = (arr_size.rows) - (kernel_size.rows)+1; conv_size.cols = (arr_size.cols) - (kernel_size.cols)+1; // setup variables used in device int *d_arr1; int *d_arr2; int *d_arr3; int *d_kernel; int *d_conv_arr1; int *d_conv_arr2; int *d_conv_arr3; cudaMalloc((void **)&d_arr1, sizeof(int) * (arr_size.rows) * (arr_size.cols)); cudaMalloc((void **)&d_arr2, sizeof(int) * (arr_size.rows) * (arr_size.cols)); cudaMalloc((void **)&d_arr3, sizeof(int) * (arr_size.rows) * (arr_size.cols)); cudaMalloc((void **)&d_kernel, sizeof(int) * (kernel_size.rows) * (kernel_size.cols)); cudaMalloc((void **)&d_conv_arr1, sizeof(int) * (conv_size.rows) * (conv_size.cols)); cudaMalloc((void **)&d_conv_arr2, sizeof(int) * (conv_size.rows) * (conv_size.cols)); cudaMalloc((void **)&d_conv_arr3, sizeof(int) * (conv_size.rows) * (conv_size.cols)); cudaMemcpy(d_arr1, channels[0], sizeof(int) * (arr_size.rows) * (arr_size.cols), cudaMemcpyHostToDevice); cudaMemcpy(d_arr2, channels[1], sizeof(int) * (arr_size.rows) * (arr_size.cols), cudaMemcpyHostToDevice); cudaMemcpy(d_arr3, channels[2], sizeof(int) * (arr_size.rows) * 
(arr_size.cols), cudaMemcpyHostToDevice); cudaMemcpy(d_kernel, kernel, sizeof(int) * (kernel_size.rows) * (kernel_size.cols), cudaMemcpyHostToDevice); cudaMemset(d_conv_arr1, 0, sizeof(int) * (conv_size.rows) * (conv_size.cols)); cudaMemset(d_conv_arr2, 0, sizeof(int) * (conv_size.rows) * (conv_size.cols)); cudaMemset(d_conv_arr3, 0, sizeof(int) * (conv_size.rows) * (conv_size.cols)); // // printf("mem copied\n"); cout << "Processing\n"; cudaPrintfInit(); conv_operate<<<conv_size.rows,conv_size.cols>>>(d_arr1, d_arr2, d_arr3, d_kernel, d_conv_arr1, d_conv_arr2, d_conv_arr3, arr_size, kernel_size, conv_size); cudaPrintfDisplay(stdout, true); cudaPrintfEnd(); free(channels[0]); free(channels[1]); free(channels[2]); free(kernel); //get the computed data back channels[0] = (int*) malloc(sizeof(int) * conv_size.rows*conv_size.cols); channels[1] = (int*) malloc(sizeof(int) * conv_size.rows*conv_size.cols); channels[2] = (int*) malloc(sizeof(int) * conv_size.rows*conv_size.cols); cudaMemcpy(channels[0], d_conv_arr1, sizeof(int) * (conv_size.rows) * (conv_size.cols), cudaMemcpyDeviceToHost); cudaMemcpy(channels[1], d_conv_arr2, sizeof(int) * (conv_size.rows) * (conv_size.cols), cudaMemcpyDeviceToHost); cudaMemcpy(channels[2], d_conv_arr3, sizeof(int) * (conv_size.rows) * (conv_size.cols), cudaMemcpyDeviceToHost); //write the data into seperate text files for each channel ofstream ch1_out("ch1_out.txt"); ofstream ch2_out("ch2_out.txt"); ofstream ch3_out("ch3_out.txt"); int i=0; ch1_out << conv_size.rows << "\n" << conv_size.cols << "\n"; for (; i < conv_size.rows * conv_size.cols - 1; ++i) { ch1_out << channels[0][i] << "\n"; ch2_out << channels[1][i] << "\n"; ch3_out << channels[2][i] << "\n"; } ch1_out << channels[0][i]; ch2_out << channels[1][i]; ch3_out << channels[2][i]; ch1_out.close(); ch2_out.close(); ch3_out.close(); //Run python code which converts the seperated channels back to image system("python txt2img.py"); cout << "Image Saved as out.jpg\n"; return 0; }
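// Hedged sketch: a host-side reference for the "valid" convolution performed by
// conv_operate above, useful for spot-checking the device output. Only one channel
// is shown (the GPU kernel processes three), and the integer division by 9 simply
// mirrors the hard-coded normalisation in the kernel, presumably intended for a
// 3x3 filter. All names here are illustrative.
#include <vector>

static std::vector<int> conv_valid_cpu(const std::vector<int> &img, int H, int W,
                                       const std::vector<int> &ker, int kh, int kw)
{
    int oh = H - kh + 1, ow = W - kw + 1;             // "valid" output size, as in conv_size
    std::vector<int> out(oh * ow, 0);
    for (int r = 0; r < oh; ++r)
        for (int c = 0; c < ow; ++c) {
            int acc = 0;
            for (int i = 0; i < kh; ++i)              // slide the kernel over the current window
                for (int j = 0; j < kw; ++j)
                    acc += img[(r + i) * W + (c + j)] * ker[i * kw + j];
            out[r * ow + c] = acc / 9;                // same integer normalisation as the GPU kernel
        }
    return out;
}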
6f5b1ed49ef03704110ed6d685b90e178406cc68.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include "simpleHashTable.h" #define BUFFER_SIZE SIZE * 6 //global function callable from main function //count each word's occurrence __global__ void counter(unsigned int *key, Table table, Lock *lock, int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; /** initialize table's pool in the initialize table funtion make sure zero out values could only occurs once //initialize table if(tid == 0) { printf("In zero out table\n"); zero_out_values_in_table(table); //need to be synchronized __syncthreads(); } **/ //all the key with index larger than count is not exist while (tid < count) { unsigned int k = key[tid]; put(table, k, lock, tid); tid += stride; } } int main(int argc, char* argv[]) { if(argc != 4) { printf("Usage: %s <File name> <Searching word> <Number of threads>", argv[0]); exit(-1); } char *filename = argv[1]; unsigned int key_num = (unsigned int)atoi(argv[2]); int numThread = atoi(argv[3]); //open file FILE *pFile; pFile = fopen(filename, "r"); if(pFile == NULL) { printf("Fail to open file"); exit(-1); } //time function hipEvent_t start, stop; HANDLE_ERROR(hipEventCreate(&start)); HANDLE_ERROR(hipEventCreate(&stop)); HANDLE_ERROR(hipEventRecord(start, 0)); //copy hash table and lock to GPU Table dev_table; initialize_table(dev_table, HASH_ENTRIES, ELEMENTS); Lock lock[HASH_ENTRIES]; Lock *dev_lock; HANDLE_ERROR(hipMalloc((void**)&dev_lock, HASH_ENTRIES * sizeof( Lock))); HANDLE_ERROR(hipMemcpy(dev_lock, lock, HASH_ENTRIES * sizeof( Lock ), hipMemcpyHostToDevice ) ); /** //set count in GPU to count number of elements unsigned int count = 0, *dev_count; hipMalloc((void**)&dev_count, sizeof(unsigned int)); HANDLE_ERROR(hipMemcpy(dev_count, &count, sizeof(unsigned int), hipMemcpyHostToDevice)); **/ //set block number and thread number per block int numBlock = 32; int threadPerBlock = numThread/numBlock; //copy file to GPU unsigned int *key, *d_key; while(!feof(pFile)) { key = (unsigned int*)malloc(BUFFER_SIZE); int num = BUFFER_SIZE/sizeof(unsigned int); int i, count; for(i = 0; i < num && !feof(pFile); i++) { fscanf(pFile, "%u", &key[i]); count = i; } HANDLE_ERROR(hipMalloc((void**)&d_key, BUFFER_SIZE)); HANDLE_ERROR(hipMemcpy(d_key, key, BUFFER_SIZE, hipMemcpyHostToDevice)); //count the word on GPU hipLaunchKernelGGL(( counter), dim3(numBlock), dim3(threadPerBlock), 0, 0, d_key, dev_table, dev_lock, count+1); //wait until all device ended hipDeviceSynchronize(); //free memory in GPU HANDLE_ERROR(hipFree(d_key)); free(key); } //copy hash table from device to host Table table; copy_table_to_host(dev_table, table); unsigned long occurrence = 0; occurrence = get(table, key_num); if(occurrence == 0) printf("Key number not found!"); else printf("Occurrence of Key number: %u\n", occurrence); HANDLE_ERROR(hipEventRecord(stop, 0)); HANDLE_ERROR(hipEventSynchronize(stop)); float elapsedTime; HANDLE_ERROR(hipEventElapsedTime(&elapsedTime, start, stop)); printf("Time to hash: %3.1f ms\n", elapsedTime); free(table.pool); free(table.entries); HANDLE_ERROR(hipEventDestroy(start)); HANDLE_ERROR(hipEventDestroy(stop)); free_table(dev_table); HANDLE_ERROR(hipFree(dev_lock)); return 0; }
6f5b1ed49ef03704110ed6d685b90e178406cc68.cu
#include <stdio.h> #include <stdlib.h> #include <string.h> #include "simpleHashTable.h" #define BUFFER_SIZE SIZE * 6 //global function callable from main function //count each word's occurrence __global__ void counter(unsigned int *key, Table table, Lock *lock, int count) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int stride = blockDim.x * gridDim.x; /** initialize table's pool in the initialize table funtion make sure zero out values could only occurs once //initialize table if(tid == 0) { printf("In zero out table\n"); zero_out_values_in_table(table); //need to be synchronized __syncthreads(); } **/ //all the key with index larger than count is not exist while (tid < count) { unsigned int k = key[tid]; put(table, k, lock, tid); tid += stride; } } int main(int argc, char* argv[]) { if(argc != 4) { printf("Usage: %s <File name> <Searching word> <Number of threads>", argv[0]); exit(-1); } char *filename = argv[1]; unsigned int key_num = (unsigned int)atoi(argv[2]); int numThread = atoi(argv[3]); //open file FILE *pFile; pFile = fopen(filename, "r"); if(pFile == NULL) { printf("Fail to open file"); exit(-1); } //time function cudaEvent_t start, stop; HANDLE_ERROR(cudaEventCreate(&start)); HANDLE_ERROR(cudaEventCreate(&stop)); HANDLE_ERROR(cudaEventRecord(start, 0)); //copy hash table and lock to GPU Table dev_table; initialize_table(dev_table, HASH_ENTRIES, ELEMENTS); Lock lock[HASH_ENTRIES]; Lock *dev_lock; HANDLE_ERROR(cudaMalloc((void**)&dev_lock, HASH_ENTRIES * sizeof( Lock))); HANDLE_ERROR(cudaMemcpy(dev_lock, lock, HASH_ENTRIES * sizeof( Lock ), cudaMemcpyHostToDevice ) ); /** //set count in GPU to count number of elements unsigned int count = 0, *dev_count; cudaMalloc((void**)&dev_count, sizeof(unsigned int)); HANDLE_ERROR(cudaMemcpy(dev_count, &count, sizeof(unsigned int), cudaMemcpyHostToDevice)); **/ //set block number and thread number per block int numBlock = 32; int threadPerBlock = numThread/numBlock; //copy file to GPU unsigned int *key, *d_key; while(!feof(pFile)) { key = (unsigned int*)malloc(BUFFER_SIZE); int num = BUFFER_SIZE/sizeof(unsigned int); int i, count; for(i = 0; i < num && !feof(pFile); i++) { fscanf(pFile, "%u", &key[i]); count = i; } HANDLE_ERROR(cudaMalloc((void**)&d_key, BUFFER_SIZE)); HANDLE_ERROR(cudaMemcpy(d_key, key, BUFFER_SIZE, cudaMemcpyHostToDevice)); //count the word on GPU counter<<<numBlock, threadPerBlock>>>(d_key, dev_table, dev_lock, count+1); //wait until all device ended cudaDeviceSynchronize(); //free memory in GPU HANDLE_ERROR(cudaFree(d_key)); free(key); } //copy hash table from device to host Table table; copy_table_to_host(dev_table, table); unsigned long occurrence = 0; occurrence = get(table, key_num); if(occurrence == 0) printf("Key number not found!"); else printf("Occurrence of Key number: %u\n", occurrence); HANDLE_ERROR(cudaEventRecord(stop, 0)); HANDLE_ERROR(cudaEventSynchronize(stop)); float elapsedTime; HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop)); printf("Time to hash: %3.1f ms\n", elapsedTime); free(table.pool); free(table.entries); HANDLE_ERROR(cudaEventDestroy(start)); HANDLE_ERROR(cudaEventDestroy(stop)); free_table(dev_table); HANDLE_ERROR(cudaFree(dev_lock)); return 0; }
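// Hedged aside on the word-count program above: when the key range is small and
// known in advance, a plain atomicAdd histogram can replace the lock-based hash
// table entirely. The kernel below is such an alternative sketch, not the
// simpleHashTable.h API used above; all names are illustrative.
__global__ void histogramKernel(const unsigned int *keys, int count,
                                unsigned int *bins, unsigned int num_bins)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = tid; i < count; i += stride) {
        unsigned int k = keys[i];
        if (k < num_bins)
            atomicAdd(&bins[k], 1u);                  // one counter per possible key value
    }
}
// Usage (host side, assumed names):
//   cudaMemset(bins, 0, num_bins * sizeof(unsigned int));
//   histogramKernel<<<32, threadsPerBlock>>>(d_key, count, bins, num_bins);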
c484f846e1b2c77d17d024e6356e835422b4754b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <chrono> #define BLOCK_SIZE 1024 #define NUM_OF_BANKS 32 #define LOG_NUM_OF_BANKS 5 #define SHIFT_BANK(n) \ (n + (n >> LOG_NUM_OF_BANKS)) __global__ void prefix_sum(float *in, float *out, float* aux, int noc, int res) { __shared__ float temp[2*BLOCK_SIZE]; int n = BLOCK_SIZE*2; if (blockIdx.x == noc - 1) n = res; int thid = threadIdx.x; // printf("thid: %d, bid: %d, noc: %d\n", thid, blockIdx.x, noc); temp[SHIFT_BANK(2*thid)] = in[2*(thid + blockIdx.x*BLOCK_SIZE)]; temp[SHIFT_BANK(2*thid+1)] = in[2*(thid + blockIdx.x*BLOCK_SIZE)+1]; int offset = 1; for (int d = (n >> 1); d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = SHIFT_BANK(offset*(2*thid+1) - 1); int bi = SHIFT_BANK(offset*(2*thid+2) - 1); temp[bi] += temp[ai]; } offset <<= 1; } __syncthreads(); if (thid == 0) { aux[blockIdx.x] = temp[SHIFT_BANK(n - 1)]; temp[SHIFT_BANK(n-1)] = 0; } for (int d = 1; d <= (n >> 1); d <<= 1) { offset >>= 1; __syncthreads(); if (thid < d) { int ai = SHIFT_BANK(offset*(2*thid+1) - 1); int bi = SHIFT_BANK(offset*(2*thid+2) - 1); float t = temp[ai] + temp[bi]; temp[ai] = temp[bi]; temp[bi] = t; } } __syncthreads(); out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE)] = temp[SHIFT_BANK(2*thid)]; out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE) + 1] = temp[SHIFT_BANK(2*thid+1)]; } __global__ void block_add(float *in, float *out){ out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE)] += in[blockIdx.x]; out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE) + 1] += in[blockIdx.x]; } void prefix_sum_cpu_rec(float *in, float *out, int noc, int n) { float *aux; hipMalloc((void **)&aux, noc*sizeof(float)); int res = 2*BLOCK_SIZE; if (n%(2*BLOCK_SIZE) != 0 || n == 0) res = n%(2*BLOCK_SIZE); hipLaunchKernelGGL(( prefix_sum), dim3(noc), dim3(BLOCK_SIZE), 0, 0, in, out, aux, noc, res); hipDeviceSynchronize(); if (noc == 1) { hipFree(aux); return; } float *auxout; hipMalloc((void **)&auxout, ((noc + 2*BLOCK_SIZE - 1)/(2*BLOCK_SIZE))*sizeof(float)); prefix_sum_cpu_rec(aux, auxout, (noc + 2*BLOCK_SIZE - 1)/(2*BLOCK_SIZE), noc); hipLaunchKernelGGL(( block_add), dim3(noc), dim3(BLOCK_SIZE), 0, 0, auxout, out); hipDeviceSynchronize(); } int main() { int n = 1 << 20; float *in, *out; int noc = (n + 2*BLOCK_SIZE - 1)/(2*BLOCK_SIZE); hipMalloc((void **)&in, n*sizeof(float)); hipMalloc((void **)&out, n*sizeof(float)); srand(0); float test[n]; for (int i = 0; i < n; ++i) { float t = (rand()%10)/10.0f; test[i] = t; } hipMemcpy(in, test, n*sizeof(float), hipMemcpyHostToDevice); auto start = std::chrono::steady_clock::now(); prefix_sum_cpu_rec(in,out, noc, n); auto end = std::chrono::steady_clock::now(); auto diff = end - start; std::cout << std::chrono::duration <double, std::milli> (diff).count() << " ms" << std::endl; printf("%s\n", hipGetErrorString(hipGetLastError())); for (int i = n - 1; i < n; ++i) { float a; hipMemcpy(&a, out+i, sizeof(float), hipMemcpyDeviceToHost); std::cout << a << std::endl; } hipFree(in); hipFree(out); }
c484f846e1b2c77d17d024e6356e835422b4754b.cu
#include <iostream> #include <chrono> #define BLOCK_SIZE 1024 #define NUM_OF_BANKS 32 #define LOG_NUM_OF_BANKS 5 #define SHIFT_BANK(n) \ (n + (n >> LOG_NUM_OF_BANKS)) __global__ void prefix_sum(float *in, float *out, float* aux, int noc, int res) { __shared__ float temp[2*BLOCK_SIZE]; int n = BLOCK_SIZE*2; if (blockIdx.x == noc - 1) n = res; int thid = threadIdx.x; // printf("thid: %d, bid: %d, noc: %d\n", thid, blockIdx.x, noc); temp[SHIFT_BANK(2*thid)] = in[2*(thid + blockIdx.x*BLOCK_SIZE)]; temp[SHIFT_BANK(2*thid+1)] = in[2*(thid + blockIdx.x*BLOCK_SIZE)+1]; int offset = 1; for (int d = (n >> 1); d > 0; d >>= 1) { __syncthreads(); if (thid < d) { int ai = SHIFT_BANK(offset*(2*thid+1) - 1); int bi = SHIFT_BANK(offset*(2*thid+2) - 1); temp[bi] += temp[ai]; } offset <<= 1; } __syncthreads(); if (thid == 0) { aux[blockIdx.x] = temp[SHIFT_BANK(n - 1)]; temp[SHIFT_BANK(n-1)] = 0; } for (int d = 1; d <= (n >> 1); d <<= 1) { offset >>= 1; __syncthreads(); if (thid < d) { int ai = SHIFT_BANK(offset*(2*thid+1) - 1); int bi = SHIFT_BANK(offset*(2*thid+2) - 1); float t = temp[ai] + temp[bi]; temp[ai] = temp[bi]; temp[bi] = t; } } __syncthreads(); out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE)] = temp[SHIFT_BANK(2*thid)]; out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE) + 1] = temp[SHIFT_BANK(2*thid+1)]; } __global__ void block_add(float *in, float *out){ out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE)] += in[blockIdx.x]; out[2*(threadIdx.x + blockIdx.x * BLOCK_SIZE) + 1] += in[blockIdx.x]; } void prefix_sum_cpu_rec(float *in, float *out, int noc, int n) { float *aux; cudaMalloc((void **)&aux, noc*sizeof(float)); int res = 2*BLOCK_SIZE; if (n%(2*BLOCK_SIZE) != 0 || n == 0) res = n%(2*BLOCK_SIZE); prefix_sum<<<noc, BLOCK_SIZE>>>(in, out, aux, noc, res); cudaDeviceSynchronize(); if (noc == 1) { cudaFree(aux); return; } float *auxout; cudaMalloc((void **)&auxout, ((noc + 2*BLOCK_SIZE - 1)/(2*BLOCK_SIZE))*sizeof(float)); prefix_sum_cpu_rec(aux, auxout, (noc + 2*BLOCK_SIZE - 1)/(2*BLOCK_SIZE), noc); block_add<<<noc, BLOCK_SIZE>>>(auxout, out); cudaDeviceSynchronize(); } int main() { int n = 1 << 20; float *in, *out; int noc = (n + 2*BLOCK_SIZE - 1)/(2*BLOCK_SIZE); cudaMalloc((void **)&in, n*sizeof(float)); cudaMalloc((void **)&out, n*sizeof(float)); srand(0); float test[n]; for (int i = 0; i < n; ++i) { float t = (rand()%10)/10.0f; test[i] = t; } cudaMemcpy(in, test, n*sizeof(float), cudaMemcpyHostToDevice); auto start = std::chrono::steady_clock::now(); prefix_sum_cpu_rec(in,out, noc, n); auto end = std::chrono::steady_clock::now(); auto diff = end - start; std::cout << std::chrono::duration <double, std::milli> (diff).count() << " ms" << std::endl; printf("%s\n", cudaGetErrorString(cudaGetLastError())); for (int i = n - 1; i < n; ++i) { float a; cudaMemcpy(&a, out+i, sizeof(float), cudaMemcpyDeviceToHost); std::cout << a << std::endl; } cudaFree(in); cudaFree(out); }
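// Hedged sketch: a sequential exclusive prefix sum that can be used to validate the
// recursive Blelloch scan above (the GPU code produces an exclusive scan:
// out[0] = 0 and out[i] = in[0] + ... + in[i-1], with per-block totals scanned and
// added back by block_add). Names below are illustrative.
#include <vector>

static std::vector<float> exclusive_scan_cpu(const std::vector<float> &in)
{
    std::vector<float> out(in.size(), 0.0f);
    float running = 0.0f;
    for (size_t i = 0; i < in.size(); ++i) {
        out[i] = running;                             // sum of all elements strictly before i
        running += in[i];
    }
    return out;
}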
7f376c23c5929c516b4fd08bf5003aa24bd961d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } }
7f376c23c5929c516b4fd08bf5003aa24bd961d7.cu
#include "includes.h" __global__ void kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } }
7ea9bdea2838752e67ad549213d40d747a773722.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpuCrossover.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *chromosome = NULL; hipMalloc(&chromosome, XSIZE*YSIZE); hiprandState_t *globalState = NULL; hipMalloc(&globalState, XSIZE*YSIZE); int sizeofChromosome = XSIZE*YSIZE; int sizeofPopulation = XSIZE*YSIZE; int Bias = 1; float prob = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpuCrossover), dim3(gridBlock),dim3(threadBlock), 0, 0, chromosome,globalState,sizeofChromosome,sizeofPopulation,Bias,prob); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpuCrossover), dim3(gridBlock),dim3(threadBlock), 0, 0, chromosome,globalState,sizeofChromosome,sizeofPopulation,Bias,prob); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpuCrossover), dim3(gridBlock),dim3(threadBlock), 0, 0, chromosome,globalState,sizeofChromosome,sizeofPopulation,Bias,prob); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
7ea9bdea2838752e67ad549213d40d747a773722.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpuCrossover.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *chromosome = NULL; cudaMalloc(&chromosome, XSIZE*YSIZE); curandState *globalState = NULL; cudaMalloc(&globalState, XSIZE*YSIZE); int sizeofChromosome = XSIZE*YSIZE; int sizeofPopulation = XSIZE*YSIZE; int Bias = 1; float prob = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpuCrossover<<<gridBlock,threadBlock>>>(chromosome,globalState,sizeofChromosome,sizeofPopulation,Bias,prob); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpuCrossover<<<gridBlock,threadBlock>>>(chromosome,globalState,sizeofChromosome,sizeofPopulation,Bias,prob); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpuCrossover<<<gridBlock,threadBlock>>>(chromosome,globalState,sizeofChromosome,sizeofPopulation,Bias,prob); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
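// Hedged sketch: gpuCrossover above takes a curandState array, but the harness only
// cudaMallocs XSIZE*YSIZE bytes for it (rather than a count multiplied by
// sizeof(curandState)) and never seeds it; if the kernel indexes one state per
// element, that buffer is too small and its contents are uninitialized. A typical
// per-thread cuRAND state setup is shown below; the kernel name setupRandStates and
// the seed value are assumptions.
#include <curand_kernel.h>

__global__ void setupRandStates(curandState *states, unsigned long long seed, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
        curand_init(seed, id, 0, &states[id]);        // one independent random sequence per thread
}

// Usage (host side, assumed names):
//   curandState *states;
//   cudaMalloc(&states, n * sizeof(curandState));    // size in bytes, not element count
//   setupRandStates<<<(n + 255) / 256, 256>>>(states, 1234ULL, n);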
002de71125960f06c223a401c11a7bf407c32c95.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h"  // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <hip/hip_runtime.h>

#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 6
#define ITERATIONS 50000000

#include "../include/ContAcq-IntClk.h"

// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;

// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors(hipError_t err, const char *file, const int line)
{
    if (hipSuccess != err) {
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, hipGetErrorString(err));
        exit(-1);
    }
}

// This will output the proper error string when calling hipGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
    hipError_t err = hipGetLastError();
    if (hipSuccess != err) {
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, hipGetErrorString(err));
        exit(-1);
    }
}
// end of CUDA Helper Functions

__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Do some computation
    float Value1 = 1;
    float Value2 = A[i];
    float Value3 = B[i];
    float Value;
    float I1 = A[i];
    float I2 = B[i];

    // Excessive multiplication access
    // if(i%32==0){
    for (unsigned k = 0; k < ITERATIONS; k++) {
        Value1 = I1 * Value1;
        Value3 = I2 * Value3;
        Value1 *= Value2;
        Value1 *= Value2;
        Value2 = Value3 * Value1;
        Value1 = Value2 * Value3;
    }
    // }
    __syncthreads();

    Value = Value1;
    C[i] = Value * Value2;
}

int main()
{
    printf("Power Microbenchmarks\n");
    int N = THREADS_PER_BLOCK * NUM_OF_BLOCKS;
    size_t size = N * sizeof(float);

    // Allocate input vectors h_A and h_B in host memory
    h_A = (float*)malloc(size);
    if (h_A == 0) CleanupResources();
    h_B = (float*)malloc(size);
    if (h_B == 0) CleanupResources();
    h_C = (float*)malloc(size);
    if (h_C == 0) CleanupResources();

    // Initialize input vectors
    RandomInit(h_A, N);
    RandomInit(h_B, N);

    // Allocate vectors in device memory
    printf("before\n");
    checkCudaErrors( hipMalloc((void**)&d_A, size) );
    checkCudaErrors( hipMalloc((void**)&d_B, size) );
    checkCudaErrors( hipMalloc((void**)&d_C, size) );
    printf("after\n");

    // Copy vectors from host memory to device memory
    checkCudaErrors( hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice) );
    checkCudaErrors( hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice) );

    //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    dim3 dimGrid(NUM_OF_BLOCKS, 1);
    dim3 dimBlock(THREADS_PER_BLOCK, 1);
    dim3 dimGrid2(1, 1);
    dim3 dimBlock2(1, 1);

    CUT_SAFE_CALL(cutCreateTimer(&my_timer));
    TaskHandle taskhandle = LaunchDAQ();
    CUT_SAFE_CALL(cutStartTimer(my_timer));
    printf("execution time = %f\n", cutGetTimerValue(my_timer));

    hipLaunchKernelGGL(PowerKernal2, dimGrid, dimBlock, 0, 0, d_A, d_B, d_C, N);
    CUDA_SAFE_CALL( hipDeviceSynchronize() );

    printf("execution time = %f\n", cutGetTimerValue(my_timer));
    getLastCudaError("kernel launch failure");
    CUDA_SAFE_CALL( hipDeviceSynchronize() );
    CUT_SAFE_CALL(cutStopTimer(my_timer));
    TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
    printf("execution time = %f\n", cutGetTimerValue(my_timer));
    CUT_SAFE_CALL(cutDeleteTimer(my_timer));

#ifdef _DEBUG
    checkCudaErrors( hipDeviceSynchronize() );
#endif

    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    checkCudaErrors( hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost) );

    CleanupResources();
    return 0;
}

void CleanupResources(void)
{
    // Free device memory
    if (d_A) hipFree(d_A);
    if (d_B) hipFree(d_B);
    if (d_C) hipFree(d_C);

    // Free host memory
    if (h_A) free(h_A);
    if (h_B) free(h_B);
    if (h_C) free(h_C);
}

// Allocates an array with random float entries in [0, 1].
void RandomInit(float* data, int n)
{
    for (int i = 0; i < n; ++i) {
        data[i] = rand() / (float)RAND_MAX;  // cast avoids integer division, which would yield 0 almost everywhere
    }
}
002de71125960f06c223a401c11a7bf407c32c95.cu
#include <stdio.h>
#include <stdlib.h>
#include <cutil.h>
// Includes
#include <stdio.h>
// includes, project
#include "../include/sdkHelper.h"  // helper for shared functions common to CUDA SDK samples
//#include <shrQATest.h>
//#include <shrUtils.h>
// includes CUDA
#include <cuda_runtime.h>

#define THREADS_PER_BLOCK 256
#define NUM_OF_BLOCKS 6
#define ITERATIONS 50000000

#include "../include/ContAcq-IntClk.h"

// Variables
float* h_A;
float* h_B;
float* h_C;
float* d_A;
float* d_B;
float* d_C;
bool noprompt = false;
unsigned int my_timer;

// Functions
void CleanupResources(void);
void RandomInit(float*, int);
void ParseArguments(int, char**);

////////////////////////////////////////////////////////////////////////////////
// These are CUDA Helper functions

// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)

inline void __checkCudaErrors(cudaError_t err, const char *file, const int line)
{
    if (cudaSuccess != err) {
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n", file, line, (int)err, cudaGetErrorString(err));
        exit(-1);
    }
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)

inline void __getLastCudaError(const char *errorMessage, const char *file, const int line)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString(err));
        exit(-1);
    }
}
// end of CUDA Helper Functions

__global__ void PowerKernal2(const float* A, const float* B, float* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Do some computation
    float Value1 = 1;
    float Value2 = A[i];
    float Value3 = B[i];
    float Value;
    float I1 = A[i];
    float I2 = B[i];

    // Excessive multiplication access
    // if(i%32==0){
    for (unsigned k = 0; k < ITERATIONS; k++) {
        Value1 = I1 * Value1;
        Value3 = I2 * Value3;
        Value1 *= Value2;
        Value1 *= Value2;
        Value2 = Value3 * Value1;
        Value1 = Value2 * Value3;
    }
    // }
    __syncthreads();

    Value = Value1;
    C[i] = Value * Value2;
}

int main()
{
    printf("Power Microbenchmarks\n");
    int N = THREADS_PER_BLOCK * NUM_OF_BLOCKS;
    size_t size = N * sizeof(float);

    // Allocate input vectors h_A and h_B in host memory
    h_A = (float*)malloc(size);
    if (h_A == 0) CleanupResources();
    h_B = (float*)malloc(size);
    if (h_B == 0) CleanupResources();
    h_C = (float*)malloc(size);
    if (h_C == 0) CleanupResources();

    // Initialize input vectors
    RandomInit(h_A, N);
    RandomInit(h_B, N);

    // Allocate vectors in device memory
    printf("before\n");
    checkCudaErrors( cudaMalloc((void**)&d_A, size) );
    checkCudaErrors( cudaMalloc((void**)&d_B, size) );
    checkCudaErrors( cudaMalloc((void**)&d_C, size) );
    printf("after\n");

    // Copy vectors from host memory to device memory
    checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
    checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );

    //VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    dim3 dimGrid(NUM_OF_BLOCKS, 1);
    dim3 dimBlock(THREADS_PER_BLOCK, 1);
    dim3 dimGrid2(1, 1);
    dim3 dimBlock2(1, 1);

    CUT_SAFE_CALL(cutCreateTimer(&my_timer));
    TaskHandle taskhandle = LaunchDAQ();
    CUT_SAFE_CALL(cutStartTimer(my_timer));
    printf("execution time = %f\n", cutGetTimerValue(my_timer));

    PowerKernal2<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N);
    // cudaThreadSynchronize() is deprecated; the device-wide equivalent is cudaDeviceSynchronize().
    CUDA_SAFE_CALL( cudaDeviceSynchronize() );

    printf("execution time = %f\n", cutGetTimerValue(my_timer));
    getLastCudaError("kernel launch failure");
    CUDA_SAFE_CALL( cudaDeviceSynchronize() );
    CUT_SAFE_CALL(cutStopTimer(my_timer));
    TurnOffDAQ(taskhandle, cutGetTimerValue(my_timer));
    printf("execution time = %f\n", cutGetTimerValue(my_timer));
    CUT_SAFE_CALL(cutDeleteTimer(my_timer));

#ifdef _DEBUG
    checkCudaErrors( cudaDeviceSynchronize() );
#endif

    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );

    CleanupResources();
    return 0;
}

void CleanupResources(void)
{
    // Free device memory
    if (d_A) cudaFree(d_A);
    if (d_B) cudaFree(d_B);
    if (d_C) cudaFree(d_C);

    // Free host memory
    if (h_A) free(h_A);
    if (h_B) free(h_B);
    if (h_C) free(h_C);
}

// Allocates an array with random float entries in [0, 1].
void RandomInit(float* data, int n)
{
    for (int i = 0; i < n; ++i) {
        data[i] = rand() / (float)RAND_MAX;  // cast avoids integer division, which would yield 0 almost everywhere
    }
}