hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M)
---|---|---|---|
ac4eadcb807994fecfafcbce8f72c4679446f6bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <omp.h>
#include <math.h>
#include <stdio.h>
void jacobi_cpu(double* u, const double* u0, double h, long N) {
#pragma omp parallel for schedule(static)
for (long i=1; i<N-1; i++) {
for (long j=1; j<N-1; j++) {
u[N*i + j] = (h*h + u0[N*(i-1)+j] + u0[N*(i+1)+j] + u0[N*i+j+1] + u0[N*i+j-1]) / 4.0;
}
}
}
void calculate_A_times_u(const double *u, double *res, long N, double h) {
res[0] = 0;
#pragma omp parallel for schedule(static)
for (long i = 1; i < N-1; i++) {
for (long j=1; j< N-1; j++) {
res[N*i+j] = (4 * u[N*i+j] - u[N*(i-1)+j] - u[N*(i+1)+j] - u[N*i+j+1] - u[N*i+j-1]) / (h * h);
}
}
res[N*(N-1) + N-1] = 0;
}
double l2_norm_shift(double const* u, double shift, long n) {
double accum = 0.;
for (long i = 1; i < n-1; i++) {
for (long j=1; j< n-1; j++) {
accum += (u[n*i+j] - shift) * (u[n*i+j] - shift);
}
}
return sqrt(accum);
}
double dist(double const* u, double const* v, long N) {
double accum = 0.;
for (long i=0; i < N; i++) {
for (long j=0; j< N; j++) {
accum += (u[i*N+j] - v[i*N+j]) * (u[i*N+j] - v[i*N+j]);
}
}
return sqrt(accum);
}
__global__ void jacobi_kernel(double* u, const double* u0, double h, long N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // idx = N*i + j
int j = idx % N;
int i = (idx - j) / N;
if ((i>0) && (i<N-1) && (j>0) && (j<N-1)) {
u[idx] = (h*h + u0[N*(i+1)+j] + u0[N*(i-1)+j] + u0[idx-1] + u0[idx+1]) / 4.0;
}
}
__global__ void A_u_kernel(double* res, const double* u, long N, double h) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // idx = N*i + j
int j = idx % N;
int i = (idx - j) / N;
printf("idx: %d\n", idx);
if (idx == 0) res[idx] = 0.0;
else if (idx == N*(N-1) + N-1) res[idx] = 0.0;
else res[idx] = (4*u[idx] - u[idx+1] - u[idx-1] - u[N*(i-1)+j] - u[N*(i+1)+j]) / (h*h);
printf("A_v: %f at i:%d, j:%d\n", res[idx], i, j);
}
#define BLOCK_SIZE 1024
__global__ void l2_norm_shift_kernel(double* sum, const double* a, double shift, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = (a[idx]-shift) * (a[idx]-shift);
else smem[threadIdx.x] = 0;
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (threadIdx.x < s) {
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
__global__ void reduction_kernel(double* sum, const double*a, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
for(int s = 1; s < blockDim.x; s *= 2) {
if(threadIdx.x % (2*s) == 0)
smem[threadIdx.x] += smem[threadIdx.x + s];
__syncthreads();
}
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
int main() {
long N = 512;
long it = 10*3000;
//const long BLOCK_SIZE = 1024;
double h = 1./(N+1.);
double *u, *u_0, *A_u;
hipHostMalloc((void**) &u, N*N*sizeof(double));
hipHostMalloc((void**) &u_0, N*N*sizeof(double));
hipHostMalloc((void**) &A_u, N*N*sizeof(double));
#pragma omp parallel for schedule(static)
for (long i=0; i<N*N; i++) {
u[i] = 0.0;
u_0[i] = 0.0;
}
double *v_0, *v, *A_v;
hipMalloc(&v_0, N*N*sizeof(double));
hipMalloc(&v, N*N*sizeof(double));
hipMalloc(&A_v, N*N*sizeof(double));
hipMemcpyAsync(v_0, u_0, N*N*sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(v, u, N*N*sizeof(double), hipMemcpyHostToDevice);
double tt = omp_get_wtime();
printf("CPU Jacobi2D\n");
printf("iteration residue\n");
printf("---------------------\n");
for (long k=1; k<it+1; k++) {
hipMemcpy(u_0, u, N*N*sizeof(double), hipMemcpyHostToHost);
jacobi_cpu(u, u_0, h, N);
if ((k % 3000) == 0) {
calculate_A_times_u(u, A_u, N, h);
double res = l2_norm_shift(A_u, 1., N);
printf(" %ld %f\n", k, res);
}
}
printf("CPU: %f it/s\n\n\n\n", 1.0*it / (omp_get_wtime()-tt));
double *res_d;
long N_work = 1;
for (long i= (N*N+BLOCK_SIZE-1)/BLOCK_SIZE; i>1; i = (i+BLOCK_SIZE-1)/BLOCK_SIZE) N_work += i;
hipMalloc(&res_d, N_work*sizeof(double));
//printf("N_work: %ld\n", N_work);
tt = omp_get_wtime();
printf("GPU Jacobi2D\n");
printf("iteration residue\n");
printf("---------------------\n");
for (long k=1; k<it+1; k++) {
hipMemcpy(v_0, v, N*N*sizeof(double), hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( jacobi_kernel), dim3(N*N/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, v, v_0, h, N);
hipDeviceSynchronize();
if (((k % 3000) == 0) && (k > it)) { // code does not work for some reason
hipLaunchKernelGGL(( A_u_kernel), dim3(N*N/BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, A_v, v, N, h);
// calculate residue as in other task
long Nb = (N*N+BLOCK_SIZE-1)/BLOCK_SIZE;
hipLaunchKernelGGL(( l2_norm_shift_kernel), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, res_d, A_v, 1., N*N);
while (Nb > 1) {
//printf("hi\n");
long N = Nb;
Nb = (Nb+BLOCK_SIZE-1)/BLOCK_SIZE;
hipLaunchKernelGGL(( reduction_kernel), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, res_d+N, res_d, N);
res_d += N;
}
double res;
hipMemcpyAsync(&res, &res_d, 1*sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf(" %ld %f\n", k, res);
}
}
printf("GPU: %f it/s\n", 1.0*it / (omp_get_wtime()-tt));
hipMemcpyAsync(u_0, v, N*N*sizeof(double), hipMemcpyDeviceToHost);
printf("Difference is %f\n", dist(u,u_0, N));
hipFree(v_0);
hipFree(v);
hipFree(res_d);
hipFree(A_v);
hipHostFree(u_0);
hipHostFree(A_u);
hipHostFree(u);
return 0;
}
| ac4eadcb807994fecfafcbce8f72c4679446f6bf.cu | #include <omp.h>
#include <math.h>
#include <stdio.h>
void jacobi_cpu(double* u, const double* u0, double h, long N) {
#pragma omp parallel for schedule(static)
for (long i=1; i<N-1; i++) {
for (long j=1; j<N-1; j++) {
u[N*i + j] = (h*h + u0[N*(i-1)+j] + u0[N*(i+1)+j] + u0[N*i+j+1] + u0[N*i+j-1]) / 4.0;
}
}
}
void calculate_A_times_u(const double *u, double *res, long N, double h) {
res[0] = 0;
#pragma omp parallel for schedule(static)
for (long i = 1; i < N-1; i++) {
for (long j=1; j< N-1; j++) {
res[N*i+j] = (4 * u[N*i+j] - u[N*(i-1)+j] - u[N*(i+1)+j] - u[N*i+j+1] - u[N*i+j-1]) / (h * h);
}
}
res[N*(N-1) + N-1] = 0;
}
double l2_norm_shift(double const* u, double shift, long n) {
double accum = 0.;
for (long i = 1; i < n-1; i++) {
for (long j=1; j< n-1; j++) {
accum += (u[n*i+j] - shift) * (u[n*i+j] - shift);
}
}
return sqrt(accum);
}
double dist(double const* u, double const* v, long N) {
double accum = 0.;
for (long i=0; i < N; i++) {
for (long j=0; j< N; j++) {
accum += (u[i*N+j] - v[i*N+j]) * (u[i*N+j] - v[i*N+j]);
}
}
return sqrt(accum);
}
__global__ void jacobi_kernel(double* u, const double* u0, double h, long N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // idx = N*i + j
int j = idx % N;
int i = (idx - j) / N;
if ((i>0) && (i<N-1) && (j>0) && (j<N-1)) {
u[idx] = (h*h + u0[N*(i+1)+j] + u0[N*(i-1)+j] + u0[idx-1] + u0[idx+1]) / 4.0;
}
}
__global__ void A_u_kernel(double* res, const double* u, long N, double h) {
int idx = blockIdx.x * blockDim.x + threadIdx.x; // idx = N*i + j
int j = idx % N;
int i = (idx - j) / N;
printf("idx: %d\n", idx);
if (idx == 0) res[idx] = 0.0;
else if (idx == N*(N-1) + N-1) res[idx] = 0.0;
else res[idx] = (4*u[idx] - u[idx+1] - u[idx-1] - u[N*(i-1)+j] - u[N*(i+1)+j]) / (h*h);
printf("A_v: %f at i:%d, j:%d\n", res[idx], i, j);
}
#define BLOCK_SIZE 1024
__global__ void l2_norm_shift_kernel(double* sum, const double* a, double shift, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = (a[idx]-shift) * (a[idx]-shift);
else smem[threadIdx.x] = 0;
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (threadIdx.x < s) {
smem[threadIdx.x] += smem[threadIdx.x + s];
}
__syncthreads();
}
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
__global__ void reduction_kernel(double* sum, const double*a, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
// each thread reads data from global into shared memory
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
for(int s = 1; s < blockDim.x; s *= 2) {
if(threadIdx.x % (2*s) == 0)
smem[threadIdx.x] += smem[threadIdx.x + s];
__syncthreads();
}
if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x];
}
int main() {
long N = 512;
long it = 10*3000;
//const long BLOCK_SIZE = 1024;
double h = 1./(N+1.);
double *u, *u_0, *A_u;
cudaMallocHost((void**) &u, N*N*sizeof(double));
cudaMallocHost((void**) &u_0, N*N*sizeof(double));
cudaMallocHost((void**) &A_u, N*N*sizeof(double));
#pragma omp parallel for schedule(static)
for (long i=0; i<N*N; i++) {
u[i] = 0.0;
u_0[i] = 0.0;
}
double *v_0, *v, *A_v;
cudaMalloc(&v_0, N*N*sizeof(double));
cudaMalloc(&v, N*N*sizeof(double));
cudaMalloc(&A_v, N*N*sizeof(double));
cudaMemcpyAsync(v_0, u_0, N*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(v, u, N*N*sizeof(double), cudaMemcpyHostToDevice);
double tt = omp_get_wtime();
printf("CPU Jacobi2D\n");
printf("iteration residue\n");
printf("---------------------\n");
for (long k=1; k<it+1; k++) {
cudaMemcpy(u_0, u, N*N*sizeof(double), cudaMemcpyHostToHost);
jacobi_cpu(u, u_0, h, N);
if ((k % 3000) == 0) {
calculate_A_times_u(u, A_u, N, h);
double res = l2_norm_shift(A_u, 1., N);
printf(" %ld %f\n", k, res);
}
}
printf("CPU: %f it/s\n\n\n\n", 1.0*it / (omp_get_wtime()-tt));
double *res_d;
long N_work = 1;
for (long i= (N*N+BLOCK_SIZE-1)/BLOCK_SIZE; i>1; i = (i+BLOCK_SIZE-1)/BLOCK_SIZE) N_work += i;
cudaMalloc(&res_d, N_work*sizeof(double));
//printf("N_work: %ld\n", N_work);
tt = omp_get_wtime();
printf("GPU Jacobi2D\n");
printf("iteration residue\n");
printf("---------------------\n");
for (long k=1; k<it+1; k++) {
cudaMemcpy(v_0, v, N*N*sizeof(double), cudaMemcpyDeviceToDevice);
jacobi_kernel<<<N*N/BLOCK_SIZE, BLOCK_SIZE>>>(v, v_0, h, N);
cudaDeviceSynchronize();
if (((k % 3000) == 0) && (k > it)) { // code does not work for some reason
A_u_kernel<<<N*N/BLOCK_SIZE, BLOCK_SIZE>>>(A_v, v, N, h);
// calculate residue as in other task
long Nb = (N*N+BLOCK_SIZE-1)/BLOCK_SIZE;
l2_norm_shift_kernel<<<Nb,BLOCK_SIZE>>>(res_d, A_v, 1., N*N);
while (Nb > 1) {
//printf("hi\n");
long N = Nb;
Nb = (Nb+BLOCK_SIZE-1)/BLOCK_SIZE;
reduction_kernel<<<Nb,BLOCK_SIZE>>>(res_d+N, res_d, N);
res_d += N;
}
double res;
cudaMemcpyAsync(&res, &res_d, 1*sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf(" %ld %f\n", k, res);
}
}
printf("GPU: %f it/s\n", 1.0*it / (omp_get_wtime()-tt));
cudaMemcpyAsync(u_0, v, N*N*sizeof(double), cudaMemcpyDeviceToHost);
printf("Difference is %f\n", dist(u,u_0, N));
cudaFree(v_0);
cudaFree(v);
cudaFree(res_d);
cudaFree(A_v);
cudaFreeHost(u_0);
cudaFreeHost(A_u);
cudaFreeHost(u);
return 0;
}
|
4b588292ebe74943d30d84782f42c3e92c2e1223.hip | // !!! This is a file automatically generated by hipify!!!
#include "common.h"
template<typename T>
device_vector_holder<T>::~device_vector_holder(){
__free();
}
template<typename T>
void device_vector_holder<T>::__free(){
if(valid){
hipFree(__gpu_memory);
valid = false;
__size = 0;
}
}
template<typename T>
device_vector_holder<T>::device_vector_holder(size_t size_, T init)
{
__malloc(size_);
thrust::fill(begin_thr(), end_thr(), init);
}
template<typename T>
void device_vector_holder<T>::__malloc(size_t size_){
if(valid) __free();
hipMalloc((void**)&__gpu_memory, size_ * sizeof(T));
__size = size_;
valid = true;
}
template<typename T>
device_vector_holder<T>::device_vector_holder(size_t size_){
__malloc(size_);
}
template class device_vector_holder<Vec3f>;
#include "pcd_scene/pcd_scene.h"
template class device_vector_holder<Node_kdtree>;
| 4b588292ebe74943d30d84782f42c3e92c2e1223.cu | #include "common.h"
template<typename T>
device_vector_holder<T>::~device_vector_holder(){
__free();
}
template<typename T>
void device_vector_holder<T>::__free(){
if(valid){
cudaFree(__gpu_memory);
valid = false;
__size = 0;
}
}
template<typename T>
device_vector_holder<T>::device_vector_holder(size_t size_, T init)
{
__malloc(size_);
thrust::fill(begin_thr(), end_thr(), init);
}
template<typename T>
void device_vector_holder<T>::__malloc(size_t size_){
if(valid) __free();
cudaMalloc((void**)&__gpu_memory, size_ * sizeof(T));
__size = size_;
valid = true;
}
template<typename T>
device_vector_holder<T>::device_vector_holder(size_t size_){
__malloc(size_);
}
template class device_vector_holder<Vec3f>;
#include "pcd_scene/pcd_scene.h"
template class device_vector_holder<Node_kdtree>;
|
89c8e312ad7b46b86e97184084ab43e249cf4d23.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <THHUNN/THHUNN.h>
#include <THHUNN/common.h>
#include <THH/THHTensor.hpp>
#include <THHUNN/upsampling.h>
#include <THH/THHDeviceTensor.cuh>
#include <THH/THHDeviceTensorUtils.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHAtomics.cuh>
template<typename Dtype, typename Acctype>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void nearest_neighbor_5d_kernel(
const int n,
const THCDeviceTensor<Dtype, 5> data1,
THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
const float depth_scale = (float) depth1 / (float) depth2;
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int d2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int d1 = d2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][d1][h1][w1];
data2[n][c][d2][h2][w2] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
const int d1 = nearest_neighbor_compute_source_index(depth_scale, d2, depth1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][d1][h1][w1];
data2[n][c][d2][h2][w2] = val;
}
}
}
}
// Backward operation
template <typename Dtype, typename Acctype>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void nearest_neighbor_5d_kernel_backward(
const int n,
THCDeviceTensor<Dtype, 5> data1,
const THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
const float depth_scale = (float) depth1 / (float) depth2;
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int d2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int d1 = d2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][d1][h1][w1];
data1[n][c][d2][h2][w2] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
const int d1 = nearest_neighbor_compute_source_index(depth_scale, d2, depth1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][d2][h2][w2];
atomicAdd(data1[n][c][d1][h1][w1].data(), val);
}
}
}
}
#include <THHUNN/generic/VolumetricUpSamplingNearest.hip>
#include <THH/THHGenerateFloatTypes.h>
| 89c8e312ad7b46b86e97184084ab43e249cf4d23.cu | #include <THCUNN/THCUNN.h>
#include <THCUNN/common.h>
#include <THC/THCTensor.hpp>
#include <THCUNN/upsampling.h>
#include <THC/THCDeviceTensor.cuh>
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCAtomics.cuh>
template<typename Dtype, typename Acctype>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void nearest_neighbor_5d_kernel(
const int n,
const THCDeviceTensor<Dtype, 5> data1,
THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
const float depth_scale = (float) depth1 / (float) depth2;
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int d2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int d1 = d2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][d1][h1][w1];
data2[n][c][d2][h2][w2] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
const int d1 = nearest_neighbor_compute_source_index(depth_scale, d2, depth1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][d1][h1][w1];
data2[n][c][d2][h2][w2] = val;
}
}
}
}
// Backward operation
template <typename Dtype, typename Acctype>
#ifdef __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_1(1024)
#endif
__global__ void nearest_neighbor_5d_kernel_backward(
const int n,
THCDeviceTensor<Dtype, 5> data1,
const THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
const float depth_scale = (float) depth1 / (float) depth2;
const float height_scale = (float) height1 / (float) height2;
const float width_scale = (float) width1 / (float) width2;
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int d2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int d1 = d2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][d1][h1][w1];
data1[n][c][d2][h2][w2] = val;
}
}
return;
}
//
const int h1 = nearest_neighbor_compute_source_index(height_scale, h2, height1);
const int w1 = nearest_neighbor_compute_source_index(width_scale, w2, width1);
const int d1 = nearest_neighbor_compute_source_index(depth_scale, d2, depth1);
for (int n = 0; n < batchsize; n++) {
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][d2][h2][w2];
atomicAdd(data1[n][c][d1][h1][w1].data(), val);
}
}
}
}
#include <THCUNN/generic/VolumetricUpSamplingNearest.cu>
#include <THC/THCGenerateFloatTypes.h>
|
c387778567f8dd9e28f42ee69155420bb20bc843.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdafx.h"
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
using namespace std;
__global__ void myFirstKernel()
{
}
void main()
{
hipLaunchKernelGGL(( myFirstKernel), dim3(1),dim3(1) , 0, 0, );
cin.get();
}
| c387778567f8dd9e28f42ee69155420bb20bc843.cu | #include "stdafx.h"
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
__global__ void myFirstKernel()
{
}
void main()
{
myFirstKernel<<< 1,1 >>>();
cin.get();
}
|
322b0ade2b2efa98f2bf7f946e06c40da946832a.hip | // !!! This is a file automatically generated by hipify!!!
/*
* A program that compare performance of gfft and cuFFT library
* Test the speed and accuracy of FP16 and FP32 calculation
*/
// C library, CUDA runtime, helpers, and utilities
#include "../util/my_include.h"
#include <vector>
// gfft
#include "../util/gfft_using_fft4.h"
// CUFFT
#include <hipfft.h>
#include <hipfftXt.h>
typedef half2 Chalf;
typedef float2 Csingle;
const float NORM = 1.0f;
const int BATCH = 1;
const int SIZE = 4;
const int BLOCK_SIZE = 32;
const int DISPLAY_DATA = 1;
const int DEVICE = 0;
#define __START__ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0);
#define __STOP__(_V) hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&duration, start, stop); _V.push_back(duration); hipEventDestroy(start); hipEventDestroy(stop);
float show_mean(std::vector<float> v)
{
float sum = 0;
for (int i = 0; i < v.size(); i++)
sum += v[i];
return sum / v.size();
}
int cuFFT32(int N, int B, Csingle* X, Csingle* FX){
// Allocate unified momory for input and output
int mem_size = N * B *sizeof(Csingle);
Csingle *d_idata, *d_odata;
checkCudaErrors(hipMallocManaged((void **) &d_idata, mem_size));
checkCudaErrors(hipMallocManaged((void **) &d_odata, mem_size));
// Copy input data to memory
checkCudaErrors(hipMemcpy(d_idata, X, mem_size, hipMemcpyHostToDevice));
// cuFFT plan
hipfftResult result;
hipfftHandle plan;
size_t workSize;
long long int input_size_long = N;
result = hipfftCreate(&plan);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
HIP_C_32F, NULL, 1, 1, HIP_C_32F, B, \
&workSize, HIP_C_32F);
if (result != HIPFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT execution
result = cufftXtExec(plan, reinterpret_cast<hipfftComplex *>(d_idata), \
reinterpret_cast<hipfftComplex *>(d_odata), \
HIPFFT_FORWARD);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Copy Device memory to output
checkCudaErrors(hipMemcpy(FX, d_odata, mem_size, hipMemcpyDeviceToHost));
// Clean up content and memory
hipfftDestroy(plan);
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
return 0;
}
int cuFFT16(int N, int B, Chalf* X, Chalf* FX){
// Allocate unified momory for input and output
int mem_size = N * B *sizeof(Chalf);
Chalf *d_idata, *d_odata;
checkCudaErrors(hipMallocManaged((void **) &d_idata, mem_size));
checkCudaErrors(hipMallocManaged((void **) &d_odata, mem_size));
// Copy input data to memory
if (DISPLAY_DATA == 1){
printf("___fft16____input:\n");
for (int j = 0; j < N * B; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, (float)X[j].x, (float)X[j].y);
}
}
checkCudaErrors(hipMemcpy(d_idata, X, mem_size, hipMemcpyHostToDevice));
// cuFFT plan
hipfftResult result;
hipfftHandle plan;
size_t workSize;
long long int input_size_long = N;
result = hipfftCreate(&plan);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
HIP_C_16F, NULL, 1, 1, HIP_C_16F, B, \
&workSize, HIP_C_16F);
if (result != HIPFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT execution
result = cufftXtExec(plan, reinterpret_cast<hipfftComplex *>(d_idata), \
reinterpret_cast<hipfftComplex *>(d_odata), \
HIPFFT_FORWARD);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Copy Device memory to output
checkCudaErrors(hipMemcpy(FX, d_odata, mem_size, hipMemcpyDeviceToHost));
if (DISPLAY_DATA == 1){
printf("___fft16____output:\n");
for (int j = 0; j < N * B; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, (float)FX[j].x, (float)FX[j].y);
}
}
// Clean up content and memory
hipfftDestroy(plan);
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
return 0;
}
int run_test_FP16(int input_size){
printf("[cuFFT16] is starting...\n");
// Initialize the memory for the input data
int mem_size = input_size*sizeof(Chalf);
Chalf *h_idata = (Chalf *)malloc(mem_size);
for (unsigned int i = 0; i < input_size; i++) {
h_idata[i].x = rand() / (0.5 * static_cast<float>(RAND_MAX)) - 1;
h_idata[i].y = rand() / (0.5 * static_cast<float>(RAND_MAX)) - 1;
}
if (input_size == 4) {
h_idata[0].x = 1.0f; h_idata[0].y = 2.0f;
h_idata[1].x = 0.0f; h_idata[1].y = 0.0f;
h_idata[2].x = 0.0f; h_idata[2].y = 1.0f;
h_idata[3].x = -1.0f; h_idata[3].y = 0.0f;
}
if (DISPLAY_DATA == 1) {
printf("Input data: \n");
for (unsigned int i = 0; i < input_size; i++) {
printf("x[%d]=(%.2f, %.2f); \n", i, (float)h_idata[i].x, (float)h_idata[i].y);
}
printf("\n");
}
// Allocate device momory for input and output
Chalf *d_idata, *d_odata;
checkCudaErrors(hipMalloc((void **) &d_idata, mem_size));
checkCudaErrors(hipMalloc((void **) &d_odata, mem_size));
// Copy host data to device
checkCudaErrors(hipMemcpy(d_idata, h_idata, mem_size, hipMemcpyHostToDevice));
// cuFFT plan
hipfftResult result;
hipfftHandle plan;
size_t workSize;
long long int input_size_long = input_size;
result = hipfftCreate(&plan);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
HIP_C_16F, NULL, 1, 1, HIP_C_16F, 1, \
&workSize, HIP_C_16F);
if (result != HIPFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT warm-up execution
result = cufftXtExec(plan, reinterpret_cast<hipfftComplex *>(d_idata), \
reinterpret_cast<hipfftComplex *>(d_odata), \
HIPFFT_FORWARD);
if (result != HIPFFT_SUCCESS)
{
printf("hipfftExecC2C (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Measure execution time
hipDeviceSynchronize();
// Allocate CUDA events
hipEvent_t start;
checkCudaErrors(hipEventCreate(&start));
hipEvent_t stop;
checkCudaErrors(hipEventCreate(&stop));
// Record the start event
checkCudaErrors(hipEventRecord(start, NULL));
// Repeatedly execute cuFFT
int nIter = 300;
for (int i = 0; i < nIter; i++){
result = cufftXtExec(plan, reinterpret_cast<hipfftComplex *>(d_idata), \
reinterpret_cast<hipfftComplex *>(d_odata), \
HIPFFT_FORWARD);
}
// Record the stop event
checkCudaErrors(hipEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(hipEventSynchronize(stop));
// Calculate performance
float msecTotal = 0.0f;
checkCudaErrors(hipEventElapsedTime(&msecTotal, start, stop));
float msecPerFFT = msecTotal / nIter;
// Copy Device memory to host
Chalf *h_odata = (Chalf *)malloc(mem_size);
checkCudaErrors(hipMemcpy(h_odata, d_odata, mem_size, hipMemcpyDeviceToHost));
// Print result
if (DISPLAY_DATA == 1) {
printf("FFT result: \n");
for (unsigned int i = 0; i < input_size; i++) {
printf("x[%d]=(%.2f, %.2f); \n", i, (float)h_odata[i].x, (float)h_odata[i].y);
}
printf("\n");
}
// Print the performance
printf("Performance of cuFFT16: Problem size= %d, Time= %.5f msec\n", \
input_size,
msecPerFFT);
// Clean up content and memory
checkCudaErrors(hipEventDestroy(start));
checkCudaErrors(hipEventDestroy(stop));
hipfftDestroy(plan);
checkCudaErrors(hipFree(d_idata));
checkCudaErrors(hipFree(d_odata));
free(h_idata);
free(h_odata);
return 0;
}
int main(int argc, char **argv)
{
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?") ||
checkCmdLineFlag(argc, (const char **)argv, "h")) {
printf("Usage: -norm=upper_bound (Max norm of input elements)"
" -n=size (Input vector size)\n"
" -batch=batch_size (Number of input vectors)\n"
" -bs=block_size (Number of threads in a block)\n"
" -display=show_result (0 or 1) \n"
" -device=ID (ID >= 0 for deviceID)\n");
exit(EXIT_SUCCESS);
}
// Get and set parameter
//// Norm
float norm = NORM;
if (checkCmdLineFlag(argc, (const char **)argv, "norm")) {
norm = getCmdLineArgumentInt(argc, (const char **)argv, "norm");
}
//// Input size
int n = SIZE;
if (checkCmdLineFlag(argc, (const char **)argv, "n")) {
n = getCmdLineArgumentInt(argc, (const char **)argv, "n");
}
//// Batch size
int batch = BATCH;
if (checkCmdLineFlag(argc, (const char **)argv, "batch")) {
batch = getCmdLineArgumentInt(argc, (const char **)argv, "batch");
}
//// Block size
int bs = BLOCK_SIZE;
if (checkCmdLineFlag(argc, (const char **)argv, "bs")) {
bs = getCmdLineArgumentInt(argc, (const char **)argv, "bs");
}
//// Result display mode
int display = DISPLAY_DATA;
if (checkCmdLineFlag(argc, (const char **)argv, "display")) {
display = getCmdLineArgumentInt(argc, (const char **)argv, "display");
}
//// Device ID by defualt is 0
int device = DEVICE;
if (checkCmdLineFlag(argc, (const char **)argv, "device")) {
device = getCmdLineArgumentInt(argc, (const char **)argv, "device");
hipSetDevice(device);
}
hipError_t error;
hipDeviceProp_t deviceProp;
error = hipGetDevice(&device);
if (error != hipSuccess)
{
printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
error = hipGetDeviceProperties(&deviceProp, device);
if (deviceProp.computeMode == hipComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", device, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Start program
printf("Problem size = %d, batch size = %d\n", n, batch);
printf("[Testing of gfft and cuFFT] - Starting...\n");
// Define event, result data structure
hipEvent_t start, stop;
std::vector<float> cuFFT32Run, cuFFT16Run, gfftRun;
std::vector<float> cuFFT16Error, gfftError;
float duration, error1, error2;
// Define input and output
float X_re[n * batch], X_im[n * batch], FX_re[n * batch], FX_im[n * batch];
Csingle X_32[n * batch], FX_32[n * batch];
Chalf X_16[n * batch], FX_16[n * batch];
// Run experiment
for (int i = 0; i < 1; i++){
// Initialize input
srand(time(NULL));
for (int j = 0; j < n * batch; j++){
X_re[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
X_im[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
X_re[j] = (float)j;
X_im[j] = 0.0f;
X_32[j].x = X_re[j]; X_32[j].y = X_im[j];
X_16[j].x = (half)X_re[j]; X_16[j].y = (half)X_im[j];
if (display == 1){
printf("X[%d] = (%.10f, %.10f) \n", j, X_re[j], X_im[j]);
}
}
// Call cuFFT32
__START__
cuFFT32(n, batch, X_32, FX_32);
__STOP__(cuFFT32Run)
// Call cuFFT16
__START__
cuFFT16(n, batch, X_16, FX_16);
__STOP__(cuFFT16Run)
// Call gfft
__START__
gfft(n, batch, X_re, X_im, FX_re, FX_im);
__STOP__(gfftRun)
if (display == 1){
printf("Result of cuFFT32:\n");
for (int j = 0; j < n * batch; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, FX_32[j].x, FX_32[j].y);
}
printf("Result of cuFFT16:\n");
for (int j = 0; j < n * batch; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, (float)FX_16[j].x, (float)FX_16[j].y);
}
printf("Result of gfft:\n");
for (int j = 0; j < n * batch; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, FX_re[j], FX_im[j]);
}
}
// Calculate error
for (int j = 0; j < n * batch; j++){
error1 += (float)fabs((float)(FX_16[j].x) - FX_32[j].x);
error1 += (float)fabs((float)(FX_16[j].y) - FX_32[j].y);
error2 += (float)fabs(FX_re[j] - FX_32[j].x);
error2 += (float)fabs(FX_im[j] - FX_32[j].y);
}
cuFFT16Error.push_back(error1 / (n * batch));
gfftError.push_back(error2 / (n * batch));
}
printf("Time of cuFFT32: %f milliseconds\n", show_mean(cuFFT32Run));
printf("Time of cuFFT16: %f milliseconds, error = %.10f\n", show_mean(cuFFT16Run), show_mean(cuFFT16Error)/norm);
printf("Time of gfft: %f milliseconds, error = %.10f\n", show_mean(gfftRun), show_mean(gfftError)/norm);
exit(0);
}
| 322b0ade2b2efa98f2bf7f946e06c40da946832a.cu | /*
* A program that compare performance of gfft and cuFFT library
* Test the speed and accuracy of FP16 and FP32 calculation
*/
// C library, CUDA runtime, helpers, and utilities
#include "../util/my_include.h"
#include <vector>
// gfft
#include "../util/gfft_using_fft4.h"
// CUFFT
#include <cufft.h>
#include <cufftXt.h>
typedef half2 Chalf;
typedef float2 Csingle;
const float NORM = 1.0f;
const int BATCH = 1;
const int SIZE = 4;
const int BLOCK_SIZE = 32;
const int DISPLAY_DATA = 1;
const int DEVICE = 0;
#define __START__ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0);
#define __STOP__(_V) cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&duration, start, stop); _V.push_back(duration); cudaEventDestroy(start); cudaEventDestroy(stop);
float show_mean(std::vector<float> v)
{
float sum = 0;
for (int i = 0; i < v.size(); i++)
sum += v[i];
return sum / v.size();
}
int cuFFT32(int N, int B, Csingle* X, Csingle* FX){
// Allocate unified momory for input and output
int mem_size = N * B *sizeof(Csingle);
Csingle *d_idata, *d_odata;
checkCudaErrors(cudaMallocManaged((void **) &d_idata, mem_size));
checkCudaErrors(cudaMallocManaged((void **) &d_odata, mem_size));
// Copy input data to memory
checkCudaErrors(cudaMemcpy(d_idata, X, mem_size, cudaMemcpyHostToDevice));
// cuFFT plan
cufftResult result;
cufftHandle plan;
size_t workSize;
long long int input_size_long = N;
result = cufftCreate(&plan);
if (result != CUFFT_SUCCESS)
{
printf("cufftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
CUDA_C_32F, NULL, 1, 1, CUDA_C_32F, B, \
&workSize, CUDA_C_32F);
if (result != CUFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT execution
result = cufftXtExec(plan, reinterpret_cast<cufftComplex *>(d_idata), \
reinterpret_cast<cufftComplex *>(d_odata), \
CUFFT_FORWARD);
if (result != CUFFT_SUCCESS)
{
printf("cufftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Copy Device memory to output
checkCudaErrors(cudaMemcpy(FX, d_odata, mem_size, cudaMemcpyDeviceToHost));
// Clean up content and memory
cufftDestroy(plan);
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
return 0;
}
int cuFFT16(int N, int B, Chalf* X, Chalf* FX){
// Allocate unified momory for input and output
int mem_size = N * B *sizeof(Chalf);
Chalf *d_idata, *d_odata;
checkCudaErrors(cudaMallocManaged((void **) &d_idata, mem_size));
checkCudaErrors(cudaMallocManaged((void **) &d_odata, mem_size));
// Copy input data to memory
if (DISPLAY_DATA == 1){
printf("___fft16____input:\n");
for (int j = 0; j < N * B; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, (float)X[j].x, (float)X[j].y);
}
}
checkCudaErrors(cudaMemcpy(d_idata, X, mem_size, cudaMemcpyHostToDevice));
// cuFFT plan
cufftResult result;
cufftHandle plan;
size_t workSize;
long long int input_size_long = N;
result = cufftCreate(&plan);
if (result != CUFFT_SUCCESS)
{
printf("cufftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
CUDA_C_16F, NULL, 1, 1, CUDA_C_16F, B, \
&workSize, CUDA_C_16F);
if (result != CUFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT execution
result = cufftXtExec(plan, reinterpret_cast<cufftComplex *>(d_idata), \
reinterpret_cast<cufftComplex *>(d_odata), \
CUFFT_FORWARD);
if (result != CUFFT_SUCCESS)
{
printf("cufftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Copy Device memory to output
checkCudaErrors(cudaMemcpy(FX, d_odata, mem_size, cudaMemcpyDeviceToHost));
if (DISPLAY_DATA == 1){
printf("___fft16____output:\n");
for (int j = 0; j < N * B; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, (float)FX[j].x, (float)FX[j].y);
}
}
// Clean up content and memory
cufftDestroy(plan);
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
return 0;
}
int run_test_FP16(int input_size){
printf("[cuFFT16] is starting...\n");
// Initialize the memory for the input data
int mem_size = input_size*sizeof(Chalf);
Chalf *h_idata = (Chalf *)malloc(mem_size);
for (unsigned int i = 0; i < input_size; i++) {
h_idata[i].x = rand() / (0.5 * static_cast<float>(RAND_MAX)) - 1;
h_idata[i].y = rand() / (0.5 * static_cast<float>(RAND_MAX)) - 1;
}
if (input_size == 4) {
h_idata[0].x = 1.0f; h_idata[0].y = 2.0f;
h_idata[1].x = 0.0f; h_idata[1].y = 0.0f;
h_idata[2].x = 0.0f; h_idata[2].y = 1.0f;
h_idata[3].x = -1.0f; h_idata[3].y = 0.0f;
}
if (DISPLAY_DATA == 1) {
printf("Input data: \n");
for (unsigned int i = 0; i < input_size; i++) {
printf("x[%d]=(%.2f, %.2f); \n", i, (float)h_idata[i].x, (float)h_idata[i].y);
}
printf("\n");
}
// Allocate device momory for input and output
Chalf *d_idata, *d_odata;
checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size));
checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size));
// Copy host data to device
checkCudaErrors(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice));
// cuFFT plan
cufftResult result;
cufftHandle plan;
size_t workSize;
long long int input_size_long = input_size;
result = cufftCreate(&plan);
if (result != CUFFT_SUCCESS)
{
printf("cufftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \
CUDA_C_16F, NULL, 1, 1, CUDA_C_16F, 1, \
&workSize, CUDA_C_16F);
if (result != CUFFT_SUCCESS)
{
printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// cuFFT warm-up execution
result = cufftXtExec(plan, reinterpret_cast<cufftComplex *>(d_idata), \
reinterpret_cast<cufftComplex *>(d_odata), \
CUFFT_FORWARD);
if (result != CUFFT_SUCCESS)
{
printf("cufftExecC2C (plan) returned error code %d, line(%d)\n", result, __LINE__);
exit(EXIT_FAILURE);
}
// Measure execution time
cudaDeviceSynchronize();
// Allocate CUDA events
cudaEvent_t start;
checkCudaErrors(cudaEventCreate(&start));
cudaEvent_t stop;
checkCudaErrors(cudaEventCreate(&stop));
// Record the start event
checkCudaErrors(cudaEventRecord(start, NULL));
// Repeatedly execute cuFFT
int nIter = 300;
for (int i = 0; i < nIter; i++){
result = cufftXtExec(plan, reinterpret_cast<cufftComplex *>(d_idata), \
reinterpret_cast<cufftComplex *>(d_odata), \
CUFFT_FORWARD);
}
// Record the stop event
checkCudaErrors(cudaEventRecord(stop, NULL));
// Wait for the stop event to complete
checkCudaErrors(cudaEventSynchronize(stop));
// Calculate performance
float msecTotal = 0.0f;
checkCudaErrors(cudaEventElapsedTime(&msecTotal, start, stop));
float msecPerFFT = msecTotal / nIter;
// Copy Device memory to host
Chalf *h_odata = (Chalf *)malloc(mem_size);
checkCudaErrors(cudaMemcpy(h_odata, d_odata, mem_size, cudaMemcpyDeviceToHost));
// Print result
if (DISPLAY_DATA == 1) {
printf("FFT result: \n");
for (unsigned int i = 0; i < input_size; i++) {
printf("x[%d]=(%.2f, %.2f); \n", i, (float)h_odata[i].x, (float)h_odata[i].y);
}
printf("\n");
}
// Print the performance
printf("Performance of cuFFT16: Problem size= %d, Time= %.5f msec\n", \
input_size,
msecPerFFT);
// Clean up content and memory
checkCudaErrors(cudaEventDestroy(start));
checkCudaErrors(cudaEventDestroy(stop));
cufftDestroy(plan);
checkCudaErrors(cudaFree(d_idata));
checkCudaErrors(cudaFree(d_odata));
free(h_idata);
free(h_odata);
return 0;
}
int main(int argc, char **argv)
{
if (checkCmdLineFlag(argc, (const char **)argv, "help") ||
checkCmdLineFlag(argc, (const char **)argv, "?") ||
checkCmdLineFlag(argc, (const char **)argv, "h")) {
printf("Usage: -norm=upper_bound (Max norm of input elements)"
" -n=size (Input vector size)\n"
" -batch=batch_size (Number of input vectors)\n"
" -bs=block_size (Number of threads in a block)\n"
" -display=show_result (0 or 1) \n"
" -device=ID (ID >= 0 for deviceID)\n");
exit(EXIT_SUCCESS);
}
// Get and set parameter
//// Norm
float norm = NORM;
if (checkCmdLineFlag(argc, (const char **)argv, "norm")) {
norm = getCmdLineArgumentInt(argc, (const char **)argv, "norm");
}
//// Input size
int n = SIZE;
if (checkCmdLineFlag(argc, (const char **)argv, "n")) {
n = getCmdLineArgumentInt(argc, (const char **)argv, "n");
}
//// Batch size
int batch = BATCH;
if (checkCmdLineFlag(argc, (const char **)argv, "batch")) {
batch = getCmdLineArgumentInt(argc, (const char **)argv, "batch");
}
//// Block size
int bs = BLOCK_SIZE;
if (checkCmdLineFlag(argc, (const char **)argv, "bs")) {
bs = getCmdLineArgumentInt(argc, (const char **)argv, "bs");
}
//// Result display mode
int display = DISPLAY_DATA;
if (checkCmdLineFlag(argc, (const char **)argv, "display")) {
display = getCmdLineArgumentInt(argc, (const char **)argv, "display");
}
//// Device ID by defualt is 0
int device = DEVICE;
if (checkCmdLineFlag(argc, (const char **)argv, "device")) {
device = getCmdLineArgumentInt(argc, (const char **)argv, "device");
cudaSetDevice(device);
}
cudaError_t error;
cudaDeviceProp deviceProp;
error = cudaGetDevice(&device);
if (error != cudaSuccess)
{
printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
error = cudaGetDeviceProperties(&deviceProp, device);
if (deviceProp.computeMode == cudaComputeModeProhibited)
{
fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n");
exit(EXIT_SUCCESS);
}
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
}
else
{
printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", device, deviceProp.name, deviceProp.major, deviceProp.minor);
}
// Start program
printf("Problem size = %d, batch size = %d\n", n, batch);
printf("[Testing of gfft and cuFFT] - Starting...\n");
// Define event, result data structure
cudaEvent_t start, stop;
std::vector<float> cuFFT32Run, cuFFT16Run, gfftRun;
std::vector<float> cuFFT16Error, gfftError;
float duration, error1, error2;
// Define input and output
float X_re[n * batch], X_im[n * batch], FX_re[n * batch], FX_im[n * batch];
Csingle X_32[n * batch], FX_32[n * batch];
Chalf X_16[n * batch], FX_16[n * batch];
// Run experiment
for (int i = 0; i < 1; i++){
// Initialize input
srand(time(NULL));
for (int j = 0; j < n * batch; j++){
X_re[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
X_im[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm;
X_re[j] = (float)j;
X_im[j] = 0.0f;
X_32[j].x = X_re[j]; X_32[j].y = X_im[j];
X_16[j].x = (half)X_re[j]; X_16[j].y = (half)X_im[j];
if (display == 1){
printf("X[%d] = (%.10f, %.10f) \n", j, X_re[j], X_im[j]);
}
}
// Call cuFFT32
__START__
cuFFT32(n, batch, X_32, FX_32);
__STOP__(cuFFT32Run)
// Call cuFFT16
__START__
cuFFT16(n, batch, X_16, FX_16);
__STOP__(cuFFT16Run)
// Call gfft
__START__
gfft(n, batch, X_re, X_im, FX_re, FX_im);
__STOP__(gfftRun)
if (display == 1){
printf("Result of cuFFT32:\n");
for (int j = 0; j < n * batch; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, FX_32[j].x, FX_32[j].y);
}
printf("Result of cuFFT16:\n");
for (int j = 0; j < n * batch; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, (float)FX_16[j].x, (float)FX_16[j].y);
}
printf("Result of gfft:\n");
for (int j = 0; j < n * batch; j++){
printf("FX[%d] = (%.10f, %.10f) \n", j, FX_re[j], FX_im[j]);
}
}
// Calculate error
for (int j = 0; j < n * batch; j++){
error1 += (float)fabs((float)(FX_16[j].x) - FX_32[j].x);
error1 += (float)fabs((float)(FX_16[j].y) - FX_32[j].y);
error2 += (float)fabs(FX_re[j] - FX_32[j].x);
error2 += (float)fabs(FX_im[j] - FX_32[j].y);
}
cuFFT16Error.push_back(error1 / (n * batch));
gfftError.push_back(error2 / (n * batch));
}
printf("Time of cuFFT32: %f milliseconds\n", show_mean(cuFFT32Run));
printf("Time of cuFFT16: %f milliseconds, error = %.10f\n", show_mean(cuFFT16Run), show_mean(cuFFT16Error)/norm);
printf("Time of gfft: %f milliseconds, error = %.10f\n", show_mean(gfftRun), show_mean(gfftError)/norm);
exit(0);
}
|
443305f54080fcc3984d681fd21cdf99acaa4c16.hip | // !!! This is a file automatically generated by hipify!!!
#include "cupoch/visualization/shader/texture_phong_shader.h"
#include "cupoch/geometry/image.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/utility/color_map.h"
#include "cupoch/utility/console.h"
#include <hip/hip_runtime.h>
#include <cuda_gl_interop.h>
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;
namespace {
struct copy_trianglemesh_functor {
copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals,
const int* triangles, const Eigen::Vector3f* triangle_normals,
const Eigen::Vector2f* triangle_uvs,
RenderOption::MeshShadeOption shade_option)
: vertices_(vertices), vertex_normals_(vertex_normals),
triangles_(triangles), triangle_normals_(triangle_normals),
triangle_uvs_(triangle_uvs), shade_option_(shade_option) {};
const Eigen::Vector3f* vertices_;
const Eigen::Vector3f* vertex_normals_;
const int* triangles_;
const Eigen::Vector3f* triangle_normals_;
const Eigen::Vector2f* triangle_uvs_;
const RenderOption::MeshShadeOption shade_option_;
__device__
thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector2f> operator() (size_t k) const {
int i = k / 3;
int vi = triangles_[k];
if (shade_option_ ==
RenderOption::MeshShadeOption::FlatShade) {
return thrust::make_tuple(vertices_[vi], triangle_normals_[i], triangle_uvs_[k]);
} else {
return thrust::make_tuple(vertices_[vi], vertex_normals_[vi], triangle_uvs_[k]);
}
}
};
}
bool TexturePhongShader::Compile() {
if (CompileShaders(texture_phong_vertex_shader, NULL,
texture_phong_fragment_shader) == false) {
PrintShaderWarning("Compiling shaders failed.");
return false;
}
vertex_position_ = glGetAttribLocation(program_, "vertex_position");
vertex_normal_ = glGetAttribLocation(program_, "vertex_normal");
vertex_uv_ = glGetAttribLocation(program_, "vertex_uv");
MVP_ = glGetUniformLocation(program_, "MVP");
V_ = glGetUniformLocation(program_, "V");
M_ = glGetUniformLocation(program_, "M");
light_position_world_ =
glGetUniformLocation(program_, "light_position_world_4");
light_color_ = glGetUniformLocation(program_, "light_color_4");
light_diffuse_power_ =
glGetUniformLocation(program_, "light_diffuse_power_4");
light_specular_power_ =
glGetUniformLocation(program_, "light_specular_power_4");
light_specular_shininess_ =
glGetUniformLocation(program_, "light_specular_shininess_4");
light_ambient_ = glGetUniformLocation(program_, "light_ambient");
diffuse_texture_ = glGetUniformLocation(program_, "diffuse_texture");
return true;
}
void TexturePhongShader::Release() {
UnbindGeometry(true);
ReleaseProgram();
}
bool TexturePhongShader::BindGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
// If there is already geometry, we first unbind it.
// We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
// rebind the geometry. Note that this approach is slow. If the geometry is
// changing per frame, consider implementing a new ShaderWrapper using
// GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object
// Streaming mechanisms.
UnbindGeometry();
// Prepare data to be passed to GPU
const size_t num_data_size = GetDataSize(geometry);
// Create buffers and bind the geometry
glGenBuffers(1, &vertex_position_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, hipGraphicsMapFlagsNone));
glGenBuffers(1, &vertex_normal_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, hipGraphicsMapFlagsNone));
glGenBuffers(1, &vertex_uv_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(hipGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], vertex_uv_buffer_, hipGraphicsMapFlagsNone));
Eigen::Vector3f* raw_points_ptr;
Eigen::Vector3f* raw_normals_ptr;
Eigen::Vector2f* raw_uvs_ptr;
size_t n_bytes;
cudaSafeCall(hipGraphicsMapResources(3, cuda_graphics_resources_));
cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0]));
cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1]));
cudaSafeCall(hipGraphicsResourceGetMappedPointer((void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[2]));
thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr);
thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr);
thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr);
if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr, dev_uvs_ptr) == false) {
PrintShaderWarning("Binding failed when preparing data.");
return false;
}
Unmap(3);
bound_ = true;
return true;
}
bool TexturePhongShader::RenderGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (PrepareRendering(geometry, option, view) == false) {
PrintShaderWarning("Rendering failed during preparation.");
return false;
}
glUseProgram(program_);
glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data());
glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data());
glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data());
glUniformMatrix4fv(light_position_world_, 1, GL_FALSE,
light_position_world_data_.data());
glUniformMatrix4fv(light_color_, 1, GL_FALSE, light_color_data_.data());
glUniform4fv(light_diffuse_power_, 1, light_diffuse_power_data_.data());
glUniform4fv(light_specular_power_, 1, light_specular_power_data_.data());
glUniform4fv(light_specular_shininess_, 1,
light_specular_shininess_data_.data());
glUniform4fv(light_ambient_, 1, light_ambient_data_.data());
glUniform1i(diffuse_texture_, 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_);
glEnableVertexAttribArray(vertex_position_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(vertex_normal_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_);
glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(vertex_uv_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_);
glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
glDisableVertexAttribArray(vertex_position_);
glDisableVertexAttribArray(vertex_normal_);
glDisableVertexAttribArray(vertex_uv_);
return true;
}
void TexturePhongShader::UnbindGeometry(bool finalize) {
if (bound_) {
if (!finalize) {
cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[0]));
cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[1]));
cudaSafeCall(hipGraphicsUnregisterResource(cuda_graphics_resources_[2]));
}
glDeleteBuffers(1, &vertex_position_buffer_);
glDeleteBuffers(1, &vertex_normal_buffer_);
glDeleteBuffers(1, &vertex_uv_buffer_);
glDeleteTextures(1, &diffuse_texture_buffer_);
bound_ = false;
}
}
void TexturePhongShader::SetLighting(const ViewControl &view,
const RenderOption &option) {
const auto &box = view.GetBoundingBox();
light_position_world_data_.setOnes();
light_color_data_.setOnes();
for (int i = 0; i < 4; i++) {
light_position_world_data_.block<3, 1>(0, i) =
box.GetCenter().cast<GLfloat>() +
(float)box.GetMaxExtent() *
((float)option.light_position_relative_[i](0) *
view.GetRight() +
(float)option.light_position_relative_[i](1) *
view.GetUp() +
(float)option.light_position_relative_[i](2) *
view.GetFront());
light_color_data_.block<3, 1>(0, i) =
option.light_color_[i].cast<GLfloat>();
}
if (option.light_on_) {
light_diffuse_power_data_ =
Eigen::Vector4f(option.light_diffuse_power_).cast<GLfloat>();
light_specular_power_data_ =
Eigen::Vector4f(option.light_specular_power_).cast<GLfloat>();
light_specular_shininess_data_ =
Eigen::Vector4f(option.light_specular_shininess_)
.cast<GLfloat>();
light_ambient_data_.block<3, 1>(0, 0) =
option.light_ambient_color_.cast<GLfloat>();
light_ambient_data_(3) = 1.0f;
} else {
light_diffuse_power_data_ = gl_helper::GLVector4f::Zero();
light_specular_power_data_ = gl_helper::GLVector4f::Zero();
light_specular_shininess_data_ = gl_helper::GLVector4f::Ones();
light_ambient_data_ = gl_helper::GLVector4f(1.0f, 1.0f, 1.0f, 1.0f);
}
}
bool TexturePhongShaderForTriangleMesh::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
if (option.mesh_show_back_face_) {
glDisable(GL_CULL_FACE);
} else {
glEnable(GL_CULL_FACE);
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
if (option.mesh_show_wireframe_) {
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(1.0, 1.0);
} else {
glDisable(GL_POLYGON_OFFSET_FILL);
}
SetLighting(view, option);
return true;
}
bool TexturePhongShaderForTriangleMesh::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector3f> &normals,
thrust::device_ptr<Eigen::Vector2f> &uvs) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
const geometry::TriangleMesh &mesh =
(const geometry::TriangleMesh &)geometry;
if (mesh.HasTriangles() == false) {
PrintShaderWarning("Binding failed with empty triangle mesh.");
return false;
}
if (mesh.HasTriangleNormals() == false ||
mesh.HasVertexNormals() == false) {
PrintShaderWarning("Binding failed because mesh has no normals.");
PrintShaderWarning("Call ComputeVertexNormals() before binding.");
return false;
}
copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()),
thrust::raw_pointer_cast(mesh.vertex_normals_.data()),
(int*)(thrust::raw_pointer_cast(mesh.triangles_.data())),
thrust::raw_pointer_cast(mesh.triangle_normals_.data()),
thrust::raw_pointer_cast(mesh.triangle_uvs_.data()),
option.mesh_shade_option_);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3),
make_tuple_iterator(points, normals, uvs), func);
    glGenTextures(1, &diffuse_texture_buffer_);  // generate the texture object; diffuse_texture_ holds the sampler uniform location
glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_);
GLenum format;
switch (mesh.texture_.num_of_channels_) {
case 1: {
format = GL_RED;
break;
}
case 3: {
format = GL_RGB;
break;
}
case 4: {
format = GL_RGBA;
break;
}
default: {
utility::LogWarning("Unknown format, abort!");
return false;
}
}
GLenum type;
switch (mesh.texture_.bytes_per_channel_) {
case 1: {
type = GL_UNSIGNED_BYTE;
break;
}
case 2: {
type = GL_UNSIGNED_SHORT;
break;
}
case 4: {
type = GL_FLOAT;
break;
}
default: {
utility::LogWarning("Unknown format, abort!");
return false;
}
}
glTexImage2D(GL_TEXTURE_2D, 0, format, mesh.texture_.width_,
mesh.texture_.height_, 0, format, type,
thrust::raw_pointer_cast(mesh.texture_.data_.data()));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
draw_arrays_mode_ = GL_TRIANGLES;
draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3);
return true;
}
size_t TexturePhongShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const {
return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3;
} | 443305f54080fcc3984d681fd21cdf99acaa4c16.cu | #include "cupoch/visualization/shader/texture_phong_shader.h"
#include "cupoch/geometry/image.h"
#include "cupoch/geometry/pointcloud.h"
#include "cupoch/geometry/trianglemesh.h"
#include "cupoch/visualization/shader/shader.h"
#include "cupoch/visualization/utility/color_map.h"
#include "cupoch/utility/console.h"
#include <cuda_runtime.h>
#include <cuda_gl_interop.h>
using namespace cupoch;
using namespace cupoch::visualization;
using namespace cupoch::visualization::glsl;
namespace {
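// Expands the indexed triangle list into flat per-corner arrays suitable for
// glDrawArrays: element k addresses corner k % 3 of triangle k / 3, taking the
// normal from the triangle or from the vertex depending on the shading option.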
struct copy_trianglemesh_functor {
copy_trianglemesh_functor(const Eigen::Vector3f* vertices, const Eigen::Vector3f* vertex_normals,
const int* triangles, const Eigen::Vector3f* triangle_normals,
const Eigen::Vector2f* triangle_uvs,
RenderOption::MeshShadeOption shade_option)
: vertices_(vertices), vertex_normals_(vertex_normals),
triangles_(triangles), triangle_normals_(triangle_normals),
triangle_uvs_(triangle_uvs), shade_option_(shade_option) {};
const Eigen::Vector3f* vertices_;
const Eigen::Vector3f* vertex_normals_;
const int* triangles_;
const Eigen::Vector3f* triangle_normals_;
const Eigen::Vector2f* triangle_uvs_;
const RenderOption::MeshShadeOption shade_option_;
__device__
thrust::tuple<Eigen::Vector3f, Eigen::Vector3f, Eigen::Vector2f> operator() (size_t k) const {
int i = k / 3;
int vi = triangles_[k];
if (shade_option_ ==
RenderOption::MeshShadeOption::FlatShade) {
return thrust::make_tuple(vertices_[vi], triangle_normals_[i], triangle_uvs_[k]);
} else {
return thrust::make_tuple(vertices_[vi], vertex_normals_[vi], triangle_uvs_[k]);
}
}
};
}
bool TexturePhongShader::Compile() {
if (CompileShaders(texture_phong_vertex_shader, NULL,
texture_phong_fragment_shader) == false) {
PrintShaderWarning("Compiling shaders failed.");
return false;
}
vertex_position_ = glGetAttribLocation(program_, "vertex_position");
vertex_normal_ = glGetAttribLocation(program_, "vertex_normal");
vertex_uv_ = glGetAttribLocation(program_, "vertex_uv");
MVP_ = glGetUniformLocation(program_, "MVP");
V_ = glGetUniformLocation(program_, "V");
M_ = glGetUniformLocation(program_, "M");
light_position_world_ =
glGetUniformLocation(program_, "light_position_world_4");
light_color_ = glGetUniformLocation(program_, "light_color_4");
light_diffuse_power_ =
glGetUniformLocation(program_, "light_diffuse_power_4");
light_specular_power_ =
glGetUniformLocation(program_, "light_specular_power_4");
light_specular_shininess_ =
glGetUniformLocation(program_, "light_specular_shininess_4");
light_ambient_ = glGetUniformLocation(program_, "light_ambient");
diffuse_texture_ = glGetUniformLocation(program_, "diffuse_texture");
return true;
}
void TexturePhongShader::Release() {
UnbindGeometry(true);
ReleaseProgram();
}
bool TexturePhongShader::BindGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
// If there is already geometry, we first unbind it.
// We use GL_STATIC_DRAW. When geometry changes, we clear buffers and
// rebind the geometry. Note that this approach is slow. If the geometry is
// changing per frame, consider implementing a new ShaderWrapper using
// GL_STREAM_DRAW, and replace UnbindGeometry() with Buffer Object
// Streaming mechanisms.
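    // A rough sketch of that streaming path (not what this class does), assuming
    // a hypothetical per-frame host pointer `updated_positions` and byte count
    // `data_bytes` for the position buffer:
    //   glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
    //   glBufferData(GL_ARRAY_BUFFER, data_bytes, NULL, GL_STREAM_DRAW);      // orphan the old storage
    //   glBufferSubData(GL_ARRAY_BUFFER, 0, data_bytes, updated_positions);   // upload this frame's data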
UnbindGeometry();
// Prepare data to be passed to GPU
const size_t num_data_size = GetDataSize(geometry);
// Create buffers and bind the geometry
glGenBuffers(1, &vertex_position_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[0], vertex_position_buffer_, cudaGraphicsMapFlagsNone));
glGenBuffers(1, &vertex_normal_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector3f), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[1], vertex_normal_buffer_, cudaGraphicsMapFlagsNone));
glGenBuffers(1, &vertex_uv_buffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_);
glBufferData(GL_ARRAY_BUFFER, num_data_size * sizeof(Eigen::Vector2f), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
cudaSafeCall(cudaGraphicsGLRegisterBuffer(&cuda_graphics_resources_[2], vertex_uv_buffer_, cudaGraphicsMapFlagsNone));
Eigen::Vector3f* raw_points_ptr;
Eigen::Vector3f* raw_normals_ptr;
Eigen::Vector2f* raw_uvs_ptr;
size_t n_bytes;
cudaSafeCall(cudaGraphicsMapResources(3, cuda_graphics_resources_));
cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_points_ptr, &n_bytes, cuda_graphics_resources_[0]));
cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_normals_ptr, &n_bytes, cuda_graphics_resources_[1]));
cudaSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&raw_uvs_ptr, &n_bytes, cuda_graphics_resources_[2]));
thrust::device_ptr<Eigen::Vector3f> dev_points_ptr = thrust::device_pointer_cast(raw_points_ptr);
thrust::device_ptr<Eigen::Vector3f> dev_normals_ptr = thrust::device_pointer_cast(raw_normals_ptr);
thrust::device_ptr<Eigen::Vector2f> dev_uvs_ptr = thrust::device_pointer_cast(raw_uvs_ptr);
if (PrepareBinding(geometry, option, view, dev_points_ptr, dev_normals_ptr, dev_uvs_ptr) == false) {
PrintShaderWarning("Binding failed when preparing data.");
return false;
}
Unmap(3);
bound_ = true;
return true;
}
bool TexturePhongShader::RenderGeometry(const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (PrepareRendering(geometry, option, view) == false) {
PrintShaderWarning("Rendering failed during preparation.");
return false;
}
glUseProgram(program_);
glUniformMatrix4fv(MVP_, 1, GL_FALSE, view.GetMVPMatrix().data());
glUniformMatrix4fv(V_, 1, GL_FALSE, view.GetViewMatrix().data());
glUniformMatrix4fv(M_, 1, GL_FALSE, view.GetModelMatrix().data());
glUniformMatrix4fv(light_position_world_, 1, GL_FALSE,
light_position_world_data_.data());
glUniformMatrix4fv(light_color_, 1, GL_FALSE, light_color_data_.data());
glUniform4fv(light_diffuse_power_, 1, light_diffuse_power_data_.data());
glUniform4fv(light_specular_power_, 1, light_specular_power_data_.data());
glUniform4fv(light_specular_shininess_, 1,
light_specular_shininess_data_.data());
glUniform4fv(light_ambient_, 1, light_ambient_data_.data());
glUniform1i(diffuse_texture_, 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_);
glEnableVertexAttribArray(vertex_position_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position_buffer_);
glVertexAttribPointer(vertex_position_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(vertex_normal_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_normal_buffer_);
glVertexAttribPointer(vertex_normal_, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(vertex_uv_);
glBindBuffer(GL_ARRAY_BUFFER, vertex_uv_buffer_);
glVertexAttribPointer(vertex_uv_, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glDrawArrays(draw_arrays_mode_, 0, draw_arrays_size_);
glDisableVertexAttribArray(vertex_position_);
glDisableVertexAttribArray(vertex_normal_);
glDisableVertexAttribArray(vertex_uv_);
return true;
}
void TexturePhongShader::UnbindGeometry(bool finalize) {
if (bound_) {
if (!finalize) {
cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[0]));
cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[1]));
cudaSafeCall(cudaGraphicsUnregisterResource(cuda_graphics_resources_[2]));
}
glDeleteBuffers(1, &vertex_position_buffer_);
glDeleteBuffers(1, &vertex_normal_buffer_);
glDeleteBuffers(1, &vertex_uv_buffer_);
glDeleteTextures(1, &diffuse_texture_buffer_);
bound_ = false;
}
}
void TexturePhongShader::SetLighting(const ViewControl &view,
const RenderOption &option) {
const auto &box = view.GetBoundingBox();
light_position_world_data_.setOnes();
light_color_data_.setOnes();
for (int i = 0; i < 4; i++) {
light_position_world_data_.block<3, 1>(0, i) =
box.GetCenter().cast<GLfloat>() +
(float)box.GetMaxExtent() *
((float)option.light_position_relative_[i](0) *
view.GetRight() +
(float)option.light_position_relative_[i](1) *
view.GetUp() +
(float)option.light_position_relative_[i](2) *
view.GetFront());
light_color_data_.block<3, 1>(0, i) =
option.light_color_[i].cast<GLfloat>();
}
if (option.light_on_) {
light_diffuse_power_data_ =
Eigen::Vector4f(option.light_diffuse_power_).cast<GLfloat>();
light_specular_power_data_ =
Eigen::Vector4f(option.light_specular_power_).cast<GLfloat>();
light_specular_shininess_data_ =
Eigen::Vector4f(option.light_specular_shininess_)
.cast<GLfloat>();
light_ambient_data_.block<3, 1>(0, 0) =
option.light_ambient_color_.cast<GLfloat>();
light_ambient_data_(3) = 1.0f;
} else {
light_diffuse_power_data_ = gl_helper::GLVector4f::Zero();
light_specular_power_data_ = gl_helper::GLVector4f::Zero();
light_specular_shininess_data_ = gl_helper::GLVector4f::Ones();
light_ambient_data_ = gl_helper::GLVector4f(1.0f, 1.0f, 1.0f, 1.0f);
}
}
bool TexturePhongShaderForTriangleMesh::PrepareRendering(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
if (option.mesh_show_back_face_) {
glDisable(GL_CULL_FACE);
} else {
glEnable(GL_CULL_FACE);
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GLenum(option.GetGLDepthFunc()));
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
if (option.mesh_show_wireframe_) {
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(1.0, 1.0);
} else {
glDisable(GL_POLYGON_OFFSET_FILL);
}
SetLighting(view, option);
return true;
}
bool TexturePhongShaderForTriangleMesh::PrepareBinding(
const geometry::Geometry &geometry,
const RenderOption &option,
const ViewControl &view,
thrust::device_ptr<Eigen::Vector3f> &points,
thrust::device_ptr<Eigen::Vector3f> &normals,
thrust::device_ptr<Eigen::Vector2f> &uvs) {
if (geometry.GetGeometryType() !=
geometry::Geometry::GeometryType::TriangleMesh) {
PrintShaderWarning("Rendering type is not geometry::TriangleMesh.");
return false;
}
const geometry::TriangleMesh &mesh =
(const geometry::TriangleMesh &)geometry;
if (mesh.HasTriangles() == false) {
PrintShaderWarning("Binding failed with empty triangle mesh.");
return false;
}
if (mesh.HasTriangleNormals() == false ||
mesh.HasVertexNormals() == false) {
PrintShaderWarning("Binding failed because mesh has no normals.");
PrintShaderWarning("Call ComputeVertexNormals() before binding.");
return false;
}
copy_trianglemesh_functor func(thrust::raw_pointer_cast(mesh.vertices_.data()),
thrust::raw_pointer_cast(mesh.vertex_normals_.data()),
(int*)(thrust::raw_pointer_cast(mesh.triangles_.data())),
thrust::raw_pointer_cast(mesh.triangle_normals_.data()),
thrust::raw_pointer_cast(mesh.triangle_uvs_.data()),
option.mesh_shade_option_);
thrust::transform(thrust::make_counting_iterator<size_t>(0),
thrust::make_counting_iterator<size_t>(mesh.triangles_.size() * 3),
make_tuple_iterator(points, normals, uvs), func);
    glGenTextures(1, &diffuse_texture_buffer_);  // generate the texture object; diffuse_texture_ holds the sampler uniform location
glBindTexture(GL_TEXTURE_2D, diffuse_texture_buffer_);
GLenum format;
switch (mesh.texture_.num_of_channels_) {
case 1: {
format = GL_RED;
break;
}
case 3: {
format = GL_RGB;
break;
}
case 4: {
format = GL_RGBA;
break;
}
default: {
utility::LogWarning("Unknown format, abort!");
return false;
}
}
GLenum type;
switch (mesh.texture_.bytes_per_channel_) {
case 1: {
type = GL_UNSIGNED_BYTE;
break;
}
case 2: {
type = GL_UNSIGNED_SHORT;
break;
}
case 4: {
type = GL_FLOAT;
break;
}
default: {
utility::LogWarning("Unknown format, abort!");
return false;
}
}
glTexImage2D(GL_TEXTURE_2D, 0, format, mesh.texture_.width_,
mesh.texture_.height_, 0, format, type,
thrust::raw_pointer_cast(mesh.texture_.data_.data()));
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
draw_arrays_mode_ = GL_TRIANGLES;
draw_arrays_size_ = GLsizei(mesh.triangles_.size() * 3);
return true;
}
size_t TexturePhongShaderForTriangleMesh::GetDataSize(const geometry::Geometry &geometry) const {
return ((const geometry::TriangleMesh &)geometry).triangles_.size() * 3;
} |
e1934865abb5f5000dd0b9110bbd496fd9f2e352.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
#include <stdio.h>
#define SIZE 1024
#define ONERROR(expression) cudaStatus = expression;\
if(cudaStatus != hipSuccess) \
{\
fprintf(stderr, "%s failed on line %d!\n",__FUNCTION__,__LINE__);\
goto Error;\
}
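// Computes the raw linear score W*x for one (category, batch) pair per block:
// each thread multiplies one feature, the partial products are summed by a
// shared-memory tree reduction, and the last six steps run warp-synchronously
// through a volatile pointer. The bias pointer b is accepted but not added here.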
__global__ void multiply(int categories, int features,int batches, float* W, float* b, float* input, float* score)
{
int feature = threadIdx.x;
int category = blockIdx.x;
int batch = blockIdx.y;
float Wx;
float Wx_plus_b;
float result;
int init = threadIdx.x;
__shared__ float temp[SIZE];
while (init < SIZE)
{
temp[init] = 0;
init += blockDim.x;
}
__syncthreads();
{
Wx = W[feature + category * features] * input[feature + batch * features];
temp[feature] += Wx;
__syncthreads();
for (int thread = 1024 / 2; thread > 32; thread /= 2)
{
if (threadIdx.x < thread)
{
temp[threadIdx.x] += temp[threadIdx.x + thread];
}
__syncthreads();
}
		if (threadIdx.x < 32) // full warp: the +32 step must also fold temp[32..63] into the sum
{
volatile float* volatiletemp = temp;
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 32];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 16];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 8];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 4];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 2];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 1];
if (threadIdx.x == 0)
score[category + categories * batch] = volatiletemp[0];
}
}
}
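// One block per sample: exponentiates the scores, reduces them in shared memory
// to get the softmax denominator, normalizes, and subtracts 1 at the true label,
// leaving the cross-entropy gradient with respect to the scores in diff.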
__global__ void update1(int categories, int features, int batches, int* labels,float* score,float* diff)
{
int category = threadIdx.x;
int batch = blockIdx.x;
// extern __shared__ float diff[];
__shared__ float temp[SIZE];
diff[category + batch * categories] = exp(score[category + batch * categories]);
int init = threadIdx.x;
while (init < SIZE)
{
temp[init] = 0;
init += blockDim.x;
}
__syncthreads();
temp[category] += diff[category + batch * categories];
__syncthreads();
for (int thread = SIZE / 2; thread > 0; thread /= 2)
{
if (threadIdx.x < thread)
{
temp[threadIdx.x] += temp[threadIdx.x + thread];
}
__syncthreads();
}
diff[category + batch * categories] /= temp[0];
if (category == labels[batch])
diff[category + batch * categories]--;
}
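// Outer product of the per-sample gradient with the input: one dL/dW entry per
// (category, batch, feature) is staged in diffW; update3 then sums these over
// the batch and applies the learning-rate-scaled SGD step to W and b.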
__global__ void update2(int categories, int features, int batches,float* input, float* diff,float* diffW)
{
int category = threadIdx.x;
int batch = threadIdx.y;
int feature = blockIdx.x;
// extern __shared__ float diff[];
diffW[category + categories*batch + feature*categories*batches] = diff[category + batch * categories] * input[feature + batch * features];
}
__global__ void update3(int categories, int features, int batches, float* W, float* b, float* diff, float* diffW,float learningRate)
{
int feature = threadIdx.x;
int category = blockIdx.x;
float batchDiffW = 0;
float batchDiffb = 0;
for (int batch = 0; batch < batches; batch++)
{
batchDiffW += learningRate * diffW[category + categories*batch + feature*categories*batches];
if (feature == 0)
batchDiffb += learningRate * diff[category + categories* batch];
}
W[feature + features * category] -= batchDiffW;
if (feature == 0)
b[category] -= batchDiffb;
}
extern "C"
hipError_t mallocInitPara(float** dev_W, float** dev_b, int features, int categories,float* host_W, float* host_b)
{
hipError_t cudaStatus;
ONERROR(hipMalloc((void**)dev_W, features * categories * sizeof(float)));
ONERROR(hipMemcpy(*dev_W, host_W, sizeof(float)*features*categories, hipMemcpyHostToDevice));
ONERROR(hipMalloc((void**)dev_b, categories * sizeof(float)));
ONERROR(hipMemcpy(*dev_b, host_b, sizeof(float)*categories, hipMemcpyHostToDevice));
return hipSuccess;
Error:
hipFree(*dev_W);
hipFree(*dev_b);
return cudaStatus;
}
using namespace std;
extern "C"
hipError_t updatePara_SGD(float* x, int* label, int batches, float* backPropagationForInput,float* dev_W,float* dev_b, int features, int categories,float learningRate, float* correctRate)
{
hipError_t cudaStatus = hipSuccess;
float* dev_x;
float* dev_score;
float* host_score = new float[categories * batches];
float* dev_diff;
float* dev_diffW;
int* dev_labels;
hipStream_t stream0;
ONERROR(hipStreamCreate(&stream0));
ONERROR(hipMalloc(&dev_x,sizeof(float)*features*batches));
ONERROR(hipMalloc(&dev_labels, sizeof(int)*batches));
ONERROR(hipMemcpyAsync(dev_x, x, sizeof(float)*features*batches, hipMemcpyHostToDevice, stream0));
ONERROR(hipMemcpyAsync(dev_labels, label, sizeof(int)*batches, hipMemcpyHostToDevice, stream0));
ONERROR(hipMalloc(&dev_score, sizeof(float)*categories*batches));
ONERROR(hipMalloc(&dev_diff, sizeof(float)*categories*batches));
ONERROR(hipMalloc(&dev_diffW, sizeof(float)*categories*batches*features));
/*
struct hipDeviceProp_t properties;
hipGetDeviceProperties(&properties, 0);
int a = properties.multiProcessorCount;
int b = properties.maxThreadsPerMultiProcessor;*/
dim3 gridShape = dim3(categories, batches);
dim3 blockShape = dim3(features);
int shareMemSize = 1;
multiply << <gridShape, blockShape,0, stream0 >> > (categories, features, batches, dev_W, dev_b, dev_x, dev_score);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
ONERROR(hipMemcpyAsync(host_score, dev_score, sizeof(float)*categories*batches, hipMemcpyDeviceToHost,stream0));
ONERROR(hipStreamSynchronize(stream0));
update1 << <batches, categories,0, stream0 >> > (categories, features, batches, dev_labels, dev_score, dev_diff);
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
	update2 << <features, dim3(categories,batches),0, stream0 >> > (categories, features, batches,dev_x, dev_diff, dev_diffW);
	cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
	update3 << <categories, features,0, stream0 >> >(categories, features, batches, dev_W, dev_b, dev_diff, dev_diffW, learningRate);
	cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
int correctCount = 0;
for (int i = 0; i < batches; i++)
{
float max = -999999.0;
int maxlabel = 0;
for (int j = 0; j < categories; j++)
{
if (host_score[categories * i + j] > max)
{
max = host_score[categories * i + j];
maxlabel = j;
}
}
if (maxlabel == label[i])
correctCount++;
}
ONERROR(hipStreamSynchronize(stream0));
delete[] host_score;
correctRate[0] = (float)correctCount / batches;
Error:
hipFree(dev_x);
hipFree(dev_labels);
hipFree(dev_score);
hipFree(dev_diff);
hipFree(dev_diffW);
hipStreamDestroy(stream0);
return cudaStatus;
}
| e1934865abb5f5000dd0b9110bbd496fd9f2e352.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <stdio.h>
#define SIZE 1024
#define ONERROR(expression) cudaStatus = expression;\
if(cudaStatus != cudaSuccess) \
{\
fprintf(stderr, "%s failed on line %d!\n",__FUNCTION__,__LINE__);\
goto Error;\
}
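// Computes the raw linear score W*x for one (category, batch) pair per block:
// each thread multiplies one feature, the partial products are summed by a
// shared-memory tree reduction, and the last six steps run warp-synchronously
// through a volatile pointer. The bias pointer b is accepted but not added here.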
__global__ void multiply(int categories, int features,int batches, float* W, float* b, float* input, float* score)
{
int feature = threadIdx.x;
int category = blockIdx.x;
int batch = blockIdx.y;
float Wx;
float Wx_plus_b;
float result;
int init = threadIdx.x;
__shared__ float temp[SIZE];
while (init < SIZE)
{
temp[init] = 0;
init += blockDim.x;
}
__syncthreads();
{
Wx = W[feature + category * features] * input[feature + batch * features];
temp[feature] += Wx;
__syncthreads();
for (int thread = 1024 / 2; thread > 32; thread /= 2)
{
if (threadIdx.x < thread)
{
temp[threadIdx.x] += temp[threadIdx.x + thread];
}
__syncthreads();
}
		if (threadIdx.x < 32) // full warp: the +32 step must also fold temp[32..63] into the sum
{
volatile float* volatiletemp = temp;
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 32];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 16];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 8];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 4];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 2];
volatiletemp[threadIdx.x] += volatiletemp[threadIdx.x + 1];
if (threadIdx.x == 0)
score[category + categories * batch] = volatiletemp[0];
}
}
}
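// One block per sample: exponentiates the scores, reduces them in shared memory
// to get the softmax denominator, normalizes, and subtracts 1 at the true label,
// leaving the cross-entropy gradient with respect to the scores in diff.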
__global__ void update1(int categories, int features, int batches, int* labels,float* score,float* diff)
{
int category = threadIdx.x;
int batch = blockIdx.x;
// extern __shared__ float diff[];
__shared__ float temp[SIZE];
diff[category + batch * categories] = exp(score[category + batch * categories]);
int init = threadIdx.x;
while (init < SIZE)
{
temp[init] = 0;
init += blockDim.x;
}
__syncthreads();
temp[category] += diff[category + batch * categories];
__syncthreads();
for (int thread = SIZE / 2; thread > 0; thread /= 2)
{
if (threadIdx.x < thread)
{
temp[threadIdx.x] += temp[threadIdx.x + thread];
}
__syncthreads();
}
diff[category + batch * categories] /= temp[0];
if (category == labels[batch])
diff[category + batch * categories]--;
}
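// Outer product of the per-sample gradient with the input: one dL/dW entry per
// (category, batch, feature) is staged in diffW; update3 then sums these over
// the batch and applies the learning-rate-scaled SGD step to W and b.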
__global__ void update2(int categories, int features, int batches,float* input, float* diff,float* diffW)
{
int category = threadIdx.x;
int batch = threadIdx.y;
int feature = blockIdx.x;
// extern __shared__ float diff[];
diffW[category + categories*batch + feature*categories*batches] = diff[category + batch * categories] * input[feature + batch * features];
}
__global__ void update3(int categories, int features, int batches, float* W, float* b, float* diff, float* diffW,float learningRate)
{
int feature = threadIdx.x;
int category = blockIdx.x;
float batchDiffW = 0;
float batchDiffb = 0;
for (int batch = 0; batch < batches; batch++)
{
batchDiffW += learningRate * diffW[category + categories*batch + feature*categories*batches];
if (feature == 0)
batchDiffb += learningRate * diff[category + categories* batch];
}
W[feature + features * category] -= batchDiffW;
if (feature == 0)
b[category] -= batchDiffb;
}
extern "C"
cudaError_t mallocInitPara(float** dev_W, float** dev_b, int features, int categories,float* host_W, float* host_b)
{
cudaError_t cudaStatus;
ONERROR(cudaMalloc((void**)dev_W, features * categories * sizeof(float)));
ONERROR(cudaMemcpy(*dev_W, host_W, sizeof(float)*features*categories, cudaMemcpyHostToDevice));
ONERROR(cudaMalloc((void**)dev_b, categories * sizeof(float)));
ONERROR(cudaMemcpy(*dev_b, host_b, sizeof(float)*categories, cudaMemcpyHostToDevice));
return cudaSuccess;
Error:
cudaFree(*dev_W);
cudaFree(*dev_b);
return cudaStatus;
}
using namespace std;
extern "C"
cudaError_t updatePara_SGD(float* x, int* label, int batches, float* backPropagationForInput,float* dev_W,float* dev_b, int features, int categories,float learningRate, float* correctRate)
{
cudaError_t cudaStatus = cudaSuccess;
float* dev_x;
float* dev_score;
float* host_score = new float[categories * batches];
float* dev_diff;
float* dev_diffW;
int* dev_labels;
cudaStream_t stream0;
ONERROR(cudaStreamCreate(&stream0));
ONERROR(cudaMalloc(&dev_x,sizeof(float)*features*batches));
ONERROR(cudaMalloc(&dev_labels, sizeof(int)*batches));
ONERROR(cudaMemcpyAsync(dev_x, x, sizeof(float)*features*batches, cudaMemcpyHostToDevice, stream0));
ONERROR(cudaMemcpyAsync(dev_labels, label, sizeof(int)*batches, cudaMemcpyHostToDevice, stream0));
ONERROR(cudaMalloc(&dev_score, sizeof(float)*categories*batches));
ONERROR(cudaMalloc(&dev_diff, sizeof(float)*categories*batches));
ONERROR(cudaMalloc(&dev_diffW, sizeof(float)*categories*batches*features));
/*
struct cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, 0);
int a = properties.multiProcessorCount;
int b = properties.maxThreadsPerMultiProcessor;*/
dim3 gridShape = dim3(categories, batches);
dim3 blockShape = dim3(features);
int shareMemSize = 1;
multiply << <gridShape, blockShape,0, stream0 >> > (categories, features, batches, dev_W, dev_b, dev_x, dev_score);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
ONERROR(cudaMemcpyAsync(host_score, dev_score, sizeof(float)*categories*batches, cudaMemcpyDeviceToHost,stream0));
ONERROR(cudaStreamSynchronize(stream0));
update1 << <batches, categories,0, stream0 >> > (categories, features, batches, dev_labels, dev_score, dev_diff);
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
	update2 << <features, dim3(categories,batches),0, stream0 >> > (categories, features, batches,dev_x, dev_diff, dev_diffW);
	cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
	update3 << <categories, features,0, stream0 >> >(categories, features, batches, dev_W, dev_b, dev_diff, dev_diffW, learningRate);
	cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
int correctCount = 0;
for (int i = 0; i < batches; i++)
{
float max = -999999.0;
int maxlabel = 0;
for (int j = 0; j < categories; j++)
{
if (host_score[categories * i + j] > max)
{
max = host_score[categories * i + j];
maxlabel = j;
}
}
if (maxlabel == label[i])
correctCount++;
}
ONERROR(cudaStreamSynchronize(stream0));
delete[] host_score;
correctRate[0] = (float)correctCount / batches;
Error:
cudaFree(dev_x);
cudaFree(dev_labels);
cudaFree(dev_score);
cudaFree(dev_diff);
cudaFree(dev_diffW);
cudaStreamDestroy(stream0);
return cudaStatus;
}
|
a96b89c061319ddba93fd19f694391d17dee5bcd.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019 by XGBoost Contributors
*
* \file data.cu
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "columnar.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) {
Json j_arr = Json::Load({interface_str.c_str(), interface_str.size()});
auto const& j_arr_obj = get<Object>(j_arr);
std::string key {c_key};
auto version = get<Integer const>(j_arr_obj.at("version"));
CHECK_EQ(version, 1) << ColumnarErrors::Version();
if (j_arr_obj.find("mask") != j_arr_obj.cend()) {
LOG(FATAL) << "Meta info " << key << " should be dense, found validity mask";
}
auto typestr = get<String const>(j_arr_obj.at("typestr"));
CHECK_EQ(typestr.size(), 3) << ColumnarErrors::TypestrFormat();
CHECK_NE(typestr.front(), '>') << ColumnarErrors::BigEndian();
auto j_shape = get<Array const>(j_arr_obj.at("shape"));
CHECK_EQ(j_shape.size(), 1) << ColumnarErrors::Dimension(1);
auto length = get<Integer const>(j_shape.at(0));
CHECK_GT(length, 0) << "Label set cannot be empty.";
if (j_arr_obj.find("strides") != j_arr_obj.cend()) {
auto strides = get<Array const>(j_arr_obj.at("strides"));
CHECK_EQ(get<Integer>(strides.at(0)), 4) << ColumnarErrors::Contigious();
}
float* p_data = GetPtrFromArrayData<float*>(j_arr_obj);
hipPointerAttribute_t attr;
dh::safe_cuda(hipPointerGetAttributes(&attr, p_data));
int32_t ptr_device = attr.device;
dh::safe_cuda(hipSetDevice(ptr_device));
thrust::device_ptr<float> p_src {p_data};
HostDeviceVector<float>* dst;
if (key == "root_index") {
LOG(FATAL) << "root index for columnar data is not supported.";
} else if (key == "label") {
dst = &labels_;
CHECK_EQ(typestr.at(1), 'f') << "Label"
<< ColumnarErrors::ofType("floating point");
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toFloat();
} else if (key == "weight") {
dst = &weights_;
CHECK_EQ(typestr.at(1), 'f') << "Weight"
<< ColumnarErrors::ofType("floating point");;
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toFloat();
} else if (key == "base_margin") {
dst = &base_margin_;
CHECK_EQ(typestr.at(1), 'f') << "Base Margin"
<< ColumnarErrors::ofType("floating point");
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toFloat();
} else if (key == "group") {
CHECK_EQ(typestr.at(1), 'u') << "Group"
<< ColumnarErrors::ofType("unsigned 32 bit integers");
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toUInt();
group_ptr_.resize(length + 1);
group_ptr_[0] = 0;
// Ranking is not performed on device.
thrust::copy(p_src, p_src + length, group_ptr_.begin() + 1);
for (size_t i = 1; i < group_ptr_.size(); ++i) {
group_ptr_[i] = group_ptr_[i - 1] + group_ptr_[i];
}
return;
} else {
LOG(FATAL) << "Unknown metainfo: " << key;
}
dst->SetDevice(ptr_device);
dst->Resize(length);
auto p_dst = thrust::device_pointer_cast(dst->DevicePointer());
thrust::copy(p_src, p_src + length, p_dst);
}
} // namespace xgboost
| a96b89c061319ddba93fd19f694391d17dee5bcd.cu | /*!
* Copyright 2019 by XGBoost Contributors
*
* \file data.cu
*/
#include "xgboost/data.h"
#include "xgboost/logging.h"
#include "xgboost/json.h"
#include "columnar.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
void MetaInfo::SetInfo(const char * c_key, std::string const& interface_str) {
Json j_arr = Json::Load({interface_str.c_str(), interface_str.size()});
auto const& j_arr_obj = get<Object>(j_arr);
std::string key {c_key};
auto version = get<Integer const>(j_arr_obj.at("version"));
CHECK_EQ(version, 1) << ColumnarErrors::Version();
if (j_arr_obj.find("mask") != j_arr_obj.cend()) {
LOG(FATAL) << "Meta info " << key << " should be dense, found validity mask";
}
auto typestr = get<String const>(j_arr_obj.at("typestr"));
CHECK_EQ(typestr.size(), 3) << ColumnarErrors::TypestrFormat();
CHECK_NE(typestr.front(), '>') << ColumnarErrors::BigEndian();
auto j_shape = get<Array const>(j_arr_obj.at("shape"));
CHECK_EQ(j_shape.size(), 1) << ColumnarErrors::Dimension(1);
auto length = get<Integer const>(j_shape.at(0));
CHECK_GT(length, 0) << "Label set cannot be empty.";
if (j_arr_obj.find("strides") != j_arr_obj.cend()) {
auto strides = get<Array const>(j_arr_obj.at("strides"));
CHECK_EQ(get<Integer>(strides.at(0)), 4) << ColumnarErrors::Contigious();
}
float* p_data = GetPtrFromArrayData<float*>(j_arr_obj);
cudaPointerAttributes attr;
dh::safe_cuda(cudaPointerGetAttributes(&attr, p_data));
int32_t ptr_device = attr.device;
dh::safe_cuda(cudaSetDevice(ptr_device));
thrust::device_ptr<float> p_src {p_data};
HostDeviceVector<float>* dst;
if (key == "root_index") {
LOG(FATAL) << "root index for columnar data is not supported.";
} else if (key == "label") {
dst = &labels_;
CHECK_EQ(typestr.at(1), 'f') << "Label"
<< ColumnarErrors::ofType("floating point");
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toFloat();
} else if (key == "weight") {
dst = &weights_;
CHECK_EQ(typestr.at(1), 'f') << "Weight"
<< ColumnarErrors::ofType("floating point");;
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toFloat();
} else if (key == "base_margin") {
dst = &base_margin_;
CHECK_EQ(typestr.at(1), 'f') << "Base Margin"
<< ColumnarErrors::ofType("floating point");
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toFloat();
} else if (key == "group") {
CHECK_EQ(typestr.at(1), 'u') << "Group"
<< ColumnarErrors::ofType("unsigned 32 bit integers");
CHECK_EQ(typestr.at(2), '4') << ColumnarErrors::toUInt();
group_ptr_.resize(length + 1);
group_ptr_[0] = 0;
// Ranking is not performed on device.
thrust::copy(p_src, p_src + length, group_ptr_.begin() + 1);
for (size_t i = 1; i < group_ptr_.size(); ++i) {
group_ptr_[i] = group_ptr_[i - 1] + group_ptr_[i];
}
return;
} else {
LOG(FATAL) << "Unknown metainfo: " << key;
}
dst->SetDevice(ptr_device);
dst->Resize(length);
auto p_dst = thrust::device_pointer_cast(dst->DevicePointer());
thrust::copy(p_src, p_src + length, p_dst);
}
} // namespace xgboost
|
8c599927c2be1f1575bf8d5814cfeb6be5ddeebe.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <new>
#include <cstddef>
#include <windows.h>
#include <iostream>
#include <cinttypes>
#include <random>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/device_functions.h"
static const int MIN = -30;
static const int MAX = 30;
static const unsigned int BLOCK_SIZE = 32;
static const size_t MATRIX_SIZES[3] = {1000, 2000, 3000};
enum class MultType { cpu, gpu, gpuShared };
__global__ void gpuKernMult(double* m1, double* m2, size_t size, double* res)
{
size_t i = blockDim.y * blockIdx.y + threadIdx.y;
size_t j = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= size || j >= size) return;
size_t ind = i * size + j;
res[ind] = 0;
for (size_t k = 0; k < size; k++)
{
res[ind] += m1[i * size + k] * m2[k * size + j];
}
}
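// Tiled matrix multiply: each block streams BLOCK_SIZE x BLOCK_SIZE tiles of m1
// and m2 through shared memory, zero-padding edge tiles so sizes that are not a
// multiple of BLOCK_SIZE remain correct, and accumulates one output element per thread.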
__global__ void gpuKernMultShared(double* m1, double* m2, size_t size, double* res)
{
size_t ty = threadIdx.y;
size_t tx = threadIdx.x;
size_t i = blockDim.y * blockIdx.y + ty;
size_t j = blockDim.x * blockIdx.x + tx;
double sum = 0;
for (size_t ind = 0, aj = tx, bi = ty; ind * BLOCK_SIZE < size; ++ind, aj += BLOCK_SIZE, bi += BLOCK_SIZE)
{
__shared__ double a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double b[BLOCK_SIZE][BLOCK_SIZE];
a[ty][tx] = 0;
b[ty][tx] = 0;
if (i < size && aj < size)
{
a[ty][tx] = m1[i * size + aj];
}
if (j < size && bi < size)
{
b[ty][tx] = m2[bi * size + j];
}
__syncthreads();
for (size_t k = 0; k < BLOCK_SIZE; k++)
{
sum += a[ty][k] * b[k][tx];
}
__syncthreads();
}
if (i < size && j < size)
{
res[i * size + j] = sum;
}
}
static void initCudaMatr(double** m1, double** m2, double** res, size_t bytes, double* src1, double* src2)
{
hipMalloc(m1, bytes);
hipMalloc(m2, bytes);
hipMalloc(res, bytes);
hipMemcpy(*m1, src1, bytes, hipMemcpyHostToDevice);
hipMemcpy(*m2, src2, bytes, hipMemcpyHostToDevice);
}
static void initCudaTimer(hipEvent_t* start, hipEvent_t* end)
{
hipEventCreate(start);
hipEventCreate(end);
hipEventRecord(*start, 0);
}
static float countTime(hipEvent_t start, hipEvent_t end)
{
float time;
hipEventRecord(end, 0);
hipEventSynchronize(end);
hipEventElapsedTime(&time, start, end);
return time;
}
static void destroyCudaObj(double* m1, double* m2, double* res, hipEvent_t start, hipEvent_t end)
{
hipEventDestroy(end);
hipEventDestroy(start);
hipFree(res);
hipFree(m2);
hipFree(m1);
}
static float gpuMult(double* m1, double* m2, size_t size, double* res, MultType type)
{
hipEvent_t start, end;
float time;
double* cudaM1;
double* cudaM2;
double* cudaRes;
size_t matrixBytesNum = sizeof(double) * size * size;
dim3 cudaThreads(BLOCK_SIZE, BLOCK_SIZE);
dim3 cudaBlocks((size + cudaThreads.x - 1) / cudaThreads.x, (size + cudaThreads.y - 1) / cudaThreads.y);
initCudaMatr(&cudaM1, &cudaM2, &cudaRes, matrixBytesNum, m1, m2);
initCudaTimer(&start, &end);
switch (type)
{
case MultType::gpu:
hipLaunchKernelGGL(( gpuKernMult), dim3(cudaBlocks), dim3(cudaThreads), 0, 0, cudaM1, cudaM2, size, cudaRes);
break;
case MultType::gpuShared:
hipLaunchKernelGGL(( gpuKernMultShared), dim3(cudaBlocks), dim3(cudaThreads), 0, 0, cudaM1, cudaM2, size, cudaRes);
break;
default:
return -1;
}
time = countTime(start, end);
hipMemcpy(res, cudaRes, matrixBytesNum, hipMemcpyDeviceToHost);
destroyCudaObj(cudaM1, cudaM2, cudaRes, start, end);
return time / 1000.0f;
}
static double deviation(double* m1, double* m2, size_t size)
{
size_t n = size * size;
double res = 0.0;
for (size_t i = 0; i < n; i++)
{
		res = std::max(res, std::abs(m1[i] - m2[i]));
}
return res;
}
static double* randomMatrix(size_t size)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<double> distrib(MIN, MAX);
size_t n = size * size;
double* res = new double[n];
for (size_t i = 0; i < n; ++i)
{
res[i] = distrib(gen);
}
return res;
}
static float cpuMult(double* m1, double* m2, size_t size, double* res)
{
LARGE_INTEGER start, end, freq;
QueryPerformanceFrequency(&freq);
QueryPerformanceCounter(&start);
for (size_t i = 0; i < size; i++)
{
for (size_t j = 0; j < size; j++)
{
size_t ind = i * size + j;
res[ind] = 0;
for (size_t k = 0; k < size; k++)
{
res[ind] += m1[i * size + k] * m2[k * size + j];
}
}
}
QueryPerformanceCounter(&end);
return static_cast<float>(end.QuadPart - start.QuadPart) / freq.QuadPart;
}
static float mult(double* m1, double* m2, size_t size, double* res, MultType type)
{
if (type == MultType::cpu)
{
return cpuMult(m1, m2, size, res);
}
else
{
return gpuMult(m1, m2, size, res, type);
}
}
int main(int argc, char* argv[])
{
for (size_t size: MATRIX_SIZES)
{
std::cout << "Size == " << size << std::endl;
double* m1 = randomMatrix(size);
double* m2 = randomMatrix(size);
double* gpuResMatr = new double[size * size];
double* gpuSharedResMatr = new double[size * size];
float gpuResTime = mult(m1, m2, size, gpuResMatr, MultType::gpu);
float gpuSharedResTime = mult(m1, m2, size, gpuSharedResMatr, MultType::gpuShared);
std::cout << "GPU result time == " << gpuResTime << std::endl;
std::cout << "GPU + shared result time == " << gpuSharedResTime << std::endl;
std::cout << "Maximum deviation: " << deviation(gpuResMatr, gpuSharedResMatr, size) << std::endl;
delete[] gpuResMatr;
delete[] gpuSharedResMatr;
delete[] m2;
delete[] m1;
}
return 0;
}
| 8c599927c2be1f1575bf8d5814cfeb6be5ddeebe.cu | #include <algorithm>
#include <new>
#include <cstddef>
#include <windows.h>
#include <iostream>
#include <cinttypes>
#include <random>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
static const int MIN = -30;
static const int MAX = 30;
static const unsigned int BLOCK_SIZE = 32;
static const size_t MATRIX_SIZES[3] = {1000, 2000, 3000};
enum class MultType { cpu, gpu, gpuShared };
__global__ void gpuKernMult(double* m1, double* m2, size_t size, double* res)
{
size_t i = blockDim.y * blockIdx.y + threadIdx.y;
size_t j = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= size || j >= size) return;
size_t ind = i * size + j;
res[ind] = 0;
for (size_t k = 0; k < size; k++)
{
res[ind] += m1[i * size + k] * m2[k * size + j];
}
}
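// Tiled matrix multiply: each block streams BLOCK_SIZE x BLOCK_SIZE tiles of m1
// and m2 through shared memory, zero-padding edge tiles so sizes that are not a
// multiple of BLOCK_SIZE remain correct, and accumulates one output element per thread.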
__global__ void gpuKernMultShared(double* m1, double* m2, size_t size, double* res)
{
size_t ty = threadIdx.y;
size_t tx = threadIdx.x;
size_t i = blockDim.y * blockIdx.y + ty;
size_t j = blockDim.x * blockIdx.x + tx;
double sum = 0;
for (size_t ind = 0, aj = tx, bi = ty; ind * BLOCK_SIZE < size; ++ind, aj += BLOCK_SIZE, bi += BLOCK_SIZE)
{
__shared__ double a[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double b[BLOCK_SIZE][BLOCK_SIZE];
a[ty][tx] = 0;
b[ty][tx] = 0;
if (i < size && aj < size)
{
a[ty][tx] = m1[i * size + aj];
}
if (j < size && bi < size)
{
b[ty][tx] = m2[bi * size + j];
}
__syncthreads();
for (size_t k = 0; k < BLOCK_SIZE; k++)
{
sum += a[ty][k] * b[k][tx];
}
__syncthreads();
}
if (i < size && j < size)
{
res[i * size + j] = sum;
}
}
static void initCudaMatr(double** m1, double** m2, double** res, size_t bytes, double* src1, double* src2)
{
cudaMalloc(m1, bytes);
cudaMalloc(m2, bytes);
cudaMalloc(res, bytes);
cudaMemcpy(*m1, src1, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(*m2, src2, bytes, cudaMemcpyHostToDevice);
}
static void initCudaTimer(cudaEvent_t* start, cudaEvent_t* end)
{
cudaEventCreate(start);
cudaEventCreate(end);
cudaEventRecord(*start, 0);
}
static float countTime(cudaEvent_t start, cudaEvent_t end)
{
float time;
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time, start, end);
return time;
}
static void destroyCudaObj(double* m1, double* m2, double* res, cudaEvent_t start, cudaEvent_t end)
{
cudaEventDestroy(end);
cudaEventDestroy(start);
cudaFree(res);
cudaFree(m2);
cudaFree(m1);
}
static float gpuMult(double* m1, double* m2, size_t size, double* res, MultType type)
{
cudaEvent_t start, end;
float time;
double* cudaM1;
double* cudaM2;
double* cudaRes;
size_t matrixBytesNum = sizeof(double) * size * size;
dim3 cudaThreads(BLOCK_SIZE, BLOCK_SIZE);
dim3 cudaBlocks((size + cudaThreads.x - 1) / cudaThreads.x, (size + cudaThreads.y - 1) / cudaThreads.y);
initCudaMatr(&cudaM1, &cudaM2, &cudaRes, matrixBytesNum, m1, m2);
initCudaTimer(&start, &end);
switch (type)
{
case MultType::gpu:
gpuKernMult<<<cudaBlocks, cudaThreads>>>(cudaM1, cudaM2, size, cudaRes);
break;
case MultType::gpuShared:
gpuKernMultShared<<<cudaBlocks, cudaThreads>>>(cudaM1, cudaM2, size, cudaRes);
break;
default:
return -1;
}
time = countTime(start, end);
cudaMemcpy(res, cudaRes, matrixBytesNum, cudaMemcpyDeviceToHost);
destroyCudaObj(cudaM1, cudaM2, cudaRes, start, end);
return time / 1000.0f;
}
static double deviation(double* m1, double* m2, size_t size)
{
size_t n = size * size;
double res = 0.0;
for (size_t i = 0; i < n; i++)
{
res = std::max(res, std::abs(m1[i] - m2[i]));
}
return res;
}
static double* randomMatrix(size_t size)
{
std::random_device rd;
std::mt19937 gen(rd());
std::uniform_real_distribution<double> distrib(MIN, MAX);
size_t n = size * size;
double* res = new double[n];
for (size_t i = 0; i < n; ++i)
{
res[i] = distrib(gen);
}
return res;
}
static float cpuMult(double* m1, double* m2, size_t size, double* res)
{
LARGE_INTEGER start, end, freq;
QueryPerformanceFrequency(&freq);
QueryPerformanceCounter(&start);
for (size_t i = 0; i < size; i++)
{
for (size_t j = 0; j < size; j++)
{
size_t ind = i * size + j;
res[ind] = 0;
for (size_t k = 0; k < size; k++)
{
res[ind] += m1[i * size + k] * m2[k * size + j];
}
}
}
QueryPerformanceCounter(&end);
return static_cast<float>(end.QuadPart - start.QuadPart) / freq.QuadPart;
}
static float mult(double* m1, double* m2, size_t size, double* res, MultType type)
{
if (type == MultType::cpu)
{
return cpuMult(m1, m2, size, res);
}
else
{
return gpuMult(m1, m2, size, res, type);
}
}
int main(int argc, char* argv[])
{
for (size_t size: MATRIX_SIZES)
{
std::cout << "Size == " << size << std::endl;
double* m1 = randomMatrix(size);
double* m2 = randomMatrix(size);
double* gpuResMatr = new double[size * size];
double* gpuSharedResMatr = new double[size * size];
float gpuResTime = mult(m1, m2, size, gpuResMatr, MultType::gpu);
float gpuSharedResTime = mult(m1, m2, size, gpuSharedResMatr, MultType::gpuShared);
std::cout << "GPU result time == " << gpuResTime << std::endl;
std::cout << "GPU + shared result time == " << gpuSharedResTime << std::endl;
std::cout << "Maximum deviation: " << deviation(gpuResMatr, gpuSharedResMatr, size) << std::endl;
delete[] gpuResMatr;
delete[] gpuSharedResMatr;
delete[] m2;
delete[] m1;
}
return 0;
}
|
84adbdee6b07ba17da8c3ec8e512162323f2bc33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_2_back;
int xdim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_2_back;
int ydim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_2_back;
int xdim1_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_2_back;
int ydim1_update_halo_kernel5_minus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_2_back * (y) + \
xdim0_update_halo_kernel5_minus_2_back * \
ydim0_update_halo_kernel5_minus_2_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_2_back * (y) + \
xdim1_update_halo_kernel5_minus_2_back * \
ydim1_update_halo_kernel5_minus_2_back * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_2_back(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 2)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_2_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_2_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_2_back *
ydim0_update_halo_kernel5_minus_2_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_2_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_2_back *
ydim1_update_halo_kernel5_minus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_2_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_2_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 137))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137, "update_halo_kernel5_minus_2_back");
OPS_kernels[137].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_2_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_2_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_2_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_2_back_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel5_minus_2_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_2_back_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel5_minus_2_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_2_back_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel5_minus_2_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_2_back_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel5_minus_2_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel5_minus_2_back), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[137].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 84adbdee6b07ba17da8c3ec8e512162323f2bc33.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel5_minus_2_back;
int xdim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim0_update_halo_kernel5_minus_2_back;
int ydim0_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int xdim1_update_halo_kernel5_minus_2_back;
int xdim1_update_halo_kernel5_minus_2_back_h = -1;
__constant__ int ydim1_update_halo_kernel5_minus_2_back;
int ydim1_update_halo_kernel5_minus_2_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel5_minus_2_back * (y) + \
xdim0_update_halo_kernel5_minus_2_back * \
ydim0_update_halo_kernel5_minus_2_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel5_minus_2_back * (y) + \
xdim1_update_halo_kernel5_minus_2_back * \
ydim1_update_halo_kernel5_minus_2_back * (z))
// user function
__device__
inline void
update_halo_kernel5_minus_2_back(double *vol_flux_z, double *mass_flux_z,
const int *fields) {
if (fields[FIELD_VOL_FLUX_Z] == 1)
vol_flux_z[OPS_ACC0(0, 0, 0)] = -vol_flux_z[OPS_ACC0(0, 0, 2)];
if (fields[FIELD_MASS_FLUX_Z] == 1)
mass_flux_z[OPS_ACC1(0, 0, 0)] = -mass_flux_z[OPS_ACC1(0, 0, 2)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel5_minus_2_back(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel5_minus_2_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel5_minus_2_back *
ydim0_update_halo_kernel5_minus_2_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel5_minus_2_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel5_minus_2_back *
ydim1_update_halo_kernel5_minus_2_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel5_minus_2_back(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel5_minus_2_back(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 137))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(137, "update_halo_kernel5_minus_2_back");
OPS_kernels[137].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel5_minus_2_back_h ||
ydim0 != ydim0_update_halo_kernel5_minus_2_back_h ||
xdim1 != xdim1_update_halo_kernel5_minus_2_back_h ||
ydim1 != ydim1_update_halo_kernel5_minus_2_back_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel5_minus_2_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel5_minus_2_back_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel5_minus_2_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel5_minus_2_back_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel5_minus_2_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel5_minus_2_back_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel5_minus_2_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel5_minus_2_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel5_minus_2_back<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[137].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[137].mpi_time += t2 - t1;
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[137].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
f8f061a69ee5288826cdad148143c9089f09aab2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cudaKernel_maxlocPlusZoominOffset(float *offset, const int * padStart, const int * maxlocUpSample, const size_t nImages, float zoomInRatioX, float zoomInRatioY)
{
int imageIndex = threadIdx.x + blockDim.x *blockIdx.x; //image index
if (imageIndex < nImages)
{
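// offsets are stored as interleaved (x, y) pairs: element 2*i is the x offset and 2*i+1 the y offset of image i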
int index=2*imageIndex;
offset[index] = padStart[index] + maxlocUpSample[index] * zoomInRatioX;
index++;
offset[index] = padStart[index] + maxlocUpSample[index] * zoomInRatioY;
}
} | f8f061a69ee5288826cdad148143c9089f09aab2.cu | #include "includes.h"
__global__ void cudaKernel_maxlocPlusZoominOffset(float *offset, const int * padStart, const int * maxlocUpSample, const size_t nImages, float zoomInRatioX, float zoomInRatioY)
{
int imageIndex = threadIdx.x + blockDim.x *blockIdx.x; //image index
if (imageIndex < nImages)
{
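// offsets are stored as interleaved (x, y) pairs: element 2*i is the x offset and 2*i+1 the y offset of image i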
int index=2*imageIndex;
offset[index] = padStart[index] + maxlocUpSample[index] * zoomInRatioX;
index++;
offset[index] = padStart[index] + maxlocUpSample[index] * zoomInRatioY;
}
} |
05bb266ef74b198e6cfa8fa3f44a2b198e5e9cb4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "dot.h"
#define imin(a,b) (a<b?a:b)
#include "book.h"
const int N = 33*1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock);
__global__ void dodot(float *a, float *b, float *c);
void dot()
{
//int a[N],b[N],c[N];
//CPUBitmap bitmap(16,16);
float *a,*b,c,*partial_c;
float *dev_a,*dev_b,*dev_partial_c;
a=(float*)malloc(N*sizeof(float));
b=(float*)malloc(N*sizeof(float));
partial_c = (float*)malloc(blocksPerGrid*sizeof(float));
HANDLE_ERROR(hipMalloc((void **)&dev_a, N*sizeof(float)));
HANDLE_ERROR(hipMalloc((void **)&dev_b, N*sizeof(float)));
HANDLE_ERROR(hipMalloc((void **)&dev_partial_c, N*sizeof(float)));
for(int i=0;i<N;i++)
{
a[i]=i;
b[i]=i*i;
}
HANDLE_ERROR(hipMemcpy(dev_a, a, N*sizeof(float),hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(dev_b, b, N*sizeof(float),hipMemcpyHostToDevice));
//HANDLE_ERROR(hipMemcpy(dev_c, c, N*sizeof(int),hipMemcpyHostToDevice));
hipLaunchKernelGGL(( dodot), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a,dev_b,dev_partial_c);
HANDLE_ERROR(hipMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float),hipMemcpyDeviceToHost));
c=0;
for(int i=0;i<blocksPerGrid;i++)
{
//printf("%d + %d= %d\n", a[i], b[i], c[i]);
c+=partial_c[i];
}
printf("%.6g\n",c);
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
}
__global__ void dodot(float *a, float *b, float *c)
{
__shared__ float cache[threadsPerBlock];
int tid =threadIdx.x + blockIdx.x*blockDim.x;
int cacheIndex = threadIdx.x;
float tmp = 0;
while(tid<N)
{
tmp += a[tid]*b[tid];
tid += blockDim.x*gridDim.x;
}
cache[cacheIndex] = tmp;
__syncthreads();
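// tree reduction in shared memory: each pass halves the number of active threads until cache[0] holds the block sum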
int i = blockDim.x/2;
while(i!=0)
{
if(cacheIndex<i)
cache[cacheIndex] += cache[cacheIndex+i];
__syncthreads();
i=i/2;
}
if(cacheIndex==0)
c[blockIdx.x]=cache[0];
}
| 05bb266ef74b198e6cfa8fa3f44a2b198e5e9cb4.cu | #include "dot.h"
#define imin(a,b) (a<b?a:b)
#include "book.h"
const int N = 33*1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock);
__global__ void dodot(float *a, float *b, float *c);
void dot()
{
//int a[N],b[N],c[N];
//CPUBitmap bitmap(16,16);
float *a,*b,c,*partial_c;
float *dev_a,*dev_b,*dev_partial_c;
a=(float*)malloc(N*sizeof(float));
b=(float*)malloc(N*sizeof(float));
partial_c = (float*)malloc(blocksPerGrid*sizeof(float));
HANDLE_ERROR(cudaMalloc((void **)&dev_a, N*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void **)&dev_b, N*sizeof(float)));
HANDLE_ERROR(cudaMalloc((void **)&dev_partial_c, N*sizeof(float)));
for(int i=0;i<N;i++)
{
a[i]=i;
b[i]=i*i;
}
HANDLE_ERROR(cudaMemcpy(dev_a, a, N*sizeof(float),cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(dev_b, b, N*sizeof(float),cudaMemcpyHostToDevice));
//HANDLE_ERROR(cudaMemcpy(dev_c, c, N*sizeof(int),cudaMemcpyHostToDevice));
dodot<<<blocksPerGrid, threadsPerBlock>>>(dev_a,dev_b,dev_partial_c);
HANDLE_ERROR(cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float),cudaMemcpyDeviceToHost));
c=0;
for(int i=0;i<blocksPerGrid;i++)
{
//printf("%d + %d= %d\n", a[i], b[i], c[i]);
c+=partial_c[i];
}
printf("%.6g\n",c);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_partial_c);
free(a);
free(b);
free(partial_c);
}
__global__ void dodot(float *a, float *b, float *c)
{
__shared__ float cache[threadsPerBlock];
int tid =threadIdx.x + blockIdx.x*blockDim.x;
int cacheIndex = threadIdx.x;
float tmp = 0;
while(tid<N)
{
tmp += a[tid]*b[tid];
tid += blockDim.x*gridDim.x;
}
cache[cacheIndex] = tmp;
__syncthreads();
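// tree reduction in shared memory: each pass halves the number of active threads until cache[0] holds the block sum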
int i = blockDim.x/2;
while(i!=0)
{
if(cacheIndex<i)
cache[cacheIndex] += cache[cacheIndex+i];
__syncthreads();
i=i/2;
}
if(cacheIndex==0)
c[blockIdx.x]=cache[0];
}
|
d6c9f0dd8541b7f096d2981c8f2f3fe137098b55.hip | // !!! This is a file automatically generated by hipify!!!
#include "timer.hpp"
#include <algorithm>
#include <numeric>
#include <cmath>
#include <hipblas.h>
#include <hip/hip_runtime.h>
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <string>
#include <vector>
#define BLOCK_SIZE 256
#define GRID_SIZE 128
// #define SEP ";"
// #define DEBUG
#ifndef DEBUG
#define CSV
#endif
template <typename T>
void printContainer(T container, const int size) {
std::cout << container[0];
for (int i = 1; i < size; ++i)
std::cout << " | " << container[i] ;
std::cout << std::endl;
}
template <typename T>
void printContainer(T container, const int size, const int only) {
std::cout << container[0];
for (int i = 1; i < only; ++i)
std::cout << " | " << container[i];
std::cout << " | ...";
for (int i = size - only; i < size; ++i)
std::cout << " | " << container[i];
std::cout << std::endl;
}
void printResults(double* results, std::vector<std::string> names, int size){
std::cout << "Results:" << std::endl;
for (int i = 0; i < size; ++i) {
std::cout << names[i] << " : " << results[i] << std::endl;
}
}
void printResults(double* results, double* ref, std::vector<std::string> names, int size){
std::cout << "Results (with difference to reference):" << std::endl;
for (int i = 0; i < size; ++i) {
std::cout << names[i] << " = " << results[i] << " || " << ref[i] - results[i] << std::endl;
}
}
// ------------------ KERNELS ---------------
/** atomicMax for double
*
* References:
* (1) https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicmax
* (2) https://www.micc.unifi.it/bertini/download/gpu-programming-basics/2017/gpu_cuda_5.pdf
* (3) https://stackoverflow.com/questions/17399119/cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ void atomicMax(double* address, double val){
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(fmax(val, __longlong_as_double(assumed))));
// atomicCAS returns the value that was stored at address BEFORE the operation,
// so the loop repeats until no other thread has modified *address in between
// (i.e. until the compare-and-swap actually succeeded).
} while (assumed != old);
}
/** atomicMin for double
*/
__device__ void atomicMin(double* address, double val){
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(fmin(val, __longlong_as_double(assumed))));
// atomicCAS returns the value that was stored at address BEFORE the operation,
// so the loop repeats until no other thread has modified *address in between
// (i.e. until the compare-and-swap actually succeeded).
} while (assumed != old);
}
/** scalar = x DOT y
*/
__global__ void xDOTy(const int N, double *x, double *y, double *scalar) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
__shared__ double cache[BLOCK_SIZE];
double tid_sum = 0.0;
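// grid-stride loop: each thread accumulates a private partial sum of the dot product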
for (; tid < N; tid += stride) {
double tmp_x = x[tid];
tid_sum += tmp_x * y[tid];
}
tid = threadIdx.x;
cache[tid] = tid_sum;
__syncthreads();
for (int i = blockDim.x / 2; i != 0; i /= 2) {
__syncthreads();
if (tid < i) // lower half does the work, rest idles
cache[tid] += cache[tid + i]; // lower looks up by stride and sums up
}
if (tid == 0) // cache[0] now contains block_sum
{
atomicAdd(scalar, cache[0]);
}
}
/** analyze_x_shared
*
* result[0] = sum;
* result[1] = abs_sum;
* result[2] = sqr_sum;
* result[3] = mod_max;
* result[4] = min;
* result[5] = max;
* result[6] = z_entries;
*/
// template <int block_size=BLOCK_SIZE>
__global__ void analyze_x_shared(const int N, double *x, double *results) {
if (blockDim.x * blockIdx.x < N) {
int tid = threadIdx.x + blockDim.x * blockIdx.x; // global tid
const int stride = blockDim.x * gridDim.x;
__shared__ double cache[7][BLOCK_SIZE];
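// one shared-memory row per statistic: sum, abs_sum, sqr_sum, mod_max, min, max, zero-entry count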
double sum = 0.0, abs_sum = 0.0, sqr_sum = 0.0;
// double mod_max = 0.0;
double max = x[0];
double min = max;
double z_entries = 0;
for (; tid < N; tid += stride) {
double value = x[tid];
sum += value;
abs_sum += std::abs(value);
sqr_sum += value*value;
// mod_max = (std::abs(value) > mod_max)? value : mod_max;
min = fmin(value, min);
max = fmax(value, max);
z_entries += (value)? 0 : 1;
}
tid = threadIdx.x; // block tid
cache[0][tid] = sum;
cache[1][tid] = abs_sum;
cache[2][tid] = sqr_sum;
cache[3][tid] = fmax(std::abs(min), max);
cache[4][tid] = min;
cache[5][tid] = max;
cache[6][tid] = z_entries;
__syncthreads();
for (int i = blockDim.x / 2; i != 0; i /= 2) {
__syncthreads();
if (tid < i) { // lower half does the work, rest idles
// sums
cache[0][tid] += cache[0][tid + i];
cache[1][tid] += cache[1][tid + i];
cache[2][tid] += cache[2][tid + i];
// min/max values
cache[3][tid] = fmax(cache[3][tid + i], cache[3][tid]); // already all values are std::abs(...)
cache[4][tid] = fmin(cache[4][tid + i], cache[4][tid]);
cache[5][tid] = fmax(cache[5][tid + i], cache[5][tid]);
// "sum"
cache[6][tid] += cache[6][tid + i];
}
}
if (tid == 0) // cache[0] now contains block_sum
{
atomicAdd(results, cache[0][0]);
atomicAdd(results+1, cache[1][0]);
atomicAdd(results+2, cache[2][0]);
// Ideally...
atomicMax(results+3, cache[3][0]);
atomicMin(results+4, cache[4][0]);
atomicMax(results+5, cache[5][0]);
atomicAdd(results+6, cache[6][0]);
}
}
}
/** analyze_x_shared
*
* result[0] = sum;
* result[1] = abs_sum;
* result[2] = sqr_sum;
* result[3] = mod_max;
* result[4] = min;
* result[5] = max;
* result[6] = z_entries;
*/
__global__ void analyze_x_warp(const int N, double *x, double *results) {
if (blockDim.x * blockIdx.x < N) {
int tid = threadIdx.x + blockDim.x * blockIdx.x; // global tid
const int stride = blockDim.x * gridDim.x;
double sum = 0.0, abs_sum = 0.0, sqr_sum = 0.0;
// double mod_max = 0.0;
double max = x[0];
double min = max;
int z_entries = 0;
for (; tid < N; tid += stride) {
double value = x[tid];
sum += value;
abs_sum += std::abs(value);
sqr_sum += value*value;
min = fmin(value, min);
max = fmax(value, max);
z_entries += (value)? 0 : 1;
}
tid = threadIdx.x; // block tid
double mod_max = fmax(std::abs(min), max);
__syncthreads();
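// warp-level reduction: __shfl_down_sync folds each running statistic across the lanes of a warp without shared memory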
for (int i = warpSize / 2; i != 0; i /= 2) {
//__syncthreads();
sum += __shfl_down_sync(0xffffffff, sum, i);
abs_sum += __shfl_down_sync(0xffffffff, abs_sum, i);
sqr_sum += __shfl_down_sync(0xffffffff, sqr_sum, i);
double tmp = __shfl_down_sync(0xffffffff, mod_max, i);
mod_max = fmax(tmp, mod_max);
tmp = __shfl_down_sync(0xffffffff, min, i);
min = fmin(tmp, min);
tmp = __shfl_down_sync(0xffffffff, max, i);
max = fmax(tmp, max) ;
z_entries += __shfl_down_sync(0xffffffff, z_entries, i);
}
// for (int i = blockDim.x / 2; i != 0; i /= 2) {
// for (int i = warpSize / 2; i != 0; i /= 2) {
// //__syncthreads();
// sum += __shfl_xor_sync(-1, sum, i);
// abs_sum += __shfl_xor_sync(-1, abs_sum, i);
// sqr_sum += __shfl_xor_sync(-1, sqr_sum, i);
// double tmp = __shfl_xor_sync(-1, mod_max, i);
// mod_max = (tmp > mod_max) ? tmp : mod_max;
// tmp = __shfl_xor_sync(-1, min, i);
// min = (tmp < min) ? tmp : min;
// tmp = __shfl_xor_sync(-1, max, i);
// max = (tmp > max) ? tmp : max;
// z_entries += __shfl_xor_sync(-1, z_entries, i);
// }
if (tid % warpSize == 0) // a block can consist of multiple warps
{
atomicAdd(results, sum);
atomicAdd(results+1, abs_sum);
atomicAdd(results+2, sqr_sum);
atomicMax(results+3, mod_max);
atomicMin(results+4, min);
atomicMax(results+5, max);
atomicAdd(results+6, z_entries);
}
}
}
template <typename T>
void toCSV(std::fstream& csv, T* array, int size) {
csv << size;
for (int i = 0; i < size; ++i) {
csv << ";" << array[i];
}
csv << std::endl;
}
int main(void) {
Timer timer;
std::vector<int> vec_Ns{100, 10000, 1000000, 100000000};
// std::vector<int> vec_Ns{100, 1000};
#ifdef CSV
std::fstream csv_times, csv_results, csv_results2, csv_results3, csv_results_ref;
std::string csv_times_name = "ph_data.csv";
std::string csv_results_name = "ph_results.csv";
std::string csv_results2_name = "ph_results2.csv";
std::string csv_results3_name = "ph_results3.csv";
std::string csv_results_ref_name = "ph_results_ref.csv";
csv_times.open(csv_times_name, std::fstream::out | std::fstream::trunc);
csv_results.open(csv_results_name, std::fstream::out | std::fstream::trunc);
csv_results2.open(csv_results2_name, std::fstream::out | std::fstream::trunc);
csv_results3.open(csv_results3_name, std::fstream::out | std::fstream::trunc);
csv_results_ref.open(csv_results_ref_name, std::fstream::out | std::fstream::trunc);
std::string header = "N;time_shared;time_warp;time_warp_adapt;time_dot;time_cpuref";
// to csv file
csv_times << header << std::endl;
std::string header_results = "N;sum;abs_sum;sqr_sum;mod_max;min;max;z_entries";
csv_results << header_results << std::endl;
csv_results2 << header_results << std::endl;
csv_results3 << header_results << std::endl;
csv_results_ref << header_results << std::endl;
#endif
for (int& N : vec_Ns) {
//
// Initialize CUBLAS:
//
#ifdef DEBUG
std::cout << "N = " << N << std::endl;
std::cout << "Init CUBLAS..." << std::endl;
#endif
hipblasHandle_t h;
hipblasCreate(&h);
//
// allocate + init host memory:
//
#ifdef DEBUG
std::cout << "Allocating host arrays..." << std::endl;
#endif
double *x = (double *)malloc(sizeof(double) * N);
double *results = (double *)malloc(sizeof(double) * 7);
double *results2 = (double *)malloc(sizeof(double) * 7);
double *results3 = (double *)malloc(sizeof(double) * 7);
double *results_ref = (double *)malloc(sizeof(double) * 7);
std::vector<std::string> names {"sum", "abs_sum", "sqr_sum", "mod_max", "min", "max", "zero_entries"};
std::generate_n(x, N, [n = -N/2] () mutable { return n++; });
std::random_shuffle(x, x+N);
// I'm placing some values here by hand, so that certain results can be forced
// --> to test: mod_max, min, max...
x[0] = -1.1;
x[N/5] = 0.;
x[N/3] = -(N-1);
x[2*N/3] = N;
std::fill(results, results+7, 0.0);
results[3] = x[0];
results[4] = x[0];
results[5] = x[0];
std::copy(results, results+7, results2);
std::copy(results, results+7, results3);
std::copy(results, results+7, results_ref);
timer.reset();
// results_ref[0] = std::accumulate(x, x+N, 0.0);
for (int i = 0; i < N; ++i){
double tmp = x[i];
results_ref[0] += tmp;
results_ref[1] += std::abs(tmp);
results_ref[2] += tmp*tmp;
results_ref[4] = fmin(tmp, results_ref[4]);
results_ref[5] = fmax(tmp, results_ref[5]);
results_ref[6] += tmp ? 0 : 1;
}
results_ref[3] = fmax(std::abs(results_ref[4]), results_ref[5]);
double time_cpuref = timer.get();
/*result[0] = sum;
* result[1] = abs_sum;
* result[2] = sqr_sum;
* result[3] = mod_max;
* result[4] = min;
* result[5] = max;
* result[6] = z_entries;*/
//
// allocate device memory
//
#ifdef DEBUG
std::cout << "Initialized results containers: " << std::endl;
printContainer(results, 7);
printContainer(results2, 7);
std::cout << "Allocating CUDA arrays..." << std::endl;
#endif
double *cuda_x;
double *cuda_results;
double *cuda_scalar;
hipMalloc(&cuda_x, sizeof(double) * N);
hipMalloc(&cuda_results, sizeof(double) * 7);
hipMalloc(&cuda_scalar, sizeof(double));
//
// Copy data to GPU
//
#ifdef DEBUG
std::cout << "Copying data to GPU..." << std::endl;
#endif
hipMemcpy(cuda_x, x, sizeof(double) * N, hipMemcpyHostToDevice);
//
// Let CUBLAS do the work:
//
// std::cout << "Running dot products with CUBLAS..." << std::endl;
// timer.reset();
// for (size_t i = 0; i < k; ++i) {
// hipblasDdot(h, N, cuda_x, 1, cuda_y[i], 1, results2 + i);
// }
// double time_cublas = timer.get();
//
// Let xDOTy do the work:
//
#ifdef DEBUG
std::cout << "Running with analyze_x_shared..." << std::endl;
#endif
hipMemcpy(cuda_results, results, sizeof(double) * 7, hipMemcpyHostToDevice);
timer.reset();
hipLaunchKernelGGL(( analyze_x_shared), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_x, cuda_results);
hipMemcpy(results, cuda_results, sizeof(double) * 7, hipMemcpyDeviceToHost);
double time_shared = timer.get();
#ifdef DEBUG
std::cout << "Running analyze_x_warp<GS, BS>..." << std::endl;
#endif
hipMemcpy(cuda_results, results2, sizeof(double) * 7, hipMemcpyHostToDevice);
timer.reset();
hipLaunchKernelGGL(( analyze_x_warp), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_x, cuda_results);
hipMemcpy(results2, cuda_results, sizeof(double) * 7, hipMemcpyDeviceToHost);
double time_warp = timer.get();
#ifdef DEBUG
std::cout << "Running analyze_x_warp<N/BS, BS>..." << std::endl;
#endif
hipMemcpy(cuda_results, results3, sizeof(double) * 7, hipMemcpyHostToDevice);
timer.reset();
hipLaunchKernelGGL(( analyze_x_warp), dim3(max(1,(int)N/BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, 0, N, cuda_x, cuda_results);
hipMemcpy(results3, cuda_results, sizeof(double) * 7, hipMemcpyDeviceToHost);
double time_warp_adapt = timer.get();
#ifdef DEBUG
std::cout << "Running dot product xDOTy..." << std::endl;
#endif
double dot = 0.0;
hipMemcpy(cuda_scalar, &dot, sizeof(double), hipMemcpyHostToDevice);
timer.reset();
hipLaunchKernelGGL(( xDOTy), dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, N, cuda_x, cuda_x, cuda_scalar);
hipMemcpy(&dot, cuda_scalar, sizeof(double), hipMemcpyDeviceToHost);
double time_dot = timer.get();
//
// Compare results
//
#ifdef DEBUG
std::cout << "DEBUG output:" << std::endl;
std::cout << "x:" << std::endl;
int only = 4;
printContainer(x, N, only);
std::cout << ">SHARED<" << std::endl;
printResults(results, results_ref, names, names.size());
std::cout << ">WARP<" << std::endl;
printResults(results2, results_ref, names, names.size());
std::cout << "GPU shared runtime: " << time_shared << std::endl;
std::cout << "GPU warp runtime: " << time_warp << std::endl;
std::cout << "GPU warp adaptive runtime: " << time_warp_adapt << std::endl;
std::cout << "GPU dot runtime: " << time_dot << std::endl;
std::cout << "CPU ref runtime: " << time_cpuref << std::endl;
//
// Clean up:
//
std::cout << "Cleaning up..." << std::endl;
std::cout << "----------------------------------------------------" << std::endl;
#endif
#ifdef CSV
std::string sep = ";";
csv_times << N << sep << time_shared << sep << time_warp << sep << time_warp_adapt << sep << time_dot << sep << time_cpuref << std::endl;
toCSV(csv_results, results, 7);
toCSV(csv_results2, results2, 7);
toCSV(csv_results3, results3, 7);
toCSV(csv_results_ref, results_ref, 7);
#endif
free(x);
free(results);
free(results2);
free(results3);
free(results_ref);
hipFree(cuda_x);
hipFree(cuda_results);
hipFree(cuda_scalar);
hipblasDestroy(h);
}
#ifdef CSV
csv_times.close();
csv_results.close();
csv_results2.close();
csv_results3.close();
csv_results_ref.close();
std::cout << "\nRuntimes in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_times_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results2_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results3_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results_ref_name << std::endl;
#endif
return EXIT_SUCCESS;
} | d6c9f0dd8541b7f096d2981c8f2f3fe137098b55.cu | #include "timer.hpp"
#include <algorithm>
#include <numeric>
#include <cmath>
#include <cublas_v2.h>
#include <cuda_runtime.h>
#include <fstream>
#include <iostream>
#include <stdio.h>
#include <string>
#include <vector>
#define BLOCK_SIZE 256
#define GRID_SIZE 128
// #define SEP ";"
// #define DEBUG
#ifndef DEBUG
#define CSV
#endif
template <typename T>
void printContainer(T container, const int size) {
std::cout << container[0];
for (int i = 1; i < size; ++i)
std::cout << " | " << container[i] ;
std::cout << std::endl;
}
template <typename T>
void printContainer(T container, const int size, const int only) {
std::cout << container[0];
for (int i = 1; i < only; ++i)
std::cout << " | " << container[i];
std::cout << " | ...";
for (int i = size - only; i < size; ++i)
std::cout << " | " << container[i];
std::cout << std::endl;
}
void printResults(double* results, std::vector<std::string> names, int size){
std::cout << "Results:" << std::endl;
for (int i = 0; i < size; ++i) {
std::cout << names[i] << " : " << results[i] << std::endl;
}
}
void printResults(double* results, double* ref, std::vector<std::string> names, int size){
std::cout << "Results (with difference to reference):" << std::endl;
for (int i = 0; i < size; ++i) {
std::cout << names[i] << " = " << results[i] << " || " << ref[i] - results[i] << std::endl;
}
}
// ------------------ KERNELS ---------------
/** atomicMax for double
*
* References:
* (1) https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#atomicmax
* (2) https://www.micc.unifi.it/bertini/download/gpu-programming-basics/2017/gpu_cuda_5.pdf
* (3) https://stackoverflow.com/questions/17399119/cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
__device__ void atomicMax(double* address, double val){
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(fmax(val, __longlong_as_double(assumed))));
// atomicCAS returns the value that was stored at address BEFORE the operation,
// so the loop repeats until no other thread has modified *address in between
// (i.e. until the compare-and-swap actually succeeded).
} while (assumed != old);
}
/** atomicMin for double
*/
__device__ void atomicMin(double* address, double val){
unsigned long long int* address_as_ull = (unsigned long long int*) address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(fmin(val, __longlong_as_double(assumed))));
// atomicCAS returns the value that was stored at address BEFORE the operation,
// so the loop repeats until no other thread has modified *address in between
// (i.e. until the compare-and-swap actually succeeded).
} while (assumed != old);
}
/** scalar = x DOT y
*/
__global__ void xDOTy(const int N, double *x, double *y, double *scalar) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
const int stride = blockDim.x * gridDim.x;
__shared__ double cache[BLOCK_SIZE];
double tid_sum = 0.0;
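// grid-stride loop: each thread accumulates a private partial sum of the dot product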
for (; tid < N; tid += stride) {
double tmp_x = x[tid];
tid_sum += tmp_x * y[tid];
}
tid = threadIdx.x;
cache[tid] = tid_sum;
__syncthreads();
for (int i = blockDim.x / 2; i != 0; i /= 2) {
__syncthreads();
if (tid < i) // lower half does the work, rest idles
cache[tid] += cache[tid + i]; // lower looks up by stride and sums up
}
if (tid == 0) // cache[0] now contains block_sum
{
atomicAdd(scalar, cache[0]);
}
}
/** analyze_x_shared
*
* result[0] = sum;
* result[1] = abs_sum;
* result[2] = sqr_sum;
* result[3] = mod_max;
* result[4] = min;
* result[5] = max;
* result[6] = z_entries;
*/
// template <int block_size=BLOCK_SIZE>
__global__ void analyze_x_shared(const int N, double *x, double *results) {
if (blockDim.x * blockIdx.x < N) {
int tid = threadIdx.x + blockDim.x * blockIdx.x; // global tid
const int stride = blockDim.x * gridDim.x;
__shared__ double cache[7][BLOCK_SIZE];
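// one shared-memory row per statistic: sum, abs_sum, sqr_sum, mod_max, min, max, zero-entry count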
double sum = 0.0, abs_sum = 0.0, sqr_sum = 0.0;
// double mod_max = 0.0;
double max = x[0];
double min = max;
double z_entries = 0;
for (; tid < N; tid += stride) {
double value = x[tid];
sum += value;
abs_sum += std::abs(value);
sqr_sum += value*value;
// mod_max = (std::abs(value) > mod_max)? value : mod_max;
min = fmin(value, min);
max = fmax(value, max);
z_entries += (value)? 0 : 1;
}
tid = threadIdx.x; // block tid
cache[0][tid] = sum;
cache[1][tid] = abs_sum;
cache[2][tid] = sqr_sum;
cache[3][tid] = fmax(std::abs(min), max);
cache[4][tid] = min;
cache[5][tid] = max;
cache[6][tid] = z_entries;
__syncthreads();
for (int i = blockDim.x / 2; i != 0; i /= 2) {
__syncthreads();
if (tid < i) { // lower half does the work, rest idles
// sums
cache[0][tid] += cache[0][tid + i];
cache[1][tid] += cache[1][tid + i];
cache[2][tid] += cache[2][tid + i];
// min/max values
cache[3][tid] = fmax(cache[3][tid + i], cache[3][tid]); // already all values are std::abs(...)
cache[4][tid] = fmin(cache[4][tid + i], cache[4][tid]);
cache[5][tid] = fmax(cache[5][tid + i], cache[5][tid]);
// "sum"
cache[6][tid] += cache[6][tid + i];
}
}
if (tid == 0) // cache[0] now contains block_sum
{
atomicAdd(results, cache[0][0]);
atomicAdd(results+1, cache[1][0]);
atomicAdd(results+2, cache[2][0]);
// Ideally...
atomicMax(results+3, cache[3][0]);
atomicMin(results+4, cache[4][0]);
atomicMax(results+5, cache[5][0]);
atomicAdd(results+6, cache[6][0]);
}
}
}
/** analyze_x_shared
*
* result[0] = sum;
* result[1] = abs_sum;
* result[2] = sqr_sum;
* result[3] = mod_max;
* result[4] = min;
* result[5] = max;
* result[6] = z_entries;
*/
__global__ void analyze_x_warp(const int N, double *x, double *results) {
if (blockDim.x * blockIdx.x < N) {
int tid = threadIdx.x + blockDim.x * blockIdx.x; // global tid
const int stride = blockDim.x * gridDim.x;
double sum = 0.0, abs_sum = 0.0, sqr_sum = 0.0;
// double mod_max = 0.0;
double max = x[0];
double min = max;
int z_entries = 0;
for (; tid < N; tid += stride) {
double value = x[tid];
sum += value;
abs_sum += std::abs(value);
sqr_sum += value*value;
min = fmin(value, min);
max = fmax(value, max);
z_entries += (value)? 0 : 1;
}
tid = threadIdx.x; // block tid
double mod_max = fmax(std::abs(min), max);
__syncthreads();
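// warp-level reduction: __shfl_down_sync folds each running statistic across the lanes of a warp without shared memory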
for (int i = warpSize / 2; i != 0; i /= 2) {
//__syncthreads();
sum += __shfl_down_sync(0xffffffff, sum, i);
abs_sum += __shfl_down_sync(0xffffffff, abs_sum, i);
sqr_sum += __shfl_down_sync(0xffffffff, sqr_sum, i);
double tmp = __shfl_down_sync(0xffffffff, mod_max, i);
mod_max = fmax(tmp, mod_max);
tmp = __shfl_down_sync(0xffffffff, min, i);
min = fmin(tmp, min);
tmp = __shfl_down_sync(0xffffffff, max, i);
max = fmax(tmp, max) ;
z_entries += __shfl_down_sync(0xffffffff, z_entries, i);
}
// for (int i = blockDim.x / 2; i != 0; i /= 2) {
// for (int i = warpSize / 2; i != 0; i /= 2) {
// //__syncthreads();
// sum += __shfl_xor_sync(-1, sum, i);
// abs_sum += __shfl_xor_sync(-1, abs_sum, i);
// sqr_sum += __shfl_xor_sync(-1, sqr_sum, i);
// double tmp = __shfl_xor_sync(-1, mod_max, i);
// mod_max = (tmp > mod_max) ? tmp : mod_max;
// tmp = __shfl_xor_sync(-1, min, i);
// min = (tmp < min) ? tmp : min;
// tmp = __shfl_xor_sync(-1, max, i);
// max = (tmp > max) ? tmp : max;
// z_entries += __shfl_xor_sync(-1, z_entries, i);
// }
if (tid % warpSize == 0) // a block can consist of multiple warps
{
atomicAdd(results, sum);
atomicAdd(results+1, abs_sum);
atomicAdd(results+2, sqr_sum);
atomicMax(results+3, mod_max);
atomicMin(results+4, min);
atomicMax(results+5, max);
atomicAdd(results+6, z_entries);
}
}
}
template <typename T>
void toCSV(std::fstream& csv, T* array, int size) {
csv << size;
for (int i = 0; i < size; ++i) {
csv << ";" << array[i];
}
csv << std::endl;
}
int main(void) {
Timer timer;
std::vector<int> vec_Ns{100, 10000, 1000000, 100000000};
// std::vector<int> vec_Ns{100, 1000};
#ifdef CSV
std::fstream csv_times, csv_results, csv_results2, csv_results3, csv_results_ref;
std::string csv_times_name = "ph_data.csv";
std::string csv_results_name = "ph_results.csv";
std::string csv_results2_name = "ph_results2.csv";
std::string csv_results3_name = "ph_results3.csv";
std::string csv_results_ref_name = "ph_results_ref.csv";
csv_times.open(csv_times_name, std::fstream::out | std::fstream::trunc);
csv_results.open(csv_results_name, std::fstream::out | std::fstream::trunc);
csv_results2.open(csv_results2_name, std::fstream::out | std::fstream::trunc);
csv_results3.open(csv_results3_name, std::fstream::out | std::fstream::trunc);
csv_results_ref.open(csv_results_ref_name, std::fstream::out | std::fstream::trunc);
std::string header = "N;time_shared;time_warp;time_warp_adapt;time_dot;time_cpuref";
// to csv file
csv_times << header << std::endl;
std::string header_results = "N;sum;abs_sum;sqr_sum;mod_max;min;max;z_entries";
csv_results << header_results << std::endl;
csv_results2 << header_results << std::endl;
csv_results3 << header_results << std::endl;
csv_results_ref << header_results << std::endl;
#endif
for (int& N : vec_Ns) {
//
// Initialize CUBLAS:
//
#ifdef DEBUG
std::cout << "N = " << N << std::endl;
std::cout << "Init CUBLAS..." << std::endl;
#endif
cublasHandle_t h;
cublasCreate(&h);
//
// allocate + init host memory:
//
#ifdef DEBUG
std::cout << "Allocating host arrays..." << std::endl;
#endif
double *x = (double *)malloc(sizeof(double) * N);
double *results = (double *)malloc(sizeof(double) * 7);
double *results2 = (double *)malloc(sizeof(double) * 7);
double *results3 = (double *)malloc(sizeof(double) * 7);
double *results_ref = (double *)malloc(sizeof(double) * 7);
std::vector<std::string> names {"sum", "abs_sum", "sqr_sum", "mod_max", "min", "max", "zero_entries"};
std::generate_n(x, N, [n = -N/2] () mutable { return n++; });
std::random_shuffle(x, x+N);
// I'm placing some values here by hand, so that certain results can be forced
// --> to test: mod_max, min, max...
x[0] = -1.1;
x[N/5] = 0.;
x[N/3] = -(N-1);
x[2*N/3] = N;
std::fill(results, results+7, 0.0);
results[3] = x[0];
results[4] = x[0];
results[5] = x[0];
std::copy(results, results+7, results2);
std::copy(results, results+7, results3);
std::copy(results, results+7, results_ref);
timer.reset();
// results_ref[0] = std::accumulate(x, x+N, 0.0);
for (int i = 0; i < N; ++i){
double tmp = x[i];
results_ref[0] += tmp;
results_ref[1] += std::abs(tmp);
results_ref[2] += tmp*tmp;
results_ref[4] = fmin(tmp, results_ref[4]);
results_ref[5] = fmax(tmp, results_ref[5]);
results_ref[6] += tmp ? 0 : 1;
}
results_ref[3] = fmax(std::abs(results_ref[4]), results_ref[5]);
double time_cpuref = timer.get();
/*result[0] = sum;
* result[1] = abs_sum;
* result[2] = sqr_sum;
* result[3] = mod_max;
* result[4] = min;
* result[5] = max;
* result[6] = z_entries;*/
//
// allocate device memory
//
#ifdef DEBUG
std::cout << "Initialized results containers: " << std::endl;
printContainer(results, 7);
printContainer(results2, 7);
std::cout << "Allocating CUDA arrays..." << std::endl;
#endif
double *cuda_x;
double *cuda_results;
double *cuda_scalar;
cudaMalloc(&cuda_x, sizeof(double) * N);
cudaMalloc(&cuda_results, sizeof(double) * 7);
cudaMalloc(&cuda_scalar, sizeof(double));
//
// Copy data to GPU
//
#ifdef DEBUG
std::cout << "Copying data to GPU..." << std::endl;
#endif
cudaMemcpy(cuda_x, x, sizeof(double) * N, cudaMemcpyHostToDevice);
//
// Let CUBLAS do the work:
//
// std::cout << "Running dot products with CUBLAS..." << std::endl;
// timer.reset();
// for (size_t i = 0; i < k; ++i) {
// cublasDdot(h, N, cuda_x, 1, cuda_y[i], 1, results2 + i);
// }
// double time_cublas = timer.get();
//
// Let xDOTy do the work:
//
#ifdef DEBUG
std::cout << "Running with analyze_x_shared..." << std::endl;
#endif
cudaMemcpy(cuda_results, results, sizeof(double) * 7, cudaMemcpyHostToDevice);
timer.reset();
analyze_x_shared<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_x, cuda_results);
cudaMemcpy(results, cuda_results, sizeof(double) * 7, cudaMemcpyDeviceToHost);
double time_shared = timer.get();
#ifdef DEBUG
std::cout << "Running analyze_x_warp<GS, BS>..." << std::endl;
#endif
cudaMemcpy(cuda_results, results2, sizeof(double) * 7, cudaMemcpyHostToDevice);
timer.reset();
analyze_x_warp<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_x, cuda_results);
cudaMemcpy(results2, cuda_results, sizeof(double) * 7, cudaMemcpyDeviceToHost);
double time_warp = timer.get();
#ifdef DEBUG
std::cout << "Running analyze_x_warp<N/BS, BS>..." << std::endl;
#endif
cudaMemcpy(cuda_results, results3, sizeof(double) * 7, cudaMemcpyHostToDevice);
timer.reset();
analyze_x_warp<<<max(1,(int)N/BLOCK_SIZE), BLOCK_SIZE>>>(N, cuda_x, cuda_results);
cudaMemcpy(results3, cuda_results, sizeof(double) * 7, cudaMemcpyDeviceToHost);
double time_warp_adapt = timer.get();
#ifdef DEBUG
std::cout << "Running dot product xDOTy..." << std::endl;
#endif
double dot = 0.0;
cudaMemcpy(cuda_scalar, &dot, sizeof(double), cudaMemcpyHostToDevice);
timer.reset();
xDOTy<<<GRID_SIZE, BLOCK_SIZE>>>(N, cuda_x, cuda_x, cuda_scalar);
cudaMemcpy(&dot, cuda_scalar, sizeof(double), cudaMemcpyDeviceToHost);
double time_dot = timer.get();
//
// Compare results
//
#ifdef DEBUG
std::cout << "DEBUG output:" << std::endl;
std::cout << "x:" << std::endl;
int only = 4;
printContainer(x, N, only);
std::cout << ">SHARED<" << std::endl;
printResults(results, results_ref, names, names.size());
std::cout << ">WARP<" << std::endl;
printResults(results2, results_ref, names, names.size());
std::cout << "GPU shared runtime: " << time_shared << std::endl;
std::cout << "GPU warp runtime: " << time_warp << std::endl;
std::cout << "GPU warp adaptive runtime: " << time_warp_adapt << std::endl;
std::cout << "GPU dot runtime: " << time_dot << std::endl;
std::cout << "CPU ref runtime: " << time_cpuref << std::endl;
//
// Clean up:
//
std::cout << "Cleaning up..." << std::endl;
std::cout << "----------------------------------------------------" << std::endl;
#endif
#ifdef CSV
std::string sep = ";";
csv_times << N << sep << time_shared << sep << time_warp << sep << time_warp_adapt << sep << time_dot << sep << time_cpuref << std::endl;
toCSV(csv_results, results, 7);
toCSV(csv_results2, results2, 7);
toCSV(csv_results3, results3, 7);
toCSV(csv_results_ref, results_ref, 7);
#endif
free(x);
free(results);
free(results2);
free(results3);
free(results_ref);
cudaFree(cuda_x);
cudaFree(cuda_results);
cudaFree(cuda_scalar);
cublasDestroy(h);
}
#ifdef CSV
csv_times.close();
csv_results.close();
csv_results2.close();
csv_results3.close();
csv_results_ref.close();
std::cout << "\nRuntimes in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_times_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results2_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results3_name << std::endl;
std::cout << "\nResults in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_results_ref_name << std::endl;
#endif
return EXIT_SUCCESS;
} |
1c7515871364dcfe702ffaa994106243dcac532e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void
mat_transpose(float *a, float *out, int size_x, int size_y)
{
const int i = blockDim.y * blockIdx.y + threadIdx.y,
j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size_x && j < size_y)
{
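// note: this indexing is only correct for square matrices (size_x == size_y); a general transpose would write out[j * size_x + i]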
out[j * size_y + i] = a[i * size_y + j];
}
}
| 1c7515871364dcfe702ffaa994106243dcac532e.cu | __global__ void
mat_transpose(float *a, float *out, int size_x, int size_y)
{
const int i = blockDim.y * blockIdx.y + threadIdx.y,
j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < size_x && j < size_y)
{
out[j * size_y + i] = a[i * size_y + j];
}
}
|
d21f8a525648653851e4054ed2a71b5f7ac9be91.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void gpuFunc_copiarLayer(float *layer, float *layer_copy)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
layer_copy[globalId]=layer[globalId];
}
__global__ void gpuFunc_actualiza(float *layer, int posicion, float energia)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
/* Update function (actualiza) */
int distancia = posicion - globalId;
if ( distancia < 0 ) distancia = - distancia;
/* 2. The impact point has distance 1 */
distancia = distancia + 1;
/* 3. Square root of the distance */
//float atenuacion = (float)distancia*distancia;
//float atenuacion = (float)distancia / PI;
float atenuacion = sqrtf( (float)distancia );
/* 4. Compute the attenuated energy */
float energia_k = energia / atenuacion;
/* 5. Do not add if the absolute value is below the threshold */
if ( energia_k >= 0.001f || energia_k <= -0.001f )
layer[globalId] = layer[globalId] + energia_k;
}
__global__ void gpuFunc_extremos(float *layer, float *layer_copy, int layer_size)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(globalId > 0 && globalId < layer_size-1){
layer[globalId] = ( layer_copy[globalId-1] + layer_copy[globalId] + layer_copy[globalId+1] ) / 3;
}
}
__global__ void gpuFunc_maximos(float *layer, int *posiciones, float *maximos, int layer_size, int i)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
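// note: maximos[i]/posiciones[i] are updated by many threads without atomics (a data race),
// and layer[globalId+1] is read one element past the end when globalId == layer_size-1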
if(globalId > 0 && globalId < layer_size){
if ( layer[globalId] > layer[globalId-1] && layer[globalId] > layer[globalId+1] ) {
if ( layer[globalId] > maximos[i] ) {
maximos[i] = layer[globalId];
posiciones[i] = globalId;
}
}
}
}
| d21f8a525648653851e4054ed2a71b5f7ac9be91.cu |
__global__ void gpuFunc_copiarLayer(float *layer, float *layer_copy)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
layer_copy[globalId]=layer[globalId];
}
__global__ void gpuFunc_actualiza(float *layer, int posicion, float energia)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
/* Update function (actualiza) */
int distancia = posicion - globalId;
if ( distancia < 0 ) distancia = - distancia;
/* 2. The impact point has distance 1 */
distancia = distancia + 1;
/* 3. Square root of the distance */
//float atenuacion = (float)distancia*distancia;
//float atenuacion = (float)distancia / PI;
float atenuacion = sqrtf( (float)distancia );
/* 4. Compute the attenuated energy */
float energia_k = energia / atenuacion;
/* 5. Do not add if the absolute value is below the threshold */
if ( energia_k >= 0.001f || energia_k <= -0.001f )
layer[globalId] = layer[globalId] + energia_k;
}
__global__ void gpuFunc_extremos(float *layer, float *layer_copy, int layer_size)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
if(globalId > 0 && globalId < layer_size-1){
layer[globalId] = ( layer_copy[globalId-1] + layer_copy[globalId] + layer_copy[globalId+1] ) / 3;
}
}
__global__ void gpuFunc_maximos(float *layer, int *posiciones, float *maximos, int layer_size, int i)
{
/* Formula to compute the global thread position */
int bloqueId = blockIdx.x + blockIdx.y * gridDim.x;
int globalId = bloqueId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
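// note: maximos[i]/posiciones[i] are updated by many threads without atomics (a data race),
// and layer[globalId+1] is read one element past the end when globalId == layer_size-1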
if(globalId > 0 && globalId < layer_size){
if ( layer[globalId] > layer[globalId-1] && layer[globalId] > layer[globalId+1] ) {
if ( layer[globalId] > maximos[i] ) {
maximos[i] = layer[globalId];
posiciones[i] = globalId;
}
}
}
}
|
1b58d79d16fc51e1a956f02ac1ca0222678a469c.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorFFT.cu"
#else
#if defined(THC_REAL_IS_ZFLOAT) || defined(THC_REAL_IS_ZDOUBLE)
// THC_API int THCState_getNumCuFFTPlans(THCState* state);
// THC_API hipfftHandle* THCState_getCuFFTPlan(THCState* state,int batch, int n1, int n2, int n3) ;
void THCTensor_(fftnbase)(THCState *state, THCTensor *self, THCTensor *result, int direction) {
int ndim = THCTensor_(nDimension)(state, result);
int batch = 1;
int *fft_dims = (int*)malloc(ndim*sizeof(int));
for (int i = 0; i < ndim; i++) {
fft_dims[i] = (int) THCTensor_(size)(state, self, i);
}
hipfftHandle plan;
cufftSafeCall(hipfftPlanMany(&plan, ndim, fft_dims, NULL, 1, 0, NULL, 1, 0, cufftname, batch));
cufftSafeCall(hipfftSetStream(plan, THCState_getCurrentStream(state)));
cufftSafeCall(cufft(plan, (cureal *)THCTensor_(data)(state, self), (cureal *)THCTensor_(data)(state, result), direction));
hipfftDestroy(plan);
free(fft_dims);
}
// takes the first dimension as batch dimension
void THCTensor_(fftnBatchedbase)(THCState *state, THCTensor *self, THCTensor *result, int direction) {
int ndim = THCTensor_(nDimension)(state, self) -1;
int batch = THCTensor_(size)(state, self, 0);
int *fft_dims = (int*)malloc(ndim*sizeof(int));
FILE *f;
//f = fopen("/home/philipp/fftnBatchedbase.log", "a+");
//fprintf(f,"fftnBatchedbase start" );
for (int i = 1; i <= ndim ; i++) {
fft_dims[i - 1] = (int) THCTensor_(size)(state, self, i);
//fprintf(f,"fft_dims[i - 1] = %d",fft_dims[i - 1]);
}
hipfftHandle handle;
cufftSafeCall(hipfftPlanMany(&handle, ndim, fft_dims, NULL, 1, 0, NULL, 1, 0, cufftname, batch));
//fprintf(f,"hipfftPlanMany\n");
//fclose(f);
cufftSafeCall(hipfftSetStream(handle, THCState_getCurrentStream(state)));
//fprintf(f,"hipfftSetStream\n");
cufftSafeCall(cufft(handle, (cureal *)THCTensor_(data)(state, self), (cureal *)THCTensor_(data)(state, result), direction));
//fprintf(f,"cufft\n");
hipfftDestroy(handle);
free(fft_dims);
}
void THCTensor_(fftnBatched)(THCState *state, THCTensor *self, THCTensor *result) {
THCTensor_(fftnBatchedbase)(state, self, result, HIPFFT_FORWARD);
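// scale by 1/sqrt(N) so the transform pair is unitary: fftnBatched followed by ifftnBatched returns the original data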
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
void THCTensor_(ifftnBatched)(THCState *state, THCTensor *self, THCTensor *result) {
THCTensor_(fftnBatchedbase)(state, self, result, HIPFFT_BACKWARD);
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
void THCTensor_(fft)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 1)
THError("tensor must at least have dimension 1\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-1; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(fftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(fft2)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 2)
THError("tensor must at least have dimension 2\n");
int self_ndim = THCTensor_(nDimension)(state, self);
int res_ndim = THCTensor_(nDimension)(state, result);
// fprintf(f,"(self_dim,res_dim) = (%d,%d)\n",self_ndim,res_ndim);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-2; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
//fprintf(f,"self_batch_dim = %d\n",self_batch_dim);
//fprintf(f,"dim1 = %d\n",THCTensor_(size)(state, self, self_ndim-2));
//fprintf(f,"dim2 = %d\n",THCTensor_(size)(state, self, self_ndim-1));
THLongStorage *new_self_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
// fprintf(f,"after THLongStorage_newWithSize3\n");
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
// fprintf(f,"after newView\n");
THLongStorage *new_result_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
// fprintf(f,"after THLongStorage_newWithSize3\n");
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
// fprintf(f,"after newView\n");
// fclose(f);
THCTensor_(fftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(fft3)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 3)
THError("tensor must at least have dimension 3\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-3; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(fftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(fftn)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
THCTensor_(fftnbase)(state, self, result, HIPFFT_FORWARD);
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
void THCTensor_(ifft)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 1)
THError("tensor must at least have dimension 1\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-1; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(ifftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(ifft2)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 2)
THError("tensor must at least have dimension 2\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-2; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(ifftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(ifft3)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 3)
THError("tensor must at least have dimension 3\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-3; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(ifftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(ifftn)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
THCTensor_(fftnbase)(state, self, result, HIPFFT_BACKWARD);
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
#endif
#endif
| 1b58d79d16fc51e1a956f02ac1ca0222678a469c.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorFFT.cu"
#else
#if defined(THC_REAL_IS_ZFLOAT) || defined(THC_REAL_IS_ZDOUBLE)
// THC_API int THCState_getNumCuFFTPlans(THCState* state);
// THC_API cufftHandle* THCState_getCuFFTPlan(THCState* state,int batch, int n1, int n2, int n3) ;
void THCTensor_(fftnbase)(THCState *state, THCTensor *self, THCTensor *result, int direction) {
int ndim = THCTensor_(nDimension)(state, result);
int batch = 1;
int *fft_dims = (int*)malloc(ndim*sizeof(int));
for (int i = 0; i < ndim; i++) {
fft_dims[i] = (int) THCTensor_(size)(state, self, i);
}
cufftHandle plan;
cufftSafeCall(cufftPlanMany(&plan, ndim, fft_dims, NULL, 1, 0, NULL, 1, 0, cufftname, batch));
cufftSafeCall(cufftSetStream(plan, THCState_getCurrentStream(state)));
cufftSafeCall(cufft(plan, (cureal *)THCTensor_(data)(state, self), (cureal *)THCTensor_(data)(state, result), direction));
cufftDestroy(plan);
free(fft_dims);
}
// takes the first dimension as batch dimension
void THCTensor_(fftnBatchedbase)(THCState *state, THCTensor *self, THCTensor *result, int direction) {
int ndim = THCTensor_(nDimension)(state, self) -1;
int batch = THCTensor_(size)(state, self, 0);
int *fft_dims = (int*)malloc(ndim*sizeof(int));
FILE *f;
//f = fopen("/home/philipp/fftnBatchedbase.log", "a+");
//fprintf(f,"fftnBatchedbase start" );
for (int i = 1; i <= ndim ; i++) {
fft_dims[i - 1] = (int) THCTensor_(size)(state, self, i);
//fprintf(f,"fft_dims[i - 1] = %d",fft_dims[i - 1]);
}
cufftHandle handle;
cufftSafeCall(cufftPlanMany(&handle, ndim, fft_dims, NULL, 1, 0, NULL, 1, 0, cufftname, batch));
//fprintf(f,"cufftPlanMany\n");
//fclose(f);
cufftSafeCall(cufftSetStream(handle, THCState_getCurrentStream(state)));
//fprintf(f,"cufftSetStream\n");
cufftSafeCall(cufft(handle, (cureal *)THCTensor_(data)(state, self), (cureal *)THCTensor_(data)(state, result), direction));
//fprintf(f,"cufft\n");
cufftDestroy(handle);
free(fft_dims);
}
void THCTensor_(fftnBatched)(THCState *state, THCTensor *self, THCTensor *result) {
THCTensor_(fftnBatchedbase)(state, self, result, CUFFT_FORWARD);
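// scale by 1/sqrt(N) so the transform pair is unitary: fftnBatched followed by ifftnBatched returns the original data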
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
void THCTensor_(ifftnBatched)(THCState *state, THCTensor *self, THCTensor *result) {
THCTensor_(fftnBatchedbase)(state, self, result, CUFFT_INVERSE);
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
void THCTensor_(fft)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 1)
THError("tensor must at least have dimension 1\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-1; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(fftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(fft2)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 2)
THError("tensor must at least have dimension 2\n");
int self_ndim = THCTensor_(nDimension)(state, self);
int res_ndim = THCTensor_(nDimension)(state, result);
// fprintf(f,"(self_dim,res_dim) = (%d,%d)\n",self_ndim,res_ndim);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-2; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
//fprintf(f,"self_batch_dim = %d\n",self_batch_dim);
//fprintf(f,"dim1 = %d\n",THCTensor_(size)(state, self, self_ndim-2));
//fprintf(f,"dim2 = %d\n",THCTensor_(size)(state, self, self_ndim-1));
THLongStorage *new_self_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
// fprintf(f,"after THLongStorage_newWithSize3\n");
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
// fprintf(f,"after newView\n");
THLongStorage *new_result_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
// fprintf(f,"after THLongStorage_newWithSize3\n");
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
// fprintf(f,"after newView\n");
// fclose(f);
THCTensor_(fftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(fft3)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 3)
THError("tensor must at least have dimension 3\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-3; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(fftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(fftn)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
THCTensor_(fftnbase)(state, self, result, CUFFT_FORWARD);
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
void THCTensor_(ifft)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 1)
THError("tensor must at least have dimension 1\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-1; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize2( self_batch_dim, THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(ifftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(ifft2)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 2)
THError("tensor must at least have dimension 2\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-2; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize3( self_batch_dim, THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(ifftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(ifft3)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
if(THCTensor_(nDimension)(state, self) < 3)
THError("tensor must at least have dimension 3\n");
int self_ndim = THCTensor_(nDimension)(state, self);
if (!THCTensor_(isSameSizeAs)(state, self, result))
THError("self_ndim must be equal result_ndim\n");
int self_batch_dim = 1;
for(int i = 0; i< self_ndim-3; i++){
self_batch_dim *= THCTensor_(size)(state, self, i);
}
THLongStorage *new_self_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_self = THCTensor_(newView)(state, self, new_self_size);
THLongStorage *new_result_size = THLongStorage_newWithSize4( self_batch_dim, THCTensor_(size)(state, self, self_ndim-3),THCTensor_(size)(state, self, self_ndim-2),THCTensor_(size)(state, self, self_ndim-1));
THCTensor *new_result = THCTensor_(newView)(state, result, new_result_size);
THCTensor_(ifftnBatched)(state,new_self,new_result);
THLongStorage_free(new_self_size);
THLongStorage_free(new_result_size);
THCTensor_(free)(state,new_result);
THCTensor_(free)(state,new_self);
}
void THCTensor_(ifftn)(THCState *state, THCTensor *result, THCTensor *self) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, result, self));
if (self != result)
THCTensor_(resizeAs)(state, result, self);
THCTensor_(fftnbase)(state, self, result, CUFFT_INVERSE);
THCTensor_(mul)(state, result, result, ccx(1 / sqrt(THCTensor_(nElement)(state, result)),0));
}
#endif
#endif
|
ddebfb7a8fc5e039d454d50930cdaa5a9a3c3ac8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include "../gpu_lib/header.h"
#include "../utils/header.h"
#include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <cstring>
namespace ftxj {
__global__ void bf_spmm(
float* Y0, // input
float* Y1,
int* roffW, // len neuron * N_SLAB - 1
int* colsW, // index 32 * neuron
float* valsW, // all 32 * neuron 0.0625
int COL_BLK, // TN, shared memory size = TN
int N_SLAB, // neuron / TN
int neuron // neuron
) {
extern __shared__ float shRow[];
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int rid = blockIdx.x;
__syncthreads();
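// One thread block handles one row of Y0 (rid). The output row is produced in
// N_SLAB slabs of COL_BLK columns each, so a single slab fits in the dynamic
// shared-memory buffer shRow: threads accumulate valY * valW contributions into
// shRow with atomicAdd, then copy the finished slab out to Y1.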
for(int i = 0; i < N_SLAB; i++) {
__syncthreads();
for(int j = threadIdx.x; j < COL_BLK; j++) {
shRow[j] = 0;
}
__syncthreads();
for(int j = threadIdx.y; j < neuron; j += blockDim.y) {
float valY = Y0[rid * neuron + j];
// if(valY == 0) {
// continue;
// }
int begOffW = roffW[i * neuron + j] + threadIdx.x;
int endOffW = roffW[i * neuron + j + 1];
for(int k = begOffW; k < endOffW; k += blockDim.x) {
int colW = colsW[k];
float valW = valsW[k];
// if(colW - i * COL_BLK < 0 || colW - i * COL_BLK >= 1024) {
// printf("bugs %d %d %d %d\n", k, i, colW, colW - i * COL_BLK);
// }
atomicAdd(&shRow[colW - i * COL_BLK], valY * valW);
}
}
__syncthreads();
int count = 0;
for(size_t j = 0; j < COL_BLK; j += blockDim.x * blockDim.y) {
// float v = j + tid < COL_BLK ? shRow[j + tid] + bias : -1;
// count += __syncthreads_count(v > 0);
if(j + tid < COL_BLK) {
Y1[rid * neuron + i * COL_BLK + j + tid] = shRow[j + tid];
// min(T(32), max(T(0), v));
}
}
}
}
void test_benchmark_19_BF(COOMatrix &coo, BFMatrix &matrix,
int neuron, int batch, int TN,
int blockx, int blocky,
GpuEnv &env) {
float *nextfeat;
float *currfeat;
int *rowoff;
int off_size = neuron * (neuron / TN + 1) + 1;
int *rowindex;
int weight_nnz = 32 * neuron;
float *value;
float bias = 0;
int mybatch = batch;
// std::vector<std::vector<float>> input(mybatch, std::vector<float>(neuron, 0.0));
float * input = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(input, 0, sizeof(float) * neuron * mybatch);
float * output = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(output, 0, sizeof(float) * neuron * mybatch);
srand (static_cast <unsigned> (time(0)));
for(int i = 0; i < mybatch; ++i) {
for(int j = 0; j < neuron; ++j) {
float r2 = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX/32.0));
input[i * neuron + j] = r2;
}
}
Safe_Call(hipMalloc((void**)&rowoff, sizeof(int) * off_size));
Safe_Call(hipMemcpy(rowoff, &matrix.rowoff[0], sizeof(int) * off_size, hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&rowindex, sizeof(int) * weight_nnz));
Safe_Call(hipMemcpy(rowindex, &matrix.rowindex[0], sizeof(int) * weight_nnz, hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&value, sizeof(float) * weight_nnz));
Safe_Call(hipMemcpy(value, &matrix.val[0], sizeof(float) * weight_nnz, hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&currfeat, sizeof(float) * neuron * mybatch));
Safe_Call(hipMemcpy(currfeat, input, sizeof(float) * neuron * mybatch, hipMemcpyHostToDevice));
Safe_Call(hipMalloc((void**)&nextfeat, sizeof(float) * neuron * mybatch));
Safe_Call(hipMemset(nextfeat, 0, sizeof(float) * neuron * mybatch));
std::cout << "begin inference..." << std::endl;
env.add_event("uiuc_kernel_timer");
env.event_start_record("uiuc_kernel_timer");
dim3 block(blockx, blocky);
dim3 grid(batch);
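// Launch configuration: one block per batch row, blockx * blocky threads per
// block, and sizeof(float) * TN bytes of dynamic shared memory for one output
// slab (shRow) per block.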
hipLaunchKernelGGL(( bf_spmm), dim3(grid),dim3(block), sizeof(float) * TN, env.get_stream("uiuc_kernel_timer"),
currfeat, nextfeat, rowoff, rowindex, value, TN, neuron / TN, neuron
);
env.event_stop_record("uiuc_kernel_timer");
float time = env.get_event_time("uiuc_kernel_timer");
Safe_Call(hipMemcpy(output, nextfeat, sizeof(float) * neuron * mybatch, hipMemcpyDeviceToHost));
std::cout << "Kernel Exec Time [19-BF] = " << time << "ms"<< std::endl;
std::cout << "Flops [19-BF] = " << float(2 * batch * neuron * 32) / time * 1000 /1e12 << "TFLOPS"<< std::endl;
CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output, false, true, true);
}
}
| ddebfb7a8fc5e039d454d50930cdaa5a9a3c3ac8.cu | #include <cuda.h>
#include "../gpu_lib/header.h"
#include "../utils/header.h"
#include <stdio.h>
#include <algorithm>
#include <cstdlib>
#include <cstring>
namespace ftxj {
__global__ void bf_spmm(
float* Y0, // input
float* Y1,
int* roffW, // len neuron * N_SLAB - 1
int* colsW, // index 32 * neuron
float* valsW, // all 32 * neuron 0.0625
int COL_BLK, // TN, shared memory size = TN
int N_SLAB, // neuron / TN
int neuron // neuron
) {
extern __shared__ float shRow[];
int tid = threadIdx.y * blockDim.x + threadIdx.x;
int rid = blockIdx.x;
__syncthreads();
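// One thread block handles one row of Y0 (rid). The output row is produced in
// N_SLAB slabs of COL_BLK columns each, so a single slab fits in the dynamic
// shared-memory buffer shRow: threads accumulate valY * valW contributions into
// shRow with atomicAdd, then copy the finished slab out to Y1.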
for(int i = 0; i < N_SLAB; i++) {
__syncthreads();
for(int j = threadIdx.x; j < COL_BLK; j++) {
shRow[j] = 0;
}
__syncthreads();
for(int j = threadIdx.y; j < neuron; j += blockDim.y) {
float valY = Y0[rid * neuron + j];
// if(valY == 0) {
// continue;
// }
int begOffW = roffW[i * neuron + j] + threadIdx.x;
int endOffW = roffW[i * neuron + j + 1];
for(int k = begOffW; k < endOffW; k += blockDim.x) {
int colW = colsW[k];
float valW = valsW[k];
// if(colW - i * COL_BLK < 0 || colW - i * COL_BLK >= 1024) {
// printf("bugs %d %d %d %d\n", k, i, colW, colW - i * COL_BLK);
// }
atomicAdd(&shRow[colW - i * COL_BLK], valY * valW);
}
}
__syncthreads();
int count = 0;
for(size_t j = 0; j < COL_BLK; j += blockDim.x * blockDim.y) {
// float v = j + tid < COL_BLK ? shRow[j + tid] + bias : -1;
// count += __syncthreads_count(v > 0);
if(j + tid < COL_BLK) {
Y1[rid * neuron + i * COL_BLK + j + tid] = shRow[j + tid];
// min(T(32), max(T(0), v));
}
}
}
}
void test_benchmark_19_BF(COOMatrix &coo, BFMatrix &matrix,
int neuron, int batch, int TN,
int blockx, int blocky,
GpuEnv &env) {
float *nextfeat;
float *currfeat;
int *rowoff;
int off_size = neuron * (neuron / TN + 1) + 1;
int *rowindex;
int weight_nnz = 32 * neuron;
float *value;
float bias = 0;
int mybatch = batch;
// std::vector<std::vector<float>> input(mybatch, std::vector<float>(neuron, 0.0));
float * input = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(input, 0, sizeof(float) * neuron * mybatch);
float * output = (float*)malloc(sizeof(float) * neuron * mybatch);
memset(output, 0, sizeof(float) * neuron * mybatch);
srand (static_cast <unsigned> (time(0)));
for(int i = 0; i < mybatch; ++i) {
for(int j = 0; j < neuron; ++j) {
float r2 = static_cast <float> (rand()) / (static_cast <float> (RAND_MAX/32.0));
input[i * neuron + j] = r2;
}
}
Safe_Call(cudaMalloc((void**)&rowoff, sizeof(int) * off_size));
Safe_Call(cudaMemcpy(rowoff, &matrix.rowoff[0], sizeof(int) * off_size, cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&rowindex, sizeof(int) * weight_nnz));
Safe_Call(cudaMemcpy(rowindex, &matrix.rowindex[0], sizeof(int) * weight_nnz, cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&value, sizeof(float) * weight_nnz));
Safe_Call(cudaMemcpy(value, &matrix.val[0], sizeof(float) * weight_nnz, cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&currfeat, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMemcpy(currfeat, input, sizeof(float) * neuron * mybatch, cudaMemcpyHostToDevice));
Safe_Call(cudaMalloc((void**)&nextfeat, sizeof(float) * neuron * mybatch));
Safe_Call(cudaMemset(nextfeat, 0, sizeof(float) * neuron * mybatch));
std::cout << "begin inference..." << std::endl;
env.add_event("uiuc_kernel_timer");
env.event_start_record("uiuc_kernel_timer");
dim3 block(blockx, blocky);
dim3 grid(batch);
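// Launch configuration: one block per batch row, blockx * blocky threads per
// block, and sizeof(float) * TN bytes of dynamic shared memory for one output
// slab (shRow) per block.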
bf_spmm<<<grid,block, sizeof(float) * TN, env.get_stream("uiuc_kernel_timer")>>>(
currfeat, nextfeat, rowoff, rowindex, value, TN, neuron / TN, neuron
);
env.event_stop_record("uiuc_kernel_timer");
float time = env.get_event_time("uiuc_kernel_timer");
Safe_Call(cudaMemcpy(output, nextfeat, sizeof(float) * neuron * mybatch, cudaMemcpyDeviceToHost));
std::cout << "Kernel Exec Time [19-BF] = " << time << "ms"<< std::endl;
std::cout << "Flops [19-BF] = " << float(2 * batch * neuron * 32) / time * 1000 /1e12 << "TFLOPS"<< std::endl;
CpuSpmm::run_and_cmp(coo, input, neuron, mybatch, output, false, true, true);
}
}
|
9ed5922f3ec26f4520da75574cf21bece7b03ac5.hip | // !!! This is a file automatically generated by hipify!!!
//--------------------------------------------------------------------------
// Project:
// Select the least utilized GPU on a CUDA-enabled system.
// Insert into your code as desired.
//
// Prerequisites:
// Must have installed the CUDA toolkit.
// Must be running on a UNIX machine
//
// Independent testing info:
// Compile on commandline: nvcc least_utilized_GPU.cu -o test
// run on commandline: ./test
//
// Author: Jordan Bonilla
// Date : April 2016
// License: All rights Reserved. See LICENSE.txt
//--------------------------------------------------------------------------
#include <cstdio> // printf
#include <stdlib.h> // popen, pclose, atoi, fread
#include <hip/hip_runtime.h> // hipGetDeviceCount, hipSetDevice
// Select the least utilized GPU on this system. Estimate
// GPU utilization using GPU temperature. UNIX only.
void select_GPU()
{
// Get the number of GPUs on this machine
int num_devices;
hipGetDeviceCount(&num_devices);
if(num_devices == 1) {
return;
}
// Read GPU info into buffer "output"
const unsigned int MAX_BYTES = 10000;
char output[MAX_BYTES];
FILE *fp = popen("nvidia-smi 2> /dev/null", "r"); // keep stdout readable; only discard stderr
size_t bytes_read = fread(output, sizeof(char), MAX_BYTES - 1, fp);
output[bytes_read] = '\0'; // fread does not null-terminate, so terminate before parsing
pclose(fp);
// array to hold GPU temperatures
int * temperatures = new int[num_devices];
// parse output for temperatures using knowledge of "nvidia-smi" output format
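// (assumes the default table layout, where each GPU row looks like
// "| 30%   44C ..." -- i.e. the temperature reading follows the fan-speed '%')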
int i = 0;
unsigned int num_temps_parsed = 0;
while(output[i] != '\0') {
if(output[i] == '%') {
unsigned int temp_begin = i + 1;
while(output[i] != 'C') {
++i;
}
unsigned int temp_end = i;
char this_temperature[32];
// Read in the characters corresponding to this temperature
for(int j = 0; j < temp_end - temp_begin; ++j) {
this_temperature[j] = output[temp_begin + j];
}
this_temperature[temp_end - temp_begin] = '\0'; // terminate right after the last copied digit
// Convert the string representation to an int
temperatures[num_temps_parsed] = atoi(this_temperature);
num_temps_parsed++;
}
++i;
}
// Get GPU with lowest temperature
int min_temp = 1e7, index_of_min = -1;
for (int i = 0; i < num_devices; i++)
{
int candidate_min = temperatures[i];
if(candidate_min < min_temp)
{
min_temp = candidate_min;
index_of_min = i;
}
}
// Tell CUDA to use the GPU with the lowest temperature
printf("Index of the GPU with the lowest temperature: %d (%d C)\n",
index_of_min, min_temp);
hipSetDevice(index_of_min);
// Free memory and return
delete[] temperatures;
return;
}
int main(int argc, char **argv) {
select_GPU();
return 0;
}
| 9ed5922f3ec26f4520da75574cf21bece7b03ac5.cu | //--------------------------------------------------------------------------
// Project:
// Select the least utilized GPU on a CUDA-enabled system.
// Insert into your code as desired.
//
// Prerequisites:
// Must have installed the CUDA toolkit.
// Must be running on a UNIX machine
//
// Independent testing info:
// Compile on commandline: nvcc least_utilized_GPU.cu -o test
// run on commandline: ./test
//
// Author: Jordan Bonilla
// Date : April 2016
// License: All rights Reserved. See LICENSE.txt
//--------------------------------------------------------------------------
#include <cstdio> // printf
#include <stdlib.h> // popen, pclose, atoi, fread
#include <cuda_runtime.h> // cudaGetDeviceCount, cudaSetDevice
// Select the least utilized GPU on this system. Estimate
// GPU utilization using GPU temperature. UNIX only.
void select_GPU()
{
// Get the number of GPUs on this machine
int num_devices;
cudaGetDeviceCount(&num_devices);
if(num_devices == 1) {
return;
}
// Read GPU info into buffer "output"
const unsigned int MAX_BYTES = 10000;
char output[MAX_BYTES];
FILE *fp = popen("nvidia-smi 2> /dev/null", "r"); // keep stdout readable; only discard stderr
size_t bytes_read = fread(output, sizeof(char), MAX_BYTES - 1, fp);
output[bytes_read] = '\0'; // fread does not null-terminate, so terminate before parsing
pclose(fp);
// array to hold GPU temperatures
int * temperatures = new int[num_devices];
// parse output for temperatures using knowledge of "nvidia-smi" output format
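// (assumes the default table layout, where each GPU row looks like
// "| 30%   44C ..." -- i.e. the temperature reading follows the fan-speed '%')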
int i = 0;
unsigned int num_temps_parsed = 0;
while(output[i] != '\0') {
if(output[i] == '%') {
unsigned int temp_begin = i + 1;
while(output[i] != 'C') {
++i;
}
unsigned int temp_end = i;
char this_temperature[32];
// Read in the characters corresponding to this temperature
for(int j = 0; j < temp_end - temp_begin; ++j) {
this_temperature[j] = output[temp_begin + j];
}
this_temperature[temp_end - temp_begin] = '\0'; // terminate right after the last copied digit
// Convert the string representation to an int
temperatures[num_temps_parsed] = atoi(this_temperature);
num_temps_parsed++;
}
++i;
}
// Get GPU with lowest temperature
int min_temp = 1e7, index_of_min = -1;
for (int i = 0; i < num_devices; i++)
{
int candidate_min = temperatures[i];
if(candidate_min < min_temp)
{
min_temp = candidate_min;
index_of_min = i;
}
}
// Tell CUDA to use the GPU with the lowest temperature
printf("Index of the GPU with the lowest temperature: %d (%d C)\n",
index_of_min, min_temp);
cudaSetDevice(index_of_min);
// Free memory and return
delete[] temperatures;
return;
}
int main(int argc, char **argv) {
select_GPU();
return 0;
}
|
f1cacfeb529414593cb278284b90075675955a8d.hip | // !!! This is a file automatically generated by hipify!!!
#ifndef CUDAERRORUTILS_INCLUDED
#define CUDAERRORUTILS_INCLUDED
#include <stdio.h>
#include <hiprand/hiprand.h>
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define cudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define cudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
#define cudaCheckErrorDev() __cudaCheckErrorDev( __FILE__, __LINE__ )
#define CURAND_CALL(status) __cudaRandCall( status, __FILE__, __LINE__ )
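// Typical usage (illustrative; d_buf, myKernel, gen and n are placeholders):
//   cudaSafeCall(hipMalloc(&d_buf, bytes));              // wrap every runtime API call
//   hipLaunchKernelGGL(myKernel, grid, block, 0, 0, d_buf);
//   cudaCheckError();                                    // checks the launch and synchronizes
//   CURAND_CALL(hiprandGenerateUniform(gen, d_buf, n));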
inline void __cudaSafeCall( hipError_t err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( hipSuccess != err ) {
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
//return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err ) {
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err ) {
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, hipGetErrorString( err ) );
exit( -1 );
}
#endif
//return;
}
__device__ inline void __cudaCheckErrorDev( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
hipError_t err = hipGetLastError();
if ( hipSuccess != err ) {
printf("%s %s %d\n", hipGetErrorString(err), file, line);
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = hipDeviceSynchronize();
if( hipSuccess != err ) {
printf("%s %s %d\n", hipGetErrorString(err), file, line);
}
#endif
//return;
}
inline void __cudaRandCall(hiprandStatus_t err, const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
if(err != HIPRAND_STATUS_SUCCESS) {
printf("CURAND Error at %s:%d\n", __FILE__, __LINE__);
exit( -1 );
}
#endif
}
#endif
| f1cacfeb529414593cb278284b90075675955a8d.cu | #ifndef CUDAERRORUTILS_INCLUDED
#define CUDAERRORUTILS_INCLUDED
#include <stdio.h>
#include <curand.h>
// Define this to turn on error checking
#define CUDA_ERROR_CHECK
#define cudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define cudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
#define cudaCheckErrorDev() __cudaCheckErrorDev( __FILE__, __LINE__ )
#define CURAND_CALL(status) __cudaRandCall( status, __FILE__, __LINE__ )
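// Typical usage (illustrative; d_buf, myKernel, gen and n are placeholders):
//   cudaSafeCall(cudaMalloc(&d_buf, bytes));             // wrap every runtime API call
//   myKernel<<<grid, block>>>(d_buf);
//   cudaCheckError();                                    // checks the launch and synchronizes
//   CURAND_CALL(curandGenerateUniform(gen, d_buf, n));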
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
if ( cudaSuccess != err ) {
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
//return;
}
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err ) {
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err ) {
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
//return;
}
__device__ inline void __cudaCheckErrorDev( const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err ) {
printf("%s %s %d\n", cudaGetErrorString(err), file, line);
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err ) {
printf("%s %s %d\n", cudaGetErrorString(err), file, line);
}
#endif
//return;
}
inline void __cudaRandCall(curandStatus err, const char *file, const int line) {
#ifdef CUDA_ERROR_CHECK
if(err != CURAND_STATUS_SUCCESS) {
printf("CURAND Error at %s:%d\n", __FILE__, __LINE__);
exit( -1 );
}
#endif
}
#endif
|
67daaa28360f4b6c1ed1281a30a5459171c8ce19.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#define N 2//64
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
//printf(" %d; ", p[threadIdx.x]);
}
__global__ void foo(int* p) {
bar(p);
}
| 67daaa28360f4b6c1ed1281a30a5459171c8ce19.cu | #include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
__device__ void bar(int* p) {
p[threadIdx.x] = 0;
//printf(" %d; ", p[threadIdx.x]);
}
__global__ void foo(int* p) {
bar(p);
}
|
b8ab9a306b0ded1aaed77e45157375405ae9090e.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Parquet reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
#include <array>
#include <regex>
namespace cudf {
namespace io {
namespace detail {
namespace parquet {
// Import functionality that's independent of legacy code
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Parquet datatype to cuDF type enum
*/
constexpr type_id to_type_id(parquet::Type physical,
parquet::ConvertedType logical,
bool strings_to_categorical,
type_id timestamp_type_id,
int32_t decimal_scale)
{
// Logical type used for actual data interpretation; the legacy converted type
// is superseded by the 'logical' type whenever available.
switch (logical) {
case parquet::UINT_8:
case parquet::INT_8: return type_id::INT8;
case parquet::UINT_16:
case parquet::INT_16: return type_id::INT16;
case parquet::DATE: return type_id::TIMESTAMP_DAYS;
case parquet::TIMESTAMP_MICROS:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_MICROSECONDS;
case parquet::TIMESTAMP_MILLIS:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_MILLISECONDS;
case parquet::DECIMAL:
if (decimal_scale != 0 || (physical != parquet::INT32 && physical != parquet::INT64)) {
return type_id::FLOAT64;
}
break;
default: break;
}
// Physical storage type supported by Parquet; controls the on-disk storage
// format in combination with the encoding type.
switch (physical) {
case parquet::BOOLEAN: return type_id::BOOL8;
case parquet::INT32: return type_id::INT32;
case parquet::INT64: return type_id::INT64;
case parquet::FLOAT: return type_id::FLOAT32;
case parquet::DOUBLE: return type_id::FLOAT64;
case parquet::BYTE_ARRAY:
case parquet::FIXED_LEN_BYTE_ARRAY:
// Can be mapped to INT32 (32-bit hash) or STRING
return strings_to_categorical ? type_id::INT32 : type_id::STRING;
case parquet::INT96:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to Parquet clock frequency
*/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
/**
* @brief Function that returns the number of bits required to store a value
*/
template <typename T = uint8_t>
T required_bits(uint32_t max_level)
{
return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
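/**
* @brief Derives the per-column conversion parameters (element width in bytes,
* timestamp clock rate and effective converted type) from the cuDF column type
* and the Parquet physical/converted types.
*/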
std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
type_id timestamp_type_id,
parquet::Type physical,
int8_t converted,
int32_t length)
{
int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0;
int32_t clock_rate = 0;
if (column_type_id == type_id::INT8) {
type_width = 1; // I32 -> I8
} else if (column_type_id == type_id::INT16) {
type_width = 2; // I32 -> I16
} else if (column_type_id == type_id::INT32) {
type_width = 4; // str -> hash32
} else if (is_timestamp(data_type{column_type_id})) {
clock_rate = to_clockrate(timestamp_type_id);
}
int8_t converted_type = converted;
if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64) {
converted_type = parquet::UNKNOWN; // Not converting to float64
}
return std::make_tuple(type_width, clock_rate, converted_type);
}
} // namespace
/**
* @brief Class for parsing dataset metadata
*/
struct metadata : public FileMetaData {
explicit metadata(datasource *source)
{
constexpr auto header_len = sizeof(file_header_s);
constexpr auto ender_len = sizeof(file_ender_s);
const auto len = source->size();
const auto header_buffer = source->host_read(0, header_len);
const auto header = (const file_header_s *)header_buffer->data();
const auto ender_buffer = source->host_read(len - ender_len, ender_len);
const auto ender = (const file_ender_s *)ender_buffer->data();
CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
CUDF_EXPECTS(header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC,
"Corrupted header or footer");
CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len),
"Incorrect footer length");
const auto buffer = source->host_read(len - ender->footer_len - ender_len, ender->footer_len);
CompactProtocolReader cp(buffer->data(), ender->footer_len);
CUDF_EXPECTS(cp.read(this), "Cannot parse metadata");
CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema");
}
inline int64_t get_total_rows() const { return num_rows; }
inline int get_num_row_groups() const { return row_groups.size(); }
inline int get_num_columns() const { return row_groups[0].columns.size(); }
std::string get_column_name(const std::vector<std::string> &path_in_schema)
{
std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : "";
for (size_t i = 1; i < path_in_schema.size(); i++) { s += "." + path_in_schema[i]; }
return s;
}
std::vector<std::string> get_column_names()
{
std::vector<std::string> all_names;
if (row_groups.size() != 0) {
for (const auto &chunk : row_groups[0].columns) {
all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema));
}
}
return all_names;
}
/**
* @brief Extracts the pandas "index_columns" section
*
* PANDAS adds its own metadata to the key_value section when writing out the
* dataframe to a file to aid in exact reconstruction. The JSON-formatted
* metadata contains the index column(s) and pandas-specific datatypes.
*
* @return comma-separated index column names in quotes
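* (e.g. if the pandas key_value metadata holds "index_columns": ["__index_level_0__"],
* the quoted name "__index_level_0__" is returned)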
*/
std::string get_pandas_index()
{
auto it = std::find_if(key_value_metadata.begin(),
key_value_metadata.end(),
[](const auto &item) { return item.key == "pandas"; });
if (it != key_value_metadata.end()) {
// Captures a list of quoted strings found inside square brackets after `"index_columns":`
// Inside quotes supports newlines, brackets, escaped quotes, etc.
// One-liner regex:
// "index_columns"\s*:\s*\[\s*((?:"(?:|(?:.*?(?![^\\]")).?)[^\\]?",?\s*)*)\]
// Documented below.
std::regex index_columns_expr{
R"("index_columns"\s*:\s*\[\s*)" // match preamble, opening square bracket, whitespace
R"(()" // Open first capturing group
R"((?:")" // Open non-capturing group match opening quote
R"((?:|(?:.*?(?![^\\]")).?))" // match empty string or anything between quotes
R"([^\\]?")" // Match closing non-escaped quote
R"(,?\s*)" // Match optional comma and whitespace
R"()*)" // Close non-capturing group and repeat 0 or more times
R"())" // Close first capturing group
R"(\])" // Match closing square brackets
};
std::smatch sm;
if (std::regex_search(it->value, sm, index_columns_expr)) { return std::move(sm[1].str()); }
}
return "";
}
/**
* @brief Extracts the column name(s) used for the row indexes in a dataframe
*
* @param names List of column names to load, where index column name(s) will be added
*/
void add_pandas_index_names(std::vector<std::string> &names)
{
auto str = get_pandas_index();
if (str.length() != 0) {
std::regex index_name_expr{R"(\"((?:\\.|[^\"])*)\")"};
std::smatch sm;
while (std::regex_search(str, sm, index_name_expr)) {
if (sm.size() == 2) { // 2 = whole match, first item
if (std::find(names.begin(), names.end(), sm[1].str()) == names.end()) {
std::regex esc_quote{R"(\\")"};
names.emplace_back(std::move(std::regex_replace(sm[1].str(), esc_quote, R"(")")));
}
}
str = sm.suffix();
}
}
}
/**
* @brief Filters and reduces down to a selection of row groups
*
* @param row_group Index of the row group to select
* @param max_rowgroup_count Max number of consecutive row groups if > 0
* @param row_group_indices Arbitrary rowgroup list[max_rowgroup_count] if non-null
* @param row_start Starting row of the selection
* @param row_count Total number of rows selected
*
* @return List of row group indexes and its starting row
*/
auto select_row_groups(size_type row_group,
size_type max_rowgroup_count,
const size_type *row_group_indices,
size_type &row_start,
size_type &row_count)
{
std::vector<std::pair<size_type, size_t>> selection;
if (row_group_indices) {
row_count = 0;
for (size_type i = 0; i < max_rowgroup_count; i++) {
auto rowgroup_idx = row_group_indices[i];
CUDF_EXPECTS(rowgroup_idx >= 0 && rowgroup_idx < get_num_row_groups(),
"Invalid rowgroup index");
selection.emplace_back(rowgroup_idx, row_count);
row_count += row_groups[rowgroup_idx].num_rows;
}
} else if (row_group != -1) {
CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group");
row_count = 0;
do {
selection.emplace_back(row_group, row_start + row_count);
row_count += row_groups[row_group].num_rows;
} while (--max_rowgroup_count > 0 && ++row_group < get_num_row_groups());
} else {
row_start = ::max(row_start, 0);
if (row_count < 0) {
row_count = static_cast<size_type>(
std::min<int64_t>(get_total_rows(), std::numeric_limits<size_type>::max()));
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");
for (size_t i = 0, count = 0; i < row_groups.size(); ++i) {
size_t chunk_start_row = count;
count += row_groups[i].num_rows;
if (count > static_cast<size_t>(row_start) || count == 0) {
selection.emplace_back(i, chunk_start_row);
}
if (count >= static_cast<size_t>(row_start) + static_cast<size_t>(row_count)) { break; }
}
}
return selection;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param use_names List of column names to select
* @param include_index Whether to always include the PANDAS index column(s)
*
* @return List of column names
*/
auto select_columns(std::vector<std::string> use_names, bool include_index)
{
std::vector<std::pair<int, std::string>> selection;
const auto names = get_column_names();
if (use_names.empty()) {
// No columns specified; include all in the dataset
for (const auto &name : names) { selection.emplace_back(selection.size(), name); }
} else {
// Load subset of columns; include PANDAS index unless excluded
if (include_index) { add_pandas_index_names(use_names); }
for (const auto &use_name : use_names) {
for (size_t i = 0; i < names.size(); ++i) {
if (names[i] == use_name) {
selection.emplace_back(i, names[i]);
break;
}
}
}
}
return selection;
}
};
void reader::impl::read_column_chunks(std::vector<rmm::device_buffer> &page_data,
hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
size_t begin_chunk,
size_t end_chunk,
const std::vector<size_t> &column_chunk_offsets,
hipStream_t stream)
{
// Transfer chunk data, coalescing adjacent chunks
for (size_t chunk = begin_chunk; chunk < end_chunk;) {
const size_t io_offset = column_chunk_offsets[chunk];
size_t io_size = chunks[chunk].compressed_size;
size_t next_chunk = chunk + 1;
const bool is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED);
while (next_chunk < end_chunk) {
const size_t next_offset = column_chunk_offsets[next_chunk];
const bool is_next_compressed =
(chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED);
if (next_offset != io_offset + io_size || is_next_compressed != is_compressed) {
// Can't merge if not contiguous or mixing compressed and uncompressed
// Not coalescing uncompressed with compressed chunks is so that compressed buffers can be
// freed earlier (immediately after decompression stage) to limit peak memory requirements
break;
}
io_size += chunks[next_chunk].compressed_size;
next_chunk++;
}
if (io_size != 0) {
auto buffer = _source->host_read(io_offset, io_size);
page_data[chunk] = rmm::device_buffer(buffer->data(), buffer->size(), stream);
uint8_t *d_compdata = reinterpret_cast<uint8_t *>(page_data[chunk].data());
do {
chunks[chunk].compressed_data = d_compdata;
d_compdata += chunks[chunk].compressed_size;
} while (++chunk != next_chunk);
} else {
chunk = next_chunk;
}
}
}
size_t reader::impl::count_page_headers(hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
hipStream_t stream)
{
size_t total_pages = 0;
CUDA_TRY(hipMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream));
CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
CUDA_TRY(hipMemcpyAsync(
chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
for (size_t c = 0; c < chunks.size(); c++) {
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
void reader::impl::decode_page_headers(hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
hostdevice_vector<gpu::PageInfo> &pages,
hipStream_t stream)
{
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(hipMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream));
CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
CUDA_TRY(hipMemcpyAsync(
pages.host_ptr(), pages.device_ptr(), pages.memory_size(), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
}
rmm::device_buffer reader::impl::decompress_page_data(
hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
hostdevice_vector<gpu::PageInfo> &pages,
hipStream_t stream)
{
auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)> &f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) { f(page_count + k); }
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_vector<uint8_t> debrotli_scratch;
// Count the exact number of compressed pages
size_t num_comp_pages = 0;
size_t total_decomp_size = 0;
std::array<std::pair<parquet::Compression, size_t>, 3> codecs{std::make_pair(parquet::GZIP, 0),
std::make_pair(parquet::SNAPPY, 0),
std::make_pair(parquet::BROTLI, 0)};
for (auto &codec : codecs) {
for_each_codec_page(codec.first, [&](size_t page) {
total_decomp_size += pages[page].uncompressed_page_size;
codec.second++;
num_comp_pages++;
});
if (codec.first == parquet::BROTLI && codec.second > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second));
}
}
// Dispatch batches of pages to decompress for each codec
rmm::device_buffer decomp_pages(total_decomp_size, stream);
hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_comp_pages, stream);
hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_comp_pages, stream);
size_t decomp_offset = 0;
int32_t argc = 0;
for (const auto &codec : codecs) {
if (codec.second > 0) {
int32_t start_pos = argc;
for_each_codec_page(codec.first, [&](size_t page) {
auto dst_base = static_cast<uint8_t *>(decomp_pages.data());
inflate_in[argc].srcDevice = pages[page].page_data;
inflate_in[argc].srcSize = pages[page].compressed_page_size;
inflate_in[argc].dstDevice = dst_base + decomp_offset;
inflate_in[argc].dstSize = pages[page].uncompressed_page_size;
inflate_out[argc].bytes_written = 0;
inflate_out[argc].status = static_cast<uint32_t>(-1000);
inflate_out[argc].reserved = 0;
pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice;
decomp_offset += inflate_in[argc].dstSize;
argc++;
});
CUDA_TRY(hipMemcpyAsync(inflate_in.device_ptr(start_pos),
inflate_in.host_ptr(start_pos),
sizeof(decltype(inflate_in)::value_type) * (argc - start_pos),
hipMemcpyHostToDevice,
stream));
CUDA_TRY(hipMemcpyAsync(inflate_out.device_ptr(start_pos),
inflate_out.host_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
hipMemcpyHostToDevice,
stream));
switch (codec.first) {
case parquet::GZIP:
CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos,
1,
stream))
break;
case parquet::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos,
stream));
break;
case parquet::BROTLI:
CUDA_TRY(gpu_debrotli(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
debrotli_scratch.data().get(),
debrotli_scratch.size(),
argc - start_pos,
stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
CUDA_TRY(hipMemcpyAsync(inflate_out.host_ptr(start_pos),
inflate_out.device_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
hipMemcpyDeviceToHost,
stream));
}
}
CUDA_TRY(hipStreamSynchronize(stream));
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
CUDA_TRY(hipMemcpyAsync(
pages.device_ptr(), pages.host_ptr(), pages.memory_size(), hipMemcpyHostToDevice, stream));
return decomp_pages;
}
void reader::impl::decode_page_data(hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
hostdevice_vector<gpu::PageInfo> &pages,
size_t min_row,
size_t total_rows,
const std::vector<int> &chunk_map,
std::vector<column_buffer> &out_buffers,
hipStream_t stream)
{
auto is_dict_chunk = [](const gpu::ColumnChunkDesc &chunk) {
return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
};
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_values; }
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
rmm::device_vector<gpu::nvstrdesc_s> str_dict_index;
if (total_str_dict_indexes > 0) { str_dict_index.resize(total_str_dict_indexes); }
// Update chunks with pointers to column data
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs;
str_ofs += pages[page_count].num_values;
}
chunks[c].column_data_base = out_buffers[chunk_map[c]].data();
chunks[c].valid_map_base = out_buffers[chunk_map[c]].null_mask();
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(hipMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), hipMemcpyHostToDevice, stream));
if (total_str_dict_indexes > 0) {
CUDA_TRY(gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), stream));
}
CUDA_TRY(gpu::DecodePageData(pages.device_ptr(),
pages.size(),
chunks.device_ptr(),
chunks.size(),
total_rows,
min_row,
stream));
CUDA_TRY(hipMemcpyAsync(
pages.host_ptr(), pages.device_ptr(), pages.memory_size(), hipMemcpyDeviceToHost, stream));
CUDA_TRY(hipStreamSynchronize(stream));
for (size_t i = 0; i < pages.size(); i++) {
if (pages[i].num_rows > 0) {
const size_t c = pages[i].chunk_idx;
if (c < chunks.size()) {
out_buffers[chunk_map[c]].null_count() += pages[i].num_rows - pages[i].valid_count;
}
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr)
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.columns, options.use_pandas_metadata);
// Override output timestamp resolution if requested
if (options.timestamp_type.id() != EMPTY) { _timestamp_type = options.timestamp_type; }
// Strings may be returned as either string or categorical columns
_strings_to_categorical = options.strings_to_categorical;
}
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
size_type row_group,
size_type max_rowgroup_count,
const size_type *row_group_indices,
hipStream_t stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only row groups required
const auto selected_row_groups = _metadata->select_row_groups(
row_group, max_rowgroup_count, row_group_indices, skip_rows, num_rows);
// Get a list of column data types
std::vector<data_type> column_types;
if (_metadata->row_groups.size() != 0) {
for (const auto &col : _selected_columns) {
auto &col_schema = _metadata->schema[_metadata->row_groups[0].columns[col.first].schema_idx];
auto col_type = to_type_id(col_schema.type,
col_schema.converted_type,
_strings_to_categorical,
_timestamp_type.id(),
col_schema.decimal_scale);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
}
out_columns.reserve(column_types.size());
if (selected_row_groups.size() != 0 && column_types.size() != 0) {
// Descriptors for all the chunks that make up the selected columns
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_row_groups.size() * num_columns;
hostdevice_vector<gpu::ColumnChunkDesc> chunks(0, num_chunks, stream);
// Association between each column chunk and its column
std::vector<int> chunk_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> page_data(num_chunks);
// Keep track of column chunk file offsets
std::vector<size_t> column_chunk_offsets(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
for (const auto &rg : selected_row_groups) {
const auto &row_group = _metadata->row_groups[rg.first];
auto row_group_start = rg.second;
auto row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
auto io_chunk_idx = chunks.size();
for (size_t i = 0; i < num_columns; ++i) {
auto col = _selected_columns[i];
auto &col_meta = row_group.columns[col.first].meta_data;
auto &col_schema = _metadata->schema[row_group.columns[col.first].schema_idx];
// Spec requires each row group to contain exactly one chunk for every
// column. If there are too many or too few, continue with best effort
if (col.second != _metadata->get_column_name(col_meta.path_in_schema)) {
std::cerr << "Detected mismatched column chunk" << std::endl;
continue;
}
if (chunks.size() >= chunks.max_size()) {
std::cerr << "Detected too many column chunks" << std::endl;
continue;
}
int32_t type_width;
int32_t clock_rate;
int8_t converted_type;
std::tie(type_width, clock_rate, converted_type) =
conversion_info(column_types[i].id(),
_timestamp_type.id(),
col_schema.type,
col_schema.converted_type,
col_schema.type_length);
column_chunk_offsets[chunks.size()] =
(col_meta.dictionary_page_offset != 0)
? ::min(col_meta.data_page_offset, col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
chunks.insert(gpu::ColumnChunkDesc(col_meta.total_compressed_size,
nullptr,
col_meta.num_values,
col_schema.type,
type_width,
row_group_start,
row_group_rows,
col_schema.max_definition_level,
col_schema.max_repetition_level,
required_bits(col_schema.max_definition_level),
required_bits(col_schema.max_repetition_level),
col_meta.codec,
converted_type,
col_schema.decimal_scale,
clock_rate));
// Map each column chunk to its column index
chunk_map[chunks.size() - 1] = i;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
// Read compressed chunk data to device memory
read_column_chunks(
page_data, chunks, io_chunk_idx, chunks.size(), column_chunk_offsets, stream);
remaining_rows -= row_group.num_rows;
}
assert(remaining_rows <= 0);
// Process dataset chunk pages into output columns
const auto total_pages = count_page_headers(chunks, stream);
if (total_pages > 0) {
hostdevice_vector<gpu::PageInfo> pages(total_pages, total_pages, stream);
rmm::device_buffer decomp_page_data;
decode_page_headers(chunks, pages, stream);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages, stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED && page_data[c].size() != 0) {
page_data[c].resize(0);
page_data[c].shrink_to_fit();
}
}
}
std::vector<column_buffer> out_buffers;
out_buffers.reserve(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col = _selected_columns[i];
auto &col_schema =
_metadata->schema
[_metadata->row_groups[selected_row_groups[0].first].columns[col.first].schema_idx];
bool is_nullable = (col_schema.max_definition_level != 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_page_data(chunks, pages, skip_rows, num_rows, chunk_map, out_buffers, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(
make_column(column_types[i], num_rows, out_buffers[i], stream, _mr));
}
}
}
// Create empty columns as needed
for (size_t i = out_columns.size(); i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _selected_columns[i].second;
}
// Return user metadata
for (const auto &kv : _metadata->key_value_metadata) {
out_metadata.user_data.insert({kv.key, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::string filepath,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(filepath), options, mr))
{
}
// Forward to implementation
reader::reader(std::unique_ptr<cudf::io::datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(std::move(source), options, mr))
{
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(hipStream_t stream)
{
return _impl->read(0, -1, -1, -1, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_row_group(size_type row_group,
size_type row_group_count,
hipStream_t stream)
{
return _impl->read(0, -1, row_group, row_group_count, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_row_groups(const std::vector<size_type> &row_group_list,
hipStream_t stream)
{
return _impl->read(
0, -1, -1, static_cast<size_type>(row_group_list.size()), row_group_list.data(), stream);
}
// Forward to implementation
table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, hipStream_t stream)
{
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, -1, -1, nullptr, stream);
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace cudf
| b8ab9a306b0ded1aaed77e45157375405ae9090e.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file reader_impl.cu
* @brief cuDF-IO Parquet reader class implementation
*/
#include "reader_impl.hpp"
#include <io/comp/gpuinflate.h>
#include <cudf/table/table.hpp>
#include <cudf/utilities/error.hpp>
#include <cudf/utilities/traits.hpp>
#include <rmm/thrust_rmm_allocator.h>
#include <rmm/device_buffer.hpp>
#include <algorithm>
#include <array>
#include <regex>
namespace cudf {
namespace io {
namespace detail {
namespace parquet {
// Import functionality that's independent of legacy code
using namespace cudf::io::parquet;
using namespace cudf::io;
namespace {
/**
* @brief Function that translates Parquet datatype to cuDF type enum
*/
constexpr type_id to_type_id(parquet::Type physical,
parquet::ConvertedType logical,
bool strings_to_categorical,
type_id timestamp_type_id,
int32_t decimal_scale)
{
// Logical type used for actual data interpretation; the legacy converted type
// is superseded by the 'logical' type whenever available.
switch (logical) {
case parquet::UINT_8:
case parquet::INT_8: return type_id::INT8;
case parquet::UINT_16:
case parquet::INT_16: return type_id::INT16;
case parquet::DATE: return type_id::TIMESTAMP_DAYS;
case parquet::TIMESTAMP_MICROS:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_MICROSECONDS;
case parquet::TIMESTAMP_MILLIS:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_MILLISECONDS;
case parquet::DECIMAL:
if (decimal_scale != 0 || (physical != parquet::INT32 && physical != parquet::INT64)) {
return type_id::FLOAT64;
}
break;
default: break;
}
// Physical storage type supported by Parquet; controls the on-disk storage
// format in combination with the encoding type.
switch (physical) {
case parquet::BOOLEAN: return type_id::BOOL8;
case parquet::INT32: return type_id::INT32;
case parquet::INT64: return type_id::INT64;
case parquet::FLOAT: return type_id::FLOAT32;
case parquet::DOUBLE: return type_id::FLOAT64;
case parquet::BYTE_ARRAY:
case parquet::FIXED_LEN_BYTE_ARRAY:
// Can be mapped to INT32 (32-bit hash) or STRING
return strings_to_categorical ? type_id::INT32 : type_id::STRING;
case parquet::INT96:
return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id
: type_id::TIMESTAMP_NANOSECONDS;
default: break;
}
return type_id::EMPTY;
}
/**
* @brief Function that translates cuDF time unit to Parquet clock frequency
*/
constexpr int32_t to_clockrate(type_id timestamp_type_id)
{
switch (timestamp_type_id) {
case type_id::TIMESTAMP_SECONDS: return 1;
case type_id::TIMESTAMP_MILLISECONDS: return 1000;
case type_id::TIMESTAMP_MICROSECONDS: return 1000000;
case type_id::TIMESTAMP_NANOSECONDS: return 1000000000;
default: return 0;
}
}
/**
* @brief Function that returns the number of bits required to store a value
*/
template <typename T = uint8_t>
T required_bits(uint32_t max_level)
{
return static_cast<T>(CompactProtocolReader::NumRequiredBits(max_level));
}
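/**
* @brief Derives the per-column conversion parameters (element width in bytes,
* timestamp clock rate and effective converted type) from the cuDF column type
* and the Parquet physical/converted types.
*/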
std::tuple<int32_t, int32_t, int8_t> conversion_info(type_id column_type_id,
type_id timestamp_type_id,
parquet::Type physical,
int8_t converted,
int32_t length)
{
int32_t type_width = (physical == parquet::FIXED_LEN_BYTE_ARRAY) ? length : 0;
int32_t clock_rate = 0;
if (column_type_id == type_id::INT8) {
type_width = 1; // I32 -> I8
} else if (column_type_id == type_id::INT16) {
type_width = 2; // I32 -> I16
} else if (column_type_id == type_id::INT32) {
type_width = 4; // str -> hash32
} else if (is_timestamp(data_type{column_type_id})) {
clock_rate = to_clockrate(timestamp_type_id);
}
int8_t converted_type = converted;
if (converted_type == parquet::DECIMAL && column_type_id != type_id::FLOAT64) {
converted_type = parquet::UNKNOWN; // Not converting to float64
}
return std::make_tuple(type_width, clock_rate, converted_type);
}
} // namespace
/**
* @brief Class for parsing dataset metadata
*/
struct metadata : public FileMetaData {
explicit metadata(datasource *source)
{
constexpr auto header_len = sizeof(file_header_s);
constexpr auto ender_len = sizeof(file_ender_s);
const auto len = source->size();
const auto header_buffer = source->host_read(0, header_len);
const auto header = (const file_header_s *)header_buffer->data();
const auto ender_buffer = source->host_read(len - ender_len, ender_len);
const auto ender = (const file_ender_s *)ender_buffer->data();
CUDF_EXPECTS(len > header_len + ender_len, "Incorrect data source");
CUDF_EXPECTS(header->magic == PARQUET_MAGIC && ender->magic == PARQUET_MAGIC,
"Corrupted header or footer");
CUDF_EXPECTS(ender->footer_len != 0 && ender->footer_len <= (len - header_len - ender_len),
"Incorrect footer length");
const auto buffer = source->host_read(len - ender->footer_len - ender_len, ender->footer_len);
CompactProtocolReader cp(buffer->data(), ender->footer_len);
CUDF_EXPECTS(cp.read(this), "Cannot parse metadata");
CUDF_EXPECTS(cp.InitSchema(this), "Cannot initialize schema");
}
inline int64_t get_total_rows() const { return num_rows; }
inline int get_num_row_groups() const { return row_groups.size(); }
inline int get_num_columns() const { return row_groups[0].columns.size(); }
std::string get_column_name(const std::vector<std::string> &path_in_schema)
{
std::string s = (path_in_schema.size() > 0) ? path_in_schema[0] : "";
for (size_t i = 1; i < path_in_schema.size(); i++) { s += "." + path_in_schema[i]; }
return s;
}
std::vector<std::string> get_column_names()
{
std::vector<std::string> all_names;
if (row_groups.size() != 0) {
for (const auto &chunk : row_groups[0].columns) {
all_names.emplace_back(get_column_name(chunk.meta_data.path_in_schema));
}
}
return all_names;
}
/**
* @brief Extracts the pandas "index_columns" section
*
* PANDAS adds its own metadata to the key_value section when writing out the
* dataframe to a file to aid in exact reconstruction. The JSON-formatted
 * metadata contains the index column(s) and PANDAS-specific datatypes.
*
* @return comma-separated index column names in quotes
*/
std::string get_pandas_index()
{
auto it = std::find_if(key_value_metadata.begin(),
key_value_metadata.end(),
[](const auto &item) { return item.key == "pandas"; });
if (it != key_value_metadata.end()) {
// Captures a list of quoted strings found inside square brackets after `"index_columns":`
      // The quoted strings may contain newlines, brackets, escaped quotes, etc.
// One-liner regex:
// "index_columns"\s*:\s*\[\s*((?:"(?:|(?:.*?(?![^\\]")).?)[^\\]?",?\s*)*)\]
// Documented below.
std::regex index_columns_expr{
R"("index_columns"\s*:\s*\[\s*)" // match preamble, opening square bracket, whitespace
R"(()" // Open first capturing group
R"((?:")" // Open non-capturing group match opening quote
R"((?:|(?:.*?(?![^\\]")).?))" // match empty string or anything between quotes
R"([^\\]?")" // Match closing non-escaped quote
R"(,?\s*)" // Match optional comma and whitespace
R"()*)" // Close non-capturing group and repeat 0 or more times
R"())" // Close first capturing group
R"(\])" // Match closing square brackets
};
std::smatch sm;
if (std::regex_search(it->value, sm, index_columns_expr)) { return std::move(sm[1].str()); }
}
return "";
}
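  // Example (hypothetical pandas metadata, trimmed): for a "pandas" key whose value contains
  //   {"index_columns": ["__index_level_0__"], "columns": [...], ...}
  // the regex captures the quoted list contents, so this function returns the string
  // "__index_level_0__" (quotes included), which add_pandas_index_names() then parses into
  // individual names.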
/**
* @brief Extracts the column name(s) used for the row indexes in a dataframe
*
* @param names List of column names to load, where index column name(s) will be added
*/
void add_pandas_index_names(std::vector<std::string> &names)
{
auto str = get_pandas_index();
if (str.length() != 0) {
std::regex index_name_expr{R"(\"((?:\\.|[^\"])*)\")"};
std::smatch sm;
while (std::regex_search(str, sm, index_name_expr)) {
if (sm.size() == 2) { // 2 = whole match, first item
if (std::find(names.begin(), names.end(), sm[1].str()) == names.end()) {
std::regex esc_quote{R"(\\")"};
names.emplace_back(std::move(std::regex_replace(sm[1].str(), esc_quote, R"(")")));
}
}
str = sm.suffix();
}
}
}
/**
* @brief Filters and reduces down to a selection of row groups
*
* @param row_group Index of the row group to select
* @param max_rowgroup_count Max number of consecutive row groups if > 0
* @param row_group_indices Arbitrary rowgroup list[max_rowgroup_count] if non-null
* @param row_start Starting row of the selection
* @param row_count Total number of rows selected
*
   * @return List of row group indexes, each paired with its starting row
*/
auto select_row_groups(size_type row_group,
size_type max_rowgroup_count,
const size_type *row_group_indices,
size_type &row_start,
size_type &row_count)
{
std::vector<std::pair<size_type, size_t>> selection;
if (row_group_indices) {
row_count = 0;
for (size_type i = 0; i < max_rowgroup_count; i++) {
auto rowgroup_idx = row_group_indices[i];
CUDF_EXPECTS(rowgroup_idx >= 0 && rowgroup_idx < get_num_row_groups(),
"Invalid rowgroup index");
selection.emplace_back(rowgroup_idx, row_count);
row_count += row_groups[rowgroup_idx].num_rows;
}
} else if (row_group != -1) {
CUDF_EXPECTS(row_group < get_num_row_groups(), "Non-existent row group");
row_count = 0;
do {
selection.emplace_back(row_group, row_start + row_count);
row_count += row_groups[row_group].num_rows;
} while (--max_rowgroup_count > 0 && ++row_group < get_num_row_groups());
} else {
row_start = std::max(row_start, 0);
if (row_count < 0) {
row_count = static_cast<size_type>(
std::min<int64_t>(get_total_rows(), std::numeric_limits<size_type>::max()));
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");
for (size_t i = 0, count = 0; i < row_groups.size(); ++i) {
size_t chunk_start_row = count;
count += row_groups[i].num_rows;
if (count > static_cast<size_t>(row_start) || count == 0) {
selection.emplace_back(i, chunk_start_row);
}
if (count >= static_cast<size_t>(row_start) + static_cast<size_t>(row_count)) { break; }
}
}
return selection;
}
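  // Worked example (hypothetical file): with four row groups of 100 rows each and no explicit
  // row-group selection, row_start = 150 and row_count = 200 select {(1, 100), (2, 200), (3, 300)},
  // i.e. the second through fourth row groups, each paired with its global starting row.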
/**
* @brief Filters and reduces down to a selection of columns
*
* @param use_names List of column names to select
* @param include_index Whether to always include the PANDAS index column(s)
*
* @return List of column names
*/
auto select_columns(std::vector<std::string> use_names, bool include_index)
{
std::vector<std::pair<int, std::string>> selection;
const auto names = get_column_names();
if (use_names.empty()) {
// No columns specified; include all in the dataset
for (const auto &name : names) { selection.emplace_back(selection.size(), name); }
} else {
// Load subset of columns; include PANDAS index unless excluded
if (include_index) { add_pandas_index_names(use_names); }
for (const auto &use_name : use_names) {
for (size_t i = 0; i < names.size(); ++i) {
if (names[i] == use_name) {
selection.emplace_back(i, names[i]);
break;
}
}
}
}
return selection;
}
};
void reader::impl::read_column_chunks(std::vector<rmm::device_buffer> &page_data,
hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
size_t begin_chunk,
size_t end_chunk,
const std::vector<size_t> &column_chunk_offsets,
cudaStream_t stream)
{
// Transfer chunk data, coalescing adjacent chunks
for (size_t chunk = begin_chunk; chunk < end_chunk;) {
const size_t io_offset = column_chunk_offsets[chunk];
size_t io_size = chunks[chunk].compressed_size;
size_t next_chunk = chunk + 1;
const bool is_compressed = (chunks[chunk].codec != parquet::Compression::UNCOMPRESSED);
while (next_chunk < end_chunk) {
const size_t next_offset = column_chunk_offsets[next_chunk];
const bool is_next_compressed =
(chunks[next_chunk].codec != parquet::Compression::UNCOMPRESSED);
if (next_offset != io_offset + io_size || is_next_compressed != is_compressed) {
        // Can't merge if not contiguous or if mixing compressed and uncompressed chunks.
        // Compressed and uncompressed chunks are not coalesced so that compressed buffers can be
        // freed earlier (immediately after the decompression stage) to limit peak memory requirements.
break;
}
io_size += chunks[next_chunk].compressed_size;
next_chunk++;
}
if (io_size != 0) {
auto buffer = _source->host_read(io_offset, io_size);
page_data[chunk] = rmm::device_buffer(buffer->data(), buffer->size(), stream);
uint8_t *d_compdata = reinterpret_cast<uint8_t *>(page_data[chunk].data());
do {
chunks[chunk].compressed_data = d_compdata;
d_compdata += chunks[chunk].compressed_size;
} while (++chunk != next_chunk);
} else {
chunk = next_chunk;
}
}
}
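// Illustrative sketch (hypothetical offsets): three compressed chunks stored back-to-back at file
// offsets 1000 (size 200), 1200 (size 300) and 1500 (size 100) are coalesced into a single
// host_read(1000, 600) and one device_buffer, with each chunk's compressed_data pointer set to its
// slice of that buffer. A gap between offsets, or a switch between compressed and uncompressed
// chunks, starts a new read.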
size_t reader::impl::count_page_headers(hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
cudaStream_t stream)
{
size_t total_pages = 0;
CUDA_TRY(cudaMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream));
CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
CUDA_TRY(cudaMemcpyAsync(
chunks.host_ptr(), chunks.device_ptr(), chunks.memory_size(), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (size_t c = 0; c < chunks.size(); c++) {
total_pages += chunks[c].num_data_pages + chunks[c].num_dict_pages;
}
return total_pages;
}
void reader::impl::decode_page_headers(hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
hostdevice_vector<gpu::PageInfo> &pages,
cudaStream_t stream)
{
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
chunks[c].max_num_pages = chunks[c].num_data_pages + chunks[c].num_dict_pages;
chunks[c].page_info = pages.device_ptr(page_count);
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(cudaMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream));
CUDA_TRY(gpu::DecodePageHeaders(chunks.device_ptr(), chunks.size(), stream));
CUDA_TRY(cudaMemcpyAsync(
pages.host_ptr(), pages.device_ptr(), pages.memory_size(), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
}
rmm::device_buffer reader::impl::decompress_page_data(
hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
hostdevice_vector<gpu::PageInfo> &pages,
cudaStream_t stream)
{
auto for_each_codec_page = [&](parquet::Compression codec, const std::function<void(size_t)> &f) {
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
const auto page_stride = chunks[c].max_num_pages;
if (chunks[c].codec == codec) {
for (int k = 0; k < page_stride; k++) { f(page_count + k); }
}
page_count += page_stride;
}
};
// Brotli scratch memory for decompressing
rmm::device_vector<uint8_t> debrotli_scratch;
// Count the exact number of compressed pages
size_t num_comp_pages = 0;
size_t total_decomp_size = 0;
std::array<std::pair<parquet::Compression, size_t>, 3> codecs{std::make_pair(parquet::GZIP, 0),
std::make_pair(parquet::SNAPPY, 0),
std::make_pair(parquet::BROTLI, 0)};
for (auto &codec : codecs) {
for_each_codec_page(codec.first, [&](size_t page) {
total_decomp_size += pages[page].uncompressed_page_size;
codec.second++;
num_comp_pages++;
});
if (codec.first == parquet::BROTLI && codec.second > 0) {
debrotli_scratch.resize(get_gpu_debrotli_scratch_size(codec.second));
}
}
// Dispatch batches of pages to decompress for each codec
rmm::device_buffer decomp_pages(total_decomp_size, stream);
hostdevice_vector<gpu_inflate_input_s> inflate_in(0, num_comp_pages, stream);
hostdevice_vector<gpu_inflate_status_s> inflate_out(0, num_comp_pages, stream);
size_t decomp_offset = 0;
int32_t argc = 0;
for (const auto &codec : codecs) {
if (codec.second > 0) {
int32_t start_pos = argc;
for_each_codec_page(codec.first, [&](size_t page) {
auto dst_base = static_cast<uint8_t *>(decomp_pages.data());
inflate_in[argc].srcDevice = pages[page].page_data;
inflate_in[argc].srcSize = pages[page].compressed_page_size;
inflate_in[argc].dstDevice = dst_base + decomp_offset;
inflate_in[argc].dstSize = pages[page].uncompressed_page_size;
inflate_out[argc].bytes_written = 0;
inflate_out[argc].status = static_cast<uint32_t>(-1000);
inflate_out[argc].reserved = 0;
pages[page].page_data = (uint8_t *)inflate_in[argc].dstDevice;
decomp_offset += inflate_in[argc].dstSize;
argc++;
});
CUDA_TRY(cudaMemcpyAsync(inflate_in.device_ptr(start_pos),
inflate_in.host_ptr(start_pos),
sizeof(decltype(inflate_in)::value_type) * (argc - start_pos),
cudaMemcpyHostToDevice,
stream));
CUDA_TRY(cudaMemcpyAsync(inflate_out.device_ptr(start_pos),
inflate_out.host_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
cudaMemcpyHostToDevice,
stream));
switch (codec.first) {
case parquet::GZIP:
CUDA_TRY(gpuinflate(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos,
1,
stream))
break;
case parquet::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
argc - start_pos,
stream));
break;
case parquet::BROTLI:
CUDA_TRY(gpu_debrotli(inflate_in.device_ptr(start_pos),
inflate_out.device_ptr(start_pos),
debrotli_scratch.data().get(),
debrotli_scratch.size(),
argc - start_pos,
stream));
break;
default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break;
}
CUDA_TRY(cudaMemcpyAsync(inflate_out.host_ptr(start_pos),
inflate_out.device_ptr(start_pos),
sizeof(decltype(inflate_out)::value_type) * (argc - start_pos),
cudaMemcpyDeviceToHost,
stream));
}
}
CUDA_TRY(cudaStreamSynchronize(stream));
// Update the page information in device memory with the updated value of
// page_data; it now points to the uncompressed data buffer
CUDA_TRY(cudaMemcpyAsync(
pages.device_ptr(), pages.host_ptr(), pages.memory_size(), cudaMemcpyHostToDevice, stream));
return decomp_pages;
}
void reader::impl::decode_page_data(hostdevice_vector<gpu::ColumnChunkDesc> &chunks,
hostdevice_vector<gpu::PageInfo> &pages,
size_t min_row,
size_t total_rows,
const std::vector<int> &chunk_map,
std::vector<column_buffer> &out_buffers,
cudaStream_t stream)
{
auto is_dict_chunk = [](const gpu::ColumnChunkDesc &chunk) {
return (chunk.data_type & 0x7) == BYTE_ARRAY && chunk.num_dict_pages > 0;
};
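  // NOTE: data_type presumably packs the parquet physical type into its low 3 bits (with the
  // type length in the upper bits), so masking with 0x7 recovers BYTE_ARRAY here.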
// Count the number of string dictionary entries
// NOTE: Assumes first page in the chunk is always the dictionary page
size_t total_str_dict_indexes = 0;
for (size_t c = 0, page_count = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) { total_str_dict_indexes += pages[page_count].num_values; }
page_count += chunks[c].max_num_pages;
}
// Build index for string dictionaries since they can't be indexed
// directly due to variable-sized elements
rmm::device_vector<gpu::nvstrdesc_s> str_dict_index;
if (total_str_dict_indexes > 0) { str_dict_index.resize(total_str_dict_indexes); }
// Update chunks with pointers to column data
for (size_t c = 0, page_count = 0, str_ofs = 0; c < chunks.size(); c++) {
if (is_dict_chunk(chunks[c])) {
chunks[c].str_dict_index = str_dict_index.data().get() + str_ofs;
str_ofs += pages[page_count].num_values;
}
chunks[c].column_data_base = out_buffers[chunk_map[c]].data();
chunks[c].valid_map_base = out_buffers[chunk_map[c]].null_mask();
page_count += chunks[c].max_num_pages;
}
CUDA_TRY(cudaMemcpyAsync(
chunks.device_ptr(), chunks.host_ptr(), chunks.memory_size(), cudaMemcpyHostToDevice, stream));
if (total_str_dict_indexes > 0) {
CUDA_TRY(gpu::BuildStringDictionaryIndex(chunks.device_ptr(), chunks.size(), stream));
}
CUDA_TRY(gpu::DecodePageData(pages.device_ptr(),
pages.size(),
chunks.device_ptr(),
chunks.size(),
total_rows,
min_row,
stream));
CUDA_TRY(cudaMemcpyAsync(
pages.host_ptr(), pages.device_ptr(), pages.memory_size(), cudaMemcpyDeviceToHost, stream));
CUDA_TRY(cudaStreamSynchronize(stream));
for (size_t i = 0; i < pages.size(); i++) {
if (pages[i].num_rows > 0) {
const size_t c = pages[i].chunk_idx;
if (c < chunks.size()) {
out_buffers[chunk_map[c]].null_count() += pages[i].num_rows - pages[i].valid_count;
}
}
}
}
reader::impl::impl(std::unique_ptr<datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _source(std::move(source)), _mr(mr)
{
// Open and parse the source dataset metadata
_metadata = std::make_unique<metadata>(_source.get());
// Select only columns required by the options
_selected_columns = _metadata->select_columns(options.columns, options.use_pandas_metadata);
// Override output timestamp resolution if requested
if (options.timestamp_type.id() != EMPTY) { _timestamp_type = options.timestamp_type; }
// Strings may be returned as either string or categorical columns
_strings_to_categorical = options.strings_to_categorical;
}
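// Reads the selected row groups and columns: builds per-chunk descriptors, reads the compressed
// chunk data to the device, decodes page headers, decompresses pages where needed, decodes page
// data into column buffers, and finally assembles the output table with its metadata.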
table_with_metadata reader::impl::read(size_type skip_rows,
size_type num_rows,
size_type row_group,
size_type max_rowgroup_count,
const size_type *row_group_indices,
cudaStream_t stream)
{
std::vector<std::unique_ptr<column>> out_columns;
table_metadata out_metadata;
// Select only row groups required
const auto selected_row_groups = _metadata->select_row_groups(
row_group, max_rowgroup_count, row_group_indices, skip_rows, num_rows);
// Get a list of column data types
std::vector<data_type> column_types;
if (_metadata->row_groups.size() != 0) {
for (const auto &col : _selected_columns) {
auto &col_schema = _metadata->schema[_metadata->row_groups[0].columns[col.first].schema_idx];
auto col_type = to_type_id(col_schema.type,
col_schema.converted_type,
_strings_to_categorical,
_timestamp_type.id(),
col_schema.decimal_scale);
CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type");
column_types.emplace_back(col_type);
}
}
out_columns.reserve(column_types.size());
if (selected_row_groups.size() != 0 && column_types.size() != 0) {
// Descriptors for all the chunks that make up the selected columns
const auto num_columns = _selected_columns.size();
const auto num_chunks = selected_row_groups.size() * num_columns;
hostdevice_vector<gpu::ColumnChunkDesc> chunks(0, num_chunks, stream);
// Association between each column chunk and its column
std::vector<int> chunk_map(num_chunks);
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> page_data(num_chunks);
// Keep track of column chunk file offsets
std::vector<size_t> column_chunk_offsets(num_chunks);
// Initialize column chunk information
size_t total_decompressed_size = 0;
auto remaining_rows = num_rows;
for (const auto &rg : selected_row_groups) {
const auto &row_group = _metadata->row_groups[rg.first];
auto row_group_start = rg.second;
auto row_group_rows = std::min<int>(remaining_rows, row_group.num_rows);
auto io_chunk_idx = chunks.size();
for (size_t i = 0; i < num_columns; ++i) {
auto col = _selected_columns[i];
auto &col_meta = row_group.columns[col.first].meta_data;
auto &col_schema = _metadata->schema[row_group.columns[col.first].schema_idx];
      // The spec requires each row group to contain exactly one chunk for every
      // column. If there are too many or too few, continue on a best-effort basis.
if (col.second != _metadata->get_column_name(col_meta.path_in_schema)) {
std::cerr << "Detected mismatched column chunk" << std::endl;
continue;
}
if (chunks.size() >= chunks.max_size()) {
std::cerr << "Detected too many column chunks" << std::endl;
continue;
}
int32_t type_width;
int32_t clock_rate;
int8_t converted_type;
std::tie(type_width, clock_rate, converted_type) =
conversion_info(column_types[i].id(),
_timestamp_type.id(),
col_schema.type,
col_schema.converted_type,
col_schema.type_length);
column_chunk_offsets[chunks.size()] =
(col_meta.dictionary_page_offset != 0)
? std::min(col_meta.data_page_offset, col_meta.dictionary_page_offset)
: col_meta.data_page_offset;
chunks.insert(gpu::ColumnChunkDesc(col_meta.total_compressed_size,
nullptr,
col_meta.num_values,
col_schema.type,
type_width,
row_group_start,
row_group_rows,
col_schema.max_definition_level,
col_schema.max_repetition_level,
required_bits(col_schema.max_definition_level),
required_bits(col_schema.max_repetition_level),
col_meta.codec,
converted_type,
col_schema.decimal_scale,
clock_rate));
// Map each column chunk to its column index
chunk_map[chunks.size() - 1] = i;
if (col_meta.codec != Compression::UNCOMPRESSED) {
total_decompressed_size += col_meta.total_uncompressed_size;
}
}
// Read compressed chunk data to device memory
read_column_chunks(
page_data, chunks, io_chunk_idx, chunks.size(), column_chunk_offsets, stream);
remaining_rows -= row_group.num_rows;
}
assert(remaining_rows <= 0);
// Process dataset chunk pages into output columns
const auto total_pages = count_page_headers(chunks, stream);
if (total_pages > 0) {
hostdevice_vector<gpu::PageInfo> pages(total_pages, total_pages, stream);
rmm::device_buffer decomp_page_data;
decode_page_headers(chunks, pages, stream);
if (total_decompressed_size > 0) {
decomp_page_data = decompress_page_data(chunks, pages, stream);
// Free compressed data
for (size_t c = 0; c < chunks.size(); c++) {
if (chunks[c].codec != parquet::Compression::UNCOMPRESSED && page_data[c].size() != 0) {
page_data[c].resize(0);
page_data[c].shrink_to_fit();
}
}
}
std::vector<column_buffer> out_buffers;
out_buffers.reserve(column_types.size());
for (size_t i = 0; i < column_types.size(); ++i) {
auto col = _selected_columns[i];
auto &col_schema =
_metadata->schema
[_metadata->row_groups[selected_row_groups[0].first].columns[col.first].schema_idx];
bool is_nullable = (col_schema.max_definition_level != 0);
out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr);
}
decode_page_data(chunks, pages, skip_rows, num_rows, chunk_map, out_buffers, stream);
for (size_t i = 0; i < column_types.size(); ++i) {
out_columns.emplace_back(
make_column(column_types[i], num_rows, out_buffers[i], stream, _mr));
}
}
}
// Create empty columns as needed
for (size_t i = out_columns.size(); i < column_types.size(); ++i) {
out_columns.emplace_back(make_empty_column(column_types[i]));
}
// Return column names (must match order of returned columns)
out_metadata.column_names.resize(_selected_columns.size());
for (size_t i = 0; i < _selected_columns.size(); i++) {
out_metadata.column_names[i] = _selected_columns[i].second;
}
// Return user metadata
for (const auto &kv : _metadata->key_value_metadata) {
out_metadata.user_data.insert({kv.key, kv.value});
}
return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)};
}
// Forward to implementation
reader::reader(std::string filepath,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(datasource::create(filepath), options, mr))
{
}
// Forward to implementation
reader::reader(std::unique_ptr<cudf::io::datasource> source,
reader_options const &options,
rmm::mr::device_memory_resource *mr)
: _impl(std::make_unique<impl>(std::move(source), options, mr))
{
}
// Destructor within this translation unit
reader::~reader() = default;
// Forward to implementation
table_with_metadata reader::read_all(cudaStream_t stream)
{
return _impl->read(0, -1, -1, -1, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_row_group(size_type row_group,
size_type row_group_count,
cudaStream_t stream)
{
return _impl->read(0, -1, row_group, row_group_count, nullptr, stream);
}
// Forward to implementation
table_with_metadata reader::read_row_groups(const std::vector<size_type> &row_group_list,
cudaStream_t stream)
{
return _impl->read(
0, -1, -1, static_cast<size_type>(row_group_list.size()), row_group_list.data(), stream);
}
// Forward to implementation
table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, cudaStream_t stream)
{
return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, -1, -1, nullptr, stream);
}
} // namespace parquet
} // namespace detail
} // namespace io
} // namespace cudf
|
7608221dcac1a18d10110a1e1a00b1ab499ae8d0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
float data[126] = {0.000003, 0.000007, 0.000019, 0.000036, 0.000059, 0.000086, 0.000144, 0.000231, 0.000319, 0.000406, 0.000519, 0.000656, 0.000794, 0.000931, 0.001219, 0.001656, 0.002094,
0.002531, 0.002969, 0.003406, 0.003844, 0.004281, 0.004844, 0.005531, 0.006219, 0.006906, 0.007594, 0.008281, 0.008969, 0.009656, 0.011094, 0.013281, 0.015469, 0.017656, 0.019844, 0.022031, 0.024219, 0.026406, 0.028594, 0.030781, 0.032969, 0.035156, 0.037344, 0.039531, 0.041719, 0.043906, 0.046719, 0.050156, 0.053594, 0.057031, 0.060469, 0.063906, 0.067344, 0.070781, 0.074219, 0.077656, 0.081094, 0.084531, 0.087969, 0.091406, 0.094844, 0.098281, 0.105469, 0.116406, 0.127344, 0.138281, 0.149219, 0.160156, 0.171094, 0.182031, 0.192969, 0.203906, 0.214844, 0.225781, 0.236719,
0.247656, 0.258594, 0.269531, 0.280469, 0.291406, 0.302344, 0.313281, 0.324219, 0.335156, 0.346094, 0.357031, 0.367969, 0.378906, 0.389844, 0.400781, 0.411719, 0.422656, 0.433594, 0.444531,
0.458594, 0.475781, 0.492969, 0.510156, 0.527344, 0.544531, 0.561719, 0.578906, 0.596094, 0.613281, 0.630469, 0.647656, 0.664844, 0.682031, 0.699219, 0.716406, 0.733594, 0.750781, 0.767969,
0.785156, 0.802344, 0.819531, 0.836719, 0.853906, 0.871094, 0.888281, 0.905469, 0.922656, 0.939844, 0.957031, 0.974219, 0.991406};
hipMalloc((void**)&data8bit, 126*sizeof(float));
hipMemcpy(data8bit, data, 126*sizeof(float), hipMemcpyDefault);
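    // NOTE: data8bit appears to be a 126-entry non-uniform quantization codebook kept in device
    // memory; it is only consumed by the (currently commented-out) compress8bit/decompress8bit
    // experiments in NeuronLayer::fpropActs and FCLayer::bpropWeights below.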
}
Layer::~Layer()
{
counter = 0;
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
// For now, gradReducer doesn't have a destructor
// delete _gradReducer;
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
hipStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) {
syncStream(); // Make sure I've finished computing before broadcasting
}
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
    // Without this requirement, when all fprop terminal msgs arrive at ConvNet, the forward propagation
    // might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
// printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
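// Worked example (hypothetical topology): a layer with 3 input layers and 2 input replicas per
// layer expects 3 * 2 = 6 forward messages per pass; with 2 gradient producers above and 2
// sibling replicas it expects 2 * 2 = 4 backward messages.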
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
        // This does sync, but only if it has grad consumers below, so we must sync again before sending bprop terminal messages
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
// Synchronize if the previous layer is going to actually do a reduction.
// If the previous layer is on the same GPU as us and has no next layers
// on other GPUs then it won't need to do a reduction.
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
    // NOTE: this must come here (before the bpropActs calls) because if a layer
    // has a weight matrix AND actsGradTarget >= 0, the code below will overwrite
    // v, which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) {
syncStream();
}
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_prevDeviceIDs.size() + 1);
_prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
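        // Worked example (hypothetical 4-GPU layout): if this layer lives on device 1 and has
        // previous layers on devices 0, 1, 2 and 3, the keys above are 1, 0, 15 and 14
        // respectively, so map iteration visits device 1 first, then 0, then 3, then 2.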
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
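// Worked example (hypothetical replication): with an active pass period of 2 and 2 input replicas,
// the forward pass activates input replica 0 on passIdx 0, nothing on passIdx 1, replica 1 on
// passIdx 2, and so on; the backward variant returns the same sequence shifted by one pass
// (replica 0 on passIdx 1, replica 1 on passIdx 3, ...).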
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
/*
if( getActs().getNumCols() == 4096)
{
if(!_buffer8bit)
{
cout << "INIT BUFFERS MODEL" << endl;
int size = getActs().getNumRows()*getActs().getNumCols();
cout << size << endl;
size_t bytes = size*sizeof(unsigned char);
hipMalloc((void**)&_buffer8bit, bytes);
_abs_buffer = new NVMatrix(getActs().getNumRows(), getActs().getNumCols(), false);
}
if(getActs().getNumCols() != 10)
{
getActs().abs(*_abs_buffer);
float absMax = (*_abs_buffer).max();
getActs().compress8bit(data8bit, absMax, _buffer8bit);
getActs().decompress8bit(data8bit, absMax, _buffer8bit);
}
}
*/
//cout << getActs().getNumRows() << "x" << getActs().getNumCols() << endl;
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
                // This will cause it to forget momentum when shown 0 training cases
                // and _useGrad is false, but that's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0)
{
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
    // The weight update period must be a multiple of the activation period.
    // TODO: simply accumulate the number of cases seen between weight updates; simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
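// Worked example (hypothetical settings): with a weight update period of 2, a minibatch of 128
// cases and 2 passes per minibatch, numCases = 2 * (128 / 2) = 128, so a gradient-accumulating
// weight (isUseGrad) is scaled by 1 / 128 outside of gradient checking.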
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
/*
if(!_buffer8bit)
{
cout << "INIT BUFFERS MODEL" << endl;
int size = getActs().getNumRows()*getActs().getNumCols();
cout << size << endl;
size_t bytes = size*sizeof(unsigned char);
hipMalloc((void**)&_buffer8bit, bytes);
_abs_buffer = new NVMatrix(getActs().getNumRows(), getActs().getNumCols(), false);
}
if(getActs().getNumCols() != 1000)
{
cout << getActs().getNumRows() << "X" << getActs().getNumCols() << endl;
getActs().abs(*_abs_buffer);
float absMax = (*_abs_buffer).max();
getActs().compress8bit(data8bit, absMax, _buffer8bit);
getActs().decompress8bit(data8bit, absMax, _buffer8bit);
}
*/
//cout << getActs().getNumRows() << "x" << getActs().getNumCols() << endl;
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
//cout << "enter!" << endl;
cout << "bprop: " << prevActs_T.getNumRows()<< "x" << prevActs_T.getNumCols() << endl;
/*
if(prevActs_T.getNumCols() != 10)
{
if(!_buffer8bit_bw)
{
cout << "INIT BUFFERS MODEL BW" << endl;
int size = prevActs_T.getNumRows()*prevActs_T.getNumCols();
cout << size << endl;
size_t bytes = size*sizeof(unsigned char);
hipMalloc((void**)&_buffer8bit_bw, bytes);
_abs_buffer_bw = new NVMatrix(prevActs_T.getNumRows(), prevActs_T.getNumCols(), false);
}
prevActs_T.abs(*_abs_buffer_bw);
float absMax = (*_abs_buffer_bw).max();
cout << absMax << endl;
cout << _prev[replicaIdx][inpIdx]->getActsGrad().getNumRows()<< "x" << _prev[replicaIdx][inpIdx]->getActsGrad().getNumCols() << endl;
prevActs_T.compress8bit(data8bit, absMax, _buffer8bit_bw);
prevActs_T.decompress8bit(data8bit, absMax, _buffer8bit_bw);
//_prev[replicaIdx][inpIdx]->getActsGrad().abs(*_abs_buffer_bw);
//absMax = (*_abs_buffer_bw).max();
//cout << absMax << endl;
}
*/
//cout << "post" << endl;
float scaleGrad = getGradScale(inpIdx, passType);
//cout << scaleGrad << endl;
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
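// With shared biases there is one bias per filter, so temporarily reshape acts to
// (numFilters x modules*cases), add the bias vector, then restore the original shape.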
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
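// When _sumWidth < _modulesX, convWeightActs emits partial weight gradients (one per sumWidth x sumWidth
// block of modules) into _weightGradTmp, which are then summed into the real gradient below; otherwise it
// accumulates into the weight gradient directly.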
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
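// Numerically stable softmax: subtract the per-case max before exponentiating (softmax is invariant
// to a constant shift), then normalize by the per-case sum.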
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& input = *_inputs[0];
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
// Todo: rethink replica IDs or idxes... this here doesn't make a huge amount of sense
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
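// Forward pass copies each input into its offset range [_copyOffsets[i], _copyOffsets[i+1]) of the output;
// the backward pass slices the corresponding rows of the incoming gradient back out.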
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
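// Instead of copying, postInit points each previous layer's acts/actsGrad at an offset range of this
// layer's own memory source, so fprop/bprop can be no-ops.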
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (inpIdx == 1) { // Second input has arrived: max it against the first
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
/*
* =======================
* Dropout2Layer
* =======================
*
* TODO: optimize away the case when using dropout over relus. Don't need the keepmask.
*/
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) {
}
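// Unlike DropoutLayer above, Dropout2Layer uses a plain 0/1 keep mask at training time and scales
// activations by _keep at test time to preserve their expected magnitude.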
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,hipStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(hipStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
hipStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(hipStreamCreateWithFlags(&_copyStreams[deviceID], hipStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval (tracked as an exponential moving average, staggered by replica ID)
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
}
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
// Safer to divup because this way you won't get a minibatch size of 0
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
//_hostMemFwd.copyFromHost(replicaDataMatrix, true);
_hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true);
memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes());
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
_sum = pyDictGetInt(paramsDict, "sum");
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_sum) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<true>());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<false>());
}
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, _sum, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
// For each scale, record the fraction of the squares that it has.
// This will be the probability of sampling this scale.
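// Scale index s corresponds to a rescaled side of (_tgtSize + s), which admits (s+1)^2 distinct
// _tgtSize-sized crops; the cumulative sums below make each scale's probability proportional to its crop count.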
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
}
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // for now...
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
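// Contrast normalization: estimate the local mean with an average pool, form (image - local mean),
// then response-normalize those mean-subtracted values.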
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler<false>());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
computeCrossEntCost(labels, probs, _trueLabelLogProbs, _correctProbs);
_costv.clear();
_costv.push_back(-_trueLabelLogProbs.sum());
_costv.push_back(numCases - _correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
float m = _tmpVec.max();
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
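// During multiview testing the per-view probabilities are accumulated in _probsAccum and averaged once
// the last view arrives; the cost and error counts are only computed at that point (or on every pass otherwise).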
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax"
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->apply(NVMatrixOps::Square(), _tmp);
_costv.clear();
_costv.push_back(_tmp.sum());
}
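// d(sum x^2)/dx = 2x, scaled by _coeff; the sign follows the gradient convention used by the other
// cost layers in this file.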
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
| 7608221dcac1a18d10110a1e1a00b1ab499ae8d0.cu | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <helper_cuda.h>
#include <iostream>
#include <set>
#include "../../cudaconv3/include/cudaconv2.cuh"
#include "../../util/include/matrix.h"
#include "../include/layer_kernels.cuh"
#include "../include/layer.cuh"
#include "../include/data.cuh"
#include "../include/util.cuh"
#include "../include/weights.cuh"
using namespace std;
/*
* =======================
* Layer
* =======================
*/
Layer::Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans) :
_convNetThread(convNetThread), _replicaID(replicaID), _trans(trans) {
_name = pyDictGetString(paramsDict, "name");
_type = pyDictGetString(paramsDict, "type");
_foundGradConsumers = false;
_gradConsumer = pyDictGetInt(paramsDict, "gradConsumer");
_actsTarget = pyDictGetInt(paramsDict, "actsTarget");
_actsGradTarget = pyDictGetInt(paramsDict, "actsGradTarget");
_numOutputs = pyDictGetInt(paramsDict, "outputs");
_numReplicas = pyDictGetInt(paramsDict, "numReplicas");
_numReplicasPrev = 1;
_rcvdBInputMsgs = 0;
_actBroadcaster = NULL;
_gradReducer = NULL;
_initialized = false;
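// 126-entry lookup table, presumably the quantization code book for the (currently commented-out)
// 8-bit compress8bit/decompress8bit experiments in FCLayer; it is uploaded to the GPU once per layer as data8bit.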
float data[126] = {0.000003, 0.000007, 0.000019, 0.000036, 0.000059, 0.000086, 0.000144, 0.000231, 0.000319, 0.000406, 0.000519, 0.000656, 0.000794, 0.000931, 0.001219, 0.001656, 0.002094,
0.002531, 0.002969, 0.003406, 0.003844, 0.004281, 0.004844, 0.005531, 0.006219, 0.006906, 0.007594, 0.008281, 0.008969, 0.009656, 0.011094, 0.013281, 0.015469, 0.017656, 0.019844, 0.022031, 0.024219, 0.026406, 0.028594, 0.030781, 0.032969, 0.035156, 0.037344, 0.039531, 0.041719, 0.043906, 0.046719, 0.050156, 0.053594, 0.057031, 0.060469, 0.063906, 0.067344, 0.070781, 0.074219, 0.077656, 0.081094, 0.084531, 0.087969, 0.091406, 0.094844, 0.098281, 0.105469, 0.116406, 0.127344, 0.138281, 0.149219, 0.160156, 0.171094, 0.182031, 0.192969, 0.203906, 0.214844, 0.225781, 0.236719,
0.247656, 0.258594, 0.269531, 0.280469, 0.291406, 0.302344, 0.313281, 0.324219, 0.335156, 0.346094, 0.357031, 0.367969, 0.378906, 0.389844, 0.400781, 0.411719, 0.422656, 0.433594, 0.444531,
0.458594, 0.475781, 0.492969, 0.510156, 0.527344, 0.544531, 0.561719, 0.578906, 0.596094, 0.613281, 0.630469, 0.647656, 0.664844, 0.682031, 0.699219, 0.716406, 0.733594, 0.750781, 0.767969,
0.785156, 0.802344, 0.819531, 0.836719, 0.853906, 0.871094, 0.888281, 0.905469, 0.922656, 0.939844, 0.957031, 0.974219, 0.991406};
cudaMalloc((void**)&data8bit, 126*sizeof(float));
cudaMemcpy(data8bit, data, 126*sizeof(float), cudaMemcpyDefault);
}
Layer::~Layer()
{
counter = 0;
if (_actBroadcaster != NULL) {
_actBroadcaster->stop();
delete _actBroadcaster;
}
if (_gradReducer != NULL) {
_gradReducer->stop();
delete _gradReducer;
}
// For now, gradReducer doesn't have a destructor
// delete _gradReducer;
for (std::map<int, MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
}
cudaStream_t Layer::getStream() {
assert(getDeviceID() >= 0);
return NVMatrix::getDefaultStream(getDeviceID());
}
void Layer::syncStream() {
NVMatrix::syncStream(getStream());
}
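// After acts are computed, broadcast them to replicas/next layers on other devices (syncing the stream
// first so the broadcast sees finished results), then enqueue fprop messages for all next layers.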
void Layer::fpropNext(PASS_TYPE passType, int passIdx) {
if (_next.size() > 0) {
if (getFwdActiveReplicaIdx(passIdx) == 0/*getReplicaIdx()*/) { // 0 turns on pipelining
if (_nextDeviceIDs.size() > 1 || (_nextDeviceIDs.size() == 1 && _nextDeviceIDs[0] != getDeviceID())) {
syncStream(); // Make sure I've finished computing before broadcasting
}
getActBroadcaster().getMessageQueue().enqueue(new BroadcastMessage(getAllActs(), getDeviceID(), getReplicaIdx(), _broadcastFinishQueue));
}
if (getFwdActiveReplicaIdx(passIdx) == getReplicaIdx()) {
_broadcastFinishQueue.dequeue();
assert(_broadcastFinishQueue.getNumElements() == 0);
}
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
bool Layer::fprop(PASS_TYPE passType, int passIdx) {
_rcvdFInputMsgs++;
// I require messages from *all* input replicas because it makes the propagation easier to think about.
// Without this requirement, when all fprop terminal msgs arrive to ConvNet, the forward propagation
// might not actually be finished yet.
if (_rcvdFInputMsgs == getNumExpectedFwdMsgs()) {
// printf("Layer %s[%d] fprop\n", _name.c_str(), getReplicaID());
int ridx = getFwdActiveInputReplicaIdx(passIdx);
assert(getDeviceID() == NVMatrix::getDeviceID());
map<int, NVMatrix*> v;
if (ridx >= 0) {
for (int i = 0; i < getNumLayersPrev(); i++) {
v[i] = &_prev[ridx][i]->getActs(getDeviceID());
}
}
fprop(v, passType, passIdx);
return true;
}
return false;
}
void Layer::fprop(map<int,NVMatrix*>& v, PASS_TYPE passType, int passIdx) {
if (getFwdActiveInputReplicaIdx(passIdx) >= 0) {
assert(v.size() == getNumLayersPrev());
_inputs.clear();
_inputs.insert(v.begin(), v.end());
int numCases = _inputs[0]->getLeadingDim();
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemory(numCases);
}
if (numCases > 0) {
//printf("layer %s fprop, numcases: %d\n", _name.c_str(), numCases);
_rcvdFInputMsgs = getNumExpectedFwdMsgs();
for (map<int,NVMatrix*>::iterator it = v.begin(); it != v.end(); ++it) {
it->second->transpose(_trans);
}
getActs().transpose(_trans);
fpropCommon(passType);
// First do fprop on the input whose acts matrix I'm sharing, if any
if (_actsTarget >= 0) {
fpropActs(_actsTarget, 0, passType, passIdx);
}
// Then add the rest of the inputs to that
for (int i = 0; i < getNumLayersPrev(); i++) {
if (i != _actsTarget) {
fpropActs(i, _actsTarget >= 0 || i > 0, passType, passIdx);
}
}
}
}
fpropNext(passType, passIdx);
}
void Layer::truncBwdActs() {
// Only truncate actsGrad if I own it
if (_actsGradTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
if (_actsTarget < 0) {
for (map<int,MemoryView*>::iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
it->second->getMemorySource().truncate(getName());
}
}
}
int Layer::getNumGradProducersNext() {
return _numGradProducersNext;
}
int Layer::getNumExpectedBwdMsgs() {
return _numGradProducersNext * getNumSiblingReplicas();
}
int Layer::getNumExpectedFwdMsgs() {
return getNumLayersPrev() * getNumInputReplicas();
}
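/*
 * Illustrative example (hypothetical numbers, derived from the two formulas above): if each
 * input replica of this layer has 3 previous layers and there are 2 input replicas, then
 * getNumExpectedFwdMsgs() == 3 * 2 == 6, so Layer::fprop (above) waits for 6 FpropMessages
 * before doing any work. Similarly, with 4 gradient producers among the next layers and
 * 2 sibling replicas, getNumExpectedBwdMsgs() == 4 * 2 == 8.
 */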
void Layer::bprop(PASS_TYPE passType, int passIdx) {
if (getBwdActiveInputReplicaIdx(passIdx) >= 0 && _rcvdBInputMsgs == getNumExpectedBwdMsgs()) {
// printf("Layer %s[%d] bprop\n", _name.c_str(), getReplicaID());
if (_gradReducer != NULL) {
_gradReducer->waitForFinish();
}
        // This does sync, but only if it has grad consumers below! So we must sync again before sending bprop terminal messages.
bprop(getActsGrad(), passType, passIdx);
if (_bwdTerminal[passIdx]) {
syncStream();
getConvNet().getMessageQueue().enqueue(new Message(BPROP_TERMINAL));
}
}
}
void Layer::bpropActsCall(NVMatrix& v, PASS_TYPE passType, int replicaIdx, int inputIdx) {
Layer& prev = *_prev[replicaIdx][inputIdx];
if (prev.isGradConsumer() && isGradProducer(prev.getName())) {
if (v.getLeadingDim() > 0) { // Only do computation if #cases > 0
bpropActs(v, replicaIdx, inputIdx, prev.getNumComputedActsGrads(getDeviceID()) > 0, passType);
}
prev.getNumComputedActsGrads(getDeviceID())++;
// Synchronize if the previous layer is going to actually do a reduction.
// If the previous layer is on the same GPU as us and has no next layers
// on other GPUs then it won't need to do a reduction.
if (prev.getNextDeviceIDs().size() > 1 || (prev.getNextDeviceIDs().size() == 1 && getDeviceID() != prev.getDeviceID())) {
syncStream();
}
prev.getGradReducer().enqueueReduction(getDeviceID());
}
}
void Layer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
v.transpose(_trans);
assert(getDeviceID() == NVMatrix::getDeviceID());
int ridx = getBwdActiveInputReplicaIdx(passIdx);
LayerV& prev = _prev[ridx];
map<int, set<Layer*> > prevByDevice = _prevByDevice[ridx];
for (int i = 0; i < prev.size(); i++) {
_inputs[i]->transpose(_trans);
prev[i]->getActsGrad().transpose(_trans);
}
getActs().transpose(_trans);
// NOTE: this should be here (before the bpropActs) because if you have a layer
// that has a weight matrix AND actsGradTarget >= 0, then the stuff below will overwrite
// v which is used in bpropCommon. So bpropCommon must come first.
bpropCommon(v, ridx, passType);
if (isGradProducer()) {
// First propagate activity gradient to all layers whose activity
// gradient matrix I'm definitely not sharing.
for (map<int, set<Layer*> >::const_iterator it = prevByDevice.begin(); it != prevByDevice.end(); ++it) {
const set<Layer*>& deviceLayers = it->second;
for (set<Layer*>::const_iterator it2 = deviceLayers.begin(); it2 != deviceLayers.end(); ++it2) {
if (_actsGradTarget != (*it2)->getInputIdx(_name)) {
bpropActsCall(v, passType, ridx, (*it2)->getInputIdx(_name));
}
}
}
// Then propagate activity gradient to the layer whose activity gradient
// matrix I'm sharing, if any.
if (_actsGradTarget >= 0) {
bpropActsCall(v, passType, ridx, _actsGradTarget);
}
}
// Synchronization is necessary because the kernel calls that compute my backward acts
// execute asynchronously. Therefore I don't want to tell other threads that I've
// computed bprop activities for them when in fact I've only called a function which
// will eventually compute them.
if (_prevDeviceIDs.size() > 1 || (_prevDeviceIDs.size() == 1 && _prevDeviceIDs[0] != getDeviceID())) {
syncStream();
}
if (getConvNet().isConserveMemory()) {
truncBwdActs();
}
if (isGradProducer()) {
/*for (int i = 0; i < prev.size(); i++) {
if (prev[i]->isGradConsumer() && isGradProducer(prev[i]->getName())) {
prev[i]->getGradReducer().enqueueReduction(getDeviceID());
}
}*/
// Send backward messages to *all* replicas.
// Note that the messages will be dismissed unless the passIdx indicates
// that the previous layer should do some work.
for (int r = 0; r < getNumInputReplicas(); r++) {
for (int i = 0; i < _prev[r].size(); i++) {
if (_prev[r][i]->isGradConsumer() && isGradProducer(_prev[r][i]->getName())) {
_prev[r][i]->getConvNetThread().getMessageQueue().enqueue(new BpropMessage(*_prev[r][i], passType, passIdx));
}
}
}
}
}
IActGradReducer& Layer::getGradReducer() {
return *_gradReducer;
}
// This is called between minibatches
void Layer::reset() {
_rcvdFInputMsgs = 0;
_rcvdBInputMsgs = 0;
for (map<int,int>::iterator it = _numComputedActsGrads.begin(); it != _numComputedActsGrads.end(); ++it) {
it->second = 0;
}
}
// This is called between microbatches
void Layer::resetPassIdx() {
_rcvdFInputMsgs = 0;
if (_rcvdBInputMsgs >= getNumExpectedBwdMsgs()) {
reset();
}
}
/*
* Returns number of cases in given matrix.
*/
int Layer::getNumCases(NVMatrix& v) {
return v.getLeadingDim();
}
int Layer::incRcvdBInputMsgs() {
return ++_rcvdBInputMsgs;
}
std::string& Layer::getName() {
return _name;
}
std::string& Layer::getType() {
return _type;
}
int& Layer::getNumComputedActsGrads(int deviceID) {
return _numComputedActsGrads[deviceID];
}
void Layer::addNext(Layer& l) {
_next.push_back(&l);
_numReplicasNext = l.getNumReplicas();
if (count(_nextDeviceIDs.begin(), _nextDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_nextDeviceIDs.size() + 1);
_nextDeviceIDs.insert(_nextDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addPrev(Layer& l, int replicaIdx) {
_prev[replicaIdx].push_back(&l);
_numReplicasPrev = l.getNumReplicas();
l.setInputIdx(getName(), _prev[replicaIdx].size() - 1);
if (l.getDeviceID() >= 0 && count(_prevDeviceIDs.begin(), _prevDeviceIDs.end(), l.getDeviceID()) == 0) {
int pos = rand() % (_prevDeviceIDs.size() + 1);
_prevDeviceIDs.insert(_prevDeviceIDs.begin() + pos, l.getDeviceID());
}
}
void Layer::addReplica(Layer& l) {
assert(_replicas.count(l.getReplicaID()) == 0);
_replicas[l.getReplicaID()] = &l;
}
bool Layer::hasGradProducerNext(std::string& layerName) {
bool b = _next.size() == 0;
for (int i = 0; i < _next.size(); i++) {
b |= _next[i]->hasGradProducerNext(_name);
}
return b && isGradProducer(layerName);
}
bool Layer::postInit() {
// We choose not to populate _outputs[getDeviceID()] here because we do it instead in fprop().
// In fprop(), we can populate it from the _inputs vector, which is a bit more general than populating
// it from _prev->getActs()
// _outputs = _actsTarget < 0 ? new NVMatrix() : &_prev[_actsTarget]->getActs();
if (!_initialized) {
_initialized = true;
map<int,int> numGradProducersNext;
_numGradProducersNext = 0;
for (int r = 0; r < getNumInputReplicas(); ++r) {
for (vector<Layer*>::const_iterator it = _prev[r].begin(); it != _prev[r].end(); ++it) {
(*it)->postInit();
}
}
_memSrcActs[getDeviceID()] = _actsTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsTarget]->getMemorySourceActs(getDeviceID()).clone(_name);
// _actsGradTarget will only be >= 0 when the number of replicas is the same in both layers, so this justifies the use of _prev[0]
_memSrcActsGrad[getDeviceID()] = _actsGradTarget < 0 ? &MemorySource::make(_numOutputs, getDeviceID(), getName())
: &_prev[0][_actsGradTarget]->getMemorySourceActsGrad(getDeviceID()).clone(_name);
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
_numComputedActsGrads[d] = 0;
if (_next[i]->hasGradProducerNext(_name)) {
if (numGradProducersNext.count(d) == 0) {
numGradProducersNext[d] = 0;
}
numGradProducersNext[d]++;
_numGradProducersNext++;
if (_memSrcActsGrad.count(d) == 0) {
_memSrcActsGrad[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_memSrcActs.count(d) == 0) {
_memSrcActs[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
if (_next.size() == 0) {
_numReplicasNext = getNumReplicas();
}
/*
* Initialize forward broadcaster. First sibling owns it.
*/
if (getReplicaIdx() == 0 && _convNetThread != NULL) {
_actBroadcaster = new ActBroadcaster(getNumSiblingReplicas(), getDeviceCPUs(_convNetThread->getDeviceID()));
_actBroadcaster->start();
}
/*
* Initialize backward reducer.
*/
if (isGradConsumer() && _numGradProducersNext > 0) {
_gradReducer = &IActGradReducer::makeGradReducer(*this, numGradProducersNext);
_gradReducer->start();
}
/*
* Initialize specially sorted previous array
*/
for (int r = 0; r < _prev.size(); ++r) {
for (int i = 0; i < _prev[r].size(); ++i) {
// Previous devices in reverse order of processing by (sequential) GradReducer
_prevByDevice[r][getDeviceID() - _prev[r][i]->getDeviceID()
+ 16 * (_prev[r][i]->getDeviceID() > getDeviceID())].insert(_prev[r][i]);
}
}
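        /*
         * Illustrative example of the key computed above (hypothetical device IDs): if this
         * layer is on device 2 and the previous layers live on devices 0, 1, 2 and 3, the keys
         * are 2, 1, 0 and 2 - 3 + 16 = 15 respectively, so the map iterates as: device 2
         * (same GPU), device 1, device 0, device 3. The constant 16 presumably just bounds the
         * number of devices so that previous layers on higher-numbered devices sort after all
         * lower-numbered ones.
         */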
return true;
}
return false;
}
ActBroadcaster& Layer::getActBroadcaster() {
return getReplicaIdx() == 0 ? *_actBroadcaster : _replicas[getReplicaID() - getReplicaIdx()]->getActBroadcaster();
}
// Does this layer, or some layer below it, need the gradient
// for parameter updates?
// Only weight layers should be grad consumers themselves.
bool Layer::isGradConsumer() {
if (!_foundGradConsumers && _prev.size() > 0) {
for (int i = 0; i < _prev[0].size(); i++) {
_gradConsumer |= _prev[0][i]->isGradConsumer();
}
_foundGradConsumers = true;
}
return _gradConsumer;
}
// Does this layer produce gradient for layers below?
bool Layer::isGradProducer() {
return true;
}
bool Layer::isGradProducer(std::string& layerName) {
return isGradProducer();
}
map<int,vector<Layer*> >& Layer::getPrev() {
return _prev;
}
vector<Layer*>& Layer::getNext() {
return _next;
}
NVMatrix& Layer::getActs() {
return getActs(getDeviceID());
}
NVMatrix& Layer::getActs(int deviceID) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory();
}
NVMatrix& Layer::getActs(int deviceID, int numCases) {
assert(_memSrcActs.count(deviceID) > 0);
return _memSrcActs[deviceID]->getMemory(numCases);
}
NVMatrix& Layer::getActsGrad(int deviceID) {
assert(_memSrcActsGrad.count(deviceID) > 0);
return _memSrcActsGrad[deviceID]->getMemory(getActs(deviceID).getLeadingDim());
}
NVMatrix& Layer::getActsGrad() {
return getActsGrad(NVMatrix::getDeviceID());
}
map<int, NVMatrix*> Layer::getAllActs() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActs.begin(); it != _memSrcActs.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
map<int, NVMatrix*> Layer::getAllActsGrads() {
map<int, NVMatrix*> m;
for (map<int, MemoryView*>::const_iterator it = _memSrcActsGrad.begin(); it != _memSrcActsGrad.end(); ++it) {
m[it->first] = &it->second->getMemory();
}
return m;
}
int Layer::getDeviceID() {
return _convNetThread == NULL ? -1 : _convNetThread->getDeviceID();
}
ConvNetThread& Layer::getConvNetThread() {
assert(_convNetThread != NULL);
return *_convNetThread;
}
ConvNet& Layer::getConvNet() {
return getConvNetThread().getConvNet();
}
void Layer::setBwdTerminal(int passIdx) {
_bwdTerminal[passIdx] = true;
}
int Layer::getReplicaID() {
return _replicaID;
}
int Layer::getActivePassPeriod() {
return getNumReplicas() / getConvNet().getNumReplicasMin();
}
int Layer::getFwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return passIdx % getActivePassPeriod() == 0 ? edge : -1;
}
int Layer::getBwdActiveInputReplicaIdx(int passIdx) {
const int edge = (passIdx / getActivePassPeriod()) % getNumInputReplicas();
return (passIdx + 1) % getActivePassPeriod() == 0 ? edge : -1;
}
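/*
 * Illustrative example (hypothetical numbers): suppose this layer has 4 replicas, the smallest
 * layer in the net has 2 (so getActivePassPeriod() == 4 / 2 == 2), and there are 2 input
 * replicas. Then for passIdx = 0,1,2,3 the functions above return:
 *   getFwdActiveInputReplicaIdx:  0, -1,  1, -1
 *   getBwdActiveInputReplicaIdx: -1,  0, -1,  1
 * i.e. forward work happens on even passes, backward work on odd passes, and both cycle
 * through the input replicas.
 */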
int Layer::getFwdActiveReplicaIdx(int passIdx) {
assert(_next.size() > 0);
return _next[0]->getFwdActiveInputReplicaIdx(passIdx);
}
int Layer::getNumReplicas() {
return _replicas.size();
}
int Layer::getNumSiblingReplicas() {
return getNumReplicas() / getNumReplicasNext();
}
int Layer::getNumReplicasPrev() {
return _numReplicasPrev;
}
int Layer::getNumReplicasNext() {
return _numReplicasNext;
}
int Layer::getNumInputReplicas() {
return _numReplicasPrev / getNumReplicas();
}
int Layer::getReplicaIdx() {
return getReplicaID() % getNumSiblingReplicas();
}
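/*
 * Illustrative example (hypothetical numbers): with 8 replicas of this layer feeding a next
 * layer that has 2 replicas, getNumSiblingReplicas() == 8 / 2 == 4. A replica with replicaID 5
 * then has getReplicaIdx() == 5 % 4 == 1, and getActBroadcaster() (above) forwards to the
 * replica with ID 5 - 1 == 4, the first sibling in its group -- assuming, as that lookup
 * implies, that sibling replica IDs are numbered contiguously.
 */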
int Layer::getNumLayersPrev() {
return _prev.size() > 0 ? _prev[0].size() : 0;
}
void Layer::setMemorySourceActs(int deviceID, MemoryView& mem) {
assert(_memSrcActs[deviceID]->isParent());
delete _memSrcActs[deviceID];
_memSrcActs[deviceID] = &mem;
if (_actsTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsTarget]->setMemorySourceActs(deviceID, mem.clone(_prev[0][_actsTarget]->getName()));
}
}
void Layer::setMemorySourceActsGrad(int deviceID, MemoryView& mem) {
assert(_memSrcActsGrad[deviceID]->isParent());
delete _memSrcActsGrad[deviceID];
_memSrcActsGrad[deviceID] = &mem;
if (_actsGradTarget >= 0 && deviceID == getDeviceID()) {
assert(getNumInputReplicas() == 1);
_prev[0][_actsGradTarget]->setMemorySourceActsGrad(deviceID, mem.clone(_prev[0][_actsGradTarget]->getName()));
}
}
MemoryView& Layer::getMemorySourceActs(int deviceID) {
return *_memSrcActs[deviceID];
}
MemoryView& Layer::getMemorySourceActsGrad(int deviceID) {
return *_memSrcActsGrad[deviceID];
}
int Layer::getNumOutputs() {
return _numOutputs;
}
void Layer::setInputIdx(std::string& parentName, int idx) {
_inputIndices[parentName] = idx;
}
int Layer::getInputIdx(std::string& parentName) {
return _inputIndices[parentName];
}
/*
* =======================
* NeuronLayer
* =======================
*/
NeuronLayer::NeuronLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true) {
PyObject* neuronDict = PyDict_GetItemString(paramsDict, "neuron");
_neuronType = pyDictGetString(neuronDict, "type");
_neuron = &Neuron::makeNeuron(neuronDict);
}
NeuronLayer::~NeuronLayer() {
delete _neuron;
}
void NeuronLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
if (!bpropSpecial(v, replicaIdx, inpIdx, scaleTargets, passType)) {
_neuron->computeInputGrad(v, _prev[replicaIdx][0]->getActsGrad(), scaleTargets > 0);
}
}
bool NeuronLayer::bpropSpecial(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// Special optimization for cross-entropy objective with logistic units.
// Better to just compute the input gradient in one go to avoid division by small numbers.
bool doCrossEntGrad = _neuronType == "logistic" && _next.size() == 1
&& (_next[0]->getType() == "cost.bce" || _next[0]->getType() == "cost.dce")
&& _next[0]->getDeviceID() == getDeviceID()
&& _next[0]->getNumReplicas() == getNumReplicas();
LayerV& prev = _prev[replicaIdx];
if (doCrossEntGrad) {
NVMatrix& labels = _next[0]->getPrev()[replicaIdx][0]->getActs(getDeviceID());
BinomialCrossEntropyCostLayer& cost = *static_cast<BinomialCrossEntropyCostLayer*>(_next[0]);
float gradCoeff = cost.getCoeff();
labels.transpose(_trans);
if (cost.getPosWeight() == 1) {
if (scaleTargets == 0) {
getActs().add(labels, -gradCoeff, gradCoeff, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::WeightedAdd>(NVMatrixBinaryOps::WeightedAdd(-gradCoeff, gradCoeff)),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
} else {
if (scaleTargets == 0) {
getActs().applyBinary(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight()), labels, prev[0]->getActsGrad());
} else {
getActs().applyTernary(AddGradientBinaryOperator<CrossEntLogisticGradientOperator>(CrossEntLogisticGradientOperator(gradCoeff, cost.getPosWeight())),
labels, prev[0]->getActsGrad(), prev[0]->getActsGrad());
}
}
}
return doCrossEntGrad;
}
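/*
 * Math note for the shortcut above: with logistic activation a = sigmoid(z) and label t, the
 * binomial cross-entropy cost C = -[t*log(a) + (1-t)*log(1-a)] has dC/dz = a - t. Computing
 * this directly avoids forming dC/da = -t/a + (1-t)/(1-a), which divides by values that can be
 * arbitrarily close to zero. Reading NVMatrix::add(b, scaleA, scaleB, tgt) as
 * tgt = scaleA*this + scaleB*b, the posWeight == 1, scaleTargets == 0 branch above writes
 * gradCoeff * (labels - acts) into the previous layer's acts gradient, i.e. the same quantity
 * up to the library's sign/coefficient convention.
 */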
void NeuronLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_neuron->activate(*_inputs[0], getActs());
/*
if( getActs().getNumCols() == 4096)
{
if(!_buffer8bit)
{
cout << "INIT BUFFERS MODEL" << endl;
int size = getActs().getNumRows()*getActs().getNumCols();
cout << size << endl;
size_t bytes = size*sizeof(unsigned char);
cudaMalloc((void**)&_buffer8bit, bytes);
_abs_buffer = new NVMatrix(getActs().getNumRows(), getActs().getNumCols(), false);
}
if(getActs().getNumCols() != 10)
{
getActs().abs(*_abs_buffer);
float absMax = (*_abs_buffer).max();
getActs().compress8bit(data8bit, absMax, _buffer8bit);
getActs().decompress8bit(data8bit, absMax, _buffer8bit);
}
}
*/
//cout << getActs().getNumRows() << "x" << getActs().getNumCols() << endl;
}
std::string& NeuronLayer::getNeuronType() {
return _neuronType;
}
/*
* =======================
* WeightLayer
* =======================
*
* The useGrad parameter here merely expresses a preference by the subclass. It may
* be overridden by the superclass (WeightLayer) and in that case the subclass must follow its wishes.
* So when computing gradient updates, the subclass must always first check weights.isUseGrad().
*
* Note: biases always useGrad.
*/
WeightLayer::WeightLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans, bool useGrad) :
Layer(convNetThread, paramsDict, replicaID, trans) {
_weightUpdatePassPeriod = pyDictGetInt(paramsDict, "updatePeriod");
MatrixV& hWeights = *pyDictGetMatrixV(paramsDict, "weights");
MatrixV& hWeightsInc = *pyDictGetMatrixV(paramsDict, "weightsInc");
Matrix& hBiases = *pyDictGetMatrix(paramsDict, "biases");
Matrix& hBiasesInc = *pyDictGetMatrix(paramsDict, "biasesInc");
PyObject* pyEpsWList = PyDict_GetItemString(paramsDict, "epsW");
PyObject* pyEpsB = PyDict_GetItemString(paramsDict, "epsB");
floatv& momW = *pyDictGetFloatV(paramsDict, "momW");
float momB = pyDictGetFloat(paramsDict, "momB");
floatv& wc = *pyDictGetFloatV(paramsDict, "wc");
floatv& wball = *pyDictGetFloatV(paramsDict, "wballNormed");
/*
* When there are multiple replicas, the present implementation
* requires that useGrad is true. This is because weights.update()
* performs a simultaneous write to both replicas' weightsInc matrix,
* which means that the read should come from somewhere else (i.e. a
* grads matrix).
*/
useGrad |= _numReplicas > 1;
// Source layers for shared weights
stringv& weightSourceLayers = *pyDictGetStringV(paramsDict, "weightSourceLayers");
// Weight matrix indices (inside the above source layers) for shared weights
intv& weightSourceMatrixIndices = *pyDictGetIntV(paramsDict, "weightSourceMatrixIndices");
_weights = new WeightList();
for (int i = 0; i < weightSourceLayers.size(); i++) {
std::string& srcLayerName = weightSourceLayers[i];
int matrixIdx = weightSourceMatrixIndices[i];
PyObject* pyEpsW = PyList_GetItem(pyEpsWList, i);
ParameterSchedule& lrs = ParameterSchedule::make(pyEpsW); // Learning rate schedule
if (srcLayerName == _name) { // Current layer
_weights->addWeights(*new Weights(_weights->at(matrixIdx), lrs, *this));
} else if (srcLayerName != "") {
WeightLayer& srcLayer = *static_cast<WeightLayer*>(&convNetThread->getLayer(srcLayerName));
Weights* srcWeights = &srcLayer.getWeights(matrixIdx);
_weights->addWeights(*new Weights(*srcWeights, lrs, *this));
} else {
_weights->addWeights(*new Weights(*hWeights[i], *hWeightsInc[i], lrs, *this, wc[i], wball[i], momW[i], useGrad));
}
}
_biases = new Weights(hBiases, hBiasesInc, ParameterSchedule::make(pyEpsB), *this, 0, 0, momB, true);
delete &weightSourceLayers;
delete &weightSourceMatrixIndices;
delete &hWeights;
delete &hWeightsInc;
delete &momW;
delete &wc;
delete &wball;
_wStep = 0.02;
_bStep = 0.005;
}
WeightLayer::~WeightLayer() {
delete _weights;
delete _biases;
}
bool WeightLayer::postInit() {
if (Layer::postInit()) {
_weightUpdatePassPeriod = max(_weightUpdatePassPeriod, getActivePassPeriod());
assert(_weightUpdatePassPeriod % getActivePassPeriod() == 0);
return true;
}
return false;
}
void WeightLayer::fpropCommon(PASS_TYPE passType) {
}
void WeightLayer::bpropCommon(NVMatrix& v, int replicaIdx, PASS_TYPE passType) {
if (_biases->getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropBiases(v, passType);
} else {
_biases->getGrad().resize(_biases->getW());
_biases->getGrad().scale(getBIncScale());
}
_biases->incNumUpdates();
}
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
if (v.getNumElements() > 0) {
bpropWeights(v, replicaIdx, i, passType);
} else {
_weights->at(i).getGrad().resize(_weights->at(i).getW());
                // This will cause it to forget momentum when shown 0 training cases
                // and _useGrad = false, but that's not too important.
_weights->at(i).getGrad().scale(getIncScale(i, passType));
}
// Increment its number of updates
_weights->at(i).incNumUpdates();
}
}
}
bool WeightLayer::updateWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0)
{
_weights->update(getConvNet().getTrainingProgress());
_biases->update(getConvNet().getTrainingProgress());
// constrainWeights();
return true;
}
return false;
}
bool WeightLayer::constrainWeights() {
if (getConvNet().getTotalPassesDone() % _weightUpdatePassPeriod == 0) {
_constrainWeights();
return true;
}
return false;
}
void WeightLayer::_constrainWeights() {
}
void WeightLayer::copyToCPU() {
_weights->copyToCPU();
_biases->copyToCPU();
}
void WeightLayer::copyToGPU() {
_weights->copyToGPU();
_biases->copyToGPU();
}
void WeightLayer::checkGradient() {
for (int i = 0; i < _weights->getSize(); i++) {
getConvNet().checkGradient(_name + " weights[" + tostr(i) + "]", _wStep, _weights->at(i));
}
getConvNet().checkGradient(_name + " biases", _bStep, *_biases);
}
void WeightLayer::addReplica(Layer& l) {
Layer::addReplica(l);
_weights->addReplica(*static_cast<WeightLayer*>(&l)->_weights);
_biases->addReplica(*static_cast<WeightLayer*>(&l)->_biases);
}
Weights& WeightLayer::getWeights(int idx) {
return _weights->at(idx);
}
float WeightLayer::getGradScale(int inpIdx, PASS_TYPE passType) {
    // The weight update period must be a multiple of the activation period.
    // TODO: simply accumulate # of cases seen between weight updates. Simpler and more accurate.
double numCases = _weightUpdatePassPeriod * (getConvNet().getMinibatchSize() / double(getConvNet().getNumPasses()));
if (_weights->at(inpIdx).isUseGrad()) {
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
return passType == PASS_GC ? 1.0f : _weights->at(inpIdx).getEps(getConvNet().getTrainingProgress()) / numCases;
}
float WeightLayer::getIncScale(int inpIdx, PASS_TYPE passType) {
if (_weights->at(inpIdx).isUseGrad()) {
return _weights->at(inpIdx).getNumUpdates() > 0;
}
return (passType == PASS_GC ? _weights->at(inpIdx).getNumUpdates() > 0
: (_weights->at(inpIdx).getNumUpdates() == 0 ? _weights->at(inpIdx).getMom() : 1.0f));
}
NVMatrix& WeightLayer::getGradTarget(int inpIdx) {
return _weights->at(inpIdx).getGrad();
}
float WeightLayer::getBGradScale(PASS_TYPE passType) {
int numCases = _weightUpdatePassPeriod * DIVUP(getConvNet().getMinibatchSize(), getConvNet().getNumPasses());
return passType == PASS_GC ? 1.0f : 1.0f / numCases;
}
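/*
 * Illustrative example (hypothetical numbers) of the numCases normalization used in
 * getGradScale and getBGradScale: with a minibatch of 128 cases split into 4 passes
 * (microbatches of 32) and _weightUpdatePassPeriod == 4, numCases == 4 * 32 == 128. Outside of
 * gradient checking, getBGradScale then returns 1/128, and the useGrad branch of getGradScale
 * does the same (the non-useGrad branch additionally folds in the learning rate), i.e. the
 * gradient is averaged over all cases seen between weight updates.
 */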
float WeightLayer::getBIncScale() {
return _biases->getNumUpdates() > 0;
}
NVMatrix& WeightLayer::getWeightMatrix(PASS_TYPE passType, int inpIdx) {
return _weights->at(inpIdx).getW();
}
NVMatrix& WeightLayer::getBiasMatrix(PASS_TYPE passType) {
return _biases->getW();
}
/*
* =======================
* FCLayer
* =======================
*/
FCLayer::FCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, true, useGrad) {
_wStep = 0.01;
_bStep = 0.01;
}
void FCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().addProduct(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), scaleTargets, 1);
/*
if(!_buffer8bit)
{
cout << "INIT BUFFERS MODEL" << endl;
int size = getActs().getNumRows()*getActs().getNumCols();
cout << size << endl;
size_t bytes = size*sizeof(unsigned char);
cudaMalloc((void**)&_buffer8bit, bytes);
_abs_buffer = new NVMatrix(getActs().getNumRows(), getActs().getNumCols(), false);
}
if(getActs().getNumCols() != 1000)
{
cout << getActs().getNumRows() << "X" << getActs().getNumCols() << endl;
getActs().abs(*_abs_buffer);
float absMax = (*_abs_buffer).max();
getActs().compress8bit(data8bit, absMax, _buffer8bit);
getActs().decompress8bit(data8bit, absMax, _buffer8bit);
}
*/
//cout << getActs().getNumRows() << "x" << getActs().getNumCols() << endl;
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
}
void FCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().addProduct(v, weights_T, scaleTargets, 1);
delete &weights_T;
}
void FCLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 0, getBIncScale(), getBGradScale(passType));
}
void FCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
//cout << "enter!" << endl;
    //cout << "bprop: " << prevActs_T.getNumRows()<< "x" << prevActs_T.getNumCols() << endl; // leftover debug print, disabled
/*
if(prevActs_T.getNumCols() != 10)
{
if(!_buffer8bit_bw)
{
cout << "INIT BUFFERS MODEL BW" << endl;
int size = prevActs_T.getNumRows()*prevActs_T.getNumCols();
cout << size << endl;
size_t bytes = size*sizeof(unsigned char);
cudaMalloc((void**)&_buffer8bit_bw, bytes);
_abs_buffer_bw = new NVMatrix(prevActs_T.getNumRows(), prevActs_T.getNumCols(), false);
}
prevActs_T.abs(*_abs_buffer_bw);
float absMax = (*_abs_buffer_bw).max();
cout << absMax << endl;
cout << _prev[replicaIdx][inpIdx]->getActsGrad().getNumRows()<< "x" << _prev[replicaIdx][inpIdx]->getActsGrad().getNumCols() << endl;
prevActs_T.compress8bit(data8bit, absMax, _buffer8bit_bw);
prevActs_T.decompress8bit(data8bit, absMax, _buffer8bit_bw);
//_prev[replicaIdx][inpIdx]->getActsGrad().abs(*_abs_buffer_bw);
//absMax = (*_abs_buffer_bw).max();
//cout << absMax << endl;
}
*/
//cout << "post" << endl;
float scaleGrad = getGradScale(inpIdx, passType);
//cout << scaleGrad << endl;
float scaleInc = getIncScale(inpIdx, passType);
getGradTarget(inpIdx).addProduct(prevActs_T, v, scaleInc, scaleGrad);
delete &prevActs_T;
}
void FCLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2; // Unfortunate extra weight matrix...
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm2.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* SplitFCLayer
* =======================
*/
SplitFCLayer::SplitFCLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: FCLayer(convNetThread, paramsDict, replicaID, useGrad) {
_numParts = pyDictGetInt(paramsDict, "parts");
}
void SplitFCLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_inputs[inpIdx]->getNumRows(), _numOutputs, true);
NVMatrixV& splitInput = _inputs[inpIdx]->splitCols(_numParts);
NVMatrixV& splitWeights = getWeightMatrix(passType, inpIdx).splitRows(_numParts);
NVMatrixV& splitTarget = getActs().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitInput, splitWeights, splitTarget, scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType), 1, getActs());
}
deleteElements(splitInput, true);
deleteElements(splitWeights, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& weights_T = getWeightMatrix(passType, inpIdx).getTranspose();
_prev[replicaIdx][inpIdx]->getActsGrad().resize(*_inputs[inpIdx]);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitWeights_T = weights_T.splitCols(_numParts);
NVMatrixV& splitTarget = _prev[replicaIdx][inpIdx]->getActsGrad().splitCols(_numParts);
NVMatrix::batchedMatrixMultiply(splitV, splitWeights_T, splitTarget, scaleTargets, 1);
delete &weights_T;
deleteElements(splitV, true);
deleteElements(splitWeights_T, true);
deleteElements(splitTarget, true);
}
void SplitFCLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
NVMatrix& prevActs_T = _inputs[inpIdx]->getTranspose();
NVMatrixV& splitPrevActs_T = prevActs_T.splitRows(_numParts);
NVMatrixV& splitV = v.splitCols(_numParts);
NVMatrixV& splitGradTarget = getGradTarget(inpIdx).splitRows(_numParts);
NVMatrix::batchedMatrixMultiply(splitPrevActs_T, splitV, splitGradTarget, getIncScale(inpIdx, passType), getGradScale(inpIdx, passType));
delete &prevActs_T;
deleteElements(splitPrevActs_T, true);
deleteElements(splitV, true);
deleteElements(splitGradTarget, true);
}
/*
* =======================
* TwoDLayerInterface
* =======================
*/
TwoDLayerInterface::TwoDLayerInterface(PyObject* paramsDict) {
_channels = pyDictGetInt(paramsDict, "channels");
_imgSize = pyDictGetInt(paramsDict, "imgSize");
_imgPixels = _imgSize * _imgSize;
}
/*
* =======================
* LocalLayer
* =======================
*/
LocalLayer::LocalLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool useGrad)
: WeightLayer(convNetThread, paramsDict, replicaID, false, useGrad) {
_padding = pyDictGetIntV(paramsDict, "padding");
_stride = pyDictGetIntV(paramsDict, "stride");
_filterSize = pyDictGetIntV(paramsDict, "filterSize");
_channels = pyDictGetIntV(paramsDict, "channels");
_imgSize = pyDictGetIntV(paramsDict, "imgSize");
_numFilters = pyDictGetInt(paramsDict, "filters");
_groups = pyDictGetIntV(paramsDict, "groups");
_filterChannels = pyDictGetIntV(paramsDict, "filterChannels");
_filterPixels = pyDictGetIntV(paramsDict, "filterPixels");
_imgPixels = pyDictGetIntV(paramsDict, "imgPixels");
_modulesX = pyDictGetInt(paramsDict, "modulesX");
_modules = pyDictGetInt(paramsDict, "modules");
}
LocalLayer::~LocalLayer() {
delete _padding;
delete _stride;
delete _filterSize;
delete _channels;
delete _imgSize;
delete _groups;
delete _filterChannels;
delete _filterPixels;
delete _imgPixels;
}
/*
* =======================
* ConvLayer
* =======================
*/
ConvLayer::ConvLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, true) {
_sumWidth = pyDictGetInt(paramsDict, "sumWidth");
_sharedBiases = pyDictGetInt(paramsDict, "sharedBiases");
_weightContrastNormMin = pyDictGetFloatV(paramsDict, "wcNormMin");
_weightContrastNormMax = pyDictGetFloatV(paramsDict, "wcNormMax");
}
ConvLayer::~ConvLayer() {
delete _weightContrastNormMin;
delete _weightContrastNormMax;
}
void ConvLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
if (_sharedBiases) {
getActs().reshape(_numFilters, getActs().getNumElements() / _numFilters);
getActs().addVector(getBiasMatrix(passType));
getActs().reshape(_numFilters * _modules, getActs().getNumElements() / (_numFilters * _modules));
} else {
getActs().addVector(getBiasMatrix(passType));
}
}
}
void ConvLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
float scaleBGrad = getBGradScale(passType);
float scaleInc = getBIncScale();
if (_sharedBiases) {
v.reshape(_numFilters, v.getNumElements() / _numFilters);
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
v.reshape(_numFilters * _modules, v.getNumElements() / (_numFilters * _modules));
} else {
_biases->getGrad().addSum(v, 1, scaleInc, scaleBGrad);
}
}
void ConvLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
assert(_weights->at(inpIdx).isUseGrad());
bool doPartialSum = _sumWidth < _modulesX;
NVMatrix& tgt = doPartialSum ? _weightGradTmp : _weights->at(inpIdx).getGrad();
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleTargets = getIncScale(inpIdx, passType) * !doPartialSum;
convWeightActs(*_inputs[inpIdx], v, tgt, _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), _sumWidth, scaleTargets, scaleWGrad);
if (doPartialSum) {
scaleTargets = _weights->at(inpIdx).getNumUpdates() > 0;
int outWidth = DIVUP(_modulesX, _sumWidth);
_weightGradTmp.reshape(outWidth*outWidth, _filterChannels->at(inpIdx) * _filterPixels->at(inpIdx) * _numFilters);
_weights->at(inpIdx).getGrad().addSum(_weightGradTmp, 0, scaleTargets, 1);
_weights->at(inpIdx).getGrad().reshape(_filterChannels->at(inpIdx) * _filterPixels->at(inpIdx), _numFilters);
}
}
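/*
 * Illustrative example (hypothetical numbers) of the partial-sum path above: with
 * _modulesX = 55 and _sumWidth = 8, outWidth = DIVUP(55, 8) = 7, so convWeightActs writes
 * 7*7 = 49 per-region partial weight gradients into _weightGradTmp; the reshape to
 * (outWidth*outWidth) x (filterChannels*filterPixels*numFilters) followed by addSum across
 * those rows then collapses the 49 regions into the final weight gradient matrix.
 */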
void ConvLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(), _imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void ConvLayer::truncBwdActs() {
LocalLayer::truncBwdActs();
_weightGradTmp.truncate();
}
void ConvLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weightContrastNormMax->at(i) > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
float fz = _weights->at(i).getW().getNumRows();
NVMatrix tmp;
_weights->at(i).getW().sum(0, tmp);
_weights->at(i).getW().addVector(tmp, -1.0f / fz, _weights->at(i).getGrad());
// Now _weights->at(i).getGrad() contains zero-mean filters
_weights->at(i).getGrad().apply(NVMatrixOps::Square());
_weights->at(i).getGrad().sum(0, tmp);
tmp.apply(WeightContrastNormOperator(_weightContrastNormMin->at(i), _weightContrastNormMax->at(i), 1.0f / fz));
// Now tmp has the stdev
_weights->at(i).getW().eltwiseMultByVector(tmp);
}
// It's pretty silly to do both these things but whatever
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
// NVMatrix norm2;
_weights->at(i).getW().sumOfSquares(0, _norm2);
// norm.apply(MaxWeightConstraintOperator(_weights->at(i).getWBall()));
_norm2.apply(HardWeightConstraintOperator(_weights->at(i).getWBall()));
_weights->at(i).getW().eltwiseMultByVector(_norm2);
}
}
}
/*
* =======================
* LocalUnsharedLayer
* =======================
*/
LocalUnsharedLayer::LocalUnsharedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: LocalLayer(convNetThread, paramsDict, replicaID, false) {
}
void LocalUnsharedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
localFilterActs(*_inputs[inpIdx], getWeightMatrix(passType, inpIdx), getActs(), _imgSize->at(inpIdx), _modulesX, _modulesX, _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
if (scaleTargets == 0) {
getActs().addVector(getBiasMatrix(passType));
}
}
void LocalUnsharedLayer::bpropBiases(NVMatrix& v, PASS_TYPE passType) {
_biases->getGrad().addSum(v, 1, getBIncScale(), getBGradScale(passType));
}
void LocalUnsharedLayer::bpropWeights(NVMatrix& v, int replicaIdx, int inpIdx, PASS_TYPE passType) {
float scaleWGrad = getGradScale(inpIdx, passType);
float scaleInc = getIncScale(inpIdx, passType);
localWeightActs(*_inputs[inpIdx], v, getGradTarget(inpIdx), _imgSize->at(inpIdx), _modulesX, _modulesX, _filterSize->at(inpIdx), _padding->at(inpIdx),
_stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleInc, scaleWGrad);
}
void LocalUnsharedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
localImgActs(v, getWeightMatrix(passType, inpIdx), _prev[replicaIdx][inpIdx]->getActsGrad(),_imgSize->at(inpIdx), _imgSize->at(inpIdx), _modulesX,
_padding->at(inpIdx), _stride->at(inpIdx), _channels->at(inpIdx), _groups->at(inpIdx), scaleTargets, 1);
}
void LocalUnsharedLayer::_constrainWeights() {
for (int i = 0; i < _weights->getSize(); i++) {
if (_weights->at(i).getWBall() > 0 && _weights->at(i).isOwner() && _weights->at(i).getLearningRateSchedule().getBaseValue() > 0) {
normalizeLocalWeights(*_weights->at(i), _modules, _weights->at(i).getWBall());
}
}
}
/*
* =======================
* SoftmaxLayer
* =======================
*/
SoftmaxLayer::SoftmaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, true), _doUpperGrad(false) {
}
void SoftmaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& input = *_inputs[0];
input.max(1, _max);
input.addVector(_max, -1, getActs());
getActs().apply(NVMatrixOps::Exp());
getActs().sum(1, _sum);
getActs().eltwiseDivideByVector(_sum);
}
void SoftmaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
LayerV& prev = _prev[replicaIdx];
if (_doUpperGrad) {
        // TODO: rethink replica IDs or idxes... this doesn't make a huge amount of sense
for (int i = 0; i < _next.size(); ++i) {
if (_next[i]->isGradProducer(getName())) {
NVMatrix& labels = _next[i]->getPrev()[replicaIdx][0]->getActs(getDeviceID()); // Get cost's labels
float gradCoeff = dynamic_cast<CostLayer*>(_next[i])->getCoeff();
computeLogregSoftmaxGrad(labels, getActs(), prev[0]->getActsGrad(), scaleTargets == 1, gradCoeff);
break;
}
}
} else {
computeSoftmaxGrad(getActs(), v, prev[0]->getActsGrad(), scaleTargets, 1);
}
}
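/*
 * Math note for the _doUpperGrad path above: for softmax outputs p_i = exp(z_i) / sum_j exp(z_j),
 * the Jacobian is dp_i/dz_j = p_i * (delta_ij - p_j). Folding a logistic-regression cost
 * -log(p_y) through it gives d(cost)/dz_i = p_i - delta_iy, so computeLogregSoftmaxGrad can
 * produce the input gradient in one kernel without ever dividing by (possibly tiny)
 * probabilities, analogous to the logistic/cross-entropy shortcut in NeuronLayer::bpropSpecial.
 * The exact sign and scaling follow the library's gradCoeff convention.
 */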
void SoftmaxLayer::setDoUpperGrad(bool b) {
_doUpperGrad = b;
}
/*
* =======================
* ConcatenationLayer
* =======================
*/
ConcatenationLayer::ConcatenationLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
_copyOffsets = pyDictGetIntV(paramsDict, "copyOffsets");
_copyOffsets->push_back(_numOutputs);
}
ConcatenationLayer::~ConcatenationLayer() {
delete _copyOffsets;
}
void ConcatenationLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().resize(_numOutputs, _inputs[inpIdx]->getNumCols());
_inputs[inpIdx]->copy(getActs(), 0, -1, 0, -1, _copyOffsets->at(inpIdx), 0);
}
void ConcatenationLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& copySrc = v.sliceRows(_copyOffsets->at(inpIdx), _copyOffsets->at(inpIdx + 1)); // view
_prev[replicaIdx][inpIdx]->getActsGrad().add(copySrc, scaleTargets, 1);
    delete &copySrc;
}
/*
* =======================
* PassThroughLayer
* =======================
*/
PassThroughLayer::PassThroughLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false) {
}
void PassThroughLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// No-op
}
void PassThroughLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
// No-op
}
bool PassThroughLayer::postInit() {
if (Layer::postInit()) {
assert(getNumInputReplicas() == 1);
for (int i = 0, offset = 0; i < _prev[0].size(); offset += _prev[0][i]->getNumOutputs(), i++) {
MemoryView& vActs = _memSrcActs[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
MemoryView& vActsGrad = _memSrcActsGrad[getDeviceID()]->getMemorySource().addUser(_prev[0][i]->getName(), pair<int,int>(offset, offset + _prev[0][i]->getNumOutputs()));
_prev[0][i]->setMemorySourceActs(getDeviceID(), vActs);
_prev[0][i]->setMemorySourceActsGrad(getDeviceID(), vActsGrad);
}
return true;
}
return false;
}
/*
* =======================
* EltwiseSumLayer
* =======================
*/
EltwiseSumLayer::EltwiseSumLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_coeffs = pyDictGetFloatV(paramsDict, "coeffs");
}
EltwiseSumLayer::~EltwiseSumLayer() {
delete _coeffs;
}
void EltwiseSumLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
getActs().add(*_inputs[inpIdx], scaleTargets, _coeffs->at(inpIdx));
}
void EltwiseSumLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(v, scaleTargets, _coeffs->at(inpIdx));
}
/*
* =======================
* EltwiseMaxLayer
* =======================
*/
EltwiseMaxLayer::EltwiseMaxLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void EltwiseMaxLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
    if (inpIdx == 1) { // Second input: take the max with the first (nothing happens until input 1 arrives)
_inputs[inpIdx]->applyBinary(NVMatrixAggs::Max(), *_inputs[0], getActs());
} else if (inpIdx > 1) {
getActs().applyBinary(NVMatrixAggs::Max(), *_inputs[inpIdx]);
}
}
void EltwiseMaxLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
computeEltwiseMaxGrad(v, *_inputs[inpIdx], getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), scaleTargets != 0);
}
/*
* =======================
* DropoutLayer
* =======================
*
 * TODO: optimize away the case when using dropout over ReLUs. Don't need the keep mask.
*/
DropoutLayer::DropoutLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_enable = pyDictGetInt(paramsDict, "enable");
_keep = pyDictGetFloat(paramsDict, "keep");
}
void DropoutLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.apply(DropoutSmallerThanOperator(_keep));
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->copy(getActs());
}
}
void DropoutLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
prev[inpIdx]->getActsGrad().add(v, scaleTargets, 1);
}
}
void DropoutLayer::truncBwdActs() {
Layer::truncBwdActs();
_keepMask.truncate();
}
/*
* =======================
* Dropout2Layer
* =======================
*
 * TODO: optimize away the case when using dropout over ReLUs. Don't need the keep mask.
*/
Dropout2Layer::Dropout2Layer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : DropoutLayer(convNetThread, paramsDict, replicaID) {
}
void Dropout2Layer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_enable && passType == PASS_TRAIN) {
_keepMask.resize(*_inputs[inpIdx]);
_keepMask.randomizeUniform();
_keepMask.smallerThanScalar(_keep);
_inputs[inpIdx]->eltwiseMult(_keepMask, getActs());
} else {
_inputs[inpIdx]->scale(_keep, getActs());
}
}
void Dropout2Layer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
LayerV& prev = _prev[replicaIdx];
if (_enable && passType == PASS_TRAIN) {
if (scaleTargets != 0) {
v.applyTernary(AddGradientBinaryOperator<NVMatrixBinaryOps::Multiply>(NVMatrixBinaryOps::Multiply()),
_keepMask, prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.eltwiseMult(_keepMask, prev[inpIdx]->getActsGrad());
}
} else {
if (scaleTargets != 0) {
v.applyBinary(AddGradientOperator<NVMatrixOps::MultByScalar>(NVMatrixOps::MultByScalar(_keep)),
prev[inpIdx]->getActsGrad(), prev[inpIdx]->getActsGrad());
} else {
v.scale(_keep, prev[inpIdx]->getActsGrad());
}
}
}
/*
* =======================
* DataLayer
* =======================
*/
DataLayer::DataLayer(ConvNet* convNet, PyObject* paramsDict, int replicaID) : Layer(NULL, paramsDict, replicaID, false) {
_dataIdx = pyDictGetInt(paramsDict, "dataIdx");
_start = pyDictGetInt(paramsDict, "start");
_end = pyDictGetInt(paramsDict, "end");
_useBuffer = false;
_outstandingCopyRequest = false;
_convNet = convNet;
}
DataLayer::~DataLayer() {
for (map<int,cudaStream_t>::const_iterator it = _copyStreams.begin(); it != _copyStreams.end(); ++it) {
checkCudaErrors(cudaStreamDestroy(it->second));
}
for (std::map<int, MemoryView*>::iterator it = _memSrcActs2.begin(); it != _memSrcActs2.end(); ++it) {
if (it->second->getMemorySource().truncate(_name)) {
delete &it->second->getMemorySource();
}
}
_copier->stop();
delete _copier;
}
void DataLayer::fprop(PASS_TYPE passType, int passIdx, bool fromBuffer) {
waitForCopyFinish();
if (fromBuffer && getFwdActiveInputReplicaIdx(passIdx) >= 0) {
_useBuffer = !_useBuffer;
}
for (int i = 0; i < _next.size(); i++) {
_next[i]->getConvNetThread().getMessageQueue().enqueue(new FpropMessage(*_next[i], passType, passIdx));
}
}
void DataLayer::waitForCopyFinish() {
if (_outstandingCopyRequest) {
_copyFinishQueue.dequeue();
assert(_copyFinishQueue.getNumElements() == 0);
_outstandingCopyRequest = false;
}
}
cudaStream_t DataLayer::getCopyStream(int deviceID) {
if (_copyStreams.count(deviceID) == 0) {
NVMatrix::setDeviceID(deviceID);
checkCudaErrors(cudaStreamCreateWithFlags(&_copyStreams[deviceID], cudaStreamNonBlocking));
}
return _copyStreams[deviceID];
}
void DataLayer::copyData(CPUData& data, bool other, int passIdx) {
assert(!_outstandingCopyRequest);
assert(_copyFinishQueue.getNumElements() == 0);
_copier->getQueue().enqueue(new DataCopyMessage(data, other, passIdx));
_outstandingCopyRequest = true;
}
int DataLayer::getNumInputReplicas() {
return _convNet->getNumReplicasMax() / getNumReplicas();
}
void DataLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
}
NVMatrix& DataLayer::getActs(int deviceID) {
return getActs(deviceID, false, -1);
}
NVMatrix& DataLayer::getActs(int deviceID, bool other, int numCases) {
// printf("%s[%d] getActs(%d, %d, %d)\n", _name.c_str(), getReplicaID(), deviceID, other, numCases);
assert(_memSrcActs.count(deviceID) > 0);
assert(_memSrcActs2.count(deviceID) > 0);
return (_useBuffer != other ? _memSrcActs2[deviceID]->getMemory(numCases) : _memSrcActs[deviceID]->getMemory(numCases));
}
ConvNet& DataLayer::getConvNet() {
return *_convNet;
}
bool DataLayer::postInit() {
if (Layer::postInit()) {
for (int i = 0; i < _next.size(); ++i) {
int d = _next[i]->getDeviceID();
if (_memSrcActs2.count(d) == 0) {
_memSrcActs2[d] = &MemorySource::make(_numOutputs, d, getName());
}
}
intv cpus = getDeviceCPUs(_next[0]->getDeviceID());
_copier = new DataCopyThread(*this, cpus);
_copier->start();
return true;
}
return false;
}
bool DataLayer::isGradProducer() {
return false;
}
/*
* =======================
* DataCopyThread
* =======================
*/
DataCopyThread::DataCopyThread(DataLayer& parent, intv& cpus) : _parent(&parent), _sleepUsec(0), Thread(true, cpus) {
}
Queue<DataCopyMessage*>& DataCopyThread::getQueue() {
return _queue;
}
void DataCopyThread::stop() {
getQueue().enqueue(new DataCopyExitMessage());
join();
}
void* DataCopyThread::run() {
NVMatrix::setDeviceID(*_parent->getNextDeviceIDs().begin());
bool exit = false;
while(!exit) {
DataCopyMessage& msg = *_queue.dequeue();
exit = msg.getType() == DataCopyMessage::EXIT;
if (!exit) {
CPUData& data = msg.getData();
int passIdx = msg.getPassIdx();
bool other = msg.isOther();
Matrix& dataMatrix = data.getData(_parent->getDataIdx());
// How many times is this layer going to process microbatches from this minibatch?
assert(_parent->getNumReplicasNext() == _parent->getNumReplicas());
int microIdx = _parent->getFwdActiveInputReplicaIdx(passIdx);
if (microIdx >= 0) {
if (_requestTimer.isStarted()) {
double requestIntervalMsec = _requestTimer.stop();
// Sleep for up to 1/20th the average request interval
_sleepUsec = int(round(0.95 * _sleepUsec + 0.05 * (_parent->getReplicaID() / double(_parent->getNumReplicas())) * requestIntervalMsec * 1000.0 / 20.0));
}
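                    /*
                     * Illustrative example (hypothetical numbers): the update above is an
                     * exponential moving average whose steady state is
                     * (replicaID / numReplicas) * requestIntervalMsec * 1000 / 20 microseconds.
                     * With requests arriving every ~100 ms, 8 replicas and replicaID 4, that is
                     * about 0.5 * 100 * 1000 / 20 = 2500 usec, so replicas stagger their copies
                     * instead of all hitting the bus at once.
                     */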
_requestTimer.start();
if (other) {
// Sleeping a bit is helpful because in typical nets, copying input data
// as soon as it's available will produce contention with other communications
// that are happening at the time. This is very much a hack, so in the future
// it might be good to replace it with something smarter which schedules access
// to communication links.
usleep(_sleepUsec);
}
microIdx += _parent->getReplicaID() * _parent->getNumInputReplicas();
                    // Safer to DIVUP because this way you won't get a microbatch size of 0
int microbatchSize = DIVUP(data.getNumCases(), _parent->getConvNet().getNumReplicasMax());
int microStart = microIdx * microbatchSize;
int microEnd = min(data.getNumCases(), (microIdx + 1) * microbatchSize);
// Check that this replica has some data. This can be false when, for example,
// there are only 7 examples in the minibatch but 8 replicas.
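                    /*
                     * Working that example through the arithmetic above (illustrative): with
                     * 7 cases and 8 replicas, microbatchSize == DIVUP(7, 8) == 1, so the replica
                     * with microIdx 7 gets microStart == 7 and microEnd == min(7, 8) == 7,
                     * an empty slice that the check below skips.
                     */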
if (microStart < microEnd) {
assert(dataMatrix.isView() == dataMatrix.isTrans());
int pipe = _parent->getConvNet().getDataCopyPD().getPipe(_parent->getReplicaID()/2);
if (dataMatrix.isTrans()) {
Matrix& replicaDataMatrix = dataMatrix.sliceCols(microStart, microEnd);
// In this case, dataMatrix is a view on memory allocated by Python.
//_hostMemFwd.copyFromHost(replicaDataMatrix, true);
_hostMemFwd.resize(replicaDataMatrix.getNumRows(), replicaDataMatrix.getNumCols(), true);
memcpy(_hostMemFwd.getDevData(), replicaDataMatrix.getData(), replicaDataMatrix.getNumDataBytes());
delete &replicaDataMatrix; // view
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
// Note to self: this is the path that gets executed in practice
// in my models. It does a transpose & copy simultaneously.
hostMemFwdSlice.flipTrans(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
} else {
// Hacky way to copy a slice to _hostMemFwd
_hostMemFwd.resize(dataMatrix.getNumRows(), microEnd - microStart);
Matrix tmp(_hostMemFwd.getDevData(), _hostMemFwd.getNumRows(), _hostMemFwd.getNumCols(), _hostMemFwd.isTrans());
dataMatrix.sliceCols(microStart, microEnd, tmp);
NVMatrix& hostMemFwdSlice = _hostMemFwd.sliceRows(_parent->getStart(), _parent->getEnd());
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
// Copy my output to this guy's GPU
NVMatrix::setDeviceID(deviceID);
hostMemFwdSlice.copy(_parent->getActs(deviceID, other, microEnd - microStart), _parent->getCopyStream(deviceID));
}
delete &hostMemFwdSlice;
}
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
NVMatrix::setDeviceID(deviceID);
NVMatrix::syncStream(_parent->getCopyStream(deviceID));
}
_parent->getConvNet().getDataCopyPD().freePipe(pipe);
} else {
for (intv::iterator it = _parent->getNextDeviceIDs().begin(); it != _parent->getNextDeviceIDs().end(); ++it) {
int deviceID = *it;
_parent->getActs(deviceID, other, 0);
}
}
}
_parent->getCopyFinishQueue().enqueue(1);
}
delete &msg;
}
return NULL;
}
/*
* =====================
* PoolLayer
* =====================
*/
PoolLayer::PoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_sizeX = pyDictGetInt(paramsDict, "sizeX");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
_pool = pyDictGetString(paramsDict, "pool");
}
PoolLayer& PoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, false);
} else if(_pool == "maxabs") {
return *new MaxPoolLayer(convNetThread, paramsDict, replicaID, true);
} else if(_pool == "avg") {
return *new AvgPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* AvgPoolLayer
* =====================
*/
AvgPoolLayer::AvgPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : PoolLayer(convNetThread, paramsDict, replicaID, false) {
_sum = pyDictGetInt(paramsDict, "sum");
}
void AvgPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_sum) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<true>());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, AvgPooler<false>());
}
}
void AvgPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convLocalAvgUndo(v, _prev[replicaIdx][0]->getActsGrad(), _sizeX, _start, _stride, _outputsX, _imgSize, _sum, scaleTargets, 1);
}
/*
* =====================
* MaxPoolLayer
* =====================
*/
MaxPoolLayer::MaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool abs) : PoolLayer(convNetThread, paramsDict, replicaID, false), _abs(abs) {
}
void MaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (_abs) {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxAbsPooler());
} else {
convLocalPool(*_inputs[0], getActs(), _channels, _sizeX, _start, _stride, _outputsX, MaxPooler());
}
}
void MaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convLocalMaxUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _sizeX, _start, _stride, _outputsX, scaleTargets, 1);
}
/*
* =====================
* CrossMapPoolLayer
* =====================
*/
CrossMapPoolLayer::CrossMapPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputs = pyDictGetInt(paramsDict, "outputChannels");
_pool = pyDictGetString(paramsDict, "pool");
}
CrossMapPoolLayer& CrossMapPoolLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) {
std::string _pool = pyDictGetString(paramsDict, "pool");
if (_pool == "max") {
return *new CrossMapMaxPoolLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown pooling layer type ") + _pool;
}
/*
* =====================
* CrossMapMaxPoolLayer
* =====================
*/
CrossMapMaxPoolLayer::CrossMapMaxPoolLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CrossMapPoolLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossMapMaxPoolLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convPoolCrossMap(*_inputs[0], getActs(), _start, _size, _outputs, _stride, _imgSize, MaxPooler());
}
void CrossMapMaxPoolLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 0);
convCrossMapMaxPoolUndo(*_inputs[0], v, getActs(), _prev[replicaIdx][0]->getActsGrad(), _imgSize, _start, _size, _stride, scaleTargets, 1);
}
/*
* =====================
* RandomScaleLayer
* =====================
*/
RandomScaleLayer::RandomScaleLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_maxScale = pyDictGetFloat(paramsDict, "maxScale");
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
// The smallest size the image could be after rescaling
_minScaledSize = _imgSize / _maxScale;
// The number of discrete scales we're considering
int numScales = _imgSize - _minScaledSize + 1;
// The total number of squares of size _tgtSize that we can extract
// from all these scales
double numCrops = numScales * (numScales + 1) * (2 * numScales + 1) / 6;
// For each scale, record the fraction of the squares that it has.
// This will be the probability of sampling this scale.
_scaleProbs.push_back(1.0 / numCrops);
for (int s = 1; s < numScales; ++s) {
_scaleProbs.push_back(_scaleProbs[s-1] + (s + 1) * (s + 1) / numCrops);
}
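/*
 * Illustrative example (hypothetical numbers) of the crop counting above: in fpropActs below,
 * discrete scale s produces a rescaled image of size _tgtSize + s, which admits (s + 1)^2
 * distinct _tgtSize x _tgtSize crops (maxStart == s). Summing over all scales gives
 * numCrops = 1^2 + 2^2 + ... + numScales^2 = numScales*(numScales+1)*(2*numScales+1)/6, and
 * _scaleProbs holds the cumulative fraction (s+1)^2 / numCrops per scale. For example, with
 * _imgSize = 64 and _maxScale = 2: _minScaledSize = 32, numScales = 33 and
 * numCrops = 33*34*67/6 = 12529.
 */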
}
void RandomScaleLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
if (IS_TRAIN(passType)) {
// _maxScale is in the range [1, 2)
float r = randf;
int rescaledSize = _tgtSize;
float scaleFactor = _maxScale;
// Find which scale we have sampled
for (int s = 0; s < _scaleProbs.size(); ++s) {
if (r <= _scaleProbs[s]) {
rescaledSize += s;
float scaleFactorEnd = _imgSize / float(rescaledSize);
float scaleFactorStart = max(1.0, _imgSize / (1.0 + rescaledSize));
scaleFactor = scaleFactorStart + randf * (scaleFactorEnd - scaleFactorStart);
break;
}
}
assert(rescaledSize >= _tgtSize);
int maxStart = rescaledSize - _tgtSize;
int startY = rand() % (1 + maxStart), startX = rand() % (1 + maxStart);
if (rescaledSize == _imgSize) {
convCrop(*_inputs[0], getActs(), rescaledSize, _tgtSize, startY, startX);
} else {
convResizeBilinear(*_inputs[0], _rescaledActs, _imgSize, rescaledSize, scaleFactor);
convCrop(_rescaledActs, getActs(), rescaledSize, _tgtSize, startY, startX);
}
_rescaledActs.truncate(); // this'll have a different size each time so may as well truncate it.
} else if (IS_MULTIVIEW_TEST(passType)) { // for now...
_inputs[0]->copy(getActs());
} else if (IS_TEST(passType)) { // Test on center patch
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _maxScale);
}
}
void RandomScaleLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* CropLayer
* =====================
*/
CropLayer::CropLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_startX = pyDictGetInt(paramsDict, "startX");
_startY = pyDictGetInt(paramsDict, "startY");
_tgtSize = pyDictGetInt(paramsDict, "sizeX");
}
void CropLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convCrop(*_inputs[0], getActs(), _imgSize, _tgtSize, _startY, _startX);
}
void CropLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* NailbedLayer
* =====================
*/
NailbedLayer::NailbedLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_start = pyDictGetInt(paramsDict, "start");
_stride = pyDictGetInt(paramsDict, "stride");
_outputsX = pyDictGetInt(paramsDict, "outputsX");
}
void NailbedLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convBedOfNails(*_inputs[0], getActs(), _channels, _imgSize, _start, _stride, 0, 1);
}
void NailbedLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convBedOfNailsUndo(v, _prev[replicaIdx][0]->getActsGrad(), _channels, _imgSize, _start, _stride, scaleTargets, 1);
}
/*
* =====================
* GaussianBlurLayer
* =====================
*/
GaussianBlurLayer::GaussianBlurLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_hFilter = pyDictGetMatrix(paramsDict, "filter");
}
GaussianBlurLayer::~GaussianBlurLayer() {
delete _hFilter;
}
void GaussianBlurLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convGaussianBlur(*_inputs[0], _filter, getActs(), true, _channels, 0, 1);
convGaussianBlur(getActs(), _filter, getActs(), false, _channels, 0, 1);
}
void GaussianBlurLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
NVMatrix& tgt = _prev[replicaIdx][0]->getNumComputedActsGrads(getDeviceID()) > 0 ? _actGradsTmp : _prev[replicaIdx][0]->getActsGrad();
convGaussianBlur(v, _filter, tgt, true, _channels, 0, 1);
convGaussianBlur(tgt, _filter, _prev[replicaIdx][0]->getActsGrad(), false, _channels, scaleTargets, 1);
}
void GaussianBlurLayer::copyToGPU() {
_filter.copyFromHost(*_hFilter, true);
}
/*
* =====================
* HorizontalReflectionLayer
* =====================
*/
HorizontalReflectionLayer::HorizontalReflectionLayer(ConvNetThread* convNet, PyObject* paramsDict, int replicaID) : Layer(convNet, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
assert(_channels >= 1 && _channels <= 3);
}
void HorizontalReflectionLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convReflectHorizontal(*_inputs[0], getActs(), _imgSize);
}
void HorizontalReflectionLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convReflectHorizontal(v, _prev[replicaIdx][0]->getActsGrad(), _imgSize);
}
/*
* =====================
* ResizeLayer
* =====================
*/
ResizeLayer::ResizeLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_tgtSize = pyDictGetInt(paramsDict, "tgtSize");
_scale = pyDictGetFloat(paramsDict, "scale");
}
void ResizeLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResizeBilinear(*_inputs[0], getActs(), _imgSize, _tgtSize, _scale);
}
// Can't do this
void ResizeLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToYUVLayer
* =====================
*/
RGBToYUVLayer::RGBToYUVLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
}
void RGBToYUVLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToYUV(*_inputs[0], getActs());
}
// Can't do this
void RGBToYUVLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* RGBToLABLayer
* =====================
*/
RGBToLABLayer::RGBToLABLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : Layer(convNetThread, paramsDict, replicaID, false) {
_center = pyDictGetInt(paramsDict, "center");
}
void RGBToLABLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convRGBToLAB(*_inputs[0], getActs(), _center);
}
// Can't do this
void RGBToLABLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(false);
}
/*
* =====================
* ResponseNormLayer
* =====================
*/
ResponseNormLayer::ResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: Layer(convNetThread, paramsDict, replicaID, false), TwoDLayerInterface(paramsDict) {
_size = pyDictGetInt(paramsDict, "size");
_scale = pyDictGetFloat(paramsDict, "scale");
_pow = pyDictGetFloat(paramsDict, "pow");
_minDiv = pyDictGetFloat(paramsDict, "minDiv");
}
void ResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
convResponseNorm(*_inputs[0], _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ResponseNormLayer::truncBwdActs() {
Layer::truncBwdActs();
_denoms.truncate();
}
/*
* =====================
* CrossMapResponseNormLayer
* =====================
*/
CrossMapResponseNormLayer::CrossMapResponseNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: ResponseNormLayer(convNetThread, paramsDict, replicaID) {
_blocked = pyDictGetInt(paramsDict, "blocked");
}
void CrossMapResponseNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
assert(inpIdx == 0);
convResponseNormCrossMap(*_inputs[0], getActs(), _channels, _size, _scale, _pow, _minDiv, _blocked);
}
void CrossMapResponseNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convResponseNormCrossMapUndo(v, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, _minDiv, _blocked, scaleTargets, 1);
}
/*
* =====================
* ContrastNormLayer
* =====================
*/
ContrastNormLayer::ContrastNormLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : ResponseNormLayer(convNetThread, paramsDict, replicaID) {
}
void ContrastNormLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
NVMatrix& images = *_inputs[0];
convLocalPool(images, _meanDiffs, _channels, _size, -_size/2, 1, _imgSize, AvgPooler<false>());
_meanDiffs.add(images, -1, 1);
convContrastNorm(images, _meanDiffs, _denoms, getActs(), _channels, _size, _scale, _pow, _minDiv);
}
void ContrastNormLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1);
}
void ContrastNormLayer::truncBwdActs() {
ResponseNormLayer::truncBwdActs();
_meanDiffs.truncate();
}
/*
* =====================
* CostLayer
* =====================
*/
CostLayer::CostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID, bool trans)
: Layer(convNetThread, paramsDict, replicaID, trans) {
_coeff = pyDictGetFloat(paramsDict, "coeff");
_numCases = 0;
_aggregated = pyDictGetInt(paramsDict, "aggregated") != 0;
}
float CostLayer::getCoeff() {
return _coeff;
}
void CostLayer::bprop(NVMatrix& v, PASS_TYPE passType, int passIdx) {
if (_coeff != 0) {
Layer::bprop(v, passType, passIdx);
}
}
bool CostLayer::fprop(PASS_TYPE passType, int passIdx) {
if (Layer::fprop(passType, passIdx)) {
syncStream();
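// A cost layer is a terminal node of the forward pass: after syncing the stream,
// notify the ConvNet's message queue that fprop has reached a terminus.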
getConvNet().getMessageQueue().enqueue(new Message(FPROP_TERMINAL));
return true;
}
return false;
}
void CostLayer::fpropCommon(PASS_TYPE passType) {
_numCases = Layer::getNumCases(*_inputs[0]);
}
int CostLayer::getNumCases() {
return _numCases;
}
bool CostLayer::isGradProducer() {
return _coeff != 0;
}
doublev& CostLayer::getCost() {
return *new doublev(_costv);
}
// This is called between microbatches
void CostLayer::resetPassIdx() {
Layer::resetPassIdx();
_costv.clear();
}
CostLayer& CostLayer::make(ConvNetThread* convNetThread, PyObject* paramsDict, std::string& type, int replicaID) {
if (type == "cost.crossent") {
return *new CrossEntCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.bce") {
return *new BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.dce") {
return *new DetectionCrossEntropyCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.logreg") {
return *new LogregCostLayer(convNetThread, paramsDict, replicaID);
} else if (type == "cost.sum2") {
return *new SumOfSquaresCostLayer(convNetThread, paramsDict, replicaID);
}
throw std::string("Unknown cost layer type ") + type;
}
/*
* =====================
* CrossEntCostLayer
* =====================
*/
CrossEntCostLayer::CrossEntCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void CrossEntCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
computeCrossEntCost(labels, probs, _trueLabelLogProbs, _correctProbs);
_costv.clear();
_costv.push_back(-_trueLabelLogProbs.sum());
_costv.push_back(numCases - _correctProbs.sum());
}
}
void CrossEntCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax" || prev[1]->getDeviceID() != getDeviceID();
if (doWork) {
computeCrossEntGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
/*
* =====================
* BinomialCrossEntropyCostLayer
* =====================
*/
BinomialCrossEntropyCostLayer::BinomialCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_computeSoftmaxErrorRate = pyDictGetInt(paramsDict, "computeSoftmaxErrorRate");
_posWeight = pyDictGetFloat(paramsDict, "posWeight");
}
void BinomialCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
labels.applyBinary(BinomialCrossEntOperator(_posWeight), probs, _tmpProbs);
_costv.clear();
// Cross-entropy cost
_costv.push_back(-_tmpProbs.sum(_tmpbuf));// / labels.getFollowingDim());
// If aggregated, we don't produce these outputs because they're not additive.
// They have no meaning if this is just a partial cost.
if (!_aggregated) {
// "Correct" classifications. To compute these we threshold probs
// and just count the number of entries that agree with labels.
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.equals(labels);
_costv.push_back((_tmpProbs.getNumElements() - _tmpProbs.sum(_tmpbuf)) / double(labels.getFollowingDim()));
if (_computeSoftmaxErrorRate) {
// Also compute top-1 error as if this is softmax and there's only one correct class
probs.max(0, _tmpVec);
assert(_tmpVec.getNumElements() == numCases); // Make sure we did max on correct axis
probs.equalsVector(_tmpVec, _correctProbs);
_correctProbs.sum(0, _tmpVec); // Divide by the # of labels that we predict as being present
float m = _tmpVec.max();
_correctProbs.eltwiseDivideByVector(_tmpVec);
_correctProbs.eltwiseMult(labels);
_costv.push_back(numCases - _correctProbs.sum(_tmpbuf));
}
}
}
}
void BinomialCrossEntropyCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
assert(inpIdx == 1);
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a logistic neuron layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1
|| prev[1]->getType() != "neuron"
|| static_cast<NeuronLayer*>(prev[1])->getNeuronType() != "logistic"
|| prev[1]->getDeviceID() != getDeviceID()
|| prev[1]->getNumReplicas() != getNumReplicas();
if (doWork) {
printf("Computing cross-entropy gradient the stupid way\n");
if (scaleTargets == 0) {
labels.applyBinary(BinomialCrossEntGradientOperator(_coeff, _posWeight), probs, target);
} else {
labels.applyTernary(AddGradientBinaryOperator<BinomialCrossEntGradientOperator>(BinomialCrossEntGradientOperator(_coeff, _posWeight)), probs, target, target);
}
}
}
float BinomialCrossEntropyCostLayer::getPosWeight() {
return _posWeight;
}
/*
* =====================
* DetectionCrossEntropyCostLayer
* =====================
*/
DetectionCrossEntropyCostLayer::DetectionCrossEntropyCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID)
: BinomialCrossEntropyCostLayer(convNetThread, paramsDict, replicaID) {
assert(!_aggregated);
}
void DetectionCrossEntropyCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
BinomialCrossEntropyCostLayer::fpropActs(inpIdx, scaleTargets, passType, passIdx);
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
int numCases = labels.getLeadingDim();
/*
* Add information sufficient to compute precision and recall for each class.
*/
// NOTE: _tmpProbs contains ((probs > 0.5) == labels)
labels.sum(1, _numPositive); // sum(labels, 1)
_tmpProbs.eltwiseMult(labels); // labels * ((probs > 0.5) == labels)
_tmpProbs.sum(1, _numTruePositive);
probs.biggerThanScalar(0.5, _tmpProbs);
_tmpProbs.sum(1, _numDeclaredPositive);
_numDeclaredPositive.copyToHost(_hNumDeclaredPositive, true);
_numPositive.copyToHost(_hNumPositive, true);
_numTruePositive.copyToHost(_hNumTruePositive, true);
for (int i = 0; i < labels.getFollowingDim(); ++i) {
_costv.push_back(_hNumDeclaredPositive(i, 0)); // 2
_costv.push_back(_hNumPositive(i, 0)); // 3
_costv.push_back(_hNumTruePositive(i, 0)); // 4
}
}
}
/*
* =====================
* LogregCostLayer
* =====================
*/
LogregCostLayer::LogregCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
_topk = pyDictGetInt(paramsDict, "topk");
// _numAccumed = 0;
}
void LogregCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
// This layer uses its two inputs together
if (inpIdx == 0) {
NVMatrix& labels = *_inputs[0];
NVMatrix* probs = _inputs[1];
_doCompute = !IS_MULTIVIEW_TEST(passType);
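// Multi-view testing: accumulate the per-view class probabilities and only score
// the averaged distribution once the final view of the pass has been seen.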
if (!_doCompute) {
if (IS_MULTIVIEW_TEST_START(passType)) {
if (_probsAccum.count(passIdx) == 0) {
_probsAccum[passIdx] = new NVMatrix(*probs);
}
probs->copy(*_probsAccum[passIdx]);
_numAccumed[passIdx] = 1;
} else {
_probsAccum[passIdx]->add(*probs);
_numAccumed[passIdx] += 1;
}
if (IS_MULTIVIEW_TEST_END(passType)) {
probs = _probsAccum[passIdx];
probs->scale(1.0 / _numAccumed[passIdx]);
_doCompute = true;
}
}
if (_doCompute) {
int numCases = labels.getNumElements();
probs->max(0,_maxProbs);
if (_topk == 1) {
computeLogregCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs);
} else {
computeMultiSoftmaxCost(labels, *probs, _maxProbs, _trueLabelLogProbs, _correctProbs, _topkProbs, _topk);
}
_costv.clear();
double top1 = _correctProbs.sum(_tmpbuf);
_costv.push_back(-_trueLabelLogProbs.sum(_tmpbuf));
_costv.push_back(numCases - top1);
_costv.push_back(numCases - (_topk == 1 ? top1 : _topkProbs.sum(_tmpbuf)));
}
}
}
NVMatrix& LogregCostLayer::getProbsAccum(int replicaIdx) {
return *_probsAccum[replicaIdx];
}
void LogregCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
if (inpIdx == 1) {
LayerV& prev = _prev[replicaIdx];
NVMatrix& labels = *_inputs[0];
NVMatrix& probs = *_inputs[1];
NVMatrix& target = prev[1]->getActsGrad();
// Numerical stability optimization: if the layer below me is a softmax layer, let it handle
// the entire gradient computation to avoid multiplying and dividing by a near-zero quantity.
bool doWork = prev[1]->getNext().size() > 1 || prev[1]->getType() != "softmax"
|| prev[1]->getDeviceID() != getDeviceID() || prev[1]->getNumReplicas() != getNumReplicas();
if (prev[1]->getType() == "softmax") {
static_cast<SoftmaxLayer*>(prev[1])->setDoUpperGrad(!doWork);
}
if (doWork) {
computeLogregGrad(labels, probs, target, scaleTargets == 1, _coeff);
}
}
}
/*
* =====================
* SumOfSquaresCostLayer
* =====================
*/
SumOfSquaresCostLayer::SumOfSquaresCostLayer(ConvNetThread* convNetThread, PyObject* paramsDict, int replicaID) : CostLayer(convNetThread, paramsDict, replicaID, false) {
}
void SumOfSquaresCostLayer::fpropActs(int inpIdx, float scaleTargets, PASS_TYPE passType, int passIdx) {
_inputs[0]->apply(NVMatrixOps::Square(), _tmp);
_costv.clear();
_costv.push_back(_tmp.sum());
}
void SumOfSquaresCostLayer::bpropActs(NVMatrix& v, int replicaIdx, int inpIdx, float scaleTargets, PASS_TYPE passType) {
_prev[replicaIdx][inpIdx]->getActsGrad().add(*_inputs[0], scaleTargets, -2 * _coeff);
}
|
1ac3b245d3e36126687adff7817cc1dcc023e08c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "srad.h"
#include <stdio.h>
__global__ void
srad_cuda_1(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float q0sqr
) {
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
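// index : this thread's element of the row-major image J_cuda
// index_n/s/w/e: the halo elements just above/below/left/right of this BLOCK_SIZE x BLOCK_SIZE tile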
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float n, w, e, s, jc, g2, l, num, den, qsqr, c;
//shared memory allocation
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float north[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float south[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float west[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
//Clamp the halo loads at the image borders: for the first/last block row (column) the
//unguarded index_n/index_s (index_w/index_e) would read outside J_cuda, so select the
//boundary element directly instead of loading out of bounds and overwriting it afterwards.
north[ty][tx] = ( by == 0 ) ? J_cuda[BLOCK_SIZE * bx + tx] : J_cuda[index_n];
south[ty][tx] = ( by == gridDim.y - 1 ) ? J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx] : J_cuda[index_s];
__syncthreads();
west[ty][tx] = ( bx == 0 ) ? J_cuda[cols * BLOCK_SIZE * by + cols * ty] : J_cuda[index_w];
east[ty][tx] = ( bx == gridDim.x - 1 ) ? J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1] : J_cuda[index_e];
__syncthreads();
temp[ty][tx] = J_cuda[index];
__syncthreads();
jc = temp[ty][tx];
if ( ty == 0 && tx == 0 ){ //nw
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 ){ //n
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == BLOCK_SIZE -1 ){ //e
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1){ //s
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == 0 ){ //w
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else{ //the data elements which are not on the borders
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc);
l = ( n + s + w + e ) / jc;
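// Squared instantaneous coefficient of variation (SRAD): qsqr = [g2/2 - (l/4)^2] / (1 + l/4)^2,
// where g2 ~ |grad J|^2 / J^2 and l ~ laplacian(J) / J.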
num = (0.5*g2) - ((1.0/16.0)*(l*l)) ;
den = 1 + (.25*l);
qsqr = num/(den*den);
// diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c = 1.0 / (1.0+den) ;
// saturate the diffusion coefficient to the range [0, 1]
if (c < 0){temp_result[ty][tx] = 0;}
else if (c > 1) {temp_result[ty][tx] = 1;}
else {temp_result[ty][tx] = c;}
__syncthreads();
C_cuda[index] = temp_result[ty][tx];
E_C[index] = e;
W_C[index] = w;
S_C[index] = s;
N_C[index] = n;
}
__global__ void
srad_cuda_2(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float lambda,
float q0sqr
)
{
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float cc, cn, cs, ce, cw, d_sum;
//shared memory allocation
__shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
temp[ty][tx] = J_cuda[index];
__syncthreads();
//Clamp the halo loads at the lower/right image border so index_s / index_e never read outside C_cuda.
south_c[ty][tx] = ( by == gridDim.y - 1 ) ? C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx] : C_cuda[index_s];
__syncthreads();
east_c[ty][tx] = ( bx == gridDim.x - 1 ) ? C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1] : C_cuda[index_e];
__syncthreads();
c_cuda_temp[ty][tx] = C_cuda[index];
__syncthreads();
cc = c_cuda_temp[ty][tx];
if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( tx == BLOCK_SIZE -1 ){ //e
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( ty == BLOCK_SIZE -1){ //s
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
else{ //the data elements which are not on the borders
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
// divergence (equ 58)
d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index];
// image update (equ 61)
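// (explicit Euler step: J_new = J + (lambda/4) * d_sum, with d_sum approximating div(c * grad J))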
c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum;
__syncthreads();
J_cuda[index] = c_cuda_result[ty][tx];
}
| 1ac3b245d3e36126687adff7817cc1dcc023e08c.cu | #include "srad.h"
#include <stdio.h>
__global__ void
srad_cuda_1(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float q0sqr
) {
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
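// index : this thread's element of the row-major image J_cuda
// index_n/s/w/e: the halo elements just above/below/left/right of this BLOCK_SIZE x BLOCK_SIZE tile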
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_n = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + tx - cols;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_w = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty - 1;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float n, w, e, s, jc, g2, l, num, den, qsqr, c;
//shared memory allocation
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float north[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float south[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float west[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
//Clamp the halo loads at the image borders: for the first/last block row (column) the
//unguarded index_n/index_s (index_w/index_e) would read outside J_cuda, so select the
//boundary element directly instead of loading out of bounds and overwriting it afterwards.
north[ty][tx] = ( by == 0 ) ? J_cuda[BLOCK_SIZE * bx + tx] : J_cuda[index_n];
south[ty][tx] = ( by == gridDim.y - 1 ) ? J_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx] : J_cuda[index_s];
__syncthreads();
west[ty][tx] = ( bx == 0 ) ? J_cuda[cols * BLOCK_SIZE * by + cols * ty] : J_cuda[index_w];
east[ty][tx] = ( bx == gridDim.x - 1 ) ? J_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1] : J_cuda[index_e];
__syncthreads();
temp[ty][tx] = J_cuda[index];
__syncthreads();
jc = temp[ty][tx];
if ( ty == 0 && tx == 0 ){ //nw
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 && tx == BLOCK_SIZE-1 ){ //ne
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1 && tx == 0 ){//sw
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( ty == 0 ){ //n
n = north[ty][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == BLOCK_SIZE -1 ){ //e
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = east[ty][tx] - jc;
}
else if ( ty == BLOCK_SIZE -1){ //s
n = temp[ty-1][tx] - jc;
s = south[ty][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
else if ( tx == 0 ){ //w
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = west[ty][tx] - jc;
e = temp[ty][tx+1] - jc;
}
else{ //the data elements which are not on the borders
n = temp[ty-1][tx] - jc;
s = temp[ty+1][tx] - jc;
w = temp[ty][tx-1] - jc;
e = temp[ty][tx+1] - jc;
}
g2 = ( n * n + s * s + w * w + e * e ) / (jc * jc);
l = ( n + s + w + e ) / jc;
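// Squared instantaneous coefficient of variation (SRAD): qsqr = [g2/2 - (l/4)^2] / (1 + l/4)^2,
// where g2 ~ |grad J|^2 / J^2 and l ~ laplacian(J) / J.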
num = (0.5*g2) - ((1.0/16.0)*(l*l)) ;
den = 1 + (.25*l);
qsqr = num/(den*den);
// diffusion coefficient (equ 33)
den = (qsqr-q0sqr) / (q0sqr * (1+q0sqr)) ;
c = 1.0 / (1.0+den) ;
// saturate the diffusion coefficient to the range [0, 1]
if (c < 0){temp_result[ty][tx] = 0;}
else if (c > 1) {temp_result[ty][tx] = 1;}
else {temp_result[ty][tx] = c;}
__syncthreads();
C_cuda[index] = temp_result[ty][tx];
E_C[index] = e;
W_C[index] = w;
S_C[index] = s;
N_C[index] = n;
}
__global__ void
srad_cuda_2(
float *E_C,
float *W_C,
float *N_C,
float *S_C,
float * J_cuda,
float * C_cuda,
int cols,
int rows,
float lambda,
float q0sqr
)
{
//block id
int bx = blockIdx.x;
int by = blockIdx.y;
//thread id
int tx = threadIdx.x;
int ty = threadIdx.y;
//indices
int index = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + tx;
int index_s = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * BLOCK_SIZE + tx;
int index_e = cols * BLOCK_SIZE * by + BLOCK_SIZE * bx + cols * ty + BLOCK_SIZE;
float cc, cn, cs, ce, cw, d_sum;
//shared memory allocation
__shared__ float south_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float east_c[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_temp[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float c_cuda_result[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp[BLOCK_SIZE][BLOCK_SIZE];
//load data to shared memory
temp[ty][tx] = J_cuda[index];
__syncthreads();
//Clamp the halo loads at the lower/right image border so index_s / index_e never read outside C_cuda.
south_c[ty][tx] = ( by == gridDim.y - 1 ) ? C_cuda[cols * BLOCK_SIZE * (gridDim.y - 1) + BLOCK_SIZE * bx + cols * ( BLOCK_SIZE - 1 ) + tx] : C_cuda[index_s];
__syncthreads();
east_c[ty][tx] = ( bx == gridDim.x - 1 ) ? C_cuda[cols * BLOCK_SIZE * by + BLOCK_SIZE * ( gridDim.x - 1) + cols * ty + BLOCK_SIZE-1] : C_cuda[index_e];
__syncthreads();
c_cuda_temp[ty][tx] = C_cuda[index];
__syncthreads();
cc = c_cuda_temp[ty][tx];
if ( ty == BLOCK_SIZE -1 && tx == BLOCK_SIZE - 1){ //se
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( tx == BLOCK_SIZE -1 ){ //e
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = east_c[ty][tx];
}
else if ( ty == BLOCK_SIZE -1){ //s
cn = cc;
cs = south_c[ty][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
else{ //the data elements which are not on the borders
cn = cc;
cs = c_cuda_temp[ty+1][tx];
cw = cc;
ce = c_cuda_temp[ty][tx+1];
}
// divergence (equ 58)
d_sum = cn * N_C[index] + cs * S_C[index] + cw * W_C[index] + ce * E_C[index];
// image update (equ 61)
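// (explicit Euler step: J_new = J + (lambda/4) * d_sum, with d_sum approximating div(c * grad J))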
c_cuda_result[ty][tx] = temp[ty][tx] + 0.25 * lambda * d_sum;
__syncthreads();
J_cuda[index] = c_cuda_result[ty][tx];
}
|
bc9df74ea02306bb66d41442e60b7df2c18fee12.hip | // !!! This is a file automatically generated by hipify!!!
// includes system
#include <cmath>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <memory>
#include <string>
// includes CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "nbody_exception.h"
#include "integrator.h"
#include "options.h"
#include "parameter.h"
#include "pp_disk.h"
#include "test.h"
#include "red_type.h"
#include "red_constants.h"
#include "redutilcu.h"
using namespace std;
using namespace redutilcu;
string create_prefix(const options& opt);
namespace print
{
void info(const pp_disk* ppd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU)
{
cout.setf(ios::right);
cout.setf(ios::scientific);
n_objects_t* nb = ppd->n_bodies;
string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU");
cout << "[" << dev << "] "
<< tools::get_time_stamp(false)
<< " t: " << setprecision(4) << setw(10) << ppd->t / constants::Gauss
<< ", dt: " << setprecision(4) << setw(10) << dt / constants::Gauss
<< " (" << setprecision(4) << setw(10) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << ") [d]";
cout << ", dT: " << setprecision(4) << setw(10) << *dT_CPU / (double)CLOCKS_PER_SEC;
cout << " (" << setprecision(4) << setw(10) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << ") [s]";
cout << ", Nc: " << setw(5) << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL]
<< ", Ne: " << setw(5) << ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL]
<< ", Nh: " << setw(5) << ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL]
<< ", N : " << setw(6) << nb->get_n_total_playing() << "(" << setw(3) << nb->get_n_total_inactive() << ", " << setw(5) << nb->get_n_total_removed() << ")"
<< ", nt: " << setw(11) << intgr->get_n_tried_step()
<< ", np: " << setw(11) << intgr->get_n_passed_step()
<< ", nf: " << setw(11) << intgr->get_n_failed_step()
<< endl;
}
void info(ofstream& sout, const pp_disk* ppd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU)
{
static const string header_str = "device, date, time, t [d], dt [d], dt_avg [d], dT [s], dT_avg [s], Nc, Ne, Nh, n_playing, n_inactive, n_removed, n_step_total, n_step_passed, n_step_failed, ns_a, ns_r, ngp_a, ngp_r, nrp_a, nrp_r, npp_a, npp_r, nspl_a, nspl_r, npl_a, npl_r, ntp_a, ntp_r";
static bool first_call = true;
sout.setf(ios::right);
sout.setf(ios::scientific);
n_objects_t* nb = ppd->n_bodies;
string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU");
if (first_call)
{
first_call = false;
sout << header_str << endl;
}
sout << dev << SEP
<< tools::get_time_stamp(true) << SEP
<< setprecision(4) << ppd->t / constants::Gauss << SEP
<< setprecision(4) << dt / constants::Gauss << SEP
<< setprecision(4) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << SEP;
sout << setprecision(4) << (*dT_CPU / (double)CLOCKS_PER_SEC) << SEP;
sout << setprecision(4) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << SEP;
sout << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << SEP
<< ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << SEP
<< ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << SEP
<< nb->get_n_total_playing() << SEP
<< nb->get_n_total_inactive() << SEP
<< nb->get_n_total_removed() << SEP
<< intgr->get_n_tried_step() << SEP
<< intgr->get_n_passed_step() << SEP
<< intgr->get_n_failed_step() << SEP;
for (int i = 0; i < BODY_TYPE_N; i++)
{
sout << nb->playing[i] - nb->inactive[i] << SEP
<< nb->removed[i] << (i < BODY_TYPE_TESTPARTICLE ? " " : "");
}
sout << endl;
}
} /* print */
string create_prefix(const options& opt)
{
static const char* integrator_type_short_name[] =
{
"E",
"RK2",
"RK4",
"RK5",
"RKF7"
};
string prefix;
if (opt.ef)
{
char sep = '_';
string config;
#ifdef _DEBUG
config = "D";
#else
config = "R";
#endif
string dev = (opt.comp_dev == COMPUTING_DEVICE_CPU ? "cpu" : "gpu");
// as: adaptive step-size, fs: fix step-size
string adapt = (opt.param->adaptive == true ? "as" : "fs");
string int_name(integrator_type_short_name[opt.param->int_type]);
prefix += config + sep + dev + sep + adapt + sep + int_name + sep;
}
return prefix;
}
void print_info(ofstream& sout, const pp_disk* ppd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU)
{
static const string header_str = "dev,date ,time ,t [d] ,dt [d] ,dt_avg [d] ,dT [s] ,dT_avg [s] ,Nc ,Ne ,Nh ,nb_p ,nb_i,nb_r ,ns_t ,ns_p ,ns_f ,ns_a,ns_r ,ngp_a,ngp_r,nrp_a,nrp_r,npp_a,npp_r,nspl_a,nspl_r,npl_a,npl_r,ntp_a,ntp_r";
static bool first_call = true;
static string cvs = ",";
cout.setf(ios::right);
cout.setf(ios::scientific);
sout.setf(ios::right);
sout.setf(ios::scientific);
n_objects_t* nb = ppd->n_bodies;
string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU");
cout << "[" << dev << "] "
<< tools::get_time_stamp(false)
<< " t: " << setprecision(4) << setw(10) << ppd->t / constants::Gauss
<< ", dt: " << setprecision(4) << setw(10) << dt / constants::Gauss
<< " (" << setprecision(4) << setw(10) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << ") [d]";
cout << ", dT: " << setprecision(4) << setw(10) << *dT_CPU / (double)CLOCKS_PER_SEC;
cout << " (" << setprecision(4) << setw(10) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << ") [s]";
cout << ", Nc: " << setw(5) << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL]
<< ", Ne: " << setw(5) << ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL]
<< ", Nh: " << setw(5) << ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL]
<< ", N : " << setw(6) << nb->get_n_total_playing() << "(" << setw(3) << nb->get_n_total_inactive() << ", " << setw(5) << nb->get_n_total_removed() << ")"
<< ", nt: " << setw(11) << intgr->get_n_tried_step()
<< ", np: " << setw(11) << intgr->get_n_passed_step()
<< ", nf: " << setw(11) << intgr->get_n_failed_step()
<< endl;
if (first_call)
{
first_call = false;
sout << header_str << endl;
}
sout << dev << cvs
<< tools::get_time_stamp(true) << cvs
<< setprecision(4) << ppd->t / constants::Gauss << cvs
<< setprecision(4) << dt / constants::Gauss << cvs
<< setprecision(4) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << cvs;
sout << setprecision(4) << (*dT_CPU / (double)CLOCKS_PER_SEC) << cvs;
sout << setprecision(4) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << cvs;
sout << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << cvs
<< ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << cvs
<< ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << cvs
<< nb->get_n_total_playing() << cvs
<< nb->get_n_total_inactive() << cvs
<< nb->get_n_total_removed() << cvs
<< intgr->get_n_tried_step() << cvs
<< intgr->get_n_passed_step() << cvs
<< intgr->get_n_failed_step() << cvs;
for (int i = 0; i < BODY_TYPE_N; i++)
{
sout << nb->playing[i] - nb->inactive[i] << cvs
<< nb->removed[i] << (i < BODY_TYPE_TESTPARTICLE ? cvs : "");
}
sout << endl;
}
void print_data(const options& opt, pp_disk* ppd, pp_disk_t::integral_t* integrals, uint32_t& n_print, string& path_integral, string& prefix, string& ext, ofstream* slog)
{
string n_print_str = redutilcu::number_to_string(n_print, OUTPUT_ORDINAL_NUMBER_WIDTH, true);
string fn_data = prefix + opt.out_fn[OUTPUT_NAME_DATA] + "_" + n_print_str + "." + ext;
string path_data = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data);
ppd->print_data(path_data, opt.param->output_data_rep);
string fn_data_info = prefix + opt.out_fn[OUTPUT_NAME_DATA] + "_" + n_print_str + ".info." + ext;
string path_data_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data_info);
ppd->print_data_info(path_data_info, opt.param->output_data_rep);
n_print++;
string path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], "start_files.txt");
ofstream sout(path.c_str(), ios_base::out);
if (sout)
{
sout << fn_data_info << endl;
sout << fn_data << endl;
}
else
{
throw string("Cannot open " + path + "!");
}
ppd->calc_integral(false, integrals[1]);
ppd->print_integral_data(path_integral, integrals[1]);
}
void print_data_before_event(const options& opt, pp_disk* ppd, uint32_t& n_event, string& prefix, string& ext, ofstream* slog)
{
// In order to write out the data in yout
ppd->swap();
string n_event_str = redutilcu::number_to_string(n_event, OUTPUT_ORDINAL_NUMBER_WIDTH, true);
string fn_data = prefix + "event_" + n_event_str + "." + ext;
string path_data = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data);
ppd->print_data(path_data, opt.param->output_data_rep);
string fn_data_info = prefix + "event_" + n_event_str + ".info." + ext;
string path_data_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data_info);
ppd->print_data_info(path_data_info, opt.param->output_data_rep);
n_event++;
// Change back the pointers
ppd->swap();
}
void print_dump(const options& opt, pp_disk* ppd, string& prefix, string& ext, ofstream* slog)
{
string fn_data = prefix + "dump." + ext;
string path_data = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data);
ppd->print_data(path_data, opt.param->output_data_rep);
string fn_data_info = prefix + "dump.info." + ext;
string path_data_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data_info);
ppd->print_data_info(path_data_info, opt.param->output_data_rep);
string path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], "start_files.txt");
ofstream sout(path.c_str(), ios_base::out);
if (sout)
{
sout << fn_data_info << endl;
sout << fn_data << endl;
}
else
{
throw string("Cannot open " + path + "!");
}
}
void run_benchmark(const options& opt, pp_disk* ppd, integrator* intgr, ofstream& sout)
{
cout << "See the log file for the result." << endl;
sout.setf(ios::right);
sout.setf(ios::scientific);
sout.setf(ios::right);
sout.setf(ios::scientific);
size_t size = ppd->n_bodies->get_n_total_playing() * sizeof(var4_t);
// Create aliases
var4_t* r = ppd->sim_data->y[0];
var4_t* v = ppd->sim_data->y[1];
pp_disk_t::param_t* p = ppd->sim_data->p;
pp_disk_t::body_metadata_t* bmd = ppd->sim_data->body_md;
sout << endl;
ttt_t curr_t = 0.0;
if (COMPUTING_DEVICE_GPU == opt.comp_dev)
{
sout << "----------------------------------------------" << endl;
sout << "GPU:" << endl;
sout << "----------------------------------------------" << endl << endl;
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, opt.id_dev);
int half_warp_size = deviceProp.warpSize/2;
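// Sweep the threads-per-block setting in half-warp increments and keep the
// block size with the smallest measured kernel execution time.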
vector<float2> execution_time;
var4_t* d_dy = 0x0;
ALLOCATE_DEVICE_VECTOR((void**)&d_dy, size);
uint32_t n_sink = ppd->n_bodies->get_n_SI();
uint32_t n_pass = 0;
if (0 < n_sink)
{
sout << "SI:" << endl;
sout << "----------------------------------------------" << endl;
for (int n_tpb = half_warp_size; n_tpb <= deviceProp.maxThreadsPerBlock; n_tpb += half_warp_size)
{
sout << "n_tpb: " << setw(6) << n_tpb;
ppd->set_n_tpb(n_tpb);
interaction_bound int_bound = ppd->n_bodies->get_bound_SI();
clock_t t_start = clock();
float cu_elt = ppd->benchmark_calc_grav_accel(curr_t, n_sink, int_bound, bmd, p, r, v, d_dy);
ttt_t elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
hipError_t cuda_status = hipGetLastError();
if (hipSuccess != cuda_status)
{
break;
}
float2 exec_t = {(float)elapsed_time, cu_elt};
execution_time.push_back(exec_t);
sout << " dt: " << setprecision(6) << setw(6) << elapsed_time << " (" << setw(6) << cu_elt << ") [ms]" << endl;
n_pass++;
}
float min_y = 1.0e10;
int min_idx = 0;
for (uint32_t i = 0; i < n_pass; i++)
{
if (min_y > execution_time[i].y)
{
min_y = execution_time[i].y;
min_idx = i;
}
}
sout << "Minimum at n_tpb = " << ((min_idx + 1) * half_warp_size) << ", where execution time is: " << execution_time[min_idx].y << " [ms]" << endl;
}
FREE_DEVICE_VECTOR((void**)&d_dy);
// Needed by nvprof.exe
hipDeviceReset();
} /* if */
else
{
sout << "----------------------------------------------" << endl;
sout << "CPU:" << endl;
sout << "----------------------------------------------" << endl << endl;
clock_t t_start;
ttt_t elapsed_time;
var4_t* h_dy = 0x0;
ALLOCATE_HOST_VECTOR((void**)&h_dy, size);
int n_sink = ppd->n_bodies->get_n_SI();
if (0 < n_sink)
{
interaction_bound int_bound = ppd->n_bodies->get_bound_SI();
t_start = clock();
ppd->cpu_calc_grav_accel_SI(curr_t, int_bound, bmd, p, r, v, h_dy, 0x0, 0x0);
elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
sout << "SI:" << endl;
sout << "----------------------------------------------" << endl;
sout << "dt: " << setprecision(10) << setw(16) << elapsed_time << " [ms]" << endl;
}
n_sink = ppd->n_bodies->get_n_NSI();
if (0 < n_sink)
{
interaction_bound int_bound = ppd->n_bodies->get_bound_NSI();
t_start = clock();
ppd->cpu_calc_grav_accel_NSI(curr_t, int_bound, bmd, p, r, v, h_dy, 0x0, 0x0);
elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
sout << "NSI:" << endl;
sout << "----------------------------------------------" << endl;
sout << "dt: " << setprecision(10) << setw(16) << elapsed_time << " [ms]" << endl;
}
n_sink = ppd->n_bodies->get_n_NI();
if (0 < n_sink)
{
interaction_bound int_bound = ppd->n_bodies->get_bound_NI();
t_start = clock();
ppd->cpu_calc_grav_accel_NI(curr_t, int_bound, bmd, p, r, v, h_dy, 0x0, 0x0);
elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
sout << "NI:" << endl;
sout << "----------------------------------------------" << endl;
sout << "dt: " << setprecision(10) << setw(16) << elapsed_time << " [ms]" << endl;
}
}
}
void run_simulation(const options& opt, pp_disk* ppd, integrator* intgr, uint32_t dt_CPU_offset, ofstream* slog)
{
static string prefix = create_prefix(opt);
static string ext = (DATA_REPRESENTATION_ASCII == opt.param->output_data_rep ? "txt" : "dat");
static string path_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INFO] + ".txt");
static string path_integral = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL] + ".txt");
static string path_integral_event = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL_EVENT] + ".txt");
static string path_event = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_EVENT] + ".txt");
ttt_t it = 0.0; // integrator time: measures the time of this integration
ttt_t ps = 0.0;
ttt_t dt = 0.0;
clock_t T_CPU = 0;
clock_t dT_CPU = 0;
time_t time_last_info = clock();
time_t time_last_dump = clock();
pp_disk_t::integral_t integrals[2];
ofstream* sinfo = new ofstream(path_info.c_str(), ios::out | ios::app);
if (!sinfo)
{
throw string("Cannot open " + path_info + ".");
}
ppd->calc_integral(false, integrals[0]);
uint32_t n_print = 0;
uint32_t n_event = 0;
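// When resuming from a numbered snapshot (e.g. an input file named data_00042.*),
// recover the print ordinal embedded in the file name so output numbering continues from there.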
if (4 <= opt.in_fn[INPUT_NAME_DATA].length() && "data" == opt.in_fn[INPUT_NAME_DATA].substr(0, 4))
{
string str = opt.in_fn[INPUT_NAME_DATA];
size_t pos = str.find_first_of("_");
str = str.substr(pos + 1, OUTPUT_ORDINAL_NUMBER_WIDTH);
n_print = atoi(str.c_str());
// TODO: set the n_event counter
// Set ps in order to save the next snapshot at the correct epoch
//ps = ppd->t - n_print * opt.param->output_interval;
n_print++;
}
if (0 == n_print && "dump" != opt.in_fn[INPUT_NAME_DATA].substr(0, 4))
{
print_data(opt, ppd, integrals, n_print, path_integral, prefix, ext, slog);
}
if (COMPUTING_DEVICE_GPU == ppd->get_computing_device())
{
if (opt.verbose)
{
printf("Searching for the optimal thread per block ");
}
uint32_t n_tpb = ppd->benchmark(opt.verbose);
ppd->set_n_tpb(n_tpb);
if (opt.verbose)
{
printf(" done\n");
string msg = "Number of thread per block was set to " + redutilcu::number_to_string(ppd->get_n_tpb());
file::log_message(*slog, msg, opt.print_to_screen);
}
}
/* main cycle */
//while (ppd->t <= opt.param->simulation_length && 1 < ppd->n_bodies->get_n_total_active())
while (fabs(it) <= fabs(opt.param->simulation_length) && 1 < ppd->n_bodies->get_n_total_active())
{
// TEST the change between CPU/GPU
#if 1
if (COMPUTING_DEVICE_GPU == intgr->get_computing_device())
{
printf("The computing device is setting to CPU ... ");
intgr->set_computing_device(COMPUTING_DEVICE_CPU);
printf("done\n");
}
else
{
printf("The computing device is setting to GPU ... ");
intgr->set_computing_device(COMPUTING_DEVICE_GPU);
printf("done\n");
}
#endif
if (COMPUTING_DEVICE_GPU == intgr->get_computing_device() && opt.n_change_to_cpu >= ppd->n_bodies->get_n_SI())
{
intgr->set_computing_device(COMPUTING_DEVICE_CPU);
if (opt.verbose)
{
string msg = "Number of self-interacting bodies dropped below " + redutilcu::number_to_string(opt.n_change_to_cpu) + ". Execution was transferred to CPU.";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
// TODO: unify this and the collision handling into a single check_for_event()
if ((0.0 < opt.param->threshold[THRESHOLD_EJECTION_DISTANCE] || 0.0 < opt.param->threshold[THRESHOLD_HIT_CENTRUM_DISTANCE]))
{
bool eje_hc = ppd->check_for_ejection_hit_centrum();
if (eje_hc)
{
pp_disk_t::integral_t I;
ppd->calc_integral(true, I);
ppd->print_integral_data(path_integral_event, I);
ppd->handle_ejection_hit_centrum();
ppd->calc_integral(false, I);
ppd->print_integral_data(path_integral_event, I);
ppd->print_event_data(path_event, *slog);
ppd->clear_event_counter();
time_last_dump = clock();
print_dump(opt, ppd, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Dump file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
}
// make the integration step, and measure the time it takes
clock_t T0_CPU = clock();
dt = intgr->step();
dT_CPU = (clock() - T0_CPU);
T_CPU += dT_CPU;
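// dt_CPU_offset holds the CPU seconds accumulated by previous runs (restored from the dump),
// so the total stored below keeps growing monotonically across restarts.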
ppd->set_dt_CPU(dt_CPU_offset + T_CPU / CLOCKS_PER_SEC);
it += dt;
ps += dt;
// The stepsize cannot exceed the user-requested output interval
if (fabs(opt.param->output_interval) < fabs(intgr->get_dt_next()))
{
intgr->set_dt_next(0.0 < dt ? opt.param->output_interval : -opt.param->output_interval);
}
if (0.0 < opt.param->threshold[THRESHOLD_RADII_ENHANCE_FACTOR])
{
bool collision = ppd->check_for_collision();
if (collision)
{
if (opt.print_dbe)
{
print_data_before_event(opt, ppd, n_event, prefix, ext, slog);
}
pp_disk_t::integral_t I;
ppd->calc_integral(true, I);
ppd->print_integral_data(path_integral_event, I);
ppd->handle_collision();
ppd->calc_integral(false, I);
ppd->print_integral_data(path_integral_event, I);
ppd->print_event_data(path_event, *slog);
ppd->clear_event_counter();
time_last_dump = clock();
print_dump(opt, ppd, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Dump file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
}
if (opt.param->output_interval <= fabs(ps))
{
ps = 0.0;
print_data(opt, ppd, integrals, n_print, path_integral, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Data file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
if (16 <= ppd->n_event[EVENT_COUNTER_NAME_LAST_CLEAR])
{
ppd->set_event_counter(EVENT_COUNTER_NAME_LAST_CLEAR, 0);
ppd->rebuild_vectors();
if (opt.verbose)
{
string msg = "Rebuild the vectors (removed " + redutilcu::number_to_string(ppd->n_bodies->n_removed) + " inactive bodies at t: " + redutilcu::number_to_string(ppd->t / constants::Gauss) + " [d])";
file::log_message(*slog, msg, opt.print_to_screen);
}
if (COMPUTING_DEVICE_GPU == ppd->get_computing_device())
{
if (opt.verbose)
{
printf("Searching for the optimal thread per block ");
}
uint32_t n_tpb = ppd->benchmark(opt.verbose);
ppd->set_n_tpb(n_tpb);
if (opt.verbose)
{
printf(" done\n");
string msg = "Number of thread per block was set to " + redutilcu::number_to_string(ppd->get_n_tpb());
file::log_message(*slog, msg, opt.print_to_screen);
}
}
}
if (opt.param->dump_dt < (clock() - time_last_dump) / (double)CLOCKS_PER_SEC)
{
time_last_dump = clock();
print_dump(opt, ppd, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Dump file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
if (opt.param->info_dt < (clock() - time_last_info) / (double)CLOCKS_PER_SEC)
{
time_last_info = clock();
print_info(*sinfo, ppd, intgr, dt, &T_CPU, &dT_CPU);
}
} /* while */
print_info(*sinfo, ppd, intgr, dt, &T_CPU, &dT_CPU);
// To avoid duplicate save at the end of the simulation
if (0.0 < fabs(ps))
{
ps = 0.0;
print_data(opt, ppd, integrals, n_print, path_integral, prefix, ext, slog);
}
// Needed by nvprof.exe
if (COMPUTING_DEVICE_GPU == ppd->get_computing_device())
{
hipDeviceReset();
}
}
void run_test()
{
test_n_objects_t();
}
//http://stackoverflow.com/questions/11666049/cuda-kernel-results-different-in-release-mode
//http://developer.download.nvidia.com/assets/cuda/files/NVIDIA-CUDA-Floating-Point.pdf
//-gpu -v -pts -ef -iDir C:\Work\red.cuda.Results\Dvorak\2D\NewRun_2\Run_cf4.0_2 -p parameters.txt -ic run_04.txt
//-cpu -pts -iDir C:\Work\Oktatas\2016\InfoCsill4\TestRun -p parameters.txt -i start_files.txt
int main(int argc, const char** argv, const char** env)
{
time_t start = time(NULL);
uint32_t dt_CPU_offset = 0;
ofstream* slog = 0x0;
try
{
options opt = options(argc, argv);
if (opt.test)
{
run_test();
return (EXIT_SUCCESS);
}
string prefix = create_prefix(opt);
string path_log = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_LOG]) + ".txt";
slog = new ofstream(path_log.c_str(), ios::out | ios::app);
if (!slog)
{
throw string("Cannot open " + path_log + ".");
}
#ifdef __GNUC__
string dummy = opt.param->get_data();
file::log_start(*slog, argc, argv, env, dummy, opt.print_to_screen);
#else
file::log_start(*slog, argc, argv, env, opt.param->get_data(), opt.print_to_screen);
#endif
if (COMPUTING_DEVICE_GPU == opt.comp_dev)
{
set_device(opt.id_dev, std::cout);
device_query(*slog, opt.id_dev, opt.print_to_screen);
}
pp_disk *ppd = opt.create_pp_disk();
// Temporary ugly solution to have the correct input for the colliding bodies.
#if 0
ppd->t *= constants::Gauss;
ppd->dt *= constants::Gauss;
tools::transform_velocity(ppd->n_bodies->get_n_total_active(), ppd->sim_data);
#endif
integrator *intgr = opt.create_integrator(ppd, ppd->dt);
// Number of seconds from previous runs
dt_CPU_offset = ppd->get_dt_CPU();
run_simulation(opt, ppd, intgr, dt_CPU_offset, slog);
//if (opt.benchmark)
//{
// run_benchmark(opt, ppd, intgr, *slog);
//}
//else
//{
// run_simulation(opt, ppd, intgr, dt_CPU_offset, slog);
//}
} /* try */
catch (const string& msg)
{
cerr << "Error: " << msg << endl;
if (0x0 != slog)
{
file::log_message(*slog, "Error: " + msg, false);
}
}
time_t total_time = dt_CPU_offset + time(NULL) - start;
cout << "Total time: " << total_time << " s" << endl;
if (0x0 != slog)
{
file::log_message(*slog, "Total time: " + tools::convert_time_t(total_time) + " s", false);
}
return (EXIT_SUCCESS);
}
| bc9df74ea02306bb66d41442e60b7df2c18fee12.cu | // includes system
#include <cmath>
#include <ctime>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <memory>
#include <string>
// includes CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "nbody_exception.h"
#include "integrator.h"
#include "options.h"
#include "parameter.h"
#include "pp_disk.h"
#include "test.h"
#include "red_type.h"
#include "red_constants.h"
#include "redutilcu.h"
using namespace std;
using namespace redutilcu;
string create_prefix(const options& opt);
namespace print
{
void info(const pp_disk* ppd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU)
{
cout.setf(ios::right);
cout.setf(ios::scientific);
n_objects_t* nb = ppd->n_bodies;
string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU");
cout << "[" << dev << "] "
<< tools::get_time_stamp(false)
<< " t: " << setprecision(4) << setw(10) << ppd->t / constants::Gauss
<< ", dt: " << setprecision(4) << setw(10) << dt / constants::Gauss
<< " (" << setprecision(4) << setw(10) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << ") [d]";
cout << ", dT: " << setprecision(4) << setw(10) << *dT_CPU / (double)CLOCKS_PER_SEC;
cout << " (" << setprecision(4) << setw(10) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << ") [s]";
cout << ", Nc: " << setw(5) << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL]
<< ", Ne: " << setw(5) << ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL]
<< ", Nh: " << setw(5) << ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL]
<< ", N : " << setw(6) << nb->get_n_total_playing() << "(" << setw(3) << nb->get_n_total_inactive() << ", " << setw(5) << nb->get_n_total_removed() << ")"
<< ", nt: " << setw(11) << intgr->get_n_tried_step()
<< ", np: " << setw(11) << intgr->get_n_passed_step()
<< ", nf: " << setw(11) << intgr->get_n_failed_step()
<< endl;
}
void info(ofstream& sout, const pp_disk* ppd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU)
{
static const string header_str = "device, date, time, t [d], dt [d], dt_avg [d], dT [s], dT_avg [s], Nc, Ne, Nh, n_playing, n_inactive, n_removed, n_step_total, n_step_passed, n_step_failed, ns_a, ns_r, ngp_a, ngp_r, nrp_a, nrp_r, npp_a, npp_r, nspl_a, nspl_r, npl_a, npl_r, ntp_a, ntp_r";
static bool first_call = true;
sout.setf(ios::right);
sout.setf(ios::scientific);
n_objects_t* nb = ppd->n_bodies;
string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU");
if (first_call)
{
first_call = false;
sout << header_str << endl;
}
sout << dev << SEP
<< tools::get_time_stamp(true) << SEP
<< setprecision(4) << ppd->t / constants::Gauss << SEP
<< setprecision(4) << dt / constants::Gauss << SEP
<< setprecision(4) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << SEP;
sout << setprecision(4) << (*dT_CPU / (double)CLOCKS_PER_SEC) << SEP;
sout << setprecision(4) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << SEP;
sout << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << SEP
<< ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << SEP
<< ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << SEP
<< nb->get_n_total_playing() << SEP
<< nb->get_n_total_inactive() << SEP
<< nb->get_n_total_removed() << SEP
<< intgr->get_n_tried_step() << SEP
<< intgr->get_n_passed_step() << SEP
<< intgr->get_n_failed_step() << SEP;
for (int i = 0; i < BODY_TYPE_N; i++)
{
sout << nb->playing[i] - nb->inactive[i] << SEP
<< nb->removed[i] << (i < BODY_TYPE_TESTPARTICLE ? " " : "");
}
sout << endl;
}
} /* print */
string create_prefix(const options& opt)
{
static const char* integrator_type_short_name[] =
{
"E",
"RK2",
"RK4",
"RK5",
"RKF7"
};
string prefix;
if (opt.ef)
{
char sep = '_';
string config;
#ifdef _DEBUG
config = "D";
#else
config = "R";
#endif
string dev = (opt.comp_dev == COMPUTING_DEVICE_CPU ? "cpu" : "gpu");
// as: adaptive step-size, fs: fixed step-size
string adapt = (opt.param->adaptive == true ? "as" : "fs");
string int_name(integrator_type_short_name[opt.param->int_type]);
prefix += config + sep + dev + sep + adapt + sep + int_name + sep;
}
return prefix;
}
void print_info(ofstream& sout, const pp_disk* ppd, integrator *intgr, ttt_t dt, clock_t* T_CPU, clock_t* dT_CPU)
{
static const string header_str = "dev,date ,time ,t [d] ,dt [d] ,dt_avg [d] ,dT [s] ,dT_avg [s] ,Nc ,Ne ,Nh ,nb_p ,nb_i,nb_r ,ns_t ,ns_p ,ns_f ,ns_a,ns_r ,ngp_a,ngp_r,nrp_a,nrp_r,npp_a,npp_r,nspl_a,nspl_r,npl_a,npl_r,ntp_a,ntp_r";
static bool first_call = true;
static string cvs = ",";
cout.setf(ios::right);
cout.setf(ios::scientific);
sout.setf(ios::right);
sout.setf(ios::scientific);
n_objects_t* nb = ppd->n_bodies;
string dev = (intgr->get_computing_device() == COMPUTING_DEVICE_CPU ? "CPU" : "GPU");
cout << "[" << dev << "] "
<< tools::get_time_stamp(false)
<< " t: " << setprecision(4) << setw(10) << ppd->t / constants::Gauss
<< ", dt: " << setprecision(4) << setw(10) << dt / constants::Gauss
<< " (" << setprecision(4) << setw(10) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << ") [d]";
cout << ", dT: " << setprecision(4) << setw(10) << *dT_CPU / (double)CLOCKS_PER_SEC;
cout << " (" << setprecision(4) << setw(10) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << ") [s]";
cout << ", Nc: " << setw(5) << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL]
<< ", Ne: " << setw(5) << ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL]
<< ", Nh: " << setw(5) << ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL]
<< ", N : " << setw(6) << nb->get_n_total_playing() << "(" << setw(3) << nb->get_n_total_inactive() << ", " << setw(5) << nb->get_n_total_removed() << ")"
<< ", nt: " << setw(11) << intgr->get_n_tried_step()
<< ", np: " << setw(11) << intgr->get_n_passed_step()
<< ", nf: " << setw(11) << intgr->get_n_failed_step()
<< endl;
if (first_call)
{
first_call = false;
sout << header_str << endl;
}
sout << dev << cvs
<< tools::get_time_stamp(true) << cvs
<< setprecision(4) << ppd->t / constants::Gauss << cvs
<< setprecision(4) << dt / constants::Gauss << cvs
<< setprecision(4) << (ppd->t / constants::Gauss)/intgr->get_n_passed_step() << cvs;
sout << setprecision(4) << (*dT_CPU / (double)CLOCKS_PER_SEC) << cvs;
sout << setprecision(4) << (*T_CPU / (double)CLOCKS_PER_SEC) / intgr->get_n_passed_step() << cvs;
sout << ppd->n_collision[ EVENT_COUNTER_NAME_TOTAL] << cvs
<< ppd->n_ejection[ EVENT_COUNTER_NAME_TOTAL] << cvs
<< ppd->n_hit_centrum[EVENT_COUNTER_NAME_TOTAL] << cvs
<< nb->get_n_total_playing() << cvs
<< nb->get_n_total_inactive() << cvs
<< nb->get_n_total_removed() << cvs
<< intgr->get_n_tried_step() << cvs
<< intgr->get_n_passed_step() << cvs
<< intgr->get_n_failed_step() << cvs;
for (int i = 0; i < BODY_TYPE_N; i++)
{
sout << nb->playing[i] - nb->inactive[i] << cvs
<< nb->removed[i] << (i < BODY_TYPE_TESTPARTICLE ? cvs : "");
}
sout << endl;
}
void print_data(const options& opt, pp_disk* ppd, pp_disk_t::integral_t* integrals, uint32_t& n_print, string& path_integral, string& prefix, string& ext, ofstream* slog)
{
string n_print_str = redutilcu::number_to_string(n_print, OUTPUT_ORDINAL_NUMBER_WIDTH, true);
string fn_data = prefix + opt.out_fn[OUTPUT_NAME_DATA] + "_" + n_print_str + "." + ext;
string path_data = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data);
ppd->print_data(path_data, opt.param->output_data_rep);
string fn_data_info = prefix + opt.out_fn[OUTPUT_NAME_DATA] + "_" + n_print_str + ".info." + ext;
string path_data_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data_info);
ppd->print_data_info(path_data_info, opt.param->output_data_rep);
n_print++;
string path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], "start_files.txt");
ofstream sout(path.c_str(), ios_base::out);
if (sout)
{
sout << fn_data_info << endl;
sout << fn_data << endl;
}
else
{
throw string("Cannot open " + path + "!");
}
ppd->calc_integral(false, integrals[1]);
ppd->print_integral_data(path_integral, integrals[1]);
}
void print_data_before_event(const options& opt, pp_disk* ppd, uint32_t& n_event, string& prefix, string& ext, ofstream* slog)
{
// In order to write out the data in yout
ppd->swap();
string n_event_str = redutilcu::number_to_string(n_event, OUTPUT_ORDINAL_NUMBER_WIDTH, true);
string fn_data = prefix + "event_" + n_event_str + "." + ext;
string path_data = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data);
ppd->print_data(path_data, opt.param->output_data_rep);
string fn_data_info = prefix + "event_" + n_event_str + ".info." + ext;
string path_data_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data_info);
ppd->print_data_info(path_data_info, opt.param->output_data_rep);
n_event++;
// Change back the pointers
ppd->swap();
}
void print_dump(const options& opt, pp_disk* ppd, string& prefix, string& ext, ofstream* slog)
{
string fn_data = prefix + "dump." + ext;
string path_data = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data);
ppd->print_data(path_data, opt.param->output_data_rep);
string fn_data_info = prefix + "dump.info." + ext;
string path_data_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], fn_data_info);
ppd->print_data_info(path_data_info, opt.param->output_data_rep);
string path = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], "start_files.txt");
ofstream sout(path.c_str(), ios_base::out);
if (sout)
{
sout << fn_data_info << endl;
sout << fn_data << endl;
}
else
{
throw string("Cannot open " + path + "!");
}
}
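// Benchmarks the gravitational acceleration computation. On the GPU the
// threads-per-block count is swept in half-warp increments up to
// maxThreadsPerBlock and the fastest configuration is reported; on the CPU the
// SI, NSI and NI interaction loops are each timed once.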
void run_benchmark(const options& opt, pp_disk* ppd, integrator* intgr, ofstream& sout)
{
cout << "See the log file for the result." << endl;
sout.setf(ios::right);
sout.setf(ios::scientific);
size_t size = ppd->n_bodies->get_n_total_playing() * sizeof(var4_t);
// Create aliases
var4_t* r = ppd->sim_data->y[0];
var4_t* v = ppd->sim_data->y[1];
pp_disk_t::param_t* p = ppd->sim_data->p;
pp_disk_t::body_metadata_t* bmd = ppd->sim_data->body_md;
sout << endl;
ttt_t curr_t = 0.0;
if (COMPUTING_DEVICE_GPU == opt.comp_dev)
{
sout << "----------------------------------------------" << endl;
sout << "GPU:" << endl;
sout << "----------------------------------------------" << endl << endl;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, opt.id_dev);
int half_warp_size = deviceProp.warpSize/2;
vector<float2> execution_time;
var4_t* d_dy = 0x0;
ALLOCATE_DEVICE_VECTOR((void**)&d_dy, size);
uint32_t n_sink = ppd->n_bodies->get_n_SI();
uint32_t n_pass = 0;
if (0 < n_sink)
{
sout << "SI:" << endl;
sout << "----------------------------------------------" << endl;
for (int n_tpb = half_warp_size; n_tpb <= deviceProp.maxThreadsPerBlock; n_tpb += half_warp_size)
{
sout << "n_tpb: " << setw(6) << n_tpb;
ppd->set_n_tpb(n_tpb);
interaction_bound int_bound = ppd->n_bodies->get_bound_SI();
clock_t t_start = clock();
float cu_elt = ppd->benchmark_calc_grav_accel(curr_t, n_sink, int_bound, bmd, p, r, v, d_dy);
ttt_t elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
cudaError_t cuda_status = cudaGetLastError();
if (cudaSuccess != cuda_status)
{
break;
}
float2 exec_t = {(float)elapsed_time, cu_elt};
execution_time.push_back(exec_t);
sout << " dt: " << setprecision(6) << setw(6) << elapsed_time << " (" << setw(6) << cu_elt << ") [ms]" << endl;
n_pass++;
}
float min_y = 1.0e10;
int min_idx = 0;
for (uint32_t i = 0; i < n_pass; i++)
{
if (min_y > execution_time[i].y)
{
min_y = execution_time[i].y;
min_idx = i;
}
}
sout << "Minimum at n_tpb = " << ((min_idx + 1) * half_warp_size) << ", where execution time is: " << execution_time[min_idx].y << " [ms]" << endl;
}
FREE_DEVICE_VECTOR((void**)&d_dy);
// Needed by nvprof.exe
cudaDeviceReset();
} /* if */
else
{
sout << "----------------------------------------------" << endl;
sout << "CPU:" << endl;
sout << "----------------------------------------------" << endl << endl;
clock_t t_start;
ttt_t elapsed_time;
var4_t* h_dy = 0x0;
ALLOCATE_HOST_VECTOR((void**)&h_dy, size);
int n_sink = ppd->n_bodies->get_n_SI();
if (0 < n_sink)
{
interaction_bound int_bound = ppd->n_bodies->get_bound_SI();
t_start = clock();
ppd->cpu_calc_grav_accel_SI(curr_t, int_bound, bmd, p, r, v, h_dy, 0x0, 0x0);
elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
sout << "SI:" << endl;
sout << "----------------------------------------------" << endl;
sout << "dt: " << setprecision(10) << setw(16) << elapsed_time << " [ms]" << endl;
}
n_sink = ppd->n_bodies->get_n_NSI();
if (0 < n_sink)
{
interaction_bound int_bound = ppd->n_bodies->get_bound_NSI();
t_start = clock();
ppd->cpu_calc_grav_accel_NSI(curr_t, int_bound, bmd, p, r, v, h_dy, 0x0, 0x0);
elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
sout << "NSI:" << endl;
sout << "----------------------------------------------" << endl;
sout << "dt: " << setprecision(10) << setw(16) << elapsed_time << " [ms]" << endl;
}
n_sink = ppd->n_bodies->get_n_NI();
if (0 < n_sink)
{
interaction_bound int_bound = ppd->n_bodies->get_bound_NI();
t_start = clock();
ppd->cpu_calc_grav_accel_NI(curr_t, int_bound, bmd, p, r, v, h_dy, 0x0, 0x0);
elapsed_time = ((double)(clock() - t_start)/(double)CLOCKS_PER_SEC) * 1000.0; // [ms]
sout << "NI:" << endl;
sout << "----------------------------------------------" << endl;
sout << "dt: " << setprecision(10) << setw(16) << elapsed_time << " [ms]" << endl;
}
}
}
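// Main integration loop: repeatedly steps the integrator, handles ejection /
// hit-centrum and collision events, periodically writes snapshots, dump files
// and info lines, and rebuilds the body vectors after enough events, until the
// requested simulation length is reached or fewer than two bodies remain active.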
void run_simulation(const options& opt, pp_disk* ppd, integrator* intgr, uint32_t dt_CPU_offset, ofstream* slog)
{
static string prefix = create_prefix(opt);
static string ext = (DATA_REPRESENTATION_ASCII == opt.param->output_data_rep ? "txt" : "dat");
static string path_info = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INFO] + ".txt");
static string path_integral = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL] + ".txt");
static string path_integral_event = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_INTEGRAL_EVENT] + ".txt");
static string path_event = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_EVENT] + ".txt");
ttt_t it = 0.0; // integrator time: measures the time of this integration
ttt_t ps = 0.0;
ttt_t dt = 0.0;
clock_t T_CPU = 0;
clock_t dT_CPU = 0;
time_t time_last_info = clock();
time_t time_last_dump = clock();
pp_disk_t::integral_t integrals[2];
ofstream* sinfo = new ofstream(path_info.c_str(), ios::out | ios::app);
if (!sinfo)
{
throw string("Cannot open " + path_info + ".");
}
ppd->calc_integral(false, integrals[0]);
uint32_t n_print = 0;
uint32_t n_event = 0;
if (4 <= opt.in_fn[INPUT_NAME_DATA].length() && "data" == opt.in_fn[INPUT_NAME_DATA].substr(0, 4))
{
string str = opt.in_fn[INPUT_NAME_DATA];
size_t pos = str.find_first_of("_");
str = str.substr(pos + 1, OUTPUT_ORDINAL_NUMBER_WIDTH);
n_print = atoi(str.c_str());
// TODO: set the n_event counter
// Set ps in order to save the next snapshot at the correct epoch
//ps = ppd->t - n_print * opt.param->output_interval;
n_print++;
}
if (0 == n_print && "dump" != opt.in_fn[INPUT_NAME_DATA].substr(0, 4))
{
print_data(opt, ppd, integrals, n_print, path_integral, prefix, ext, slog);
}
if (COMPUTING_DEVICE_GPU == ppd->get_computing_device())
{
if (opt.verbose)
{
printf("Searching for the optimal thread per block ");
}
uint32_t n_tpb = ppd->benchmark(opt.verbose);
ppd->set_n_tpb(n_tpb);
if (opt.verbose)
{
printf(" done\n");
string msg = "Number of thread per block was set to " + redutilcu::number_to_string(ppd->get_n_tpb());
file::log_message(*slog, msg, opt.print_to_screen);
}
}
/* main cycle */
//while (ppd->t <= opt.param->simulation_length && 1 < ppd->n_bodies->get_n_total_active())
while (fabs(it) <= fabs(opt.param->simulation_length) && 1 < ppd->n_bodies->get_n_total_active())
{
// TEST the change between CPU/GPU
#if 1
if (COMPUTING_DEVICE_GPU == intgr->get_computing_device())
{
printf("The computing device is setting to CPU ... ");
intgr->set_computing_device(COMPUTING_DEVICE_CPU);
printf("done\n");
}
else
{
printf("The computing device is setting to GPU ... ");
intgr->set_computing_device(COMPUTING_DEVICE_GPU);
printf("done\n");
}
#endif
if (COMPUTING_DEVICE_GPU == intgr->get_computing_device() && opt.n_change_to_cpu >= ppd->n_bodies->get_n_SI())
{
intgr->set_computing_device(COMPUTING_DEVICE_CPU);
if (opt.verbose)
{
string msg = "Number of self-interacting bodies dropped below " + redutilcu::number_to_string(opt.n_change_to_cpu) + ". Execution was transferred to CPU.";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
		// TODO: unify this and the collision handling: check_for_event()
if ((0.0 < opt.param->threshold[THRESHOLD_EJECTION_DISTANCE] || 0.0 < opt.param->threshold[THRESHOLD_HIT_CENTRUM_DISTANCE]))
{
bool eje_hc = ppd->check_for_ejection_hit_centrum();
if (eje_hc)
{
pp_disk_t::integral_t I;
ppd->calc_integral(true, I);
ppd->print_integral_data(path_integral_event, I);
ppd->handle_ejection_hit_centrum();
ppd->calc_integral(false, I);
ppd->print_integral_data(path_integral_event, I);
ppd->print_event_data(path_event, *slog);
ppd->clear_event_counter();
time_last_dump = clock();
print_dump(opt, ppd, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Dump file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
}
// make the integration step, and measure the time it takes
clock_t T0_CPU = clock();
dt = intgr->step();
dT_CPU = (clock() - T0_CPU);
T_CPU += dT_CPU;
ppd->set_dt_CPU(dt_CPU_offset + T_CPU / CLOCKS_PER_SEC);
it += dt;
ps += dt;
		// The stepsize cannot exceed the user-requested output interval
if (fabs(opt.param->output_interval) < fabs(intgr->get_dt_next()))
{
intgr->set_dt_next(0.0 < dt ? opt.param->output_interval : -opt.param->output_interval);
}
if (0.0 < opt.param->threshold[THRESHOLD_RADII_ENHANCE_FACTOR])
{
bool collision = ppd->check_for_collision();
if (collision)
{
if (opt.print_dbe)
{
print_data_before_event(opt, ppd, n_event, prefix, ext, slog);
}
pp_disk_t::integral_t I;
ppd->calc_integral(true, I);
ppd->print_integral_data(path_integral_event, I);
ppd->handle_collision();
ppd->calc_integral(false, I);
ppd->print_integral_data(path_integral_event, I);
ppd->print_event_data(path_event, *slog);
ppd->clear_event_counter();
time_last_dump = clock();
print_dump(opt, ppd, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Dump file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
}
if (opt.param->output_interval <= fabs(ps))
{
ps = 0.0;
print_data(opt, ppd, integrals, n_print, path_integral, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Data file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
if (16 <= ppd->n_event[EVENT_COUNTER_NAME_LAST_CLEAR])
{
ppd->set_event_counter(EVENT_COUNTER_NAME_LAST_CLEAR, 0);
ppd->rebuild_vectors();
if (opt.verbose)
{
string msg = "Rebuild the vectors (removed " + redutilcu::number_to_string(ppd->n_bodies->n_removed) + " inactive bodies at t: " + redutilcu::number_to_string(ppd->t / constants::Gauss) + " [d])";
file::log_message(*slog, msg, opt.print_to_screen);
}
if (COMPUTING_DEVICE_GPU == ppd->get_computing_device())
{
if (opt.verbose)
{
printf("Searching for the optimal thread per block ");
}
uint32_t n_tpb = ppd->benchmark(opt.verbose);
ppd->set_n_tpb(n_tpb);
if (opt.verbose)
{
printf(" done\n");
string msg = "Number of thread per block was set to " + redutilcu::number_to_string(ppd->get_n_tpb());
file::log_message(*slog, msg, opt.print_to_screen);
}
}
}
if (opt.param->dump_dt < (clock() - time_last_dump) / (double)CLOCKS_PER_SEC)
{
time_last_dump = clock();
print_dump(opt, ppd, prefix, ext, slog);
if (opt.verbose)
{
string msg = "Dump file was created";
file::log_message(*slog, msg, opt.print_to_screen);
}
}
if (opt.param->info_dt < (clock() - time_last_info) / (double)CLOCKS_PER_SEC)
{
time_last_info = clock();
print_info(*sinfo, ppd, intgr, dt, &T_CPU, &dT_CPU);
}
} /* while */
print_info(*sinfo, ppd, intgr, dt, &T_CPU, &dT_CPU);
// To avoid duplicate save at the end of the simulation
if (0.0 < fabs(ps))
{
ps = 0.0;
print_data(opt, ppd, integrals, n_print, path_integral, prefix, ext, slog);
}
// Needed by nvprof.exe
if (COMPUTING_DEVICE_GPU == ppd->get_computing_device())
{
cudaDeviceReset();
}
}
void run_test()
{
test_n_objects_t();
}
//http://stackoverflow.com/questions/11666049/cuda-kernel-results-different-in-release-mode
//http://developer.download.nvidia.com/assets/cuda/files/NVIDIA-CUDA-Floating-Point.pdf
//-gpu -v -pts -ef -iDir C:\Work\red.cuda.Results\Dvorak\2D\NewRun_2\Run_cf4.0_2 -p parameters.txt -ic run_04.txt
//-cpu -pts -iDir C:\Work\Oktatas\2016\InfoCsill4\TestRun -p parameters.txt -i start_files.txt
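// Driver: parses the options, opens the log, selects and queries the CUDA
// device when running on the GPU, builds the pp_disk and integrator, and runs
// the simulation (the benchmark branch is currently commented out).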
int main(int argc, const char** argv, const char** env)
{
time_t start = time(NULL);
uint32_t dt_CPU_offset = 0;
ofstream* slog = 0x0;
try
{
options opt = options(argc, argv);
if (opt.test)
{
run_test();
return (EXIT_SUCCESS);
}
string prefix = create_prefix(opt);
string path_log = file::combine_path(opt.dir[DIRECTORY_NAME_OUT], prefix + opt.out_fn[OUTPUT_NAME_LOG]) + ".txt";
slog = new ofstream(path_log.c_str(), ios::out | ios::app);
if (!slog)
{
throw string("Cannot open " + path_log + ".");
}
#ifdef __GNUC__
string dummy = opt.param->get_data();
file::log_start(*slog, argc, argv, env, dummy, opt.print_to_screen);
#else
file::log_start(*slog, argc, argv, env, opt.param->get_data(), opt.print_to_screen);
#endif
if (COMPUTING_DEVICE_GPU == opt.comp_dev)
{
set_device(opt.id_dev, std::cout);
device_query(*slog, opt.id_dev, opt.print_to_screen);
}
pp_disk *ppd = opt.create_pp_disk();
// Temporary ugly solution to have the correct input for the colliding bodies.
#if 0
ppd->t *= constants::Gauss;
ppd->dt *= constants::Gauss;
tools::transform_velocity(ppd->n_bodies->get_n_total_active(), ppd->sim_data);
#endif
integrator *intgr = opt.create_integrator(ppd, ppd->dt);
// Number of seconds from previous runs
dt_CPU_offset = ppd->get_dt_CPU();
run_simulation(opt, ppd, intgr, dt_CPU_offset, slog);
//if (opt.benchmark)
//{
// run_benchmark(opt, ppd, intgr, *slog);
//}
//else
//{
// run_simulation(opt, ppd, intgr, dt_CPU_offset, slog);
//}
} /* try */
catch (const string& msg)
{
cerr << "Error: " << msg << endl;
if (0x0 != slog)
{
file::log_message(*slog, "Error: " + msg, false);
}
}
time_t total_time = dt_CPU_offset + time(NULL) - start;
cout << "Total time: " << total_time << " s" << endl;
if (0x0 != slog)
{
file::log_message(*slog, "Total time: " + tools::convert_time_t(total_time) + " s", false);
}
return (EXIT_SUCCESS);
}
|
e77f4320ff375b479aba835e7131b4974aeab882.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#pragma clang diagnostic push
#pragma ide diagnostic ignored "openmp-use-default-none"
//
// Created by Francisco José González García.
// Copyright (c) 2020 Universidad de Granada. All rights reserved.
//
#include "utilidades.h"
#include "algorithm"
#include "kernels_hip.cuh"
#include <sys/time.h>
using namespace std;
double utilidades::reduce_max_OMP(const double *v, int n) {
double maximo = -1e36;
#pragma omp parallel for reduction (max : maximo)
for (int i = 0; i < n; i++) {
maximo = max(maximo, abs(v[i]));
}
return maximo;
}
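// Two-stage maximum reduction: the templated reduction_max kernel (its body
// lives in the kernels header and is not shown here) writes one partial result
// per block into d_vo, and the host loop below finishes the reduction. The
// grid of (n/2 + block.x)/block.x blocks suggests each thread folds two input
// elements; BLOCK_SIZE must be one of the instantiated template sizes (32..1024).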
double utilidades::reduce_max_CUDA(const double *d_vi, int n, const int BLOCK_SIZE) {
dim3 block(BLOCK_SIZE);
dim3 grid = (n / 2 + block.x) / block.x;
auto smemSize = block.x * sizeof(double);
double *d_vo, *h_vo = new double[grid.x];
hipMalloc(&d_vo, sizeof(double) * grid.x);
switch (BLOCK_SIZE) {
case 1024:
hipLaunchKernelGGL(( reduction_max<double, 1024>), dim3(grid), dim3(block), smemSize , 0, d_vi, d_vo, n);
break;
case 512:
hipLaunchKernelGGL(( reduction_max<double, 512>), dim3(grid), dim3(block), smemSize , 0, d_vi, d_vo, n);
break;
case 256:
hipLaunchKernelGGL(( reduction_max<double, 256>), dim3(grid), dim3(block), smemSize , 0, d_vi, d_vo, n);
break;
case 128:
hipLaunchKernelGGL(( reduction_max<double, 128>), dim3(grid), dim3(block), smemSize , 0, d_vi, d_vo, n);
break;
case 64:
hipLaunchKernelGGL(( reduction_max<double, 64>), dim3(grid), dim3(block), smemSize , 0, d_vi, d_vo, n);
break;
case 32:
hipLaunchKernelGGL(( reduction_max<double, 32>), dim3(grid), dim3(block), smemSize , 0, d_vi, d_vo, n);
break;
}
hipMemcpy(h_vo, d_vo, sizeof(double) * grid.x, hipMemcpyDeviceToHost);
double maximo = 0.0;
for (int i = 0; i < grid.x; i++) maximo = max(maximo, h_vo[i]);
hipFree(d_vo);
    delete[] h_vo;   // h_vo was allocated with new[], so it must not be released with free()
return maximo;
}
double utilidades::reduce_max_sec(const double *v, int n) {
double maximo = 0;
for (int line = 0; line < n; ++line) {
maximo = (fabs(v[line]) > maximo ? fabs(v[line]) : maximo);
}
return maximo;
}
double utilidades::cpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}
#pragma clang diagnostic pop | e77f4320ff375b479aba835e7131b4974aeab882.cu | #pragma clang diagnostic push
#pragma ide diagnostic ignored "openmp-use-default-none"
//
// Created by Francisco José González García.
// Copyright (c) 2020 Universidad de Granada. All rights reserved.
//
#include "utilidades.h"
#include "algorithm"
#include "kernels.cuh"
#include <sys/time.h>
using namespace std;
double utilidades::reduce_max_OMP(const double *v, int n) {
double maximo = -1e36;
#pragma omp parallel for reduction (max : maximo)
for (int i = 0; i < n; i++) {
maximo = max(maximo, abs(v[i]));
}
return maximo;
}
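// Two-stage maximum reduction: the templated reduction_max kernel (its body
// lives in kernels.cuh and is not shown here) writes one partial result per
// block into d_vo, and the host loop below finishes the reduction. The grid of
// (n/2 + block.x)/block.x blocks suggests each thread folds two input
// elements; BLOCK_SIZE must be one of the instantiated template sizes (32..1024).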
double utilidades::reduce_max_CUDA(const double *d_vi, int n, const int BLOCK_SIZE) {
dim3 block(BLOCK_SIZE);
dim3 grid = (n / 2 + block.x) / block.x;
auto smemSize = block.x * sizeof(double);
double *d_vo, *h_vo = new double[grid.x];
cudaMalloc(&d_vo, sizeof(double) * grid.x);
switch (BLOCK_SIZE) {
case 1024:
reduction_max<double, 1024><<< grid, block, smemSize >>>(d_vi, d_vo, n);
break;
case 512:
reduction_max<double, 512><<< grid, block, smemSize >>>(d_vi, d_vo, n);
break;
case 256:
reduction_max<double, 256><<< grid, block, smemSize >>>(d_vi, d_vo, n);
break;
case 128:
reduction_max<double, 128><<< grid, block, smemSize >>>(d_vi, d_vo, n);
break;
case 64:
reduction_max<double, 64><<< grid, block, smemSize >>>(d_vi, d_vo, n);
break;
case 32:
reduction_max<double, 32><<< grid, block, smemSize >>>(d_vi, d_vo, n);
break;
}
cudaMemcpy(h_vo, d_vo, sizeof(double) * grid.x, cudaMemcpyDeviceToHost);
double maximo = 0.0;
for (int i = 0; i < grid.x; i++) maximo = max(maximo, h_vo[i]);
cudaFree(d_vo);
    delete[] h_vo;   // h_vo was allocated with new[], so it must not be released with free()
return maximo;
}
double utilidades::reduce_max_sec(const double *v, int n) {
double maximo = 0;
for (int line = 0; line < n; ++line) {
maximo = (fabs(v[line]) > maximo ? fabs(v[line]) : maximo);
}
return maximo;
}
double utilidades::cpuSecond() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double) tp.tv_usec * 1e-6);
}
#pragma clang diagnostic pop |
a75d79070b2ab25588e4f7fc0ce2f6cd3bf39ae7.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
texture<unsigned char, 2, hipReadModeElementType> imgtex;
texture<int, 1, hipReadModeElementType> Ltex;
texture<int, 1, hipReadModeElementType> Rtex;
namespace naive_prop {
const int BLOCK_X = 16;
const int BLOCK_Y = 16;
__global__ void PROP_prescan(int* R, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
R[index] = index;
}
}
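// One propagation pass: every pixel adopts the smallest label among its
// 4-connected neighbours that share its grey value; any change clears *d_stop
// so the host launches another pass.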
__global__ void PROP_scan(int* R, int w, int h, int* d_stop) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
unsigned char v = tex2D(imgtex, x, y);
int label = R[index];
int newlabel = w*h;
if (y>0 && tex2D(imgtex, x, y-1) == v) {
newlabel = min(newlabel, R[index-w]);
}
if (y<h-1 && tex2D(imgtex, x, y+1) == v) {
newlabel = min(newlabel, R[index+w]);
}
if (x>0 && tex2D(imgtex, x-1, y) == v) {
newlabel = min(newlabel, R[index-1]);
}
if (x<w-1 && tex2D(imgtex, x+1, y) == v) {
newlabel = min(newlabel, R[index+1]);
}
if (newlabel< label) {
R[index] = newlabel;
*d_stop = 0;
}
}
}
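// Host driver for connected-component labelling: copies the image into a
// texture-bound array, seeds each label with its own linear index
// (PROP_prescan), then re-runs PROP_scan until a pass leaves every label
// unchanged (d_stop keeps its 0xFF fill).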
void CCL(unsigned char* img, int w, int h, int* label) {
hipError_t err;
hipArray* imgarray;
hipChannelFormatDesc uchardesc =
hipCreateChannelDesc<unsigned char>();
hipMallocArray(&imgarray, &uchardesc, w, h);
int* R;
hipMalloc((void**)&R, w*h*sizeof(int));
err = hipGetLastError();
if (err != hipSuccess) {
printf("startERROR: %s\n", hipGetErrorString(err));
return;
}
hipChannelFormatDesc intdesc =
hipCreateChannelDesc<int>();
hipBindTextureToArray(imgtex, imgarray, uchardesc);
hipBindTexture(NULL, Rtex, R, intdesc, w*h*sizeof(int));
int stop;
int* d_stop;
hipMalloc((void**)&d_stop, sizeof(int));
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((w+BLOCK_X-1)/BLOCK_X,
(h+BLOCK_Y-1)/BLOCK_Y);
hipMemcpyToArray(imgarray, 0, 0, img,
w*h*sizeof(unsigned char),
hipMemcpyHostToDevice);
err = hipGetLastError();
if (err != hipSuccess) {
printf("midERROR: %s\n", hipGetErrorString(err));
return;
}
hipLaunchKernelGGL(( PROP_prescan) , dim3(grid), dim3(block), 0, 0,
R, w, h);
stop = 0;
while (stop == 0) {
hipMemset(d_stop, 0xFF, sizeof(int));
hipLaunchKernelGGL(( PROP_scan) , dim3(grid), dim3(block), 0, 0,
R, w, h, d_stop);
hipMemcpy(&stop, d_stop, sizeof(int),
hipMemcpyDeviceToHost);
}
hipMemcpy(label, R, w*h*sizeof(int),
hipMemcpyDeviceToHost);
hipFree(d_stop);
hipFree(R);
hipFreeArray(imgarray);
err = hipGetLastError();
if (err != hipSuccess) {
printf("endERROR: %s\n", hipGetErrorString(err));
return;
}
}
} | a75d79070b2ab25588e4f7fc0ce2f6cd3bf39ae7.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
texture<unsigned char, 2, cudaReadModeElementType> imgtex;
texture<int, 1, cudaReadModeElementType> Ltex;
texture<int, 1, cudaReadModeElementType> Rtex;
namespace naive_prop {
const int BLOCK_X = 16;
const int BLOCK_Y = 16;
__global__ void PROP_prescan(int* R, int w, int h) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
R[index] = index;
}
}
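// One propagation pass: every pixel adopts the smallest label among its
// 4-connected neighbours that share its grey value; any change clears *d_stop
// so the host launches another pass.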
__global__ void PROP_scan(int* R, int w, int h, int* d_stop) {
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int index = x+y*w;
if (x < w && y < h) {
unsigned char v = tex2D(imgtex, x, y);
int label = R[index];
int newlabel = w*h;
if (y>0 && tex2D(imgtex, x, y-1) == v) {
newlabel = min(newlabel, R[index-w]);
}
if (y<h-1 && tex2D(imgtex, x, y+1) == v) {
newlabel = min(newlabel, R[index+w]);
}
if (x>0 && tex2D(imgtex, x-1, y) == v) {
newlabel = min(newlabel, R[index-1]);
}
if (x<w-1 && tex2D(imgtex, x+1, y) == v) {
newlabel = min(newlabel, R[index+1]);
}
if (newlabel< label) {
R[index] = newlabel;
*d_stop = 0;
}
}
}
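// Host driver for connected-component labelling: copies the image into a
// texture-bound array, seeds each label with its own linear index
// (PROP_prescan), then re-runs PROP_scan until a pass leaves every label
// unchanged (d_stop keeps its 0xFF fill).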
void CCL(unsigned char* img, int w, int h, int* label) {
cudaError_t err;
cudaArray* imgarray;
cudaChannelFormatDesc uchardesc =
cudaCreateChannelDesc<unsigned char>();
cudaMallocArray(&imgarray, &uchardesc, w, h);
int* R;
cudaMalloc((void**)&R, w*h*sizeof(int));
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("startERROR: %s\n", cudaGetErrorString(err));
return;
}
cudaChannelFormatDesc intdesc =
cudaCreateChannelDesc<int>();
cudaBindTextureToArray(imgtex, imgarray, uchardesc);
cudaBindTexture(NULL, Rtex, R, intdesc, w*h*sizeof(int));
int stop;
int* d_stop;
cudaMalloc((void**)&d_stop, sizeof(int));
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((w+BLOCK_X-1)/BLOCK_X,
(h+BLOCK_Y-1)/BLOCK_Y);
cudaMemcpyToArray(imgarray, 0, 0, img,
w*h*sizeof(unsigned char),
cudaMemcpyHostToDevice);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("midERROR: %s\n", cudaGetErrorString(err));
return;
}
PROP_prescan <<<grid, block>>>
(R, w, h);
stop = 0;
while (stop == 0) {
cudaMemset(d_stop, 0xFF, sizeof(int));
PROP_scan <<<grid, block>>>
(R, w, h, d_stop);
cudaMemcpy(&stop, d_stop, sizeof(int),
cudaMemcpyDeviceToHost);
}
cudaMemcpy(label, R, w*h*sizeof(int),
cudaMemcpyDeviceToHost);
cudaFree(d_stop);
cudaFree(R);
cudaFreeArray(imgarray);
err = cudaGetLastError();
if (err != cudaSuccess) {
printf("endERROR: %s\n", cudaGetErrorString(err));
return;
}
}
} |
39c7d6db10251a9431667837d785c889d27c3c9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
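// Blocked matrix multiplication: the NN x NN operands are passed as four N x N
// quadrants (N = NN/2). Each thread computes one element of every quadrant of
// C = A*B, accumulating the first partial product directly in c_* (e.g.
// C_ul = A_ul*B_ul) and the second one (A_ur*B_ll) in the t_* scratch buffers,
// then adds the two after the __syncthreads() barrier.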
__global__ void matrixMultGPU(int *a_ll,int *a_lr,int *a_ul, int *a_ur,int *b_ll,int *b_lr,int *b_ul, int *b_ur, int *c_ll,int *c_lr,int *c_ul, int *c_ur, int *t_ll,int *t_lr,int *t_ul, int *t_ur,int N){
int k, sum_cur = 0,sum_cul = 0,sum_cll = 0,sum_clr = 0,sum_tur = 0,sum_tul = 0,sum_tll = 0,sum_tlr = 0;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int fil = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && fil < N)
{
for (k = 0; k < N; k++)
{
sum_cul += a_ul[fil * N + k] * b_ul[k * N + col];
sum_cur += a_ul[fil * N + k] * b_ur[k * N + col];
sum_cll += a_ll[fil * N + k] * b_ul[k * N + col];
sum_clr += a_ll[fil * N + k] * b_ur[k * N + col];
sum_tul += a_ur[fil * N + k] * b_ll[k * N + col];
sum_tur += a_ur[fil * N + k] * b_lr[k * N + col];
sum_tll += a_lr[fil * N + k] * b_ll[k * N + col];
sum_tlr += a_lr[fil * N + k] * b_lr[k * N + col];
}
c_ul[fil * N + col] = sum_cul;
c_ur[fil * N + col] = sum_cur;
c_ll[fil * N + col] = sum_cll;
c_lr[fil * N + col] = sum_clr;
t_ul[fil * N + col] = sum_tul;
t_ll[fil * N + col] = sum_tll;
t_lr[fil * N + col] = sum_tlr;
t_ur[fil * N + col] = sum_tur;
__syncthreads();
c_ul[fil * N + col]+=t_ul[fil * N + col];
c_ll[fil * N + col]+=t_ll[fil * N + col];
c_lr[fil * N + col]+=t_lr[fil * N + col];
c_ur[fil * N + col]+=t_ur[fil * N + col];
}
}
int main (void){
    //Create the system variables
int *a, *b, *c, N,NN;
int *a_ul,*a_ur,*a_ll,*a_lr,*b_ul,*b_ur,*b_ll,*b_lr,*c_ul,*c_ur,*c_ll,*c_lr;
int *da_ul,*da_ur,*da_ll,*da_lr,*db_ul,*db_ur,*db_ll,*db_lr,*dc_ul,*dc_ur,*dc_ll,*dc_lr,*dt_ul,*dt_ur,*dt_ll,*dt_lr;
int i,j;
int T,div=1, iteraciones=10,ind=0;
float elapsedTime;
printf("Ingrese el tamano deseado para las matrices:\n");
scanf("%d",&NN);
if(NN%2!=0 || NN<2)
{
printf("El tamao debe ser mayor a dos y par\n");
exit(1);
}
N=(int)NN/2;
    //Create the timing variables
hipEvent_t start,stop;
hipEventCreate(&start);
hipEventCreate(&stop);
printf("Creando espacio e inicializando matrices...\n");
    //Allocate and initialize memory
a=(int*)malloc(NN*NN*sizeof(int));
b=(int*)malloc(NN*NN*sizeof(int));
c=(int*)malloc(NN*NN*sizeof(int));
a_ll=(int*)malloc(N*N*sizeof(int));
a_lr=(int*)malloc(N*N*sizeof(int));
a_ul=(int*)malloc(N*N*sizeof(int));
a_ur=(int*)malloc(N*N*sizeof(int));
b_ll=(int*)malloc(N*N*sizeof(int));
b_lr=(int*)malloc(N*N*sizeof(int));
b_ul=(int*)malloc(N*N*sizeof(int));
b_ur=(int*)malloc(N*N*sizeof(int));
c_ll=(int*)malloc(N*N*sizeof(int));
c_lr=(int*)malloc(N*N*sizeof(int));
c_ul=(int*)malloc(N*N*sizeof(int));
c_ur=(int*)malloc(N*N*sizeof(int));
    //Initialize the matrices
for(i=0;i<NN;i++)
{
for(j=0;j<NN;j++)
{
a[i*NN+j]=i*j;
b[i*NN+j]=i*j;
}
}
    //Create the submatrices
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
a_ul[i*N+j]=a[i*NN+j];
a_ur[i*N+j]=a[i*NN+j+N];
a_ll[i*N+j]=a[(i+N)*NN+j];
a_lr[i*N+j]=a[(i+N)*NN+j+N];
b_ul[i*N+j]=b[i*NN+j];
b_ur[i*N+j]=b[i*NN+j+N];
b_ll[i*N+j]=b[(i+N)*NN+j];
b_lr[i*N+j]=b[(i+N)*NN+j+N];
}
}
{
if(hipMalloc(&da_ll,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&da_ul,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&da_ur,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&da_lr,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&db_ll,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&db_lr,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&db_ul,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&db_ur,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dc_ur,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dc_ul,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dc_ll,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dc_lr,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dt_ur,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dt_ul,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dt_ll,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(hipMalloc(&dt_lr,N*N*sizeof(int))!=hipSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
}
printf("Asignacion de memoria correcta\n");
{
        //Copy memory to the GPU
if(hipMemcpy(da_ll,a_ll,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(hipMemcpy(da_lr,a_lr,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(hipMemcpy(da_ul,a_ul,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(hipMemcpy(da_ur,a_ur,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(hipMemcpy(db_ll,b_ll,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(hipMemcpy(db_lr,b_lr,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(hipMemcpy(db_ul,b_ul,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(hipMemcpy(db_ur,b_ur,N*N*sizeof(int),hipMemcpyHostToDevice)!=hipSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
}
    //Compute the number of blocks and threads
while((float)N/(float)div>32)
{
div++;
}
float f_N=(float)N,f_div=(float)div;
T=(int)ceil(f_N/f_div);
dim3 ThreadsBloque(T,T);
dim3 Bloques(div, div);
printf("Se va a realizar la suma con %d bloques y %d hilos\n",div,T);
printf("Se va a realizar %d iteraciones de matrices %dx%d\n",iteraciones,NN,NN);
    //Kernel execution
hipEventRecord(start,0);
while(ind<iteraciones)
{
hipLaunchKernelGGL(( matrixMultGPU), dim3(Bloques), dim3(ThreadsBloque), 0, 0, da_ll,da_lr,da_ul,da_ur,db_ll,db_lr,db_ul,db_ur,dc_ll,dc_lr,dc_ul,dc_ur,dt_ll,dt_lr,dt_ul,dt_ur,N);
ind++;
}
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime,start,stop);
printf("El tiempo tomado para %d iteraciones fue de %3.5f ms\n",iteraciones,elapsedTime);
hipMemcpy(c_ll,dc_ll,N*N*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(c_lr,dc_lr,N*N*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(c_ur,dc_ur,N*N*sizeof(int),hipMemcpyDeviceToHost);
hipMemcpy(c_ul,dc_ul,N*N*sizeof(int),hipMemcpyDeviceToHost);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
c[i*NN+j]=c_ul[i*N+j];
c[i*NN+j+N]=c_ur[i*N+j];
c[(i+N)*NN+j]=c_ll[i*N+j];
c[(i+N)*NN+j+N]=c_lr[i*N+j];
}
}
printf("Por ejemplo %d deberia ser 0\n",c[3*NN]);
printf("Por ejemplo %d deberia ser 0\n",c[(int)NN/2]);
printf("Por ejemplo %d deberia ser %d\n",c[NN+1],(int)((2*pow(NN-1,3)+3*pow(NN-1,2)+NN-1)/6));
/*
for(i=0;i<NN;i++)
{
printf("\n");
for(j=0;j<NN;j++)
{
printf("\t%d",a[i*NN+j]);
}
//printf("\t");
for(j=0;j<NN;j++)
{
printf("\t%d",b[i*NN+j]);
}
//printf("\t");
for(j=0;j<NN;j++)
{
printf("\t%d",c[i*NN+j]);
}
}
*/
free(a);
free(a_ll);
free(a_lr);
free(a_ul);
free(a_ur);
free(b_ur);
free(b_ll);
free(b_lr);
free(b_ul);
free(c_ll);
free(c_lr);
free(c_ul);
free(c_ur);
free(b);
free(c);
hipFree(da_ll);
hipFree(da_lr);
hipFree(da_ul);
hipFree(da_ur);
hipFree(db_ll);
hipFree(db_lr);
hipFree(db_ul);
hipFree(db_ur);
hipFree(dc_ll);
hipFree(dc_lr);
hipFree(dc_ul);
hipFree(dc_ur);
hipFree(dt_ll);
hipFree(dt_lr);
hipFree(dt_ul);
hipFree(dt_ur);
return 0;
} | 39c7d6db10251a9431667837d785c889d27c3c9a.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
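// Blocked matrix multiplication: the NN x NN operands are passed as four N x N
// quadrants (N = NN/2). Each thread computes one element of every quadrant of
// C = A*B, accumulating the first partial product directly in c_* (e.g.
// C_ul = A_ul*B_ul) and the second one (A_ur*B_ll) in the t_* scratch buffers,
// then adds the two after the __syncthreads() barrier.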
__global__ void matrixMultGPU(int *a_ll,int *a_lr,int *a_ul, int *a_ur,int *b_ll,int *b_lr,int *b_ul, int *b_ur, int *c_ll,int *c_lr,int *c_ul, int *c_ur, int *t_ll,int *t_lr,int *t_ul, int *t_ur,int N){
int k, sum_cur = 0,sum_cul = 0,sum_cll = 0,sum_clr = 0,sum_tur = 0,sum_tul = 0,sum_tll = 0,sum_tlr = 0;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int fil = threadIdx.y + blockDim.y * blockIdx.y;
if (col < N && fil < N)
{
for (k = 0; k < N; k++)
{
sum_cul += a_ul[fil * N + k] * b_ul[k * N + col];
sum_cur += a_ul[fil * N + k] * b_ur[k * N + col];
sum_cll += a_ll[fil * N + k] * b_ul[k * N + col];
sum_clr += a_ll[fil * N + k] * b_ur[k * N + col];
sum_tul += a_ur[fil * N + k] * b_ll[k * N + col];
sum_tur += a_ur[fil * N + k] * b_lr[k * N + col];
sum_tll += a_lr[fil * N + k] * b_ll[k * N + col];
sum_tlr += a_lr[fil * N + k] * b_lr[k * N + col];
}
c_ul[fil * N + col] = sum_cul;
c_ur[fil * N + col] = sum_cur;
c_ll[fil * N + col] = sum_cll;
c_lr[fil * N + col] = sum_clr;
t_ul[fil * N + col] = sum_tul;
t_ll[fil * N + col] = sum_tll;
t_lr[fil * N + col] = sum_tlr;
t_ur[fil * N + col] = sum_tur;
__syncthreads();
c_ul[fil * N + col]+=t_ul[fil * N + col];
c_ll[fil * N + col]+=t_ll[fil * N + col];
c_lr[fil * N + col]+=t_lr[fil * N + col];
c_ur[fil * N + col]+=t_ur[fil * N + col];
}
}
int main (void){
    //Create the system variables
int *a, *b, *c, N,NN;
int *a_ul,*a_ur,*a_ll,*a_lr,*b_ul,*b_ur,*b_ll,*b_lr,*c_ul,*c_ur,*c_ll,*c_lr;
int *da_ul,*da_ur,*da_ll,*da_lr,*db_ul,*db_ur,*db_ll,*db_lr,*dc_ul,*dc_ur,*dc_ll,*dc_lr,*dt_ul,*dt_ur,*dt_ll,*dt_lr;
int i,j;
int T,div=1, iteraciones=10,ind=0;
float elapsedTime;
printf("Ingrese el tamano deseado para las matrices:\n");
scanf("%d",&NN);
if(NN%2!=0 || NN<2)
{
printf("El tamaño debe ser mayor a dos y par\n");
exit(1);
}
N=(int)NN/2;
    //Create the timing variables
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Creando espacio e inicializando matrices...\n");
    //Allocate and initialize memory
a=(int*)malloc(NN*NN*sizeof(int));
b=(int*)malloc(NN*NN*sizeof(int));
c=(int*)malloc(NN*NN*sizeof(int));
a_ll=(int*)malloc(N*N*sizeof(int));
a_lr=(int*)malloc(N*N*sizeof(int));
a_ul=(int*)malloc(N*N*sizeof(int));
a_ur=(int*)malloc(N*N*sizeof(int));
b_ll=(int*)malloc(N*N*sizeof(int));
b_lr=(int*)malloc(N*N*sizeof(int));
b_ul=(int*)malloc(N*N*sizeof(int));
b_ur=(int*)malloc(N*N*sizeof(int));
c_ll=(int*)malloc(N*N*sizeof(int));
c_lr=(int*)malloc(N*N*sizeof(int));
c_ul=(int*)malloc(N*N*sizeof(int));
c_ur=(int*)malloc(N*N*sizeof(int));
    //Initialize the matrices
for(i=0;i<NN;i++)
{
for(j=0;j<NN;j++)
{
a[i*NN+j]=i*j;
b[i*NN+j]=i*j;
}
}
    //Create the submatrices
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
a_ul[i*N+j]=a[i*NN+j];
a_ur[i*N+j]=a[i*NN+j+N];
a_ll[i*N+j]=a[(i+N)*NN+j];
a_lr[i*N+j]=a[(i+N)*NN+j+N];
b_ul[i*N+j]=b[i*NN+j];
b_ur[i*N+j]=b[i*NN+j+N];
b_ll[i*N+j]=b[(i+N)*NN+j];
b_lr[i*N+j]=b[(i+N)*NN+j+N];
}
}
{
if(cudaMalloc(&da_ll,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&da_ul,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&da_ur,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&da_lr,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&db_ll,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&db_lr,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&db_ul,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&db_ur,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dc_ur,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dc_ul,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dc_ll,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dc_lr,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dt_ur,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dt_ul,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dt_ll,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
if(cudaMalloc(&dt_lr,N*N*sizeof(int))!=cudaSuccess)
{
printf("########\nHubo un problema en la asignacion de memoria en la GPU\n########\n");
exit(1);
}
}
printf("Asignacion de memoria correcta\n");
{
        //Copy memory to the GPU
if(cudaMemcpy(da_ll,a_ll,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(da_lr,a_lr,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(da_ul,a_ul,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(da_ur,a_ur,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(db_ll,b_ll,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(db_lr,b_lr,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(db_ul,b_ul,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
if(cudaMemcpy(db_ur,b_ur,N*N*sizeof(int),cudaMemcpyHostToDevice)!=cudaSuccess)
{
printf("#########\nHubo un problema en la copia de memoria a la GPU\n#########\n");
exit(1);
}
}
    //Compute the number of blocks and threads
while((float)N/(float)div>32)
{
div++;
}
float f_N=(float)N,f_div=(float)div;
T=(int)ceil(f_N/f_div);
dim3 ThreadsBloque(T,T);
dim3 Bloques(div, div);
printf("Se va a realizar la suma con %d bloques y %d hilos\n",div,T);
printf("Se va a realizar %d iteraciones de matrices %dx%d\n",iteraciones,NN,NN);
    //Kernel execution
cudaEventRecord(start,0);
while(ind<iteraciones)
{
matrixMultGPU<<<Bloques, ThreadsBloque>>>(da_ll,da_lr,da_ul,da_ur,db_ll,db_lr,db_ul,db_ur,dc_ll,dc_lr,dc_ul,dc_ur,dt_ll,dt_lr,dt_ul,dt_ur,N);
ind++;
}
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("El tiempo tomado para %d iteraciones fue de %3.5f ms\n",iteraciones,elapsedTime);
cudaMemcpy(c_ll,dc_ll,N*N*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(c_lr,dc_lr,N*N*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(c_ur,dc_ur,N*N*sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(c_ul,dc_ul,N*N*sizeof(int),cudaMemcpyDeviceToHost);
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
c[i*NN+j]=c_ul[i*N+j];
c[i*NN+j+N]=c_ur[i*N+j];
c[(i+N)*NN+j]=c_ll[i*N+j];
c[(i+N)*NN+j+N]=c_lr[i*N+j];
}
}
printf("Por ejemplo %d deberia ser 0\n",c[3*NN]);
printf("Por ejemplo %d deberia ser 0\n",c[(int)NN/2]);
printf("Por ejemplo %d deberia ser %d\n",c[NN+1],(int)((2*pow(NN-1,3)+3*pow(NN-1,2)+NN-1)/6));
/*
for(i=0;i<NN;i++)
{
printf("\n");
for(j=0;j<NN;j++)
{
printf("\t%d",a[i*NN+j]);
}
//printf("\t");
for(j=0;j<NN;j++)
{
printf("\t%d",b[i*NN+j]);
}
//printf("\t");
for(j=0;j<NN;j++)
{
printf("\t%d",c[i*NN+j]);
}
}
*/
free(a);
free(a_ll);
free(a_lr);
free(a_ul);
free(a_ur);
free(b_ur);
free(b_ll);
free(b_lr);
free(b_ul);
free(c_ll);
free(c_lr);
free(c_ul);
free(c_ur);
free(b);
free(c);
cudaFree(da_ll);
cudaFree(da_lr);
cudaFree(da_ul);
cudaFree(da_ur);
cudaFree(db_ll);
cudaFree(db_lr);
cudaFree(db_ul);
cudaFree(db_ur);
cudaFree(dc_ll);
cudaFree(dc_lr);
cudaFree(dc_ul);
cudaFree(dc_ur);
cudaFree(dt_ll);
cudaFree(dt_lr);
cudaFree(dt_ul);
cudaFree(dt_ur);
return 0;
} |
labwork.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <include/labwork.h>
#include <hip/hip_runtime_api.h>
#include <omp.h>
#include <math.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2017, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
hipMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
printf("labwork 1 OpenMP ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
labwork.saveOutputImage("labwork5-cpu-out.jpg");
labwork.labwork5_GPU();
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
} }
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
    omp_set_num_threads(12); // number of OpenMP worker threads
#pragma omp parallel for schedule(dynamic, 3)
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
int getSPcores(hipDeviceProp_t devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int devCount;
hipGetDeviceCount(&devCount);
printf("Device number : %d\n",devCount);
for(int i = 0; i < devCount; ++i)
{
hipDeviceProp_t props;
hipGetDeviceProperties(&props, i);
printf("Major : %d\n",props.major);
printf("Total global memory : %zu\n",props.totalGlobalMem);
printf("Shared memory peu block : %zu\n",props.sharedMemPerBlock);
//printf("%s\n",props.totalConstMem);
printf("Registers per block : %d\n",props.regsPerBlock);
printf("Clock rate : %d\n",props.clockRate);
printf("Multiprocessor count : %d\n",props.multiProcessorCount);
printf("Memory Bus Width : %d\n",props.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*props.memoryClockRate*(props.memoryBusWidth/8)/1.0e6);// source :devblogs.nvidia
printf("Warp size : %d\n",props.warpSize);
printf("Max threads per blocks : %d\n",props.maxThreadsPerBlock);
printf("Max threads dimension :\n 1 : %7d\n2 : %7d\n3 : %7d\n",props.maxThreadsDim[0],props.maxThreadsDim[1],props.maxThreadsDim[2]);
printf("Max grid size :\n 1 : %7d\n2 : %7d\n3 : %7d\n",props.maxGridSize[0],props.maxGridSize[1],props.maxGridSize[2]);
printf("\n\n\n");
}
}
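// Labwork 3: RGB-to-greyscale with a 1D grid of 1024-thread blocks, one thread
// per pixel.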
__global__ void imageComputeLab3(uchar3 *devImage, uchar3 *devOutputImage){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
devOutputImage[tid].x = (char) (((int) devImage[tid].x + (int) devImage[tid].y +
(int) devImage[tid].z) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork3_GPU() {
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImage;
int pixelCount =inputImage->width *inputImage->height;
int blockSize = 1024;
int numBlock = pixelCount / blockSize;
hipMalloc(&devImage, pixelCount * 3);
hipMalloc(&devOutputImage, pixelCount * 3);
hostOutputImage = (uchar3 *) malloc(pixelCount * 3);
    hipMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);   // Memory transfer
hipLaunchKernelGGL(( imageComputeLab3), dim3(numBlock), dim3(blockSize), 0, 0, devImage,devOutputImage); // Kernel
hipMemcpy(hostOutputImage, devOutputImage,pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
outputImage = (char *)hostOutputImage;
hipFree(devImage);
hipFree(devOutputImage);
}
__global__ void imageComputeLab4(uchar3 *devImage, uchar3 *devOutputImage,int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width +x;
devOutputImage[tid].x = (char) ((int) (ceil((float) devImage[tid].x) + (int) ceil((float) devImage[tid].y) +
(int) ceil((float) devImage[tid].z)) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork4_GPU() {
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImage;
dim3 blockSize = dim3(32,32);
int pixelCount =inputImage->width *inputImage->height;
int width = inputImage->width;
dim3 gridSize = dim3(inputImage->width/blockSize.x,inputImage->height/blockSize.y);
hipMalloc(&devImage, pixelCount * 3);
hipMalloc(&devOutputImage, pixelCount * 3);
hostOutputImage = (uchar3 *) malloc(pixelCount * 3);
    hipMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),hipMemcpyHostToDevice);   // Memory transfer
hipLaunchKernelGGL(( imageComputeLab4), dim3(gridSize), dim3(blockSize), 0, 0, devImage,devOutputImage,width); // Kernel
hipMemcpy(hostOutputImage, devOutputImage,pixelCount * sizeof(uchar3),hipMemcpyDeviceToHost);
outputImage = (char *)hostOutputImage;
hipFree(devImage);
hipFree(devOutputImage);
}
// CPU implementation of Gaussian Blur
void Labwork::labwork5_CPU() {
int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0 };
int pixelCount = inputImage->width * inputImage->height;
outputImage = (char*) malloc(pixelCount * sizeof(char) * 3);
for (int row = 0; row < inputImage->height; row++) {
for (int col = 0; col < inputImage->width; col++) {
int sum = 0;
int c = 0;
for (int y = -3; y <= 3; y++) {
for (int x = -3; x <= 3; x++) {
int i = col + x;
int j = row + y;
if (i < 0) continue;
if (i >= inputImage->width) continue;
if (j < 0) continue;
if (j >= inputImage->height) continue;
int tid = j * inputImage->width + i;
unsigned char gray = (inputImage->buffer[tid * 3] + inputImage->buffer[tid * 3 + 1] + inputImage->buffer[tid * 3 + 2])/3;
int coefficient = kernel[(y+3) * 7 + x + 3];
sum = sum + gray * coefficient;
c += coefficient;
}
}
sum /= c;
int posOut = row * inputImage->width + col;
outputImage[posOut * 3] = outputImage[posOut * 3 + 1] = outputImage[posOut * 3 + 2] = sum;
}
}
}
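// Labwork 5 (GPU): 7x7 Gaussian blur on the greyscale image; the kernel
// weights below sum to 1003, which is the normalisation factor applied to the
// output pixel.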
__global__ void filterLab5(uchar3 *filterDevOutputImage, uchar3 *devOutputImage,int width, int height){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width +x;
int filter[] = { 0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0 };
float outputPixel = 0;
for (int i = -3; i <= 3; i++)
{
for (int j = -3; j <= 3; j++)
{
if ( (x + i) < 0) // left side
continue;
if ( (x + i) >= width ) // right side
continue;
if ((y + j) < 0) // top side
continue;
if ((y + j) >= height ) // bottom side
continue;
int localtid = (x+i)+ (y+j)*width;
unsigned char grey = (devOutputImage[localtid].x + devOutputImage[localtid].y + devOutputImage[localtid].z)/3;
int coefficient = filter[(j+3) * 7 + i + 3];
outputPixel += coefficient * grey;
}
}
filterDevOutputImage[tid].x = outputPixel/1003;
filterDevOutputImage[tid].y = filterDevOutputImage[tid].z = filterDevOutputImage[tid].x;
}
__global__ void greyScalingLab5(uchar3 *devImage, uchar3 *devOutputImage, int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
devOutputImage[tid].x = (char) ((int) (ceil((float) devImage[tid].x) + (int) ceil((float) devImage[tid].y) +
(int) ceil((float) devImage[tid].z)) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork5_GPU() {
/*
float GaussianFilter[7][7] ={
{1,4,7,10,7,4,1},
{4,12,26,33,26,12,4},
{7,26,55,71,55,26,7},
{10,33,71,91,71,33,10},
{7,26,55,71,55,26,7},
{4,12,26,33,26,12,4},
{1,4,7,10,7,4,1},
}; // Sum equal to 1115
*/
/* int filter[] = { 0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0 };
*/
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImageFilter;
uchar3 *filterDevOutputImage;
    dim3 blockSize = dim3(32,32); // 256x256 would request 65536 threads per block, far above the 1024-thread limit, so no kernel would launch
int pixelCount =inputImage->width *inputImage->height;
int width = inputImage->width;
int height = inputImage->height;
dim3 gridSize = dim3(inputImage->width/blockSize.x,inputImage->height/blockSize.y);
hostOutputImageFilter = (uchar3 *) malloc(pixelCount*3);
hipMalloc(&devImage, pixelCount * 3);
hipMalloc(&devOutputImage, pixelCount*3);
hipMalloc(&filterDevOutputImage, pixelCount*3);
    hipMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),hipMemcpyHostToDevice); // Memory transfer: host to device
hipLaunchKernelGGL(( greyScalingLab5), dim3(gridSize), dim3(blockSize), 0, 0, devImage,devOutputImage,width); // Kernel greyscaling
hipLaunchKernelGGL(( filterLab5), dim3(gridSize), dim3(blockSize), 0, 0, filterDevOutputImage,devOutputImage,width, height); // Kernel
hipMemcpy(hostOutputImageFilter, filterDevOutputImage, pixelCount*3, hipMemcpyDeviceToHost);
outputImage = (char *)hostOutputImageFilter;
hipFree(devImage);
hipFree(devOutputImage);
hipFree(filterDevOutputImage);
}
__global__ void binarisationLab6(uchar3 *binarisationDevOutputImage, uchar3 *devOutputImage,int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
    int threshold = 256/2;
    unsigned char grey = devOutputImage[tid].x;
    if(grey<threshold){
binarisationDevOutputImage[tid].x = 0;
}
else{
binarisationDevOutputImage[tid].x = 255;
}
binarisationDevOutputImage[tid].y = binarisationDevOutputImage[tid].z = binarisationDevOutputImage[tid].x;
}
__global__ void brightnessLab6(uchar3 *binarisationDevOutputImage, uchar3 *devOutputImage,int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
    unsigned char grey = devOutputImage[tid].x;
    // (int) 0.2 truncates to 0, so the original expression added nothing; scale first, then clamp to 255
    binarisationDevOutputImage[tid].x = min(grey + (int) (0.2f * grey), 255);
binarisationDevOutputImage[tid].y = binarisationDevOutputImage[tid].z = binarisationDevOutputImage[tid].x;
}
__global__ void greyScalingLab6(uchar3 *devImage, uchar3 *devOutputImage, int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
    devOutputImage[tid].x = (char) (((int) devImage[tid].x + (int) devImage[tid].y +
                            (int) devImage[tid].z) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork6_GPU() {
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImageFilter;
uchar3 *binarisationDevOutputImage;
dim3 blockSize = dim3(32,32);
int pixelCount =inputImage->width *inputImage->height;
int width = inputImage->width;
dim3 gridSize = dim3(inputImage->width/blockSize.x,inputImage->height/blockSize.y);
hostOutputImageFilter = (uchar3 *) malloc(pixelCount*3);
hipMalloc(&devImage, pixelCount * 3);
hipMalloc(&devOutputImage, pixelCount*3);
hipMalloc(&binarisationDevOutputImage, pixelCount*3);
    hipMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),hipMemcpyHostToDevice); // Memory transfer: host to device
hipLaunchKernelGGL(( greyScalingLab6), dim3(gridSize), dim3(blockSize), 0, 0, devImage,devOutputImage,width); // Kernel greyscaling
hipLaunchKernelGGL(( binarisationLab6), dim3(gridSize), dim3(blockSize), 0, 0, binarisationDevOutputImage,devOutputImage,width); // Kernel
//brightnessLab6<<<gridSize, blockSize>>>(binarisationDevOutputImage,devOutputImage,width); // Kernel
hipMemcpy(hostOutputImageFilter, binarisationDevOutputImage, pixelCount*3, hipMemcpyDeviceToHost);
outputImage = (char *)hostOutputImageFilter;
hipFree(devImage);
hipFree(devOutputImage);
hipFree(binarisationDevOutputImage);
}
void Labwork::labwork7_GPU() {
}
void Labwork::labwork8_GPU() {
}
void Labwork::labwork9_GPU() {
}
void Labwork::labwork10_GPU() {
}
| labwork.cu |
#include <stdio.h>
#include <include/labwork.h>
#include <cuda_runtime_api.h>
#include <omp.h>
#include <math.h>
#define ACTIVE_THREADS 4
int main(int argc, char **argv) {
printf("USTH ICT Master 2017, Advanced Programming for HPC.\n");
if (argc < 2) {
printf("Usage: labwork <lwNum> <inputImage>\n");
printf(" lwNum labwork number\n");
printf(" inputImage the input file name, in JPEG format\n");
return 0;
}
int lwNum = atoi(argv[1]);
std::string inputFilename;
// pre-initialize CUDA to avoid incorrect profiling
printf("Warming up...\n");
char *temp;
cudaMalloc(&temp, 1024);
Labwork labwork;
if (lwNum != 2 ) {
inputFilename = std::string(argv[2]);
labwork.loadInputImage(inputFilename);
}
printf("Starting labwork %d\n", lwNum);
Timer timer;
timer.start();
switch (lwNum) {
case 1:
labwork.labwork1_CPU();
labwork.saveOutputImage("labwork2-cpu-out.jpg");
printf("labwork 1 CPU ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
timer.start();
labwork.labwork1_OpenMP();
labwork.saveOutputImage("labwork2-openmp-out.jpg");
printf("labwork 1 OpenMP ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
break;
case 2:
labwork.labwork2_GPU();
break;
case 3:
labwork.labwork3_GPU();
labwork.saveOutputImage("labwork3-gpu-out.jpg");
break;
case 4:
labwork.labwork4_GPU();
labwork.saveOutputImage("labwork4-gpu-out.jpg");
break;
case 5:
labwork.labwork5_CPU();
labwork.saveOutputImage("labwork5-cpu-out.jpg");
labwork.labwork5_GPU();
labwork.saveOutputImage("labwork5-gpu-out.jpg");
break;
case 6:
labwork.labwork6_GPU();
labwork.saveOutputImage("labwork6-gpu-out.jpg");
break;
case 7:
labwork.labwork7_GPU();
labwork.saveOutputImage("labwork7-gpu-out.jpg");
break;
case 8:
labwork.labwork8_GPU();
labwork.saveOutputImage("labwork8-gpu-out.jpg");
break;
case 9:
labwork.labwork9_GPU();
labwork.saveOutputImage("labwork9-gpu-out.jpg");
break;
case 10:
labwork.labwork10_GPU();
labwork.saveOutputImage("labwork10-gpu-out.jpg");
break;
}
printf("labwork %d ellapsed %.1fms\n", lwNum, timer.getElapsedTimeInMilliSec());
}
void Labwork::loadInputImage(std::string inputFileName) {
inputImage = jpegLoader.load(inputFileName);
}
void Labwork::saveOutputImage(std::string outputFileName) {
jpegLoader.save(outputFileName, outputImage, inputImage->width, inputImage->height, 90);
}
void Labwork::labwork1_CPU() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
} }
}
void Labwork::labwork1_OpenMP() {
int pixelCount = inputImage->width * inputImage->height;
outputImage = static_cast<char *>(malloc(pixelCount * 3));
    omp_set_num_threads(12); // number of OpenMP threads
#pragma omp parallel for schedule(dynamic, 3)
for (int j = 0; j < 100; j++) { // let's do it 100 times, otherwise it's too fast!
for (int i = 0; i < pixelCount; i++) {
outputImage[i * 3] = (char) (((int) inputImage->buffer[i * 3] + (int) inputImage->buffer[i * 3 + 1] +
(int) inputImage->buffer[i * 3 + 2]) / 3);
outputImage[i * 3 + 1] = outputImage[i * 3];
outputImage[i * 3 + 2] = outputImage[i * 3];
}
}
}
int getSPcores(cudaDeviceProp devProp) {
int cores = 0;
int mp = devProp.multiProcessorCount;
switch (devProp.major) {
case 2: // Fermi
if (devProp.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (devProp.minor == 1) cores = mp * 128;
else if (devProp.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
return cores;
}
void Labwork::labwork2_GPU() {
int devCount;
cudaGetDeviceCount(&devCount);
printf("Device number : %d\n",devCount);
for(int i = 0; i < devCount; ++i)
{
cudaDeviceProp props;
cudaGetDeviceProperties(&props, i);
printf("Major : %d\n",props.major);
printf("Total global memory : %zu\n",props.totalGlobalMem);
printf("Shared memory peu block : %zu\n",props.sharedMemPerBlock);
//printf("%s\n",props.totalConstMem);
printf("Registers per block : %d\n",props.regsPerBlock);
printf("Clock rate : %d\n",props.clockRate);
printf("Multiprocessor count : %d\n",props.multiProcessorCount);
printf("Memory Bus Width : %d\n",props.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*props.memoryClockRate*(props.memoryBusWidth/8)/1.0e6);// source :devblogs.nvidia
printf("Warp size : %d\n",props.warpSize);
printf("Max threads per blocks : %d\n",props.maxThreadsPerBlock);
printf("Max threads dimension :\n 1 : %7d\n2 : %7d\n3 : %7d\n",props.maxThreadsDim[0],props.maxThreadsDim[1],props.maxThreadsDim[2]);
printf("Max grid size :\n 1 : %7d\n2 : %7d\n3 : %7d\n",props.maxGridSize[0],props.maxGridSize[1],props.maxGridSize[2]);
printf("\n\n\n");
}
}
__global__ void imageComputeLab3(uchar3 *devImage, uchar3 *devOutputImage){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
devOutputImage[tid].x = (char) (((int) devImage[tid].x + (int) devImage[tid].y +
(int) devImage[tid].z) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork3_GPU() {
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImage;
int pixelCount =inputImage->width *inputImage->height;
int blockSize = 1024;
int numBlock = pixelCount / blockSize;
cudaMalloc(&devImage, pixelCount * 3);
cudaMalloc(&devOutputImage, pixelCount * 3);
hostOutputImage = (uchar3 *) malloc(pixelCount * 3);
    cudaMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice); // Memory transfer: host to device
imageComputeLab3<<<numBlock, blockSize>>>(devImage,devOutputImage); // Kernel
cudaMemcpy(hostOutputImage, devOutputImage,pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
outputImage = (char *)hostOutputImage;
cudaFree(devImage);
cudaFree(devOutputImage);
}
__global__ void imageComputeLab4(uchar3 *devImage, uchar3 *devOutputImage,int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width +x;
devOutputImage[tid].x = (char) ((int) (ceil((float) devImage[tid].x) + (int) ceil((float) devImage[tid].y) +
(int) ceil((float) devImage[tid].z)) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork4_GPU() {
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImage;
dim3 blockSize = dim3(32,32);
int pixelCount =inputImage->width *inputImage->height;
int width = inputImage->width;
dim3 gridSize = dim3(inputImage->width/blockSize.x,inputImage->height/blockSize.y);
cudaMalloc(&devImage, pixelCount * 3);
cudaMalloc(&devOutputImage, pixelCount * 3);
hostOutputImage = (uchar3 *) malloc(pixelCount * 3);
    cudaMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice); // Memory transfer: host to device
imageComputeLab4<<<gridSize, blockSize>>>(devImage,devOutputImage,width); // Kernel
cudaMemcpy(hostOutputImage, devOutputImage,pixelCount * sizeof(uchar3),cudaMemcpyDeviceToHost);
outputImage = (char *)hostOutputImage;
cudaFree(devImage);
cudaFree(devOutputImage);
}
// CPU implementation of Gaussian Blur
void Labwork::labwork5_CPU() {
int kernel[] = { 0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0 };
int pixelCount = inputImage->width * inputImage->height;
outputImage = (char*) malloc(pixelCount * sizeof(char) * 3);
for (int row = 0; row < inputImage->height; row++) {
for (int col = 0; col < inputImage->width; col++) {
int sum = 0;
int c = 0;
for (int y = -3; y <= 3; y++) {
for (int x = -3; x <= 3; x++) {
int i = col + x;
int j = row + y;
if (i < 0) continue;
if (i >= inputImage->width) continue;
if (j < 0) continue;
if (j >= inputImage->height) continue;
int tid = j * inputImage->width + i;
unsigned char gray = (inputImage->buffer[tid * 3] + inputImage->buffer[tid * 3 + 1] + inputImage->buffer[tid * 3 + 2])/3;
int coefficient = kernel[(y+3) * 7 + x + 3];
sum = sum + gray * coefficient;
c += coefficient;
}
}
sum /= c;
int posOut = row * inputImage->width + col;
outputImage[posOut * 3] = outputImage[posOut * 3 + 1] = outputImage[posOut * 3 + 2] = sum;
}
}
}
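// 7x7 Gaussian blur applied to the greyscaled image produced by the greyscaling kernel;
// the filter weights below sum to 1003, which is why the accumulated value is divided by 1003.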
__global__ void filterLab5(uchar3 *filterDevOutputImage, uchar3 *devOutputImage,int width, int height){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width +x;
int filter[] = { 0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0 };
float outputPixel = 0;
for (int i = -3; i <= 3; i++)
{
for (int j = -3; j <= 3; j++)
{
if ( (x + i) < 0) // left side
continue;
if ( (x + i) >= width ) // right side
continue;
if ((y + j) < 0) // top side
continue;
if ((y + j) >= height ) // bottom side
continue;
int localtid = (x+i)+ (y+j)*width;
unsigned char grey = (devOutputImage[localtid].x + devOutputImage[localtid].y + devOutputImage[localtid].z)/3;
int coefficient = filter[(j+3) * 7 + i + 3];
outputPixel += coefficient * grey;
}
}
filterDevOutputImage[tid].x = outputPixel/1003;
filterDevOutputImage[tid].y = filterDevOutputImage[tid].z = filterDevOutputImage[tid].x;
}
__global__ void greyScalingLab5(uchar3 *devImage, uchar3 *devOutputImage, int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
    devOutputImage[tid].x = (char) (((int) devImage[tid].x + (int) devImage[tid].y +
                            (int) devImage[tid].z) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork5_GPU() {
/*
float GaussianFilter[7][7] ={
{1,4,7,10,7,4,1},
{4,12,26,33,26,12,4},
{7,26,55,71,55,26,7},
{10,33,71,91,71,33,10},
{7,26,55,71,55,26,7},
{4,12,26,33,26,12,4},
{1,4,7,10,7,4,1},
}; // Sum equal to 1115
*/
/* int filter[] = { 0, 0, 1, 2, 1, 0, 0,
0, 3, 13, 22, 13, 3, 0,
1, 13, 59, 97, 59, 13, 1,
2, 22, 97, 159, 97, 22, 2,
1, 13, 59, 97, 59, 13, 1,
0, 3, 13, 22, 13, 3, 0,
0, 0, 1, 2, 1, 0, 0 };
*/
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImageFilter;
uchar3 *filterDevOutputImage;
    dim3 blockSize = dim3(32,32); // 256x256 would request 65536 threads per block, far above the 1024-thread limit, so no kernel would launch
int pixelCount =inputImage->width *inputImage->height;
int width = inputImage->width;
int height = inputImage->height;
dim3 gridSize = dim3(inputImage->width/blockSize.x,inputImage->height/blockSize.y);
hostOutputImageFilter = (uchar3 *) malloc(pixelCount*3);
cudaMalloc(&devImage, pixelCount * 3);
cudaMalloc(&devOutputImage, pixelCount*3);
cudaMalloc(&filterDevOutputImage, pixelCount*3);
    cudaMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice); // Memory transfer: host to device
greyScalingLab5<<<gridSize, blockSize>>>(devImage,devOutputImage,width); // Kernel greyscaling
filterLab5<<<gridSize, blockSize>>>(filterDevOutputImage,devOutputImage,width, height); // Kernel
cudaMemcpy(hostOutputImageFilter, filterDevOutputImage, pixelCount*3, cudaMemcpyDeviceToHost);
outputImage = (char *)hostOutputImageFilter;
cudaFree(devImage);
cudaFree(devOutputImage);
cudaFree(filterDevOutputImage);
}
__global__ void binarisationLab6(uchar3 *binarisationDevOutputImage, uchar3 *devOutputImage,int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
    int threshold = 256/2;
    unsigned char grey = devOutputImage[tid].x;
    if(grey<threshold){
binarisationDevOutputImage[tid].x = 0;
}
else{
binarisationDevOutputImage[tid].x = 255;
}
binarisationDevOutputImage[tid].y = binarisationDevOutputImage[tid].z = binarisationDevOutputImage[tid].x;
}
__global__ void brightnessLab6(uchar3 *binarisationDevOutputImage, uchar3 *devOutputImage,int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
    unsigned char grey = devOutputImage[tid].x;
    // (int) 0.2 truncates to 0, so the original expression added nothing; scale first, then clamp to 255
    binarisationDevOutputImage[tid].x = min(grey + (int) (0.2f * grey), 255);
binarisationDevOutputImage[tid].y = binarisationDevOutputImage[tid].z = binarisationDevOutputImage[tid].x;
}
__global__ void greyScalingLab6(uchar3 *devImage, uchar3 *devOutputImage, int width){
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int tid = y * width + x;
    devOutputImage[tid].x = (char) (((int) devImage[tid].x + (int) devImage[tid].y +
                            (int) devImage[tid].z) / 3);
devOutputImage[tid].y = devOutputImage[tid].x;
devOutputImage[tid].z = devOutputImage[tid].x;
}
void Labwork::labwork6_GPU() {
uchar3 *devImage;
uchar3 *devOutputImage;
uchar3 *hostOutputImageFilter;
uchar3 *binarisationDevOutputImage;
dim3 blockSize = dim3(32,32);
int pixelCount =inputImage->width *inputImage->height;
int width = inputImage->width;
dim3 gridSize = dim3(inputImage->width/blockSize.x,inputImage->height/blockSize.y);
hostOutputImageFilter = (uchar3 *) malloc(pixelCount*3);
cudaMalloc(&devImage, pixelCount * 3);
cudaMalloc(&devOutputImage, pixelCount*3);
cudaMalloc(&binarisationDevOutputImage, pixelCount*3);
    cudaMemcpy(devImage, inputImage->buffer,pixelCount * sizeof(uchar3),cudaMemcpyHostToDevice); // Memory transfer: host to device
greyScalingLab6<<<gridSize, blockSize>>>(devImage,devOutputImage,width); // Kernel greyscaling
binarisationLab6<<<gridSize, blockSize>>>(binarisationDevOutputImage,devOutputImage,width); // Kernel
//brightnessLab6<<<gridSize, blockSize>>>(binarisationDevOutputImage,devOutputImage,width); // Kernel
cudaMemcpy(hostOutputImageFilter, binarisationDevOutputImage, pixelCount*3, cudaMemcpyDeviceToHost);
outputImage = (char *)hostOutputImageFilter;
cudaFree(devImage);
cudaFree(devOutputImage);
cudaFree(binarisationDevOutputImage);
}
void Labwork::labwork7_GPU() {
}
void Labwork::labwork8_GPU() {
}
void Labwork::labwork9_GPU() {
}
void Labwork::labwork10_GPU() {
}
|
3d5e8b27f411ac1e71e22179bede574be12149f1.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <fftw3.h>
#include <hipfft.h>
#include <sys/time.h>
#include <assert.h>
using namespace std;
#define k_rangeres 30
#define k_calib 1941.05
#define RESULT_SIZE 2
#define DEBUG
inline
hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
float *generate_hamming_coef(int m, int n) {
// Calculate normalization power on range cell
float p_range=0;
for(int i=0; i < m; i++) {
p_range=p_range+pow(0.53836-0.46164*cos(2*M_PI*(i)/(m-1)), 2.0);
}
p_range=p_range/m;
// Calculate normalization power on Doppler cell
float p_doppler=0;
for(int j=0; j < n; j++) {
p_doppler=p_doppler+pow(0.53836-0.46164*cos(2*M_PI*(j)/(n-1)), 2.0);
}
p_doppler=p_doppler/n;
// Constant since FFT is not normalized and the power is computed w.r.t. 50ohm
const float K_wind = -1/(16383.5*m*n*sqrt(50));
const float c = K_wind/sqrt(p_range*p_doppler);
// Generate elements
float *_hamming_coef= new float[m*n];
for(int i=0; i < m; i++) {
for(int j=0; j < n; j++) {
_hamming_coef[i*n+j] = (0.53836-0.46164*cos(2*M_PI*(i)/(m-1))) * (0.53836-0.46164*cos(2*M_PI*(j)/(n-1))) * c;
}
}
return _hamming_coef;
}
float *generate_ma_coef(int n){
float *_ma_coef = new float[n];
float _sum = 0.0;
for(int i=0; i < n; i++) {
_ma_coef[i]=exp(-(pow(i-((n-1)/2), 2.0))/2);
_sum += _ma_coef[i];
}
for(int i=0; i < n; i++){
_ma_coef[i] = _ma_coef[i]/_sum;
}
return _ma_coef;
}
__global__ void __apply_hamming(cuFloatComplex *a, float *b) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx] = make_cuFloatComplex(b[idx]*cuCrealf(a[idx]), b[idx]*cuCimagf(a[idx]));
}
__global__ void __apply_ma(cuFloatComplex *inout, cuFloatComplex *macoef) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = cuCmulf(inout[i*n+j], macoef[j]);
}
__global__ void __conjugate(cuFloatComplex *a) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx].y *= -1;
}
__global__ void __shift(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = threadIdx.x;
cuFloatComplex temp = inout[i*n+j];
inout[i*n+j] = inout[i*n+(j+n/2)];
inout[i*n+(j+n/2)] = temp;
}
__global__ void __clip(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = n-threadIdx.x-1;
inout[i*n+j] = make_cuFloatComplex(0, 0);
}
__global__ void __abssqr(cuFloatComplex *inout, int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
float real, imag;
real = cuCrealf(inout[idx]);
imag = cuCimagf(inout[idx]);
inout[idx] = make_cuFloatComplex(real*real + imag*imag, 0);
}
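// Tree reduction over one sweep row per block: copy the n complex samples into `out`,
// then halve the active stride each step so the row sum ends up in out[i*n].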
__global__ void __sum(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
out[i*n+j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
out[i*n+j] = cuCaddf(out[i*n+j], out[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_v2(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
#pragma unroll
for (unsigned int d=0; d<2; d++) {
out[i*n+j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y);
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
#pragma unroll
for (unsigned int d=0; d<2; d++) {
out[i*n+j+n*d] = cuCaddf(out[i*n+j+n*d], out[i*n+j+n*d+s]);
}
}
__syncthreads();
}
}
__global__ void __sum_v3(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
out[i*n] = sdata[j];
}
}
__global__ void __sum_inplace(cuFloatComplex *g_idata) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
// __syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
// g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0);
g_idata[i*n+j] = cuCaddf(g_idata[i*n+j], g_idata[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_inplace_v2(cuFloatComplex *g_idata) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
// __syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
// g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0);
#pragma unroll
for (unsigned int d=0; d<2; d++) {
g_idata[i*n+j+n*d] = cuCaddf(g_idata[i*n+j+n*d], g_idata[i*n+j+n*d+s]);
}
}
__syncthreads();
}
}
__global__ void __sum_inplace_v3(cuFloatComplex *in) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
in[i*n] = sdata[j];
}
}
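// Subtracts the per-row mean (DC removal) and conjugates: inout = conj(inout - mean),
// where the row sum was produced beforehand by one of the __sum kernels.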
__global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
float avgx = sum[i*n].x/n;
float avgy = sum[i*n].y/n;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x-avgx, (inout[i*n+j].y-avgy)*-1);
}
__global__ void __scale_real(cuFloatComplex *inout) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x/n, 0);
}
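// Converts the accumulated HH/VV power of each range bin into reflectivity zdb (dB)
// and differential reflectivity zdr; the hv channel is passed in but not used here.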
__global__ void __calcresult(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = blockIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
__global__ void __calcresult_v2(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = threadIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
void tick(timeval *begin) {
gettimeofday(begin, NULL);
}
void tock(timeval *begin, timeval *end, string caption) {
unsigned long long bb, e;
gettimeofday(end, NULL);
bb = (unsigned long long)(begin->tv_sec) * 1000000 + (unsigned long long)(begin->tv_usec) / 1;
e = (unsigned long long)(end->tv_sec) * 1000000 + (unsigned long long)(end->tv_usec) / 1;
cout << caption << ": " << e-bb << endl;
}
int main(int argc, char **argv) {
ios_base::sync_with_stdio(false);
struct timeval tb, te;
tick(&tb);
cuFloatComplex *iqhh, *iqvv, *iqhv;
float *result;
int sector_id;
const int m = 1024; // cell
const int n = 512; // sweep
const int ma_count = 7;
iqhh = new cuFloatComplex[m*n];
iqvv = new cuFloatComplex[m*n];
iqhv = new cuFloatComplex[m*n];
result = new float[(m/2)*RESULT_SIZE];
float a, b;
// Generate Hamming coefficients
const float *hamming_coef = generate_hamming_coef(m, n);
// Generate MA coefficients
float *ma_coef = generate_ma_coef(ma_count);
fftwf_complex *_fft_ma = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex) * n);
fftwf_plan fft_ma_plan = fftwf_plan_dft_1d(n, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE);
for (int j=0; j<ma_count; j++) {
_fft_ma[j][0] = ma_coef[j];
_fft_ma[j][1] = 0;
}
for (int j=ma_count; j<n; j++) {
_fft_ma[j][0] = 0;
_fft_ma[j][1] = 0;
}
fftwf_execute(fft_ma_plan);
fftwf_destroy_plan(fft_ma_plan);
cuFloatComplex *fft_ma;
fft_ma = new cuFloatComplex[n];
for (int j=0; j<n; j++) {
fft_ma[j] = make_cuFloatComplex(_fft_ma[j][0], _fft_ma[j][1]);
}
fftwf_free(_fft_ma);
// Device buffers
/*__constant__*/ float *d_hamming;
/*__constant__*/ cuFloatComplex *d_ma;
cuFloatComplex *d_iqhh, *d_iqvv, *d_iqhv;
cuFloatComplex *d_sum;
float *d_result;
//float *d_powhh, *d_powvv;
hipMalloc(&d_hamming, m*n*sizeof(float));
hipMalloc(&d_ma, n*sizeof(cuFloatComplex));
hipMalloc(&d_iqhh, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_iqvv, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_iqhv, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_sum, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_result, (m/2)*RESULT_SIZE*sizeof(float));
hipMemcpy(d_hamming, hamming_coef, m*n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ma, fft_ma, n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
// CUFFT initialization
hipfftHandle fft_range_handle;
hipfftHandle fft_doppler_handle;
hipfftHandle fft_pdop_handle;
int rank = 1; // --- 1D FFTs
int nn[] = { m }; // --- Size of the Fourier transform
int istride = n, ostride = n; // --- Distance between two successive input/output elements
int idist = 1, odist = 1; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = n; // --- Number of batched executions
hipfftPlanMany(&fft_range_handle, rank, nn,
inembed, istride, idist,
onembed, ostride, odist, HIPFFT_C2C, batch);
hipfftPlan1d(&fft_doppler_handle, n, HIPFFT_C2C, m);
hipfftPlan1d(&fft_pdop_handle, n, HIPFFT_C2C, m/2);
tock(&tb, &te, "initialization");
float ms; // elapsed time in milliseconds
sector_id = -1;
// create events and streams
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
// hipEventCreate(&dummyEvent);
hipEventRecord(startEvent,0);
tick(&tb);
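    // Per-sector pipeline: Hamming window -> range FFT -> mean removal + conjugate ->
    // Doppler FFT -> shift + clip -> |.|^2 -> forward FFT -> MA filter -> inverse FFT ->
    // reduction over sweeps -> ZdB/Zdr per range bin.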
while(sector_id < 126) {
// tick(&tb);
// Read 1 sector data
// cin >> sector_id;
sector_id++;
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhh[i*n+j] = make_cuFloatComplex(i, j);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqvv[i*n+j] = make_cuFloatComplex(j, i);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhv[i*n+j] = make_cuFloatComplex(i, i);
}
}
hipMemcpy(d_iqhh, iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
hipMemcpy(d_iqvv, iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
hipMemcpy(d_iqhv, iqhv, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
// apply Hamming coefficients
hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqhh, d_hamming);
hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqvv, d_hamming);
hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqhv, d_hamming);
// FFT range profile
hipfftExecC2C(fft_range_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD);
hipfftExecC2C(fft_range_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD);
hipfftExecC2C(fft_range_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD);
// FFT+shift Doppler profile
hipLaunchKernelGGL(( __sum_v2), dim3(m/2),dim3(n), 0, 0, d_iqhh, d_sum);
hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqhh, d_sum);
hipLaunchKernelGGL(( __sum_v2), dim3(m/2),dim3(n), 0, 0, d_iqvv, d_sum);
hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqvv, d_sum);
hipLaunchKernelGGL(( __sum_v2), dim3(m/2),dim3(n), 0, 0, d_iqhv, d_sum);
hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqhv, d_sum);
hipfftExecC2C(fft_doppler_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD);
hipfftExecC2C(fft_doppler_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD);
hipfftExecC2C(fft_doppler_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD);
hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqhh);
hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqvv);
hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqhv);
hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqhh, n);
hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqvv, n);
hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqhv, n);
hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqhh, n);
hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqvv, n);
hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqhv, n);
// Get absolute value
hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqhh, n);
hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqvv, n);
hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqhv, n);
// FFT PDOP
hipfftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD);
hipfftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD);
hipfftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD);
// Apply MA coefficients
hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqhh, d_ma);
hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqvv, d_ma);
hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqhv, d_ma);
// Inverse FFT
hipfftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, HIPFFT_BACKWARD);
hipfftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, HIPFFT_BACKWARD);
hipfftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqhh);
hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqvv);
hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqhv);
// Sum
hipLaunchKernelGGL(( __sum_inplace_v2), dim3(m/4),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqhh);
hipLaunchKernelGGL(( __sum_inplace_v2), dim3(m/4),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqvv);
hipLaunchKernelGGL(( __sum_inplace_v2), dim3(m/4),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqhv);
// hipMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// hipMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// float z = pow(i*k_rangeres, 2.0) * k_calib * iqhh[i*n].x;
// float zdb = 10 * log10(z);
// float zdr = 10 * (log10(iqhh[i*n].x)-log10(iqvv[i*n].x));
// cout << zdb << " " << zdr << endl;
// }
// exit(0);
// Calculate ZdB, Zdr
hipLaunchKernelGGL(( __calcresult_v2), dim3(1),dim3(m/2), 0, 0, d_iqhh, d_iqvv, d_iqhv, d_result, n);
hipMemcpy(result, d_result, (m/2)*RESULT_SIZE*sizeof(float), hipMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// for (int j=0; j<RESULT_SIZE; j++) {
// cout << result[i*RESULT_SIZE+j] << " ";
// }
// cout << endl;
// }
// exit(0);
}
tock(&tb, &te, "All (us)");
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&ms, startEvent, stopEvent);
printf("Time for sequential transfer and execute (ms): %f\n", ms);
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
hipFree(d_hamming);
hipFree(d_ma);
hipFree(d_iqhh);
hipFree(d_iqvv);
hipFree(d_iqhv);
delete[] iqhh;
delete[] iqvv;
delete[] iqhv;
return 0;
}
// hipMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// hipMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// for (int i=0; i<m; i++) {
// for (int j=0; j<n; j++) {
// cout << "(" << iqhh[i*n+j].x << "," << iqhh[i*n+j].y << ") ";
// }
// cout << endl;
// }
// // for (int i=0; i<m; i++) {
// // for (int j=0; j<n; j++) {
// // cout << iqvv[i*n+j].x << " ";
// // }
// // cout << endl;
// // }
// exit(0);
| 3d5e8b27f411ac1e71e22179bede574be12149f1.cu | #include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <cuComplex.h>
#include <fftw3.h>
#include <cufft.h>
#include <sys/time.h>
#include <assert.h>
using namespace std;
#define k_rangeres 30
#define k_calib 1941.05
#define RESULT_SIZE 2
#define DEBUG
inline
cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
float *generate_hamming_coef(int m, int n) {
// Calculate normalization power on range cell
float p_range=0;
for(int i=0; i < m; i++) {
p_range=p_range+pow(0.53836-0.46164*cos(2*M_PI*(i)/(m-1)), 2.0);
}
p_range=p_range/m;
// Calculate normalization power on Doppler cell
float p_doppler=0;
for(int j=0; j < n; j++) {
p_doppler=p_doppler+pow(0.53836-0.46164*cos(2*M_PI*(j)/(n-1)), 2.0);
}
p_doppler=p_doppler/n;
// Constant since FFT is not normalized and the power is computed w.r.t. 50ohm
const float K_wind = -1/(16383.5*m*n*sqrt(50));
const float c = K_wind/sqrt(p_range*p_doppler);
// Generate elements
float *_hamming_coef= new float[m*n];
for(int i=0; i < m; i++) {
for(int j=0; j < n; j++) {
_hamming_coef[i*n+j] = (0.53836-0.46164*cos(2*M_PI*(i)/(m-1))) * (0.53836-0.46164*cos(2*M_PI*(j)/(n-1))) * c;
}
}
return _hamming_coef;
}
float *generate_ma_coef(int n){
float *_ma_coef = new float[n];
float _sum = 0.0;
for(int i=0; i < n; i++) {
_ma_coef[i]=exp(-(pow(i-((n-1)/2), 2.0))/2);
_sum += _ma_coef[i];
}
for(int i=0; i < n; i++){
_ma_coef[i] = _ma_coef[i]/_sum;
}
return _ma_coef;
}
__global__ void __apply_hamming(cuFloatComplex *a, float *b) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx] = make_cuFloatComplex(b[idx]*cuCrealf(a[idx]), b[idx]*cuCimagf(a[idx]));
}
__global__ void __apply_ma(cuFloatComplex *inout, cuFloatComplex *macoef) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = cuCmulf(inout[i*n+j], macoef[j]);
}
__global__ void __conjugate(cuFloatComplex *a) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx].y *= -1;
}
__global__ void __shift(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = threadIdx.x;
cuFloatComplex temp = inout[i*n+j];
inout[i*n+j] = inout[i*n+(j+n/2)];
inout[i*n+(j+n/2)] = temp;
}
__global__ void __clip(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = n-threadIdx.x-1;
inout[i*n+j] = make_cuFloatComplex(0, 0);
}
__global__ void __abssqr(cuFloatComplex *inout, int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
float real, imag;
real = cuCrealf(inout[idx]);
imag = cuCimagf(inout[idx]);
inout[idx] = make_cuFloatComplex(real*real + imag*imag, 0);
}
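// Tree reduction over one sweep row per block: copy the n complex samples into `out`,
// then halve the active stride each step so the row sum ends up in out[i*n].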
__global__ void __sum(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
out[i*n+j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
out[i*n+j] = cuCaddf(out[i*n+j], out[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_v2(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
#pragma unroll
for (unsigned int d=0; d<2; d++) {
out[i*n+j+n*d] = make_cuFloatComplex(in[i*n+j+n*d].x, in[i*n+j+n*d].y);
}
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
#pragma unroll
for (unsigned int d=0; d<2; d++) {
out[i*n+j+n*d] = cuCaddf(out[i*n+j+n*d], out[i*n+j+n*d+s]);
}
}
__syncthreads();
}
}
__global__ void __sum_v3(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
out[i*n] = sdata[j];
}
}
__global__ void __sum_inplace(cuFloatComplex *g_idata) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
// __syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
// g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0);
g_idata[i*n+j] = cuCaddf(g_idata[i*n+j], g_idata[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_inplace_v2(cuFloatComplex *g_idata) {
const unsigned int i = 2*blockIdx.x, j = threadIdx.x, n = blockDim.x;
// __syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
// g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0);
#pragma unroll
for (unsigned int d=0; d<2; d++) {
g_idata[i*n+j+n*d] = cuCaddf(g_idata[i*n+j+n*d], g_idata[i*n+j+n*d+s]);
}
}
__syncthreads();
}
}
__global__ void __sum_inplace_v3(cuFloatComplex *in) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
in[i*n] = sdata[j];
}
}
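// Subtracts the per-row mean (DC removal) and conjugates: inout = conj(inout - mean),
// where the row sum was produced beforehand by one of the __sum kernels.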
__global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
float avgx = sum[i*n].x/n;
float avgy = sum[i*n].y/n;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x-avgx, (inout[i*n+j].y-avgy)*-1);
}
__global__ void __scale_real(cuFloatComplex *inout) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x/n, 0);
}
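// Converts the accumulated HH/VV power of each range bin into reflectivity zdb (dB)
// and differential reflectivity zdr; the hv channel is passed in but not used here.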
__global__ void __calcresult(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = blockIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
__global__ void __calcresult_v2(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = threadIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
void tick(timeval *begin) {
gettimeofday(begin, NULL);
}
void tock(timeval *begin, timeval *end, string caption) {
unsigned long long bb, e;
gettimeofday(end, NULL);
bb = (unsigned long long)(begin->tv_sec) * 1000000 + (unsigned long long)(begin->tv_usec) / 1;
e = (unsigned long long)(end->tv_sec) * 1000000 + (unsigned long long)(end->tv_usec) / 1;
cout << caption << ": " << e-bb << endl;
}
int main(int argc, char **argv) {
ios_base::sync_with_stdio(false);
struct timeval tb, te;
tick(&tb);
cuFloatComplex *iqhh, *iqvv, *iqhv;
float *result;
int sector_id;
const int m = 1024; // cell
const int n = 512; // sweep
const int ma_count = 7;
iqhh = new cuFloatComplex[m*n];
iqvv = new cuFloatComplex[m*n];
iqhv = new cuFloatComplex[m*n];
result = new float[(m/2)*RESULT_SIZE];
float a, b;
// Generate Hamming coefficients
const float *hamming_coef = generate_hamming_coef(m, n);
// Generate MA coefficients
float *ma_coef = generate_ma_coef(ma_count);
fftwf_complex *_fft_ma = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex) * n);
fftwf_plan fft_ma_plan = fftwf_plan_dft_1d(n, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE);
for (int j=0; j<ma_count; j++) {
_fft_ma[j][0] = ma_coef[j];
_fft_ma[j][1] = 0;
}
for (int j=ma_count; j<n; j++) {
_fft_ma[j][0] = 0;
_fft_ma[j][1] = 0;
}
fftwf_execute(fft_ma_plan);
fftwf_destroy_plan(fft_ma_plan);
cuFloatComplex *fft_ma;
fft_ma = new cuFloatComplex[n];
for (int j=0; j<n; j++) {
fft_ma[j] = make_cuFloatComplex(_fft_ma[j][0], _fft_ma[j][1]);
}
fftwf_free(_fft_ma);
// Device buffers
/*__constant__*/ float *d_hamming;
/*__constant__*/ cuFloatComplex *d_ma;
cuFloatComplex *d_iqhh, *d_iqvv, *d_iqhv;
cuFloatComplex *d_sum;
float *d_result;
//float *d_powhh, *d_powvv;
cudaMalloc(&d_hamming, m*n*sizeof(float));
cudaMalloc(&d_ma, n*sizeof(cuFloatComplex));
cudaMalloc(&d_iqhh, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_iqvv, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_iqhv, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_sum, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_result, (m/2)*RESULT_SIZE*sizeof(float));
cudaMemcpy(d_hamming, hamming_coef, m*n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ma, fft_ma, n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
// CUFFT initialization
cufftHandle fft_range_handle;
cufftHandle fft_doppler_handle;
cufftHandle fft_pdop_handle;
int rank = 1; // --- 1D FFTs
int nn[] = { m }; // --- Size of the Fourier transform
int istride = n, ostride = n; // --- Distance between two successive input/output elements
int idist = 1, odist = 1; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = n; // --- Number of batched executions
cufftPlanMany(&fft_range_handle, rank, nn,
inembed, istride, idist,
onembed, ostride, odist, CUFFT_C2C, batch);
cufftPlan1d(&fft_doppler_handle, n, CUFFT_C2C, m);
cufftPlan1d(&fft_pdop_handle, n, CUFFT_C2C, m/2);
tock(&tb, &te, "initialization");
float ms; // elapsed time in milliseconds
sector_id = -1;
// create events and streams
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
// cudaEventCreate(&dummyEvent);
cudaEventRecord(startEvent,0);
tick(&tb);
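    // Per-sector pipeline: Hamming window -> range FFT -> mean removal + conjugate ->
    // Doppler FFT -> shift + clip -> |.|^2 -> forward FFT -> MA filter -> inverse FFT ->
    // reduction over sweeps -> ZdB/Zdr per range bin.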
while(sector_id < 126) {
// tick(&tb);
// Read 1 sector data
// cin >> sector_id;
sector_id++;
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhh[i*n+j] = make_cuFloatComplex(i, j);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqvv[i*n+j] = make_cuFloatComplex(j, i);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhv[i*n+j] = make_cuFloatComplex(i, i);
}
}
cudaMemcpy(d_iqhh, iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
cudaMemcpy(d_iqvv, iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
cudaMemcpy(d_iqhv, iqhv, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
// apply Hamming coefficients
__apply_hamming<<<m,n>>>(d_iqhh, d_hamming);
__apply_hamming<<<m,n>>>(d_iqvv, d_hamming);
__apply_hamming<<<m,n>>>(d_iqhv, d_hamming);
// FFT range profile
cufftExecC2C(fft_range_handle, d_iqhh, d_iqhh, CUFFT_FORWARD);
cufftExecC2C(fft_range_handle, d_iqvv, d_iqvv, CUFFT_FORWARD);
cufftExecC2C(fft_range_handle, d_iqhv, d_iqhv, CUFFT_FORWARD);
// FFT+shift Doppler profile
__sum_v2<<<m/2,n>>>(d_iqhh, d_sum);
__avgconj<<<m,n>>>(d_iqhh, d_sum);
__sum_v2<<<m/2,n>>>(d_iqvv, d_sum);
__avgconj<<<m,n>>>(d_iqvv, d_sum);
__sum_v2<<<m/2,n>>>(d_iqhv, d_sum);
__avgconj<<<m,n>>>(d_iqhv, d_sum);
cufftExecC2C(fft_doppler_handle, d_iqhh, d_iqhh, CUFFT_FORWARD);
cufftExecC2C(fft_doppler_handle, d_iqvv, d_iqvv, CUFFT_FORWARD);
cufftExecC2C(fft_doppler_handle, d_iqhv, d_iqhv, CUFFT_FORWARD);
__conjugate<<<m,n>>>(d_iqhh);
__conjugate<<<m,n>>>(d_iqvv);
__conjugate<<<m,n>>>(d_iqhv);
__shift<<<m,n/2>>>(d_iqhh, n);
__shift<<<m,n/2>>>(d_iqvv, n);
__shift<<<m,n/2>>>(d_iqhv, n);
__clip<<<m,2>>>(d_iqhh, n);
__clip<<<m,2>>>(d_iqvv, n);
__clip<<<m,2>>>(d_iqhv, n);
// Get absolute value
__abssqr<<<m/2,n>>>(d_iqhh, n);
__abssqr<<<m/2,n>>>(d_iqvv, n);
__abssqr<<<m/2,n>>>(d_iqhv, n);
// FFT PDOP
cufftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, CUFFT_FORWARD);
cufftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, CUFFT_FORWARD);
cufftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, CUFFT_FORWARD);
// Apply MA coefficients
__apply_ma<<<m/2,n>>>(d_iqhh, d_ma);
__apply_ma<<<m/2,n>>>(d_iqvv, d_ma);
__apply_ma<<<m/2,n>>>(d_iqhv, d_ma);
// Inverse FFT
cufftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, CUFFT_INVERSE);
cufftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, CUFFT_INVERSE);
cufftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, CUFFT_INVERSE);
__scale_real<<<m/2,n>>>(d_iqhh);
__scale_real<<<m/2,n>>>(d_iqvv);
__scale_real<<<m/2,n>>>(d_iqhv);
// Sum
__sum_inplace_v2<<<m/4,n,n*sizeof(cuFloatComplex)>>>(d_iqhh);
__sum_inplace_v2<<<m/4,n,n*sizeof(cuFloatComplex)>>>(d_iqvv);
__sum_inplace_v2<<<m/4,n,n*sizeof(cuFloatComplex)>>>(d_iqhv);
// cudaMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// float z = pow(i*k_rangeres, 2.0) * k_calib * iqhh[i*n].x;
// float zdb = 10 * log10(z);
// float zdr = 10 * (log10(iqhh[i*n].x)-log10(iqvv[i*n].x));
// cout << zdb << " " << zdr << endl;
// }
// exit(0);
// Calculate ZdB, Zdr
__calcresult_v2<<<1,m/2>>>(d_iqhh, d_iqvv, d_iqhv, d_result, n);
cudaMemcpy(result, d_result, (m/2)*RESULT_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// for (int j=0; j<RESULT_SIZE; j++) {
// cout << result[i*RESULT_SIZE+j] << " ";
// }
// cout << endl;
// }
// exit(0);
}
tock(&tb, &te, "All (us)");
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
printf("Time for sequential transfer and execute (ms): %f\n", ms);
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
cudaFree(d_hamming);
cudaFree(d_ma);
cudaFree(d_iqhh);
cudaFree(d_iqvv);
cudaFree(d_iqhv);
delete[] iqhh;
delete[] iqvv;
delete[] iqhv;
return 0;
}
// cudaMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// for (int i=0; i<m; i++) {
// for (int j=0; j<n; j++) {
// cout << "(" << iqhh[i*n+j].x << "," << iqhh[i*n+j].y << ") ";
// }
// cout << endl;
// }
// // for (int i=0; i<m; i++) {
// // for (int j=0; j<n; j++) {
// // cout << iqvv[i*n+j].x << " ";
// // }
// // cout << endl;
// // }
// exit(0);
|
798a204d4b6fb7ced1ec70e82f1693dbfc58b89d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* This software is Copyright (c) 2011-2012 Lukas Odzioba <ukasz at openwall dot net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
* This file is shared by raw-sha224-cuda and raw-sha256-cuda formats,
* SHA256 definition is used to distinguish between them.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "cuda_common.cuh"
#include "../cuda_rawsha256.h"
static void cuda_rawsha256(sha256_password *, void *, int);
#ifdef SHA256
#define SHA_HASH sha256_hash
__constant__ const uint32_t H[] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c,
0x1f83d9ab, 0x5be0cd19
};
extern "C" void gpu_rawsha256(sha256_password * i, SHA_HASH * o, int lap)
{
cuda_rawsha256(i, o, lap);
}
#endif
#ifdef SHA224
#define SHA_HASH sha224_hash
__constant__ const uint32_t H[] = {
0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511,
0x64f98fa7, 0xbefa4fa4
};
extern "C" void gpu_rawsha224(sha256_password * i, SHA_HASH * o, int lap)
{
cuda_rawsha256(i, o, lap);
}
#endif
const uint32_t DATA_IN_SIZE = KEYS_PER_CRYPT * sizeof(sha256_password);
const uint32_t DATA_OUT_SIZE = KEYS_PER_CRYPT * sizeof(SHA_HASH);
static sha256_password *cuda_data = NULL; ///candidates
static SHA_HASH *cuda_data_out = NULL; ///sha256(candidate) or sha224(candidate)
static hipStream_t stream0, stream1, stream2; ///streams for async cuda calls
static sha256_password *cuda_data0 = NULL; ///candidates
static sha256_password *cuda_data1 = NULL; ///candidates
static sha256_password *cuda_data2 = NULL; ///candidates
static SHA_HASH *cuda_data_out0 = NULL; ///sha256(candidates)
static SHA_HASH *cuda_data_out1 = NULL; ///sha256(candidates)
static SHA_HASH *cuda_data_out2 = NULL; ///sha256(candidates)
__global__ void kernel_sha256(sha256_password * data, SHA_HASH * data_out);
static void cuda_rawsha256(sha256_password * host_in, void *out, int overlap)
{
if (overlap) {
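        /* Split the batch across three streams so host-to-device copies, kernel
           execution and device-to-host copies of different thirds can overlap. */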
HANDLE_ERROR(hipMalloc(&cuda_data0, DATA_IN_SIZE / 3));
HANDLE_ERROR(hipMalloc(&cuda_data1, DATA_IN_SIZE / 3));
HANDLE_ERROR(hipMalloc(&cuda_data2, DATA_IN_SIZE / 3));
HANDLE_ERROR(hipMalloc(&cuda_data_out0, DATA_OUT_SIZE / 3));
HANDLE_ERROR(hipMalloc(&cuda_data_out1, DATA_OUT_SIZE / 3));
HANDLE_ERROR(hipMalloc(&cuda_data_out2, DATA_OUT_SIZE / 3));
HANDLE_ERROR(hipStreamCreate(&stream0));
HANDLE_ERROR(hipStreamCreate(&stream1));
HANDLE_ERROR(hipStreamCreate(&stream2));
dim3 dimGrid(BLOCKS / 3);
dim3 dimBlock(THREADS);
HANDLE_ERROR(hipMemcpyAsync(cuda_data0, host_in,
DATA_IN_SIZE / 3, hipMemcpyHostToDevice, stream0));
hipLaunchKernelGGL(( kernel_sha256) , dim3(dimGrid), dim3(dimBlock), 0,
stream0 , cuda_data0, cuda_data_out0);
HANDLE_ERROR(hipMemcpyAsync(cuda_data1,
host_in + KEYS_PER_CRYPT / 3, DATA_IN_SIZE / 3,
hipMemcpyHostToDevice, stream1));
hipLaunchKernelGGL(( kernel_sha256) , dim3(dimGrid), dim3(dimBlock), 0,
stream1 , cuda_data1, cuda_data_out1);
hipMemcpyAsync(cuda_data2, host_in + 2 * KEYS_PER_CRYPT / 3,
DATA_IN_SIZE / 3, hipMemcpyHostToDevice, stream2);
hipLaunchKernelGGL(( kernel_sha256) , dim3(dimGrid), dim3(dimBlock), 0,
stream2 , cuda_data2, cuda_data_out2);
HANDLE_ERROR(hipMemcpyAsync((SHA_HASH *) out, cuda_data_out0,
DATA_OUT_SIZE / 3, hipMemcpyDeviceToHost, stream0));
HANDLE_ERROR(hipMemcpyAsync((SHA_HASH *) out +
KEYS_PER_CRYPT / 3, cuda_data_out1, DATA_OUT_SIZE / 3,
hipMemcpyDeviceToHost, stream1));
HANDLE_ERROR(hipMemcpyAsync((SHA_HASH *) out +
2 * KEYS_PER_CRYPT / 3, cuda_data_out2,
DATA_OUT_SIZE / 3, hipMemcpyDeviceToHost, stream2));
HANDLE_ERROR(hipStreamSynchronize(stream0));
HANDLE_ERROR(hipStreamSynchronize(stream1));
HANDLE_ERROR(hipStreamSynchronize(stream2));
hipStreamDestroy(stream0);
hipStreamDestroy(stream1);
hipStreamDestroy(stream2);
hipFree(cuda_data0);
hipFree(cuda_data1);
hipFree(cuda_data2);
hipFree(cuda_data_out0);
hipFree(cuda_data_out1);
hipFree(cuda_data_out2);
} else {
SHA_HASH *host_out = (SHA_HASH *) out;
hipMalloc(&cuda_data, DATA_IN_SIZE);
hipMalloc(&cuda_data_out, DATA_OUT_SIZE);
hipMemcpy(cuda_data, host_in, DATA_IN_SIZE,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( kernel_sha256) , dim3(BLOCKS), dim3(THREADS) , 0, 0, cuda_data,
cuda_data_out);
hipDeviceSynchronize();
hipMemcpy(host_out, cuda_data_out, DATA_OUT_SIZE,
hipMemcpyDeviceToHost);
hipFree(cuda_data);
hipFree(cuda_data_out);
}
}
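/* SHA-256 round constants: the first 32 bits of the fractional parts of the
   cube roots of the first 64 primes. */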
__constant__ uint32_t k[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe,
0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa,
0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb,
0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624,
0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb,
0xbef9a3f7, 0xc67178f2
};
/* highly unoptimized kernel */
__global__ void kernel_sha256(sha256_password * data, SHA_HASH * data_out)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
	uint32_t w[64]; // this should be limited to 16 uints
SHA_HASH *out = &data_out[idx];
sha256_password *in = &data[idx];
char dl = in->length;
unsigned char *key = in->v;
int j;
for (j = 0; j < 15; j++)
w[j] = 0;
for (j = 0; j < dl; j++) {
uint32_t tmp = 0;
tmp |= (((uint32_t) key[j]) << ((3 - (j & 0x3)) << 3));
w[j / 4] |= tmp;
}
w[dl / 4] |= (((uint32_t) 0x80) << ((3 - (dl & 0x3)) << 3));
w[15] = 0x00000000 | (dl * 8);
w[16] = sigma0(w[1]) + w[0];
w[17] = sigma1(w[15]) + sigma0(w[2]) + w[1];
w[18] = sigma1(w[16]) + sigma0(w[3]) + w[2];
w[19] = sigma1(w[17]) + sigma0(w[4]) + w[3];
w[20] = sigma1(w[18]) + sigma0(w[5]) + w[4];
w[21] = sigma1(w[19]) + w[5];
w[22] = sigma1(w[20]) + w[15];
w[23] = sigma1(w[21]) + w[16];
w[24] = sigma1(w[22]) + w[17];
w[25] = sigma1(w[23]) + w[18];
w[26] = sigma1(w[24]) + w[19];
w[27] = sigma1(w[25]) + w[20];
w[28] = sigma1(w[26]) + w[21];
w[29] = sigma1(w[27]) + w[22];
w[30] = sigma1(w[28]) + w[23] + sigma0(w[15]);
w[31] = sigma1(w[29]) + w[24] + sigma0(w[16]) + w[15];
#pragma unroll 32
for (uint32_t j = 32; j < 64; j++) {
w[j] =
sigma1(w[j - 2]) + w[j - 7] + sigma0(w[j - 15]) + w[j -
16];
}
uint32_t a = H[0];
uint32_t b = H[1];
uint32_t c = H[2];
uint32_t d = H[3];
uint32_t e = H[4];
uint32_t f = H[5];
uint32_t g = H[6];
uint32_t h = H[7];
#pragma unroll 64
for (uint32_t j = 0; j < 64; j++) {
uint32_t t1 = h + Sigma1(e) + Ch(e, f, g) + k[j] + w[j];
uint32_t t2 = Sigma0(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
out->v[0] = a + H[0];
out->v[1] = b + H[1];
out->v[2] = c + H[2];
out->v[3] = d + H[3];
out->v[4] = e + H[4];
out->v[5] = f + H[5];
out->v[6] = g + H[6];
#ifdef SHA256
out->v[7] = h + H[7];
#endif
}
| 798a204d4b6fb7ced1ec70e82f1693dbfc58b89d.cu | /*
* This software is Copyright (c) 2011-2012 Lukas Odzioba <ukasz at openwall dot net>
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification, are permitted.
* This file is shared by raw-sha224-cuda and raw-sha256-cuda formats,
* SHA256 definition is used to distinguish between them.
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include "cuda_common.cuh"
#include "../cuda_rawsha256.h"
static void cuda_rawsha256(sha256_password *, void *, int);
#ifdef SHA256
#define SHA_HASH sha256_hash
__constant__ const uint32_t H[] = {
0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c,
0x1f83d9ab, 0x5be0cd19
};
extern "C" void gpu_rawsha256(sha256_password * i, SHA_HASH * o, int lap)
{
cuda_rawsha256(i, o, lap);
}
#endif
#ifdef SHA224
#define SHA_HASH sha224_hash
__constant__ const uint32_t H[] = {
0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511,
0x64f98fa7, 0xbefa4fa4
};
extern "C" void gpu_rawsha224(sha256_password * i, SHA_HASH * o, int lap)
{
cuda_rawsha256(i, o, lap);
}
#endif
const uint32_t DATA_IN_SIZE = KEYS_PER_CRYPT * sizeof(sha256_password);
const uint32_t DATA_OUT_SIZE = KEYS_PER_CRYPT * sizeof(SHA_HASH);
static sha256_password *cuda_data = NULL; ///candidates
static SHA_HASH *cuda_data_out = NULL; ///sha256(candidate) or sha224(candidate)
static cudaStream_t stream0, stream1, stream2; ///streams for async cuda calls
static sha256_password *cuda_data0 = NULL; ///candidates
static sha256_password *cuda_data1 = NULL; ///candidates
static sha256_password *cuda_data2 = NULL; ///candidates
static SHA_HASH *cuda_data_out0 = NULL; ///sha256(candidates)
static SHA_HASH *cuda_data_out1 = NULL; ///sha256(candidates)
static SHA_HASH *cuda_data_out2 = NULL; ///sha256(candidates)
__global__ void kernel_sha256(sha256_password * data, SHA_HASH * data_out);
static void cuda_rawsha256(sha256_password * host_in, void *out, int overlap)
{
if (overlap) {
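        /* Split the batch across three streams so host-to-device copies, kernel
           execution and device-to-host copies of different thirds can overlap. */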
HANDLE_ERROR(cudaMalloc(&cuda_data0, DATA_IN_SIZE / 3));
HANDLE_ERROR(cudaMalloc(&cuda_data1, DATA_IN_SIZE / 3));
HANDLE_ERROR(cudaMalloc(&cuda_data2, DATA_IN_SIZE / 3));
HANDLE_ERROR(cudaMalloc(&cuda_data_out0, DATA_OUT_SIZE / 3));
HANDLE_ERROR(cudaMalloc(&cuda_data_out1, DATA_OUT_SIZE / 3));
HANDLE_ERROR(cudaMalloc(&cuda_data_out2, DATA_OUT_SIZE / 3));
HANDLE_ERROR(cudaStreamCreate(&stream0));
HANDLE_ERROR(cudaStreamCreate(&stream1));
HANDLE_ERROR(cudaStreamCreate(&stream2));
dim3 dimGrid(BLOCKS / 3);
dim3 dimBlock(THREADS);
HANDLE_ERROR(cudaMemcpyAsync(cuda_data0, host_in,
DATA_IN_SIZE / 3, cudaMemcpyHostToDevice, stream0));
kernel_sha256 <<< dimGrid, dimBlock, 0,
stream0 >>> (cuda_data0, cuda_data_out0);
HANDLE_ERROR(cudaMemcpyAsync(cuda_data1,
host_in + KEYS_PER_CRYPT / 3, DATA_IN_SIZE / 3,
cudaMemcpyHostToDevice, stream1));
kernel_sha256 <<< dimGrid, dimBlock, 0,
stream1 >>> (cuda_data1, cuda_data_out1);
cudaMemcpyAsync(cuda_data2, host_in + 2 * KEYS_PER_CRYPT / 3,
DATA_IN_SIZE / 3, cudaMemcpyHostToDevice, stream2);
kernel_sha256 <<< dimGrid, dimBlock, 0,
stream2 >>> (cuda_data2, cuda_data_out2);
HANDLE_ERROR(cudaMemcpyAsync((SHA_HASH *) out, cuda_data_out0,
DATA_OUT_SIZE / 3, cudaMemcpyDeviceToHost, stream0));
HANDLE_ERROR(cudaMemcpyAsync((SHA_HASH *) out +
KEYS_PER_CRYPT / 3, cuda_data_out1, DATA_OUT_SIZE / 3,
cudaMemcpyDeviceToHost, stream1));
HANDLE_ERROR(cudaMemcpyAsync((SHA_HASH *) out +
2 * KEYS_PER_CRYPT / 3, cuda_data_out2,
DATA_OUT_SIZE / 3, cudaMemcpyDeviceToHost, stream2));
HANDLE_ERROR(cudaStreamSynchronize(stream0));
HANDLE_ERROR(cudaStreamSynchronize(stream1));
HANDLE_ERROR(cudaStreamSynchronize(stream2));
cudaStreamDestroy(stream0);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaFree(cuda_data0);
cudaFree(cuda_data1);
cudaFree(cuda_data2);
cudaFree(cuda_data_out0);
cudaFree(cuda_data_out1);
cudaFree(cuda_data_out2);
} else {
SHA_HASH *host_out = (SHA_HASH *) out;
cudaMalloc(&cuda_data, DATA_IN_SIZE);
cudaMalloc(&cuda_data_out, DATA_OUT_SIZE);
cudaMemcpy(cuda_data, host_in, DATA_IN_SIZE,
cudaMemcpyHostToDevice);
kernel_sha256 <<< BLOCKS, THREADS >>> (cuda_data,
cuda_data_out);
cudaThreadSynchronize();
cudaMemcpy(host_out, cuda_data_out, DATA_OUT_SIZE,
cudaMemcpyDeviceToHost);
cudaFree(cuda_data);
cudaFree(cuda_data_out);
}
}
__constant__ uint32_t k[] = {
0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
0x923f82a4, 0xab1c5ed5,
0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe,
0x9bdc06a7, 0xc19bf174,
0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa,
0x5cb0a9dc, 0x76f988da,
0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
0x06ca6351, 0x14292967,
0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb,
0x81c2c92e, 0x92722c85,
0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624,
0xf40e3585, 0x106aa070,
0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
0x5b9cca4f, 0x682e6ff3,
0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb,
0xbef9a3f7, 0xc67178f2
};
/* highly unoptimal kernel */
__global__ void kernel_sha256(sha256_password * data, SHA_HASH * data_out)
{
uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x;
uint32_t w[64]; // this should be limited to 16 uints
SHA_HASH *out = &data_out[idx];
sha256_password *in = &data[idx];
char dl = in->length;
unsigned char *key = in->v;
int j;
for (j = 0; j < 15; j++)
w[j] = 0;
for (j = 0; j < dl; j++) {
uint32_t tmp = 0;
tmp |= (((uint32_t) key[j]) << ((3 - (j & 0x3)) << 3));
w[j / 4] |= tmp;
}
w[dl / 4] |= (((uint32_t) 0x80) << ((3 - (dl & 0x3)) << 3));
w[15] = 0x00000000 | (dl * 8);
w[16] = sigma0(w[1]) + w[0];
w[17] = sigma1(w[15]) + sigma0(w[2]) + w[1];
w[18] = sigma1(w[16]) + sigma0(w[3]) + w[2];
w[19] = sigma1(w[17]) + sigma0(w[4]) + w[3];
w[20] = sigma1(w[18]) + sigma0(w[5]) + w[4];
w[21] = sigma1(w[19]) + w[5];
w[22] = sigma1(w[20]) + w[15];
w[23] = sigma1(w[21]) + w[16];
w[24] = sigma1(w[22]) + w[17];
w[25] = sigma1(w[23]) + w[18];
w[26] = sigma1(w[24]) + w[19];
w[27] = sigma1(w[25]) + w[20];
w[28] = sigma1(w[26]) + w[21];
w[29] = sigma1(w[27]) + w[22];
w[30] = sigma1(w[28]) + w[23] + sigma0(w[15]);
w[31] = sigma1(w[29]) + w[24] + sigma0(w[16]) + w[15];
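/* The hand-unrolled schedule above drops the w[j-7] and sigma0(w[j-15]) terms that
   reference w[6]..w[14]; those words are still zero provided the candidate plus the
   0x80 pad byte fits inside w[0]..w[5] (roughly 23 bytes), which appears to be the
   short-password assumption this kernel relies on. */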
#pragma unroll 32
for (uint32_t j = 32; j < 64; j++) {
w[j] = sigma1(w[j - 2]) + w[j - 7] + sigma0(w[j - 15]) + w[j - 16];
}
uint32_t a = H[0];
uint32_t b = H[1];
uint32_t c = H[2];
uint32_t d = H[3];
uint32_t e = H[4];
uint32_t f = H[5];
uint32_t g = H[6];
uint32_t h = H[7];
#pragma unroll 64
for (uint32_t j = 0; j < 64; j++) {
uint32_t t1 = h + Sigma1(e) + Ch(e, f, g) + k[j] + w[j];
uint32_t t2 = Sigma0(a) + Maj(a, b, c);
h = g;
g = f;
f = e;
e = d + t1;
d = c;
c = b;
b = a;
a = t1 + t2;
}
out->v[0] = a + H[0];
out->v[1] = b + H[1];
out->v[2] = c + H[2];
out->v[3] = d + H[3];
out->v[4] = e + H[4];
out->v[5] = f + H[5];
out->v[6] = g + H[6];
#ifdef SHA256
out->v[7] = h + H[7];
#endif
}
|
17a5754d8a52e342cd2f44cd1de62d41b45524b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "myKernel.h"
__global__ void kernel( int *a, int dimx, int dimy )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int idx = iy*dimx + ix;
a[idx] = a[idx]+1;
}
// Please implement the following kernels 2 through 6,
// in order to meet the requirements in the write-ups.
__global__ void kernel2( int *a, int dimx, int dimy )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int idx = iy * dimx + ix;
if(iy < dimy && ix < dimx)
a[idx] = (blockIdx.y * gridDim.x) + blockIdx.x;
}
__global__ void kernel3( int *a, int dimx, int dimy )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int idx = iy*dimx + ix;
if(iy < dimy && ix < dimx)
a[idx] = idx;
}
__global__ void kernel4( int *a, int dimx, int dimy )
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * dimx + ix;
if(ix<dimx && iy < dimy)
a[idx] = (threadIdx.y * blockDim.x) + threadIdx.x;
}
__global__ void kernel5( int *a, int dimx, int dimy )
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * dimx + ix;
if(ix<dimx && iy < dimy)
a[idx] = blockIdx.y;
}
__global__ void kernel6( int *a, int dimx, int dimy )
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy*dimx + ix;
if(ix < dimx && iy < dimy)
a[idx] = blockIdx.x;
}
| 17a5754d8a52e342cd2f44cd1de62d41b45524b8.cu | #include "myKernel.h"
__global__ void kernel( int *a, int dimx, int dimy )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int idx = iy*dimx + ix;
a[idx] = a[idx]+1;
}
// Please implement the following kernels 2 through 6,
// in order to meet the requirements in the write-ups.
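// Summary of what each kernel below writes at element (ix, iy):
//   kernel2: the linear block index            (blockIdx.y * gridDim.x + blockIdx.x)
//   kernel3: the global linear element index   (iy * dimx + ix)
//   kernel4: the linear thread index in block  (threadIdx.y * blockDim.x + threadIdx.x)
//   kernel5: the block row index               (blockIdx.y)
//   kernel6: the block column index            (blockIdx.x)
// A typical launch, as a sketch only (the block shape is an assumption, not taken
// from the write-ups):
//   dim3 block(16, 16);
//   dim3 grid((dimx + block.x - 1) / block.x, (dimy + block.y - 1) / block.y);
//   kernel2<<<grid, block>>>(d_a, dimx, dimy);  // d_a is a hypothetical device pointer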
__global__ void kernel2( int *a, int dimx, int dimy )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int idx = iy * dimx + ix;
if(iy < dimy && ix < dimx)
a[idx] = (blockIdx.y * gridDim.x) + blockIdx.x;
}
__global__ void kernel3( int *a, int dimx, int dimy )
{
int ix = blockIdx.x*blockDim.x + threadIdx.x;
int iy = blockIdx.y*blockDim.y + threadIdx.y;
int idx = iy*dimx + ix;
if(iy < dimy && ix < dimx)
a[idx] = idx;
}
__global__ void kernel4( int *a, int dimx, int dimy )
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * dimx + ix;
if(ix<dimx && iy < dimy)
a[idx] = (threadIdx.y * blockDim.x) + threadIdx.x;
}
__global__ void kernel5( int *a, int dimx, int dimy )
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy * dimx + ix;
if(ix<dimx && iy < dimy)
a[idx] = blockIdx.y;
}
__global__ void kernel6( int *a, int dimx, int dimy )
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
int idx = iy*dimx + ix;
if(ix < dimx && iy < dimy)
a[idx] = blockIdx.x;
}
|
fb779ff7fda1efdd07ba3481eff525602848c81b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
Jacob Sword
Parallelized multiplication of a matrix and a matrix of random values, given the matrix dimensions
**/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cassert>
#include <cstdlib>
#include <time.h>
#include "wtime.h"
#include "./error_handler.h"
using std::cout;
using std::endl;
const int A_ROWS = 256;
const int A_COLS = 240;
const int PADDING = 16;
const int A_COLS_PADDED = A_COLS + PADDING;
const int B_ROWS = 240;
const int B_COLS = 512;
const int C_ROWS = A_ROWS;
const int C_COLS = B_COLS;
//Sequential mat_mult for testing
void mat_mult(int *mat_a, int *mat_b, int *result, int a_rows, int a_cols, int b_cols, int padding)
{
for (int i = 0; i < a_rows; i++) {
for (int j = 0; j < b_cols; j++) {
int temp_res = 0;
for (int k = 0; k < a_cols; k++) {
temp_res += mat_a[i * (a_cols + padding) + k] * mat_b[k * b_cols + j];
}
result[i * b_cols + j] = temp_res;
}
}
}
/*Parallel implementation of matrix x matrix - 1 block per row WITH PADDING
* matrix A is 256 x 240, matrix b is 240 * 512
* resultant matrix is 256 rows x 512 cols
*/
__global__ void mat_mult_fixed_dims_padded_kernel(int *mat_a, int *mat_b, int *res) {
// El for each thread, shared per block
__shared__ int smem[128];
for (int row_block = 0; row_block * gridDim.x < A_ROWS; row_block++) {
int a_row = blockIdx.x + (row_block * gridDim.x);
for (int b_col = 0; b_col < B_COLS; b_col++) {
int total = 0;
for (int thread_i = 0; thread_i * blockDim.x < A_COLS; thread_i++) {
int thread_col = threadIdx.x + (thread_i * blockDim.x);
// Need to check because 240 is not a multiple of 128
if (thread_col >= A_COLS)
smem[threadIdx.x] = 0;
else
smem[threadIdx.x] = mat_a[a_row * A_COLS_PADDED + thread_col] * mat_b[thread_col * B_COLS + b_col];
__syncthreads();
//Parallel reduction
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (threadIdx.x < i) {
int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
smem[threadIdx.x] = temp;
}
__syncthreads();
}
if (threadIdx.x == 0) {
total += smem[threadIdx.x];
}
}
if (threadIdx.x == 0) {
res[a_row * C_COLS + b_col] = total;
}
}
}
}
int main (int args, char **argv) {
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS_PADDED);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix
cout << "Matrix a: " << A_ROWS << " x " << A_COLS
<< "(plus " << PADDING << " padding)" << endl;
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS_PADDED + j] = el;
}
}
// Initialize matrix b
cout << "Matrix b: " << B_ROWS << " x " << B_COLS << endl;
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(hipMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS_PADDED));
HANDLE_ERR(hipMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(hipMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(hipMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS_PADDED, hipMemcpyHostToDevice));
HANDLE_ERR(hipMemcpy (b_d, b, sizeof (int) * B_ROWS * B_COLS, hipMemcpyHostToDevice));
double starttime = wtime();
hipLaunchKernelGGL(( mat_mult_fixed_dims_padded_kernel) , dim3(128), dim3(128) , 0, 0, a_d, b_d, c_d);
hipDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Padded multiplication time: " << algotime << endl;
HANDLE_ERR(hipMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, hipMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS, PADDING);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
| fb779ff7fda1efdd07ba3481eff525602848c81b.cu | /**
Jacob Sword
Parallelized multiplication of a matrix and a matrix of random values, given the matrix dimensions
**/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cassert>
#include <cstdlib>
#include <time.h>
#include "wtime.h"
#include "./error_handler.h"
using std::cout;
using std::endl;
const int A_ROWS = 256;
const int A_COLS = 240;
const int PADDING = 16;
const int A_COLS_PADDED = A_COLS + PADDING;
const int B_ROWS = 240;
const int B_COLS = 512;
const int C_ROWS = A_ROWS;
const int C_COLS = B_COLS;
//Sequential mat_mult for testing
void mat_mult(int *mat_a, int *mat_b, int *result, int a_rows, int a_cols, int b_cols, int padding)
{
for (int i = 0; i < a_rows; i++) {
for (int j = 0; j < b_cols; j++) {
int temp_res = 0;
for (int k = 0; k < a_cols; k++) {
temp_res += mat_a[i * (a_cols + padding) + k] * mat_b[k * b_cols + j];
}
result[i * b_cols + j] = temp_res;
}
}
}
/*Parallel implementation of matrix x matrix - 1 block per row WITH PADDING
* matrix A is 256 x 240, matrix b is 240 * 512
* resultant matrix is 256 rows x 512 cols
*/
__global__ void mat_mult_fixed_dims_padded_kernel(int *mat_a, int *mat_b, int *res) {
// El for each thread, shared per block
__shared__ int smem[128];
for (int row_block = 0; row_block * gridDim.x < A_ROWS; row_block++) {
int a_row = blockIdx.x + (row_block * gridDim.x);
for (int b_col = 0; b_col < B_COLS; b_col++) {
int total = 0;
for (int thread_i = 0; thread_i * blockDim.x < A_COLS; thread_i++) {
int thread_col = threadIdx.x + (thread_i * blockDim.x);
// Need to check because 240 is not a multiple of 128
if (thread_col >= A_COLS)
smem[threadIdx.x] = 0;
else
smem[threadIdx.x] = mat_a[a_row * A_COLS_PADDED + thread_col] * mat_b[thread_col * B_COLS + b_col];
__syncthreads();
//Parallel reduction
for (int i = blockDim.x / 2; i > 0; i /= 2) {
if (threadIdx.x < i) {
int temp = smem[threadIdx.x] + smem[threadIdx.x + i];
smem[threadIdx.x] = temp;
}
__syncthreads();
}
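// At this point smem[0] holds the partial dot product of the current 128-element
// slice of row a_row with column b_col; thread 0 folds it into its running total.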
if (threadIdx.x == 0) {
total += smem[threadIdx.x];
}
}
if (threadIdx.x == 0) {
res[a_row * C_COLS + b_col] = total;
}
}
}
}
int main (int args, char **argv) {
int *a = (int *) malloc(sizeof(int) * A_ROWS * A_COLS_PADDED);
int *b = (int *) malloc(sizeof(int) * B_ROWS * B_COLS);
int *c = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
srand(time(NULL));
// Initialize matrix
cout << "Matrix a: " << A_ROWS << " x " << A_COLS
<< "(plus " << PADDING << " padding)" << endl;
for (int i = 0; i < A_ROWS; i++) {
for (int j = 0; j < A_COLS; j++) {
int el = rand() % 10;
a[i * A_COLS_PADDED + j] = el;
}
}
// Initialize matrix b
cout << "Matrix b: " << B_ROWS << " x " << B_COLS << endl;
for (int i = 0; i < B_ROWS; i++) {
for (int j = 0; j < B_COLS; j++) {
int el = rand() % 5;
b[i * B_COLS + j] = el;
}
}
int *a_d, *b_d, *c_d;
HANDLE_ERR(cudaMalloc((void **) &a_d, sizeof (int) * A_ROWS * A_COLS_PADDED));
HANDLE_ERR(cudaMalloc((void **) &b_d, sizeof (int) * B_ROWS * B_COLS));
HANDLE_ERR(cudaMalloc((void **) &c_d, sizeof (int) * C_ROWS * C_COLS));
HANDLE_ERR(cudaMemcpy (a_d, a, sizeof (int) * A_ROWS * A_COLS_PADDED, cudaMemcpyHostToDevice));
HANDLE_ERR(cudaMemcpy (b_d, b, sizeof (int) * B_ROWS * B_COLS, cudaMemcpyHostToDevice));
double starttime = wtime();
mat_mult_fixed_dims_padded_kernel <<< 128, 128 >>> (a_d, b_d, c_d);
cudaDeviceSynchronize();
double algotime = wtime() - starttime;
cout << "Padded multiplication time: " << algotime << endl;
HANDLE_ERR(cudaMemcpy (c, c_d, sizeof (int) * C_ROWS * C_COLS, cudaMemcpyDeviceToHost));
//Make sure parallel work is equal to sequential work (for testing)
int *test_res = (int *) malloc(sizeof(int) * C_ROWS * C_COLS);
mat_mult(a, b, test_res, A_ROWS, A_COLS, B_COLS, PADDING);
for (int i = 0; i < C_ROWS; i++) {
for (int j = 0; j < C_COLS; j++){
int idx = i * C_COLS + j;
if (c[idx] != test_res[idx]) {
cout << "Not Equal at idx: " << i << ", " << j
<< " Parallel work " << c[idx] << ", Sequential Work: " << test_res[idx] << endl;
}
assert(c[idx] == test_res[idx]);
}
}
}
|
e89a84e60e0bcdd9dedb94f09f4b8744b69240a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <memory>
#include <algorithm>
#include <chrono>
#include <vector>
#include <numeric>
#include <iostream>
#include "device_launch_parameters.h"
#include "CUDAUtils.h"
template <typename TFunc>
inline double Timing(TFunc const &func, int times = 3) {
using namespace std::chrono;
if (times > 1)
func();
auto t = std::numeric_limits<double>::max();
for (auto i = 0; i < times; ++i) {
auto start = steady_clock::now();
func();
auto end = steady_clock::now();
t = ::min(t, duration<double>(end - start).count());
}
return t;
}
inline bool IsPowerOf2(size_t i) {
return i > 0 && ((i - 1) & i) == 0;
}
#define USE(v) do { volatile auto _v2 = v; } while(0)
template<typename T>
__device__ inline void Swap(T &a, T& b) {
auto c = a;
a = b;
b = c;
}
#define MAX_CUDA_THREADS 1024
struct Bitonic_Naive {
template<typename TIter>
static void Kernel(TIter first, TIter mid, bool ascent) {
for (auto p = first, q = mid; p != mid; ++p, ++q) {
if ((*p > *q) == ascent)
std::iter_swap(p, q);
}
}
template<typename TIter>
static void Bitonic_Sort(TIter first, TIter mid, TIter last, bool ascent) {
if (first + 1 == last) return;
Kernel(first, mid, ascent);
Bitonic_Sort(first, (mid - first) / 2 + first, mid, ascent);
Bitonic_Sort(mid, (last - mid) / 2 + mid, last, ascent);
}
template<typename TIter>
static void Sort(TIter first, TIter last, bool ascent) {
auto size = last - first;
assert(IsPowerOf2(size));
if (size == 1) return;
auto mid = size / 2 + first;
Sort(first, mid, true);
Sort(mid, last, false);
Bitonic_Sort(first, mid, last, ascent);
}
};
struct Bitonic_Unroll1 {
template<typename TIter>
static void Kernel(TIter first, TIter last, size_t span, bool ascent) {
for (auto p = first; p != last; p += span) {
for (auto q = p + span; p != q; ++p) {
if ((p[0] > p[span]) == ascent)
std::iter_swap(p, p + span);
}
}
}
template<typename TIter>
static void Bitonic_Sort(TIter first, TIter mid, TIter last, bool ascent) {
for (auto span = mid - first; span >= 1; span /= 2) {
Kernel(first, last, span, ascent);
}
}
template<typename TIter>
static void Sort(TIter first, TIter last, bool ascent) {
auto size = last - first;
assert(IsPowerOf2(size));
if (size == 1) return;
auto mid = size / 2 + first;
Sort(first, mid, true);
Sort(mid, last, false);
Bitonic_Sort(first, mid, last, ascent);
}
};
struct Bitonic_Unroll2 {
template<typename T>
static void Sort(T *ptr, size_t size, bool ascent) {
assert(IsPowerOf2(size));
for (auto dirSpan = 2; dirSpan <= size; dirSpan *= 2) {
for (auto span = size / 2; span >= 1; span /= 2) {
for (size_t i = 0; i < size; ++i) {
if ((i / span) % 2 == 0) {
auto dir = (i / dirSpan) % 2 == 0 ? ascent : !ascent;
if ((ptr[i] > ptr[i + span]) == dir) {
std::iter_swap(ptr + i, ptr + i + span);
}
}
}
}
}
}
};
template<typename T>
__global__ void Bitonic_GPU1_Kernel(T *ptr, size_t size, bool ascent) {
auto tid = threadIdx.x;
auto elemCountPerThread = size / blockDim.x;
auto first = tid * elemCountPerThread;
auto last = first + elemCountPerThread;
__shared__ T localPtr[4096];
for (auto i = first; i != last; ++i)
localPtr[i] = ptr[i];
__syncthreads();
for (auto dirSpan = 2; dirSpan <= size; dirSpan *= 2) {
for (auto span = size / 2; span >= 1; span /= 2) {
#pragma unroll
for (auto i = first; i != last; ++i) {
if ((i / span) % 2 == 0) {
auto dir = (i / dirSpan) % 2 == 0 ? ascent : !ascent;
if ((localPtr[i] > localPtr[i + span]) == dir) {
Swap(localPtr[i], localPtr[i + span]);
}
}
}
__syncthreads();
}
}
for (auto i = first; i != last; ++i)
ptr[i] = localPtr[i];
}
struct Bitonic_GPU1 {
template<typename T>
static void Sort(CUDAArrayPtr<T> devPtr, bool ascent, size_t maxThread) {
assert(IsPowerOf2(devPtr->Length));
auto threadCount = ::min(maxThread, devPtr->Length);
Bitonic_GPU1_Kernel << < 1, threadCount >> > (devPtr->Ptr, devPtr->Length, ascent);
devPtr->Device->CheckLastError();
}
template<typename T>
static void Sort(
CUDADevicePtr device,
T *ptr, size_t size, bool ascent, size_t maxThread = 512) {
auto devPtr = device->Alloc<T>(size);
device->Copy(devPtr, ptr);
Sort(devPtr, ascent, maxThread);
device->Copy(ptr, devPtr);
}
};
template<bool ascent, typename T>
__global__ void Bitonic_GPU2_Kernel1(T *ptr, int dirSpan, int span) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if ((i & span) == 0) {
auto dir = (i & dirSpan) == 0 ? ascent : !ascent;
if ((ptr[i] > ptr[i + span]) == dir) {
Swap(ptr[i], ptr[i + span]);
}
}
}
template<bool ascent, typename T>
__global__ void Bitonic_GPU2_Kernel2(T *ptr, int dirSpan, int initSpan) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ T localPtr[MAX_CUDA_THREADS];
localPtr[threadIdx.x] = ptr[i];
__syncthreads();
auto dir = (i & dirSpan) == 0 ? ascent : !ascent;
#pragma unroll
for (auto span = initSpan; span >= 1; span >>= 1) {
if ((i & span) == 0) {
if ((localPtr[threadIdx.x] > localPtr[threadIdx.x + span]) == dir) {
Swap(localPtr[threadIdx.x], localPtr[threadIdx.x + span]);
}
}
__syncthreads();
}
ptr[i] = localPtr[threadIdx.x];
}
struct Bitonic_GPU2 {
template<bool ascent, typename T>
static void Sort(CUDAArrayPtr<T> devPtr) {
assert(IsPowerOf2(devPtr->Length));
auto size = int(devPtr->Length);
for (auto dirSpan = 2; dirSpan <= size; dirSpan <<= 1) {
auto span = size >> 1;
for (; span >= MAX_CUDA_THREADS; span >>= 1) {
Bitonic_GPU2_Kernel1
<ascent, T>
<< < devPtr->Length / MAX_CUDA_THREADS, MAX_CUDA_THREADS >> >
(devPtr->Ptr, dirSpan, span);
devPtr->Device->CheckLastError();
}
{
auto threadCount = ::min(size, MAX_CUDA_THREADS);
auto blockCount = size / threadCount;
Bitonic_GPU2_Kernel2
<ascent, T>
<< <blockCount, threadCount >> >
(devPtr->Ptr, dirSpan, span);
devPtr->Device->CheckLastError();
}
}
}
template<bool ascent, typename T>
static void Sort(CUDADevicePtr device, T *ptr, size_t size) {
auto devPtr = device->Alloc<T>(size);
device->Copy(devPtr, ptr);
Sort<ascent>(devPtr);
device->Copy(ptr, devPtr);
}
};
static void Test(CUDADevicePtr device) {
for (auto i = 0; i < 12; ++i) {
std::vector<int> range(1 << i);
iota(range.begin(), range.end(), 0);
auto temp(range);
random_shuffle(temp.begin(), temp.end());
Bitonic_Naive::Sort(temp.begin(), temp.end(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_Unroll1::Sort(temp.begin(), temp.end(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_Unroll2::Sort(&temp[0], temp.size(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_GPU1::Sort(device, &temp[0], temp.size(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_GPU2::Sort<true>(device, &temp[0], temp.size());
assert(std::equal(temp.begin(), temp.end(), range.begin()));
}
}
static void Benchmark(CUDADevicePtr device) {
constexpr size_t kSize = 1 << 12;
constexpr size_t kLoop = 100;
std::vector<int> shuffled(kSize);
iota(shuffled.begin(), shuffled.end(), 0);
random_shuffle(shuffled.begin(), shuffled.end());
{
auto temp(shuffled);
auto baseline = Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
USE(temp.front());
}
}) / kLoop;
printf("%-24s=%f\n", "std::sort", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
sort(temp.begin(), temp.end());
USE(temp.front());
}
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_naive", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
Bitonic_Naive::Sort(temp.begin(), temp.end(), true);
USE(temp.front());
}
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_unroll1", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
Bitonic_Unroll1::Sort(temp.begin(), temp.end(), true);
USE(temp.front());
}
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_unroll2", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
Bitonic_Unroll2::Sort(&temp[0], temp.size(), true);
USE(temp.front());
}
}) / kLoop - baseline);
}
{
auto devShuffled = device->Alloc<int>(kSize);
device->Copy(devShuffled, &shuffled[0]);
auto devTemp = device->Alloc<int>(kSize);
auto baseline = Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
}
device->Synchronize();
}) / kLoop;
printf("%-24s=%f\n", "bitonic_gpu1,128", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
Bitonic_GPU1::Sort(devTemp, true, 128);
}
device->Synchronize();
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_gpu1,1024", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
Bitonic_GPU1::Sort(devTemp, true, 1024);
}
device->Synchronize();
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_gpu2", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
Bitonic_GPU2::Sort<true>(devTemp);
}
device->Synchronize();
}) / kLoop - baseline);
}
}
int main() {
try {
auto device = std::make_shared<CUDADevice>();
Test(device);
#ifdef NDEBUG
Benchmark(device);
#endif
} catch(std::exception const &e) {
std::cerr << e.what() << std::endl;
}
return 0;
}
| e89a84e60e0bcdd9dedb94f09f4b8744b69240a0.cu | #include <cstdio>
#include <memory>
#include <algorithm>
#include <chrono>
#include <vector>
#include <numeric>
#include <iostream>
#include "device_launch_parameters.h"
#include "CUDAUtils.h"
template <typename TFunc>
inline double Timing(TFunc const &func, int times = 3) {
using namespace std::chrono;
if (times > 1)
func();
auto t = std::numeric_limits<double>::max();
for (auto i = 0; i < times; ++i) {
auto start = steady_clock::now();
func();
auto end = steady_clock::now();
t = std::min(t, duration<double>(end - start).count());
}
return t;
}
inline bool IsPowerOf2(size_t i) {
return i > 0 && ((i - 1) & i) == 0;
}
#define USE(v) do { volatile auto _v2 = v; } while(0)
template<typename T>
__device__ inline void Swap(T &a, T& b) {
auto c = a;
a = b;
b = c;
}
#define MAX_CUDA_THREADS 1024
struct Bitonic_Naive {
template<typename TIter>
static void Kernel(TIter first, TIter mid, bool ascent) {
for (auto p = first, q = mid; p != mid; ++p, ++q) {
if ((*p > *q) == ascent)
std::iter_swap(p, q);
}
}
template<typename TIter>
static void Bitonic_Sort(TIter first, TIter mid, TIter last, bool ascent) {
if (first + 1 == last) return;
Kernel(first, mid, ascent);
Bitonic_Sort(first, (mid - first) / 2 + first, mid, ascent);
Bitonic_Sort(mid, (last - mid) / 2 + mid, last, ascent);
}
template<typename TIter>
static void Sort(TIter first, TIter last, bool ascent) {
auto size = last - first;
assert(IsPowerOf2(size));
if (size == 1) return;
auto mid = size / 2 + first;
Sort(first, mid, true);
Sort(mid, last, false);
Bitonic_Sort(first, mid, last, ascent);
}
};
struct Bitonic_Unroll1 {
template<typename TIter>
static void Kernel(TIter first, TIter last, size_t span, bool ascent) {
for (auto p = first; p != last; p += span) {
for (auto q = p + span; p != q; ++p) {
if ((p[0] > p[span]) == ascent)
std::iter_swap(p, p + span);
}
}
}
template<typename TIter>
static void Bitonic_Sort(TIter first, TIter mid, TIter last, bool ascent) {
for (auto span = mid - first; span >= 1; span /= 2) {
Kernel(first, last, span, ascent);
}
}
template<typename TIter>
static void Sort(TIter first, TIter last, bool ascent) {
auto size = last - first;
assert(IsPowerOf2(size));
if (size == 1) return;
auto mid = size / 2 + first;
Sort(first, mid, true);
Sort(mid, last, false);
Bitonic_Sort(first, mid, last, ascent);
}
};
struct Bitonic_Unroll2 {
template<typename T>
static void Sort(T *ptr, size_t size, bool ascent) {
assert(IsPowerOf2(size));
for (auto dirSpan = 2; dirSpan <= size; dirSpan *= 2) {
for (auto span = size / 2; span >= 1; span /= 2) {
for (size_t i = 0; i < size; ++i) {
if ((i / span) % 2 == 0) {
auto dir = (i / dirSpan) % 2 == 0 ? ascent : !ascent;
if ((ptr[i] > ptr[i + span]) == dir) {
std::iter_swap(ptr + i, ptr + i + span);
}
}
}
}
}
}
};
template<typename T>
__global__ void Bitonic_GPU1_Kernel(T *ptr, size_t size, bool ascent) {
auto tid = threadIdx.x;
auto elemCountPerThread = size / blockDim.x;
auto first = tid * elemCountPerThread;
auto last = first + elemCountPerThread;
__shared__ T localPtr[4096];
for (auto i = first; i != last; ++i)
localPtr[i] = ptr[i];
__syncthreads();
for (auto dirSpan = 2; dirSpan <= size; dirSpan *= 2) {
for (auto span = size / 2; span >= 1; span /= 2) {
#pragma unroll
for (auto i = first; i != last; ++i) {
if ((i / span) % 2 == 0) {
auto dir = (i / dirSpan) % 2 == 0 ? ascent : !ascent;
if ((localPtr[i] > localPtr[i + span]) == dir) {
Swap(localPtr[i], localPtr[i + span]);
}
}
}
__syncthreads();
}
}
for (auto i = first; i != last; ++i)
ptr[i] = localPtr[i];
}
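// Note: Bitonic_GPU1_Kernel stages the whole array in a fixed 4096-element shared
// buffer, so it implicitly assumes size <= 4096 and a single block; the host-side
// Sort below launches it with one block of threadCount threads accordingly.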
struct Bitonic_GPU1 {
template<typename T>
static void Sort(CUDAArrayPtr<T> devPtr, bool ascent, size_t maxThread) {
assert(IsPowerOf2(devPtr->Length));
auto threadCount = std::min(maxThread, devPtr->Length);
Bitonic_GPU1_Kernel << < 1, threadCount >> > (devPtr->Ptr, devPtr->Length, ascent);
devPtr->Device->CheckLastError();
}
template<typename T>
static void Sort(
CUDADevicePtr device,
T *ptr, size_t size, bool ascent, size_t maxThread = 512) {
auto devPtr = device->Alloc<T>(size);
device->Copy(devPtr, ptr);
Sort(devPtr, ascent, maxThread);
device->Copy(ptr, devPtr);
}
};
template<bool ascent, typename T>
__global__ void Bitonic_GPU2_Kernel1(T *ptr, int dirSpan, int span) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
if ((i & span) == 0) {
auto dir = (i & dirSpan) == 0 ? ascent : !ascent;
if ((ptr[i] > ptr[i + span]) == dir) {
Swap(ptr[i], ptr[i + span]);
}
}
}
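// For power-of-two spans, (i & span) == 0 is equivalent to the (i / span) % 2 == 0
// test used in the CPU variants above, and (i & dirSpan) == 0 selects the sort
// direction the same way; the bitwise form avoids integer division in the kernel.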
template<bool ascent, typename T>
__global__ void Bitonic_GPU2_Kernel2(T *ptr, int dirSpan, int initSpan) {
auto i = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ T localPtr[MAX_CUDA_THREADS];
localPtr[threadIdx.x] = ptr[i];
__syncthreads();
auto dir = (i & dirSpan) == 0 ? ascent : !ascent;
#pragma unroll
for (auto span = initSpan; span >= 1; span >>= 1) {
if ((i & span) == 0) {
if ((localPtr[threadIdx.x] > localPtr[threadIdx.x + span]) == dir) {
Swap(localPtr[threadIdx.x], localPtr[threadIdx.x + span]);
}
}
__syncthreads();
}
ptr[i] = localPtr[threadIdx.x];
}
struct Bitonic_GPU2 {
template<bool ascent, typename T>
static void Sort(CUDAArrayPtr<T> devPtr) {
assert(IsPowerOf2(devPtr->Length));
auto size = int(devPtr->Length);
for (auto dirSpan = 2; dirSpan <= size; dirSpan <<= 1) {
auto span = size >> 1;
for (; span >= MAX_CUDA_THREADS; span >>= 1) {
Bitonic_GPU2_Kernel1
<ascent, T>
<< < devPtr->Length / MAX_CUDA_THREADS, MAX_CUDA_THREADS >> >
(devPtr->Ptr, dirSpan, span);
devPtr->Device->CheckLastError();
}
{
auto threadCount = std::min(size, MAX_CUDA_THREADS);
auto blockCount = size / threadCount;
Bitonic_GPU2_Kernel2
<ascent, T>
<< <blockCount, threadCount >> >
(devPtr->Ptr, dirSpan, span);
devPtr->Device->CheckLastError();
}
}
}
template<bool ascent, typename T>
static void Sort(CUDADevicePtr device, T *ptr, size_t size) {
auto devPtr = device->Alloc<T>(size);
device->Copy(devPtr, ptr);
Sort<ascent>(devPtr);
device->Copy(ptr, devPtr);
}
};
static void Test(CUDADevicePtr device) {
for (auto i = 0; i < 12; ++i) {
std::vector<int> range(1 << i);
iota(range.begin(), range.end(), 0);
auto temp(range);
random_shuffle(temp.begin(), temp.end());
Bitonic_Naive::Sort(temp.begin(), temp.end(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_Unroll1::Sort(temp.begin(), temp.end(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_Unroll2::Sort(&temp[0], temp.size(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_GPU1::Sort(device, &temp[0], temp.size(), true);
assert(std::equal(temp.begin(), temp.end(), range.begin()));
random_shuffle(temp.begin(), temp.end());
Bitonic_GPU2::Sort<true>(device, &temp[0], temp.size());
assert(std::equal(temp.begin(), temp.end(), range.begin()));
}
}
static void Benchmark(CUDADevicePtr device) {
constexpr size_t kSize = 1 << 12;
constexpr size_t kLoop = 100;
std::vector<int> shuffled(kSize);
iota(shuffled.begin(), shuffled.end(), 0);
random_shuffle(shuffled.begin(), shuffled.end());
{
auto temp(shuffled);
auto baseline = Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
USE(temp.front());
}
}) / kLoop;
printf("%-24s=%f\n", "std::sort", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
sort(temp.begin(), temp.end());
USE(temp.front());
}
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_naive", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
Bitonic_Naive::Sort(temp.begin(), temp.end(), true);
USE(temp.front());
}
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_unroll1", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
Bitonic_Unroll1::Sort(temp.begin(), temp.end(), true);
USE(temp.front());
}
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_unroll2", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
temp.assign(shuffled.begin(), shuffled.end());
Bitonic_Unroll2::Sort(&temp[0], temp.size(), true);
USE(temp.front());
}
}) / kLoop - baseline);
}
{
auto devShuffled = device->Alloc<int>(kSize);
device->Copy(devShuffled, &shuffled[0]);
auto devTemp = device->Alloc<int>(kSize);
auto baseline = Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
}
device->Synchronize();
}) / kLoop;
printf("%-24s=%f\n", "bitonic_gpu1,128", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
Bitonic_GPU1::Sort(devTemp, true, 128);
}
device->Synchronize();
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_gpu1,1024", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
Bitonic_GPU1::Sort(devTemp, true, 1024);
}
device->Synchronize();
}) / kLoop - baseline);
printf("%-24s=%f\n", "bitonic_gpu2", Timing([&]() {
for (size_t i = 0; i < kLoop; ++i) {
device->Copy(devTemp, devShuffled);
Bitonic_GPU2::Sort<true>(devTemp);
}
device->Synchronize();
}) / kLoop - baseline);
}
}
int main() {
try {
auto device = std::make_shared<CUDADevice>();
Test(device);
#ifdef NDEBUG
Benchmark(device);
#endif
} catch(std::exception const &e) {
std::cerr << e.what() << std::endl;
}
return 0;
}
|
60633418eb2378d5874087ce4d40ac95a4b6a348.hip | // !!! This is a file automatically generated by hipify!!!
//xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --boogie-file=${KERNEL_DIR}/axioms.bpl --no-inline
//error: possible null pointer access
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#define N 8
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
// Static pointers to device functions
__device__ funcType p_mul_func = multiplyByTwo;
__device__ funcType p_div_func = divideByTwo;
__global__ void foo(float *v, funcType f, unsigned int size, int i)
{
assert(i != 0);
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
void *x = (void*)f; /*ptr_to_ptr*/
if (i == 0) // the null pointer access occurs when i == 0; this is the failing case
x = x + 5;
funcType g = (funcType)x;
if (tid < size)
{
v[tid] = (*g)(v, tid);
}
}
int main(){
float* w;
float* dev_w;
int nondet;
int size = N*sizeof(float);
w =(float*) malloc(size);
for (int i = 0; i < N; ++i){
w[i] = i;
}
hipMalloc((void**)&dev_w, size);
hipMemcpy(dev_w,w, size,hipMemcpyHostToDevice);
funcType host_f;
hipMemcpyFromSymbol( &host_f, p_div_func, sizeof( funcType ) );
funcType dev_f = host_f;
hipLaunchKernelGGL(( foo) , dim3(1),dim3(N), 0, 0, dev_w, dev_f, N, nondet);
hipDeviceSynchronize();
hipMemcpy(w,dev_w,size,hipMemcpyDeviceToHost);
printf("\nw:");
for (int i = 0; i < N; ++i){
printf(" %f ", w[i]);
}
free(w);
return EXIT_SUCCESS;
}
| 60633418eb2378d5874087ce4d40ac95a4b6a348.cu | //xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1 --boogie-file=${KERNEL_DIR}/axioms.bpl --no-inline
//error: possible null pointer access
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda.h>
#define N 8
typedef float(*funcType)(float*, unsigned int);
__device__ float multiplyByTwo(float *v, unsigned int tid)
{
return v[tid] * 2.0f;
}
__device__ float divideByTwo(float *v, unsigned int tid)
{
return v[tid] * 0.5f;
}
// Static pointers to device functions
__device__ funcType p_mul_func = multiplyByTwo;
__device__ funcType p_div_func = divideByTwo;
__global__ void foo(float *v, funcType f, unsigned int size, int i)
{
assert(i != 0);
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
void *x = (void*)f; /*ptr_to_ptr*/
if (i == 0) // the null pointer access occurs when i == 0; this is the failing case
x = x + 5;
funcType g = (funcType)x;
if (tid < size)
{
v[tid] = (*g)(v, tid);
}
}
int main(){
float* w;
float* dev_w;
int nondet;
int size = N*sizeof(float);
w =(float*) malloc(size);
for (int i = 0; i < N; ++i){
w[i] = i;
}
cudaMalloc((void**)&dev_w, size);
cudaMemcpy(dev_w,w, size,cudaMemcpyHostToDevice);
funcType host_f;
cudaMemcpyFromSymbol( &host_f, p_div_func, sizeof( funcType ) );
funcType dev_f = host_f;
foo <<<1,N>>>(dev_w, dev_f, N, nondet);
cudaThreadSynchronize();
cudaMemcpy(w,dev_w,size,cudaMemcpyDeviceToHost);
printf("\nw:");
for (int i = 0; i < N; ++i){
printf(" %f ", w[i]);
}
free(w);
return EXIT_SUCCESS;
}
|
50fc91681c22e1e29e7553ebf5b93315f72ff8b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
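/* thetta is a truncated Jacobi theta series: the loop sums
       exp(pi*i*tau*n^2) * exp(2*i*z*n)   for n = -10 .. 9,
   i.e. theta3(z | tau) with the lattice cut off at |n| <= 10. */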
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 10.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(1.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (2.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
for(v=0;v<30;v++)
{
/* the way of playing the game has a way of changing the rules: James Gleick's Chaos */
cue = cue - (moux*(cosc(cue) - ai*moux*sins(cue))/(cosc(cue)+ai*mouy*sins(cue)));
accume = accume *cue;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
} | 50fc91681c22e1e29e7553ebf5b93315f72ff8b4.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
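/* qpoch is a truncated q-Pochhammer-style product: the loop accumulates
       (1 - a*q)(1 - a*q^2)...(1 - a*q^79)
   (the k = 0 factor (1 - a) is not included) and returns 0 whenever |q| > 1,
   where the corresponding infinite product would not converge. */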
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
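/* powc evaluates ag^bg via the polar form ag = R * e^(i*theta):
       ag^(c + d*i) = R^c * e^(-d*theta) * e^( i*(d*ln(R) + c*theta) ),
   with mesp holding the R^(c+d*i) factor and frim the e^(i*theta*(c+d*i)) factor. */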
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
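// note on thess() above: each factor is (1 - q^(2n)) * (1 + 2*q^(2n-1)*cos(2z) + q^(4n-2))
// with n = v+1, i.e. a 10-term truncation of the Jacobi triple-product form of
// theta_3; thess4 below flips the middle sign (theta_4 form) and thass swaps
// cosc for the sins helper.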
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265358979,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265358979,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this gets the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
const float scale = 10.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(1.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (2.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
for(v=0;v<30;v++)
{
/* the way of playing the game has a way of changing the rules: James Gleick's Chaos */
cue = cue - (moux*(cosc(cue) - ai*moux*sins(cue))/(cosc(cue)+ai*mouy*sins(cue)));
accume = accume *cue;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
} |
fe02e87d71c80268fe0829b463051ac946905904.hip | // !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>   // malloc, free, rand
#include <sys/time.h> // gettimeofday / struct timeval used by the timers below
#define DIM 512
/*
* An example of using shared memory to optimize performance of a parallel
* reduction by constructing partial results for a thread block in shared memory
* before flushing to global memory.
*/
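/*
 * Illustration (added note): one block reducing eight values in shared memory
 * with the interleaved-pair scheme used by the interleaved kernels below --
 *     [a b c d e f g h]
 *     [a+e b+f c+g d+h . . . .]   stride 4
 *     [a+e+c+g b+f+d+h . . . .]   stride 2
 *     [a+b+...+h . . . . . . .]   stride 1
 * thread 0 then writes smem[0] to g_odata[blockIdx.x].
 */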
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
printf("%s: %.5f sec ", name, ((float) (end_time - start_time)) / (1000 * 1000));
return end_time - start_time;
}
// Recursive Implementation of Interleaved Pair Approach
int recursiveReduce(int *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
for (int i = 0; i < stride; i++)
data[i] += data[i + stride];
return recursiveReduce(data, stride);
}
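// e.g. recursiveReduce on {1, 2, 3, 4}: stride 2 gives {1+3, 2+4} = {4, 6},
// stride 1 gives {4+6} = {10}, which is the value returned.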
__global__ void reduceNeighboredGmem_1(int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0) {
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceNeighboredSmem_2(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0){
smem[tid] += smem[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceNeighboredSmemNoDivergence_3(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int index = 2 * stride * tid;
if (index < blockDim.x) {
smem[index] += smem[index + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceInterleavedSmem_4(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
smem[tid] += smem[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceUnrolling2_5(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2 data blocks
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
smem[tid] += smem[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceUnrollingWarp8_6(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
if (tid < stride) {
smem[tid] += smem[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
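// Added sketch (hypothetical helper, not called anywhere in this file): the
// volatile last-warp unrolling above can also be written with warp shuffles.
// Assumes HIP's __shfl_down intrinsic and the built-in warpSize (64 on most
// AMD GPUs); shown only as an illustration, not as one of the measured kernels.
__device__ int warpReduceSumSketch(int val)
{
    // each step folds the upper half of the active warp onto the lower half
    for (int offset = warpSize / 2; offset > 0; offset >>= 1)
        val += __shfl_down(val, offset);
    return val;
}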
__global__ void reduceCompleteUnrolling8_7(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnrolling8Template_8(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction and complete unroll
if (iBlockSize >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (iBlockSize >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (iBlockSize >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (iBlockSize >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elements to reduce
printf(" with array size %d ", size);
// execution configuration
int blocksize = DIM; // initial block size
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
// mask off all but the low byte to force max number to 255
h_idata[i] = (int)( rand() & 0xFF );
memcpy (tmp, h_idata, bytes);
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(hipMalloc((void **) &d_idata, bytes));
CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int)));
// cpu reduction
long long start_time = start_timer();
int cpu_sum = recursiveReduce (tmp, size);
stop_timer(start_time, "cpu_sum time");
printf(" cpu_sum: %d\n", cpu_sum);
// reduce reduceNeighboredGmem_1
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
hipLaunchKernelGGL(( reduceNeighboredGmem_1), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
hipDeviceSynchronize();
stop_timer(start_time, "reduceNeighboredGmem_1 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceNeighboredSmem_2
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
hipLaunchKernelGGL(( reduceNeighboredSmem_2), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
hipDeviceSynchronize();
stop_timer(start_time, "reduceNeighboredSmem_2 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceNeighboredSmemNoDivergence_3
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
hipLaunchKernelGGL(( reduceNeighboredSmemNoDivergence_3), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
hipDeviceSynchronize();
stop_timer(start_time, "reduceNeighboredSmemNoDivergence_3 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceInterleavedSmem_4
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
hipLaunchKernelGGL(( reduceInterleavedSmem_4), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size);
hipDeviceSynchronize();
stop_timer(start_time, "reduceInterleavedSmem_4 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceUnrolling2_5
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
hipLaunchKernelGGL(( reduceUnrolling2_5), dim3(grid.x/2), dim3(block), 0, 0, d_idata, d_odata, size);
hipDeviceSynchronize();
stop_timer(start_time, "reduceUnrolling2_5 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/2, block.x);
// reduce reduceUnrollingWarp8_6
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
hipLaunchKernelGGL(( reduceUnrollingWarp8_6), dim3(grid.x/8), dim3(block), 0, 0, d_idata, d_odata, size);
hipDeviceSynchronize();
stop_timer(start_time, "reduceUnrollingWarp8_6 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/8, block.x);
// reduce reduceCompleteUnrolling8_7
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
hipLaunchKernelGGL(( reduceCompleteUnrolling8_7), dim3(grid.x/8), dim3(block), 0, 0, d_idata, d_odata, size);
hipDeviceSynchronize();
stop_timer(start_time, "reduceCompleteUnrolling8_7 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/8, block.x);
// reduce reduceCompleteUnrolling8Template_8
CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
hipDeviceSynchronize();
start_time = start_timer();
switch (blocksize) {
case 1024:
hipLaunchKernelGGL(( reduceCompleteUnrolling8Template_8<1024>), dim3(grid.x/8), dim3(block), 0, 0, d_idata, d_odata, size);
break;
case 512:
hipLaunchKernelGGL(( reduceCompleteUnrolling8Template_8<512>), dim3(grid.x/8), dim3(block), 0, 0, d_idata, d_odata, size);
break;
case 256:
hipLaunchKernelGGL(( reduceCompleteUnrolling8Template_8<256>), dim3(grid.x/8), dim3(block), 0, 0, d_idata, d_odata, size);
break;
case 128:
hipLaunchKernelGGL(( reduceCompleteUnrolling8Template_8<128>), dim3(grid.x/8), dim3(block), 0, 0, d_idata, d_odata, size);
break;
case 64:
hipLaunchKernelGGL(( reduceCompleteUnrolling8Template_8<64>), dim3(grid.x/8), dim3(block), 0, 0, d_idata, d_odata, size);
break;
}
hipDeviceSynchronize();
stop_timer(start_time, "reduceCompleteUnrolling8Template_8 time");
CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int),
hipMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/8, block.x);
// free host memory
free(h_idata);
free(h_odata);
free(tmp);
// free device memory
CHECK(hipFree(d_idata));
CHECK(hipFree(d_odata));
// reset device
CHECK(hipDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
| fe02e87d71c80268fe0829b463051ac946905904.cu | #include "../common/common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>   // malloc, free, rand
#include <sys/time.h> // gettimeofday / struct timeval used by the timers below
#define DIM 512
/*
* An example of using shared memory to optimize performance of a parallel
* reduction by constructing partial results for a thread block in shared memory
* before flushing to global memory.
*/
// Returns the current time in microseconds
long long start_timer() {
struct timeval tv;
gettimeofday(&tv, NULL);
return tv.tv_sec * 1000000 + tv.tv_usec;
}
// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, const char *name) {
struct timeval tv;
gettimeofday(&tv, NULL);
long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
printf("%s: %.5f sec ", name, ((float) (end_time - start_time)) / (1000 * 1000));
return end_time - start_time;
}
// Recursive Implementation of Interleaved Pair Approach
int recursiveReduce(int *data, int const size)
{
if (size == 1) return data[0];
int const stride = size / 2;
for (int i = 0; i < stride; i++)
data[i] += data[i + stride];
return recursiveReduce(data, stride);
}
__global__ void reduceNeighboredGmem_1(int *g_idata, int *g_odata,
unsigned int n)
{
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
// in-place reduction in global memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0) {
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
__global__ void reduceNeighboredSmem_2(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
if ((tid % (2 * stride)) == 0){
smem[tid] += smem[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceNeighboredSmemNoDivergence_3(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = 1; stride < blockDim.x; stride *= 2) {
int index = 2 * stride * tid;
if (index < blockDim.x) {
smem[index] += smem[index + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceInterleavedSmem_4(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x;
// boundary check
if (idx >= n) return;
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
smem[tid] += smem[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceUnrolling2_5(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 2;
// unrolling 2 data blocks
if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x];
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
if (tid < stride) {
smem[tid] += smem[tid + stride];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
__global__ void reduceUnrollingWarp8_6(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
if (tid < stride) {
smem[tid] += smem[tid + stride];
}
// synchronize within threadblock
__syncthreads();
}
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
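// Added sketch (hypothetical helper, not called anywhere in this file): on
// CUDA 9+ the volatile last-warp unrolling above can also be written with
// warp shuffles. Assumes a full 32-thread warp participates; shown only as an
// illustration, not as one of the measured kernels.
__device__ int warpReduceSumSketch(int val)
{
    // each step folds the upper half of the warp onto the lower half
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}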
__global__ void reduceCompleteUnrolling8_7(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction in shared memory
if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
template <unsigned int iBlockSize>
__global__ void reduceCompleteUnrolling8Template_8(int *g_idata, int *g_odata,
unsigned int n)
{
__shared__ int smem[DIM];
// set thread ID
unsigned int tid = threadIdx.x;
unsigned int idx = blockIdx.x * blockDim.x * 8 + threadIdx.x;
// convert global data pointer to the local pointer of this block
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8
if (idx + 7 * blockDim.x < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + 2 * blockDim.x];
int a4 = g_idata[idx + 3 * blockDim.x];
int b1 = g_idata[idx + 4 * blockDim.x];
int b2 = g_idata[idx + 5 * blockDim.x];
int b3 = g_idata[idx + 6 * blockDim.x];
int b4 = g_idata[idx + 7 * blockDim.x];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
smem[tid] = idata[tid];
__syncthreads();
// in-place reduction and complete unroll
if (iBlockSize >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
__syncthreads();
if (iBlockSize >= 512 && tid < 256) smem[tid] += smem[tid + 256];
__syncthreads();
if (iBlockSize >= 256 && tid < 128) smem[tid] += smem[tid + 128];
__syncthreads();
if (iBlockSize >= 128 && tid < 64) smem[tid] += smem[tid + 64];
__syncthreads();
// unrolling warp
if (tid < 32)
{
volatile int *vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
int main(int argc, char **argv)
{
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("%s starting reduction at ", argv[0]);
printf("device %d: %s ", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
bool bResult = false;
// initialization
int size = 1 << 24; // total number of elements to reduce
printf(" with array size %d ", size);
// execution configuration
int blocksize = DIM; // initial block size
dim3 block (blocksize, 1);
dim3 grid ((size + block.x - 1) / block.x, 1);
printf("grid %d block %d\n", grid.x, block.x);
// allocate host memory
size_t bytes = size * sizeof(int);
int *h_idata = (int *) malloc(bytes);
int *h_odata = (int *) malloc(grid.x * sizeof(int));
int *tmp = (int *) malloc(bytes);
// initialize the array
for (int i = 0; i < size; i++)
// mask off all but the low byte to force max number to 255
h_idata[i] = (int)( rand() & 0xFF );
memcpy (tmp, h_idata, bytes);
int gpu_sum = 0;
// allocate device memory
int *d_idata = NULL;
int *d_odata = NULL;
CHECK(cudaMalloc((void **) &d_idata, bytes));
CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int)));
// cpu reduction
long long start_time = start_timer();
int cpu_sum = recursiveReduce (tmp, size);
stop_timer(start_time, "cpu_sum time");
printf(" cpu_sum: %d\n", cpu_sum);
// reduce reduceNeighboredGmem_1
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
reduceNeighboredGmem_1<<<grid, block>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
stop_timer(start_time, "reduceNeighboredGmem_1 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceNeighboredSmem_2
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
reduceNeighboredSmem_2<<<grid, block>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
stop_timer(start_time, "reduceNeighboredSmem_2 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceNeighboredSmemNoDivergence_3
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
reduceNeighboredSmemNoDivergence_3<<<grid, block>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
stop_timer(start_time, "reduceNeighboredSmemNoDivergence_3 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceInterleavedSmem_4
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
reduceInterleavedSmem_4<<<grid, block>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
stop_timer(start_time, "reduceInterleavedSmem_4 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x, block.x);
// reduce reduceUnrolling2_5
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
reduceUnrolling2_5<<<grid.x/2, block>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
stop_timer(start_time, "reduceUnrolling2_5 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/2, block.x);
// reduce reduceUnrollingWarp8_6
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
reduceUnrollingWarp8_6<<<grid.x/8, block>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
stop_timer(start_time, "reduceUnrollingWarp8_6 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/8, block.x);
// reduce reduceCompleteUnrolling8_7
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
reduceCompleteUnrolling8_7<<<grid.x/8, block>>>(d_idata, d_odata, size);
cudaDeviceSynchronize();
stop_timer(start_time, "reduceCompleteUnrolling8_7 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/8, block.x);
// reduce reduceCompleteUnrolling8Template_8
CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
cudaDeviceSynchronize();
start_time = start_timer();
switch (blocksize) {
case 1024:
reduceCompleteUnrolling8Template_8<1024><<<grid.x/8, block>>>(d_idata, d_odata, size);
break;
case 512:
reduceCompleteUnrolling8Template_8<512><<<grid.x/8, block>>>(d_idata, d_odata, size);
break;
case 256:
reduceCompleteUnrolling8Template_8<256><<<grid.x/8, block>>>(d_idata, d_odata, size);
break;
case 128:
reduceCompleteUnrolling8Template_8<128><<<grid.x/8, block>>>(d_idata, d_odata, size);
break;
case 64:
reduceCompleteUnrolling8Template_8<64><<<grid.x/8, block>>>(d_idata, d_odata, size);
break;
}
cudaDeviceSynchronize();
stop_timer(start_time, "reduceCompleteUnrolling8Template_8 time");
CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int),
cudaMemcpyDeviceToHost));
gpu_sum = 0;
for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i];
printf(" gpu_sum: %d <<<grid %d block %d>>>\n", gpu_sum, grid.x/8, block.x);
// free host memory
free(h_idata);
free(h_odata);
free(tmp);
// free device memory
CHECK(cudaFree(d_idata));
CHECK(cudaFree(d_odata));
// reset device
CHECK(cudaDeviceReset());
// check the results
bResult = (gpu_sum == cpu_sum);
if(!bResult) printf("Test failed!\n");
return EXIT_SUCCESS;
}
|
600211f253d568ade2627c370ec2555241e41029.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/hip/HIPBlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
IntArrayRef tensor_sizes = tensor.sizes();
if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
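// Worked example of the stride logic above: a contiguous row-major 2x3 tensor
// has sizes {2,3} and strides {3,1}, so the second branch fires and it is
// passed to cuBLAS transposed without a copy; a column-major layout with
// strides {1,2} takes the first branch (transpose_tensor = false); any other
// layout is cloned contiguous and flagged transposed.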
namespace {
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
checkAllSameGPU("addmm", args);
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.toComplexDouble() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
if (mat1.numel() == 0) {
// By definition, when beta==0, values in self should be ignored. nans and infs
// should not propagate
if (beta.toComplexDouble() == 0.) {
return result.zero_();
}
return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
const Tensor &mat1, const Tensor &mat2,
Scalar beta, Scalar alpha) {
{
at::NoNamesGuard guard;
Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
"Batch tensors should be 3D, got dimensions ", batch1.dim(),
" and ", batch2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == batch1.device() &&
out.device() == batch2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
batch1.device(), " and ", batch2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
int64_t batchnum = batch1.size(0);
int64_t m1d1 = batch1.size(1);
int64_t innerdim = batch1.size(2);
int64_t m2d2 = batch2.size(2);
TORCH_CHECK(batchnum == batch2.size(0),
"equal number of batches expected");
TORCH_CHECK(m1d1 == self_.size(0),
"first dimension of batch1 must match first dimension of input");
TORCH_CHECK(m2d2 == self_.size(1),
"second dimension of batch2 must match second dimension of input");
TORCH_CHECK(innerdim == batch2.size(1),
"second dimension of batch1 must match first dimension of batch2");
if (&out != &self) {
at::native::resize_as_(out, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(out, self_);
}
}
for (int64_t i=0; i<batchnum; i++) {
addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
beta = 1;
}
return out;
}
Tensor& addbmm__cuda(Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
return self;
}
Tensor addbmm_cuda(const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha)
{
Tensor out = at::empty({0}, self.options());
addbmm_out_cuda(out, self, batch1, batch2, beta, alpha);
return out;
}
namespace {
inline void dot_check(const Tensor& self, const Tensor& other) {
TORCH_CHECK(
self.dim() == 1 && other.dim() == 1,
"1D tensors expected, but got ",
self.dim(),
"D and ",
other.dim(),
"D tensors");
TORCH_CHECK(
self.scalar_type() == other.scalar_type(),
"dot : expected both vectors to have same dtype, but found ",
self.scalar_type(),
" and ",
other.scalar_type());
TORCH_CHECK(
self.numel() == other.numel(),
"inconsistent tensor size, expected tensor [",
self.numel(),
"] and src [",
other.numel(),
"] to have the same number of elements, but got ",
self.numel(),
" and ",
other.numel(),
" elements respectively");
TORCH_CHECK(
self.device() == other.device(),
"expected all tensors to be on the same device. Found: ",
self.device(),
", ",
other.device());
TORCH_CHECK(
(self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
(other.stride(0) <= INT_MAX),
"dot only supports n, incx, incy with the bound [val] <= %d",
INT_MAX);
}
} // anonymous namespace
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(handle, HIPBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::dot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
Tensor vdot_cuda(const Tensor& self, const Tensor& other) {
if (!self.is_complex()) {
return dot_cuda(self, other);
}
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(
handle, HIPBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::vdot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
} }
| 600211f253d568ade2627c370ec2555241e41029.cu | #include <ATen/ATen.h>
#include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/cuda/CUDABlas.h>
namespace at { namespace native {
Tensor baddbmm_cuda(const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm");
return legacy::cuda::_th_baddbmm(b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm_out_cuda(Tensor &result, const Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
Tensor b_self;
std::tie(b_self) = expand_size(self, {batch1.size(0), batch1.size(1), batch2.size(2)}, "baddbmm_out");
return legacy::cuda::_th_baddbmm_out(result, b_self, batch1, batch2, beta, alpha);
}
Tensor& baddbmm__cuda(Tensor& self, const Tensor& batch1, const Tensor& batch2, Scalar beta, Scalar alpha) {
return baddbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
}
Tensor& bmm_out_cuda(Tensor &result, const Tensor& batch1, const Tensor& batch2) {
result.resize_({ batch1.size(0), batch1.size(1), batch2.size(2) });
return legacy::cuda::_th_bmm_out(result, batch1, batch2);
}
Tensor bmm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({0}, self.options());
return native::bmm_out_cuda(result, self, mat2);
}
Tensor prepare_matrix_for_cublas(Tensor& tensor, bool& transpose_tensor) {
Tensor tensor_;
IntArrayRef tensor_strides = tensor.strides();
IntArrayRef tensor_sizes = tensor.sizes();
if ((tensor_strides[0] == 1) && (tensor_strides[1] >= std::max<int64_t>(1, tensor_sizes[0]))) {
tensor_ = tensor;
transpose_tensor = false;
} else if ((tensor_strides[1] == 1) && (tensor_strides[0] >= std::max<int64_t>(1, tensor_sizes[1]))) {
tensor_ = tensor;
transpose_tensor = true;
} else {
transpose_tensor = true;
tensor_ = tensor.clone(at::MemoryFormat::Contiguous);
}
return tensor_;
}
namespace {
Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& self, const Tensor& mat1, const Tensor& mat2, Scalar beta, Scalar alpha) {
TORCH_CHECK(mat1.dim() == 2 && mat2.dim() == 2, "tensors must be 2-D");
TensorArg args[]{{result, "out", 0}, {self, "self", 1}, {mat1, "mat1", 2}, {mat2, "mat2", 3}};
checkAllSameGPU("addmm", args);
Tensor self_;
if (&result != &self) {
std::tie(self_) = expand_size(self, {mat1.size(0), mat2.size(1)}, "addmm");
} else {
self_ = self;
}
IntArrayRef mat1_sizes = mat1.sizes();
IntArrayRef mat2_sizes = mat2.sizes();
IntArrayRef self__sizes = self_.sizes();
TORCH_CHECK(mat1_sizes[1] == mat2_sizes[0], "mat1 dim 1 must match mat2 dim 0");
TORCH_CHECK(self__sizes[0] == mat1_sizes[0], "self_ dim 0 must match mat1 dim 0");
TORCH_CHECK(self__sizes[1] == mat2_sizes[1], "self_ dim 1 must match mat2 dim 1");
if (&result != &self) {
at::native::resize_as_(result, self_);
if (beta.toComplexDouble() != 0.0) {
at::native::copy_(result, self_);
}
}
TORCH_CHECK(result.dim() == 2 && self_.dim() == 2, "tensors must be 2-D");
IntArrayRef result_sizes = result.sizes();
if ((result_sizes[0] == 0) || (result_sizes[1] == 0)) {
return result;
}
bool transpose_result;
Tensor result_ = prepare_matrix_for_cublas(result, transpose_result);
bool transpose_mat1;
bool transpose_mat2;
Tensor mat1_ = transpose_result ? mat2 : mat1;
Tensor mat2_ = transpose_result ? mat1 : mat2;
mat1_ = prepare_matrix_for_cublas(mat1_, transpose_mat1);
mat2_ = prepare_matrix_for_cublas(mat2_, transpose_mat2);
if (transpose_result) {
transpose_mat1 = !transpose_mat1;
transpose_mat2 = !transpose_mat2;
mat1_sizes = mat1_.sizes();
mat2_sizes = mat2_.sizes();
}
int64_t m = mat1_sizes[transpose_result ? 1 : 0];
int64_t k = mat1_sizes[transpose_result ? 0 : 1];
int64_t n = mat2_sizes[transpose_result ? 0 : 1];
int64_t mat1_ld = mat1_.stride((transpose_mat1 == transpose_result) ? 1 : 0);
int64_t mat2_ld = mat2_.stride((transpose_mat2 == transpose_result) ? 1 : 0);
int64_t result_ld = result_.stride(transpose_result ? 0 : 1);
at::ScalarType scalar_type = self_.scalar_type();
if (mat1.numel() == 0) {
// By definition, when beta==0, values in self should be ignored. nans and infs
// should not propagate
if (beta.toComplexDouble() == 0.) {
return result.zero_();
}
return at::native::mul_out(result, self, at::native::scalar_tensor(beta, at::device(at::kCPU).dtype(self.scalar_type())));
}
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "addmm_cuda", [&] {
scalar_t alpha_val = alpha.to<scalar_t>();
scalar_t beta_val = beta.to<scalar_t>();
scalar_t* mat1_ptr = mat1_.data_ptr<scalar_t>();
scalar_t* mat2_ptr = mat2_.data_ptr<scalar_t>();
scalar_t* result_ptr = result_.data_ptr<scalar_t>();
at::cuda::blas::gemm<scalar_t>(
transpose_mat1 ? 't' : 'n',
transpose_mat2 ? 't' : 'n',
m, n, k,
alpha_val,
mat1_ptr, mat1_ld,
mat2_ptr, mat2_ld,
beta_val,
result_ptr, result_ld
);
});
if (result.data_ptr() != result_.data_ptr()) {
result.copy_(result_);
}
return result;
}
} // anonymous namespace
Tensor& mm_out_cuda(Tensor& result, const Tensor& self, const Tensor& mat2) {
result.resize_({ self.size(0), mat2.size(1) });
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor mm_cuda(const Tensor& self, const Tensor& mat2) {
Tensor result = at::empty({ self.size(0), mat2.size(1) }, self.options());
return addmm_out_cuda_impl(result, result, self, mat2, 0, 1);
}
Tensor& addmm_out_cuda(Tensor &out, const Tensor &self,
const Tensor &mat1, const Tensor &mat2,
Scalar beta, Scalar alpha) {
{
at::NoNamesGuard guard;
Tensor& result = addmm_out_cuda_impl(out, self, mat1, mat2, beta, alpha);
}
at::namedinference::propagate_names_for_addmm(out, mat1, mat2, self);
return out;
}
Tensor addmm_cuda(const Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
Tensor out = at::empty({0}, self.options());
addmm_out_cuda(out, self, mat1, mat2, beta, alpha);
return out;
}
Tensor& addmm__cuda(Tensor& self, const Tensor& mat1, const Tensor& mat2,
Scalar beta, Scalar alpha) {
addmm_out_cuda(self, self, mat1, mat2, beta, alpha);
return self;
}
Tensor& addbmm_out_cuda(Tensor& out, const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
TORCH_CHECK(batch1.dim() == 3 && batch2.dim() == 3,
"Batch tensors should be 3D, got dimensions ", batch1.dim(),
" and ", batch2.dim());
Tensor self_;
if (&out != &self) {
std::tie(self_) = expand_size(self, {batch1.size(1), batch2.size(2)}, "addbmm");
} else {
self_ = self;
}
TORCH_CHECK(out.device() == self_.device() &&
out.device() == batch1.device() &&
out.device() == batch2.device(),
"Expected all tensors to be on the same device. Found: ",
out.device(), ", ", self_.device(), ", ",
batch1.device(), " and ", batch2.device());
TORCH_CHECK(self_.dim() == 2,
"2D tensor expected, got ", self_.dim(), "D tensor for input");
int64_t batchnum = batch1.size(0);
int64_t m1d1 = batch1.size(1);
int64_t innerdim = batch1.size(2);
int64_t m2d2 = batch2.size(2);
TORCH_CHECK(batchnum == batch2.size(0),
"equal number of batches expected");
TORCH_CHECK(m1d1 == self_.size(0),
"first dimension of batch1 must match first dimension of input");
TORCH_CHECK(m2d2 == self_.size(1),
"second dimension of batch2 must match second dimension of input");
TORCH_CHECK(innerdim == batch2.size(1),
"second dimension of batch1 must match first dimension of batch2");
if (&out != &self) {
at::native::resize_as_(out, self_);
if (beta.to<double>() != 0.0) {
at::native::copy_(out, self_);
}
}
for (int64_t i=0; i<batchnum; i++) {
addmm_out_cuda(out, out, batch1[i], batch2[i], beta, alpha);
beta = 1;
}
return out;
}
Tensor& addbmm__cuda(Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha) {
addbmm_out_cuda(self, self, batch1, batch2, beta, alpha);
return self;
}
Tensor addbmm_cuda(const Tensor& self,
const Tensor& batch1, const Tensor& batch2,
Scalar beta, Scalar alpha)
{
Tensor out = at::empty({0}, self.options());
addbmm_out_cuda(out, self, batch1, batch2, beta, alpha);
return out;
}
namespace {
inline void dot_check(const Tensor& self, const Tensor& other) {
TORCH_CHECK(
self.dim() == 1 && other.dim() == 1,
"1D tensors expected, but got ",
self.dim(),
"D and ",
other.dim(),
"D tensors");
TORCH_CHECK(
self.scalar_type() == other.scalar_type(),
"dot : expected both vectors to have same dtype, but found ",
self.scalar_type(),
" and ",
other.scalar_type());
TORCH_CHECK(
self.numel() == other.numel(),
"inconsistent tensor size, expected tensor [",
self.numel(),
"] and src [",
other.numel(),
"] to have the same number of elements, but got ",
self.numel(),
" and ",
other.numel(),
" elements respectively");
TORCH_CHECK(
self.device() == other.device(),
"expected all tensors to be on the same device. Found: ",
self.device(),
", ",
other.device());
TORCH_CHECK(
(self.numel() <= INT_MAX) && (self.stride(0) <= INT_MAX) &&
(other.stride(0) <= INT_MAX),
"dot only supports n, incx, incy with the bound [val] <= %d",
INT_MAX);
}
} // anonymous namespace
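// dot/vdot dispatch to cuBLAS; the PointerModeGuard below switches the handle
// to device pointer mode so the scalar result is written directly into the
// 0-dim GPU tensor instead of being staged through the host.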
Tensor dot_cuda(const Tensor& self, const Tensor& other) {
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND1(ScalarType::Half, self.scalar_type(), "dot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(handle, CUBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::dot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
Tensor vdot_cuda(const Tensor& self, const Tensor& other) {
if (!self.is_complex()) {
return dot_cuda(self, other);
}
at::NoNamesGuard guard;
dot_check(self, other);
const int n = static_cast<int>(self.numel());
int incx = static_cast<int>(self.stride(0));
int incy = static_cast<int>(other.stride(0));
if (n == 1) {
incx = 1;
incy = 1;
}
return AT_DISPATCH_COMPLEX_TYPES(self.scalar_type(), "vdot", [&] {
Tensor result = at::empty({}, self.options());
auto handle = at::cuda::getCurrentCUDABlasHandle();
at::cuda::blas::PointerModeGuard pointerModeGuard(
handle, CUBLAS_POINTER_MODE_DEVICE);
at::cuda::blas::vdot<scalar_t>(
handle,
n,
self.data_ptr<scalar_t>(),
incx,
other.data_ptr<scalar_t>(),
incy,
result.data_ptr<scalar_t>());
return result;
});
}
} }
|
025c487365e2a8c76fbbdbc5477430050f25f544.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include "constants.cuh"
#include "frequencies.cuh"
FrequenciesData* allocateMemoryToFrequenciesData(int numFrequencies){
FrequenciesData* frequenciesData = (FrequenciesData*)malloc(sizeof(FrequenciesData));
frequenciesData->numFrequencies = numFrequencies;
frequenciesData->frequencies = (double*) malloc (sizeof(double)*numFrequencies);
frequenciesData->frequenciesDnu = (double*) malloc (sizeof(double)*numFrequencies);
return frequenciesData;
}
void deallocateFrequenciesData(FrequenciesData* freqData){
free(freqData->frequencies);
free(freqData->frequenciesDnu);
free(freqData);
}
__host__ FrequenciesData* readWavelengthMicron(){
FrequenciesData* freqData;
int numFrequencies;
char line[30];
const char* nameFile = "inputs/wavelength_micron.inp";
FILE* wavelengthMicron = fopen(nameFile,"r");
if (wavelengthMicron == NULL){
printf("Failed to open wavelength_micron. Maybe don't exist\n");
exit(1);
}else{
fgets(line, 30, wavelengthMicron);
numFrequencies = atoi(line);
if (numFrequencies < 1){
printf("ERROR: File explicit < 1 point\n");
exit(1);
}else{
printf("Number frequencies: %d\n",numFrequencies);
freqData = allocateMemoryToFrequenciesData(numFrequencies);
int count=0;
while (fgets(line, 30, wavelengthMicron) != NULL){
freqData-> frequencies[count] = atof(line);
count++;
}
if (count != numFrequencies){
printf("ERROR: number of frequencies listed is not the same\n");
exit(1);
}
fclose(wavelengthMicron);
}
}
return freqData;
}
__host__ void convertMicronToFrequency(FrequenciesData* freqData){
int i;
for (i=0; i < freqData->numFrequencies ; i++){
//printf("freqData%d = %f\n",i,freqData->frequencies[i]);
freqData->frequencies[i] = 10000 * C / freqData->frequencies[i];
}
}
__host__ void calculateFrequenciesDnu(FrequenciesData* freqData){
int numFreq = freqData->numFrequencies;
if (numFreq == 1){
freqData->frequenciesDnu[0] = 1;
}else{
freqData->frequenciesDnu[0] = 0.5 * fabs(freqData->frequencies[1] - freqData->frequencies[0]);
freqData->frequenciesDnu[numFreq-1] = 0.5 * fabs(freqData->frequencies[numFreq-1] - freqData->frequencies[numFreq-2]);
int i;
for (i=1 ; i < numFreq-1 ; i++){
freqData->frequenciesDnu[i] = 0.5 * fabs(freqData->frequencies[i+1] - freqData->frequencies[i-1]);
}
}
}
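// Deep copy of the struct to the device: allocate the device struct and its two
// arrays, copy the host struct, then overwrite the two pointer members with the
// device array addresses before copying the array contents.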
FrequenciesData* frequenciesTransferToDevice(FrequenciesData* h_freqData){
printf("Transfer frequenciesData to device...\n");
FrequenciesData* d_freqData;
double* frequencies;
double* frequenciesDnu;
size_t sizeFrequencies = sizeof(double) * (h_freqData->numFrequencies);
hipMalloc((void**)&(d_freqData), sizeof(FrequenciesData) );
hipMalloc((void**)&frequencies, sizeFrequencies);
hipMalloc((void**)&frequenciesDnu, sizeFrequencies);
hipMemcpy(d_freqData, h_freqData, sizeof(FrequenciesData), hipMemcpyHostToDevice);
hipMemcpy(&(d_freqData->frequencies), &frequencies, sizeof(double *), hipMemcpyHostToDevice);
hipMemcpy(&(d_freqData->frequenciesDnu), &frequenciesDnu, sizeof(double *), hipMemcpyHostToDevice);
hipMemcpy(frequencies, h_freqData->frequencies, sizeFrequencies, hipMemcpyHostToDevice);
hipMemcpy(frequenciesDnu, h_freqData->frequenciesDnu, sizeFrequencies, hipMemcpyHostToDevice);
return d_freqData;
}
FrequenciesData* setUpFrequenciesData(){
printf("Set up frequencies...\n");
FrequenciesData* freqData = readWavelengthMicron();
convertMicronToFrequency(freqData);
calculateFrequenciesDnu(freqData);
return freqData;
}
| 025c487365e2a8c76fbbdbc5477430050f25f544.cu | #include <math.h>
#include "constants.cuh"
#include "frequencies.cuh"
FrequenciesData* allocateMemoryToFrequenciesData(int numFrequencies){
FrequenciesData* frequenciesData = (FrequenciesData*)malloc(sizeof(FrequenciesData));
frequenciesData->numFrequencies = numFrequencies;
frequenciesData->frequencies = (double*) malloc (sizeof(double)*numFrequencies);
frequenciesData->frequenciesDnu = (double*) malloc (sizeof(double)*numFrequencies);
return frequenciesData;
}
void deallocateFrequenciesData(FrequenciesData* freqData){
free(freqData->frequencies);
free(freqData->frequenciesDnu);
free(freqData);
}
__host__ FrequenciesData* readWavelengthMicron(){
FrequenciesData* freqData;
int numFrequencies;
char line[30];
const char* nameFile = "inputs/wavelength_micron.inp";
FILE* wavelengthMicron = fopen(nameFile,"r");
if (wavelengthMicron == NULL){
printf("Failed to open wavelength_micron. Maybe don't exist\n");
exit(1);
}else{
fgets(line, 30, wavelengthMicron);
numFrequencies = atoi(line);
if (numFrequencies < 1){
printf("ERROR: File explicit < 1 point\n");
exit(1);
}else{
printf("Number frequencies: %d\n",numFrequencies);
freqData = allocateMemoryToFrequenciesData(numFrequencies);
int count=0;
while (fgets(line, 30, wavelengthMicron) != NULL){
freqData-> frequencies[count] = atof(line);
count++;
}
if (count != numFrequencies){
printf("ERROR: number of frequencies listed is not the same\n");
exit(1);
}
fclose(wavelengthMicron);
}
}
return freqData;
}
__host__ void convertMicronToFrequency(FrequenciesData* freqData){
int i;
for (i=0; i < freqData->numFrequencies ; i++){
//printf("freqData%d = %f\n",i,freqData->frequencies[i]);
freqData->frequencies[i] = 10000 * C / freqData->frequencies[i];
}
}
__host__ void calculateFrequenciesDnu(FrequenciesData* freqData){
int numFreq = freqData->numFrequencies;
if (numFreq == 1){
freqData->frequenciesDnu[0] = 1;
}else{
freqData->frequenciesDnu[0] = 0.5 * fabs(freqData->frequencies[1] - freqData->frequencies[0]);
freqData->frequenciesDnu[numFreq-1] = 0.5 * fabs(freqData->frequencies[numFreq-1] - freqData->frequencies[numFreq-2]);
int i;
for (i=1 ; i < numFreq-1 ; i++){
freqData->frequenciesDnu[i] = 0.5 * fabs(freqData->frequencies[i+1] - freqData->frequencies[i-1]);
}
}
}
FrequenciesData* frequenciesTransferToDevice(FrequenciesData* h_freqData){
printf("Transfer frequenciesData to device...\n");
FrequenciesData* d_freqData;
double* frequencies;
double* frequenciesDnu;
size_t sizeFrequencies = sizeof(double) * (h_freqData->numFrequencies);
cudaMalloc((void**)&(d_freqData), sizeof(FrequenciesData) );
cudaMalloc((void**)&frequencies, sizeFrequencies);
cudaMalloc((void**)&frequenciesDnu, sizeFrequencies);
cudaMemcpy(d_freqData, h_freqData, sizeof(FrequenciesData), cudaMemcpyHostToDevice);
cudaMemcpy(&(d_freqData->frequencies), &frequencies, sizeof(double *), cudaMemcpyHostToDevice);
cudaMemcpy(&(d_freqData->frequenciesDnu), &frequenciesDnu, sizeof(double *), cudaMemcpyHostToDevice);
cudaMemcpy(frequencies, h_freqData->frequencies, sizeFrequencies, cudaMemcpyHostToDevice);
cudaMemcpy(frequenciesDnu, h_freqData->frequenciesDnu, sizeFrequencies, cudaMemcpyHostToDevice);
return d_freqData;
}
FrequenciesData* setUpFrequenciesData(){
printf("Set up frequencies...\n");
FrequenciesData* freqData = readWavelengthMicron();
convertMicronToFrequency(freqData);
calculateFrequenciesDnu(freqData);
return freqData;
}
|
560c9e726c24e0123df4afe075bb44f92e36fc64.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "hip/hip_fp16.h"
#include "cuda_fp16.hpp"
#include "hip/hip_runtime.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
#ifdef TESTPROGRESS16
float* tempBuffer=0;
float* tempWeight = 0;
int iMaxSize=0;
#endif
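// Grows the two shared half-precision staging buffers on demand (index 0 holds
// layer input, index 1 layer output); a buffer is only reallocated when a
// larger size than its current capacity is requested.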
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = {iGiveSize,iOutSize};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum])
{
DecGenerateMemory(pMSize[cnum] * sizeof(half));
cuda_free_allType(publicMemory[cnum]);
}
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
#ifdef TESTPROGRESS16
if (iMaxSize < pMSize[cnum])
{
iMaxSize = pMSize[cnum];
if (tempBuffer) cuda_free(tempBuffer);
tempBuffer = cuda_make_array(0, iMaxSize);
if (tempWeight) cuda_free_allType(tempWeight);
tempWeight = cuda_make_array(0, iMaxSize);
}
#endif
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(hipPeekAtLastError());
}
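// Converts a layer's FP32 weights to FP16 on the device and frees the FP32
// copy, reusing weights_gpu to hold the half data; biases get the same
// treatment unless the layer is flagged as not supporting half bias.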
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(hipMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
LAYERDATA* layerdata = (LAYERDATA *)l->layerdata;
CONVPROP* prop=(CONVPROP *)layerdata->layerData;
if (prop->bUnSupportBias) return;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
}
#ifdef GPUHALFABILITY
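// The kernels below apply the per-filter bias (and, for the fused variants, the
// activation) in half precision; the launch grid is
// (spatial positions / BLOCK, filters, batch).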
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hlt(b, half(-1.0f))) b = half(-1.0f);
if (__hgt(b, half(1.0f))) b = half(1.0f);
output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
#endif
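// Fused half-precision bias + activation dispatch: falls back to the bias-only
// kernel when the activation is unsupported, and returns immediately when half
// bias is unsupported, leaving the work to the caller's FP32 path.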
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct,int bUnsportBias)
{
#ifdef GPUHALFABILITY
if (bUnsportBias) return;
if (bUnSupportAct)
{
add_bias_half_gpu(output, biases, batch, n, size);
return;
}
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(hipPeekAtLastError());
#endif
}
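// FP16 inference path for a convolutional layer: the input/output are staged
// through the shared half buffers when the neighbouring layers use FP32, cuDNN
// runs the forward convolution, and bias + activation are applied either fused
// in half or afterwards in FP32.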
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
hipError_t stats = hipMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), hipMemcpyDeviceToDevice);
}*/
#endif
#ifdef TESTPROGRESS16
if (output == l.output_gpu)
{
hipMemcpy(publicMemory[1], l.output_gpu, l.outputs * sizeof(half), hipMemcpyDeviceToDevice);
}
cuda_convert_f16_to_f32((half*)publicMemory[1], l.outputs, tempBuffer);
cuda_convert_f16_to_f32((half*)l.biases_gpu, l.n, tempWeight);
add_bias_gpu(tempBuffer, tempWeight, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(tempBuffer, l.outputs * l.batch, l.activation);
cuda_convert_f32_to_f16(tempBuffer, l.outputs, publicMemory[1]);
if (output == l.output_gpu)
{
hipMemcpy(l.output_gpu, publicMemory[1], l.outputs * sizeof(half),hipMemcpyDeviceToDevice);
}
#else
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate,prop->bUnSupportBias);
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
if(prop->bUnSupportBias) add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
if (l.binary || l.xnor) swap_binary(&l);
} | 560c9e726c24e0123df4afe075bb44f92e36fc64.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "cuda_fp16.h"
#include "cuda_fp16.hpp"
#include "cuda.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
#ifdef TESTPROGRESS16
float* tempBuffer=0;
float* tempWeight = 0;
int iMaxSize=0;
#endif
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = {iGiveSize,iOutSize};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum])
{
DecGenerateMemory(pMSize[cnum] * sizeof(half));
cuda_free_allType(publicMemory[cnum]);
}
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
#ifdef TESTPROGRESS16
if (iMaxSize < pMSize[cnum])
{
iMaxSize = pMSize[cnum];
if (tempBuffer) cuda_free(tempBuffer);
tempBuffer = cuda_make_array(0, iMaxSize);
if (tempWeight) cuda_free_allType(tempWeight);
tempWeight = cuda_make_array(0, iMaxSize);
}
#endif
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(cudaPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(cudaMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
LAYERDATA* layerdata = (LAYERDATA *)l->layerdata;
CONVPROP* prop=(CONVPROP *)layerdata->layerData;
if (prop->bUnSupportBias) return;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
}
#ifdef GPUHALFABILITY
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hlt(b, half(-1.0f))) b = half(-1.0f);
if (__hgt(b, half(1.0f))) b = half(1.0f);
output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
#endif
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct,int bUnsportBias)
{
#ifdef GPUHALFABILITY
if (bUnsportBias) return;
if (bUnSupportAct)
{
add_bias_half_gpu(output, biases, batch, n, size);
return;
}
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(cudaPeekAtLastError());
#endif
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
cudaError_t stats = cudaMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), cudaMemcpyDeviceToDevice);
}*/
#endif
#ifdef TESTPROGRESS16
if (output == l.output_gpu)
{
cudaMemcpy(publicMemory[1], l.output_gpu, l.outputs * sizeof(half), cudaMemcpyDeviceToDevice);
}
cuda_convert_f16_to_f32((half*)publicMemory[1], l.outputs, tempBuffer);
cuda_convert_f16_to_f32((half*)l.biases_gpu, l.n, tempWeight);
add_bias_gpu(tempBuffer, tempWeight, l.batch, l.n, l.out_w * l.out_h);
activate_array_ongpu(tempBuffer, l.outputs * l.batch, l.activation);
cuda_convert_f32_to_f16(tempBuffer, l.outputs, publicMemory[1]);
if (output == l.output_gpu)
{
cudaMemcpy(l.output_gpu, publicMemory[1], l.outputs * sizeof(half),cudaMemcpyDeviceToDevice);
}
#else
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate,prop->bUnSupportBias);
#endif
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
//if (prop->bUnSupportActivate) OutPutGPUMemory(l.output_gpu, l.outputs, 0);
#endif
if(prop->bUnSupportBias) add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
if (l.binary || l.xnor) swap_binary(&l);
} |
612f804e358b6acff60a6c1256814f96dcd08e26.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaUtils.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li ([email protected])
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#include "Panda.h"
#ifdef _WIN32
#include <windows.h>
#include <time.h>
#elif MACOS
#include <sys/param.h>
#include <sys/sysctl.h>
#elif __linux
#include <unistd.h>
#include <sys/time.h>
#endif
#ifndef __PANDAUTILS_CU__
#define __PANDAUTILS_CU__
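// CUDA core count of device 0: multiprocessor count times a cores-per-SM value
// looked up from the compute-capability major version.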
int getGPUCoresNum() {
//assert(tid<total);
int arch_cores_sm[3] = {1, 8, 32 };
hipDeviceProp_t gpu_dev;
int tid = 0;
hipGetDeviceProperties(&gpu_dev, tid);
int sm_per_multiproc = 1;
if (gpu_dev.major == 9999 && gpu_dev.minor == 9999)
sm_per_multiproc = 1;
else if (gpu_dev.major <=2)
sm_per_multiproc = arch_cores_sm[gpu_dev.major];
else
sm_per_multiproc = arch_cores_sm[2];
return ((gpu_dev.multiProcessorCount)*(sm_per_multiproc));
//ShowLog("Configure Device ID:%d: Device Name:%s MultProcessorCount:%d sm_per_multiproc:%d", i, gpu_dev.name,gpu_dev.multiProcessorCount,sm_per_multiproc);
}
void sleep(int sleepMs)
{
#ifdef __linux
usleep(sleepMs * 1000); // usleep takes sleep time in us
#endif
#ifdef _WIN32
Sleep(sleepMs);
#endif
}
int getCPUCoresNum() {
#ifdef WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors;
#elif MACOS
int nm[2];
size_t len = 4;
uint32_t count;
nm[0] = CTL_HW; nm[1] = HW_AVAILCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) {
nm[1] = HW_NCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) { count = 1; }
}
return count;
#elif __linux
return sysconf(_SC_NPROCESSORS_ONLN);
#endif
}
void DoDiskLog(const char *str){
FILE *fptr;
char file_name[128];
sprintf(file_name,"%s","panda.log");
fptr = fopen(file_name,"a");
fprintf(fptr,"[PandaDiskLog]\t\t:%s\n",str);
//fprintf(fptr,"%s",__VA_ARGS__);
fclose(fptr);
//printf("\n");
}//void
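// Wall-clock time in seconds: gettimeofday on POSIX, GetTickCount (milliseconds
// converted to seconds) on Windows.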
double PandaTimer(){
#ifndef _WIN32
static struct timeval tv;
gettimeofday(&tv,NULL);
double curTime = tv.tv_sec + tv.tv_usec/1000000.0;
//ShowLog("\t Panda CurTime:%f", curTime);
return curTime;
#else
//newtime = localtime( &long_time2 );
double curTime = GetTickCount();
//ShowLog("\t Panda CurTime:%f", curTime);
curTime /=1000.0;
return curTime;
#endif
}
void __checkCudaErrors(hipError_t err, const char *file, const int line )
{
if(hipSuccess != err)
{
fprintf(stderr, "[PandaError][%s][%i]: CUDA Runtime API error %d: %s.\n",file, line, (int)err, hipGetErrorString( err ) );
exit((int)err);
}
}
//--------------------------------------------------------
// start a timer
// param : tv
//--------------------------------------------------------
//--------------------------------------------------------
// end a timer, and print out a message
//--------------------------------------------------------
#endif //__PANDAUTILS_CU__ | 612f804e358b6acff60a6c1256814f96dcd08e26.cu |
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaUtils.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li ([email protected])
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#include "Panda.h"
#ifdef _WIN32
#include <windows.h>
#include <time.h>
#elif MACOS
#include <sys/param.h>
#include <sys/sysctl.h>
#elif __linux
#include <unistd.h>
#include <sys/time.h>
#endif
#ifndef __PANDAUTILS_CU__
#define __PANDAUTILS_CU__
int getGPUCoresNum() {
//assert(tid<total);
int arch_cores_sm[3] = {1, 8, 32 };
cudaDeviceProp gpu_dev;
int tid = 0;
cudaGetDeviceProperties(&gpu_dev, tid);
int sm_per_multiproc = 1;
if (gpu_dev.major == 9999 && gpu_dev.minor == 9999)
sm_per_multiproc = 1;
else if (gpu_dev.major <=2)
sm_per_multiproc = arch_cores_sm[gpu_dev.major];
else
sm_per_multiproc = arch_cores_sm[2];
return ((gpu_dev.multiProcessorCount)*(sm_per_multiproc));
//ShowLog("Configure Device ID:%d: Device Name:%s MultProcessorCount:%d sm_per_multiproc:%d", i, gpu_dev.name,gpu_dev.multiProcessorCount,sm_per_multiproc);
}
void sleep(int sleepMs)
{
#ifdef __linux
usleep(sleepMs * 1000); // usleep takes sleep time in us
#endif
#ifdef _WIN32
Sleep(sleepMs);
#endif
}
int getCPUCoresNum() {
#ifdef WIN32
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors;
#elif MACOS
int nm[2];
size_t len = 4;
uint32_t count;
nm[0] = CTL_HW; nm[1] = HW_AVAILCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) {
nm[1] = HW_NCPU;
sysctl(nm, 2, &count, &len, NULL, 0);
if(count < 1) { count = 1; }
}
return count;
#elif __linux
return sysconf(_SC_NPROCESSORS_ONLN);
#endif
}
void DoDiskLog(const char *str){
FILE *fptr;
char file_name[128];
sprintf(file_name,"%s","panda.log");
fptr = fopen(file_name,"a");
fprintf(fptr,"[PandaDiskLog]\t\t:%s\n",str);
//fprintf(fptr,"%s",__VA_ARGS__);
fclose(fptr);
//printf("\n");
}//void
double PandaTimer(){
#ifndef _WIN32
static struct timeval tv;
gettimeofday(&tv,NULL);
double curTime = tv.tv_sec + tv.tv_usec/1000000.0;
//ShowLog("\t Panda CurTime:%f", curTime);
return curTime;
#else
//newtime = localtime( &long_time2 );
double curTime = GetTickCount();
//ShowLog("\t Panda CurTime:%f", curTime);
curTime /=1000.0;
return curTime;
#endif
}
void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
fprintf(stderr, "[PandaError][%s][%i]: CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit((int)err);
}
}
//--------------------------------------------------------
// start a timer
// param : tv
//--------------------------------------------------------
//--------------------------------------------------------
// end a timer, and print out a message
//--------------------------------------------------------
#endif //__PANDAUTILS_CU__ |
6533b40c1838a8c74f296453647532e239ea0bff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void double_value(double *x, double *y)
{
y[threadIdx.x] = 2. * x[threadIdx.x];
} | 6533b40c1838a8c74f296453647532e239ea0bff.cu | #include "includes.h"
__global__ void double_value(double *x, double *y)
{
y[threadIdx.x] = 2. * x[threadIdx.x];
} |
846bd4f82ee037395ce20d3100b91f3b69d47f7d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_blur.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
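// Benchmark sweep: every block shape in blocks_ is timed against the requested
// number of image sizes from matrices_, with 10 warm-up launches followed by
// 1000 timed launches per configuration.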
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *Pout = NULL;
hipMalloc(&Pout, XSIZE*YSIZE);
unsigned char *Pin = NULL;
hipMalloc(&Pin, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(gpu_blur, dim3(gridBlock), dim3(threadBlock), 0, 0, Pout, Pin, width, height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(gpu_blur, dim3(gridBlock), dim3(threadBlock), 0, 0, Pout, Pin, width, height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(gpu_blur, dim3(gridBlock), dim3(threadBlock), 0, 0, Pout, Pin, width, height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 846bd4f82ee037395ce20d3100b91f3b69d47f7d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_blur.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned char *Pout = NULL;
cudaMalloc(&Pout, XSIZE*YSIZE);
unsigned char *Pin = NULL;
cudaMalloc(&Pin, XSIZE*YSIZE);
int width = XSIZE;
int height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_blur<<<gridBlock,threadBlock>>>(Pout,Pin,width,height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_blur<<<gridBlock,threadBlock>>>(Pout,Pin,width,height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_blur<<<gridBlock,threadBlock>>>(Pout,Pin,width,height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
008cee34ccff7dbc4d42593db15e368f6951f467.hip | // !!! This is a file automatically generated by hipify!!!
/* To compile: nvcc GPUInfor.cu -o GPUInfo -lcudart -run
To run: ./GPUInfo
Archie "Douglas" Rowe
Homework 1*/
//#include "../common/book.h"
#include <stdio.h>
int main(){
hipDeviceProp_t prop;
int count;
hipGetDeviceCount( &count);
for (int i=0; i<count; i++){
hipGetDeviceProperties( &prop, i);
printf( " --- General Information for device %d ---\n", i);
printf("Name: %s\n", prop.name);
printf("Compute capability: S%d.%d\n", prop.major, prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf("Device copy overlap: ");
if(prop.deviceOverlap)
printf( "Enabled\n");
else
printf("Disabled\n");
printf( "Kernel execution timeout : ");
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else printf( "Disabled\n" );
printf(" --- Memory Information for device %d ---\n", i);
printf("Total global mem: %ld\n", prop.totalGlobalMem );
printf("Total constant Mem: %ld\n", prop.totalConstMem );
printf("Max mem pitch: %ld\n", prop.memPitch);
printf( "texture Alignment: %ld\n", prop.textureAlignment);
printf(" ---MP Information for device %d ---\n", i);
printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
printf("Shared mem per mp: %ld\n:", prop.sharedMemPerBlock);
printf("Registers per mp: %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n:", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1],prop.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\n");
}
return 0;
}
| 008cee34ccff7dbc4d42593db15e368f6951f467.cu | /* To compile: nvcc GPUInfor.cu -o GPUInfo -lcudart -run
To run: ./GPUInfo
Archie "Douglas" Rowe
Homework 1*/
//#include "../common/book.h"
#include <stdio.h>
int main(){
cudaDeviceProp prop;
int count;
cudaGetDeviceCount( &count);
for (int i=0; i<count; i++){
cudaGetDeviceProperties( &prop, i);
printf( " --- General Information for device %d ---\n", i);
printf("Name: %s\n", prop.name);
printf("Compute capability: S%d.%d\n", prop.major, prop.minor);
printf("Clock rate: %d\n", prop.clockRate);
printf("Device copy overlap: ");
if(prop.deviceOverlap)
printf( "Enabled\n");
else
printf("Disabled\n");
printf( "Kernel execution timeout : ");
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else printf( "Disabled\n" );
printf(" --- Memory Information for device %d ---\n", i);
printf("Total global mem: %ld\n", prop.totalGlobalMem );
printf("Total constant Mem: %ld\n", prop.totalConstMem );
printf("Max mem pitch: %ld\n", prop.memPitch);
printf( "texture Alignment: %ld\n", prop.textureAlignment);
printf(" ---MP Information for device %d ---\n", i);
printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
printf("Shared mem per mp: %ld\n:", prop.sharedMemPerBlock);
printf("Registers per mp: %d\n", prop.regsPerBlock);
printf("Threads in warp: %d\n", prop.warpSize);
printf("Max threads per block: %d\n:", prop.maxThreadsPerBlock);
printf("Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1],prop.maxThreadsDim[2]);
printf("Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("\n");
}
return 0;
}
|
a4e00634fa7dfa74e0237df9f05d34a572c0da3c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <thrust/complex.h>
#include "../fast-fourier.h"
using namespace std;
using namespace fast_fourier;
__global__
void run_test(cfloat* input, cfloat* output, int n)
{
fast_fourier_transform(input, output, n);
}
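// Host driver: copies an 8-point input to the device, runs the FFT inside a
// single-thread kernel, copies the result back, and prints it alongside the
// CPU DFT reference for manual comparison.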
int main()
{
int n(8);
cfloat input[] = {1,2,3,4,5,6,7,8};
cfloat* expected = discrete_fourier_transform(input, n);
cfloat* d_input(nullptr);
cfloat* d_actual(nullptr);
cfloat actual[n];
// Allocate an input and output array on the GPU
if (hipMalloc( &d_input, sizeof(cfloat) * n ) != hipSuccess)
{
auto t = hipGetLastError();
cout << "Failed to allocate input: "
<< hipGetErrorName(t) << ", "
<< hipGetErrorString(t) << endl;
return 1;
}
if (hipMalloc( &d_actual, sizeof(cfloat) * n ) != hipSuccess)
{
auto t = hipGetLastError();
cout << "Failed to allocate output: "
<< hipGetErrorName(t) << ", "
<< hipGetErrorString(t) << endl;
return 1;
}
// Copy the input array to the GPU
if (hipMemcpy( d_input, input, sizeof(cfloat) * n, hipMemcpyHostToDevice ) != hipSuccess)
{
auto t = hipGetLastError();
cout << "Input failed to copy: "
<< hipGetErrorName(t) << ", "
<< hipGetErrorString(t) << endl;
return 1;
}
hipLaunchKernelGGL(run_test, dim3(1), dim3(1), 0, 0, d_input, d_actual, n);
// Copy the output array from the GPU
if (hipMemcpy( actual, d_actual, sizeof(cfloat) * n, hipMemcpyDeviceToHost ) != hipSuccess)
{
auto t = hipGetLastError();
cout << "Output failed to copy: "
<< hipGetErrorName(t) << ", "
<< hipGetErrorString(t) << endl;
return 1;
}
for (int j(0) ; j < n ; j++)
cout << actual[j] << "\t\t\t\t" << expected[j] << endl;
hipFree( d_actual );
hipFree( d_input );
return 0;
}
| a4e00634fa7dfa74e0237df9f05d34a572c0da3c.cu | #include <iostream>
#include <thrust/complex.h>
#include "../fast-fourier.h"
using namespace std;
using namespace fast_fourier;
__global__
void run_test(cfloat* input, cfloat* output, int n)
{
fast_fourier_transform(input, output, n);
}
int main()
{
int n(8);
cfloat input[] = {1,2,3,4,5,6,7,8};
cfloat* expected = discrete_fourier_transform(input, n);
cfloat* d_input(nullptr);
cfloat* d_actual(nullptr);
cfloat actual[n];
// Allocate an input and output array on the GPU
if (cudaMalloc( &d_input, sizeof(cfloat) * n ) != cudaSuccess)
{
auto t = cudaGetLastError();
cout << "Failed to allocate input: "
<< cudaGetErrorName(t) << ", "
<< cudaGetErrorString(t) << endl;
return 1;
}
if (cudaMalloc( &d_actual, sizeof(cfloat) * n ) != cudaSuccess)
{
auto t = cudaGetLastError();
cout << "Failed to allocate output: "
<< cudaGetErrorName(t) << ", "
<< cudaGetErrorString(t) << endl;
return 1;
}
// Copy the input array to the GPU
if (cudaMemcpy( d_input, input, sizeof(cfloat) * n, cudaMemcpyHostToDevice ) != cudaSuccess)
{
auto t = cudaGetLastError();
cout << "Input failed to copy: "
<< cudaGetErrorName(t) << ", "
<< cudaGetErrorString(t) << endl;
return 1;
}
run_test<<<1,1>>>(d_input, d_actual, n);
// Copy the output array from the GPU
if (cudaMemcpy( actual, d_actual, sizeof(cfloat) * n, cudaMemcpyDeviceToHost ) != cudaSuccess)
{
auto t = cudaGetLastError();
cout << "Output failed to copy: "
<< cudaGetErrorName(t) << ", "
<< cudaGetErrorString(t) << endl;
return 1;
}
for (int j(0) ; j < n ; j++)
cout << actual[j] << "\t\t\t\t" << expected[j] << endl;
cudaFree( d_actual );
cudaFree( d_input );
return 0;
}
|
d0829e62207e37ffd29b2f740880db8aff48b5a9.hip | // !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bc.cu
*
 * @brief Simple test driver program for Gunrock BC.
*/
#include <iostream>
#include <gunrock/app/bc/bc_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return hipError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use int as the value type
hipError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_CSR>
GraphT;
hipError_t retval = hipSuccess;
util::CpuTimer cpu_timer;
GraphT graph;
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
    // Set the source vertices from the srcs parameter
GUARD_CU(app::Set_Srcs(parameters, graph));
int num_srcs = 0;
ValueT **reference_bc_values = NULL;
ValueT **reference_sigmas = NULL;
VertexT **reference_source_path = NULL;
bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
if (!quick) {
// std::string validation = parameters.Get<std::string>("validation");
util::PrintMsg("Computing reference value ...", !quiet);
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT> >("srcs");
num_srcs = srcs.size();
SizeT nodes = graph.nodes;
reference_bc_values = new ValueT *[num_srcs];
reference_sigmas = new ValueT *[num_srcs];
reference_source_path = new VertexT *[num_srcs];
for (int i = 0; i < num_srcs; i++) {
VertexT src = srcs[i];
util::PrintMsg("__________________________", !quiet);
reference_bc_values[i] = new ValueT[nodes];
reference_sigmas[i] = new ValueT[nodes];
reference_source_path[i] = new VertexT[nodes];
float elapsed = app::bc::CPU_Reference(
graph, reference_bc_values[i], reference_sigmas[i],
reference_source_path[i], src, quiet);
util::PrintMsg("--------------------------\nRun " + std::to_string(i) +
" elapsed: " + std::to_string(elapsed) +
" ms, src = " + std::to_string(src),
!quiet);
}
}
std::vector<std::string> switches{"advance-mode"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[reference_bc_values, reference_sigmas, reference_source_path](
util::Parameters ¶meters, GraphT &graph) {
return app::bc::RunTests(parameters, graph, reference_bc_values,
reference_sigmas, reference_source_path);
}));
// Cleanup
if (!quick) {
for (int i = 0; i < num_srcs; i++) {
delete[] reference_bc_values[i];
reference_bc_values[i] = NULL;
delete[] reference_sigmas[i];
reference_sigmas[i] = NULL;
delete[] reference_source_path[i];
reference_source_path[i] = NULL;
}
delete[] reference_bc_values;
reference_bc_values = NULL;
delete[] reference_sigmas;
reference_sigmas = NULL;
delete[] reference_source_path;
reference_source_path = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
hipError_t retval = hipSuccess;
util::Parameters parameters("test bc");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::bc::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return hipSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F32B | app::DIRECTED | app::UNDIRECTED>(
parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| d0829e62207e37ffd29b2f740880db8aff48b5a9.cu | // ----------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------
/**
* @file
* test_bc.cu
*
* @brief Simple test driver program for Gunrock GC.
*/
#include <iostream>
#include <gunrock/app/bc/bc_app.cu>
#include <gunrock/app/test_base.cuh>
using namespace gunrock;
/******************************************************************************
* Main
******************************************************************************/
/**
* @brief Enclosure to the main function
*/
struct main_struct {
/**
* @brief the actual main function, after type switching
* @tparam VertexT Type of vertex identifier
* @tparam SizeT Type of graph size, i.e. type of edge identifier
* @tparam ValueT Type of edge values
* @param parameters Command line parameters
* @param v,s,val Place holders for type deduction
* \return cudaError_t error message(s), if any
*/
template <typename VertexT, // Use int as the vertex identifier
typename SizeT, // Use int as the graph size type
typename ValueT> // Use int as the value type
cudaError_t
operator()(util::Parameters ¶meters, VertexT v, SizeT s, ValueT val) {
typedef typename app::TestGraph<VertexT, SizeT, ValueT, graph::HAS_CSR>
GraphT;
cudaError_t retval = cudaSuccess;
util::CpuTimer cpu_timer;
GraphT graph;
cpu_timer.Start();
GUARD_CU(graphio::LoadGraph(parameters, graph));
cpu_timer.Stop();
parameters.Set("load-time", cpu_timer.ElapsedMillis());
// Set up the source vertices for the runs
GUARD_CU(app::Set_Srcs(parameters, graph));
int num_srcs = 0;
ValueT **reference_bc_values = NULL;
ValueT **reference_sigmas = NULL;
VertexT **reference_source_path = NULL;
bool quick = parameters.Get<bool>("quick");
bool quiet = parameters.Get<bool>("quiet");
if (!quick) {
// std::string validation = parameters.Get<std::string>("validation");
util::PrintMsg("Computing reference value ...", !quiet);
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT> >("srcs");
num_srcs = srcs.size();
SizeT nodes = graph.nodes;
reference_bc_values = new ValueT *[num_srcs];
reference_sigmas = new ValueT *[num_srcs];
reference_source_path = new VertexT *[num_srcs];
for (int i = 0; i < num_srcs; i++) {
VertexT src = srcs[i];
util::PrintMsg("__________________________", !quiet);
reference_bc_values[i] = new ValueT[nodes];
reference_sigmas[i] = new ValueT[nodes];
reference_source_path[i] = new VertexT[nodes];
float elapsed = app::bc::CPU_Reference(
graph, reference_bc_values[i], reference_sigmas[i],
reference_source_path[i], src, quiet);
util::PrintMsg("--------------------------\nRun " + std::to_string(i) +
" elapsed: " + std::to_string(elapsed) +
" ms, src = " + std::to_string(src),
!quiet);
}
}
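// Run the GPU tests once per setting of the listed command-line switches,
// passing the CPU reference results along for validation.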
std::vector<std::string> switches{"advance-mode"};
GUARD_CU(app::Switch_Parameters(
parameters, graph, switches,
[reference_bc_values, reference_sigmas, reference_source_path](
util::Parameters ¶meters, GraphT &graph) {
return app::bc::RunTests(parameters, graph, reference_bc_values,
reference_sigmas, reference_source_path);
}));
// Cleanup
if (!quick) {
for (int i = 0; i < num_srcs; i++) {
delete[] reference_bc_values[i];
reference_bc_values[i] = NULL;
delete[] reference_sigmas[i];
reference_sigmas[i] = NULL;
delete[] reference_source_path[i];
reference_source_path[i] = NULL;
}
delete[] reference_bc_values;
reference_bc_values = NULL;
delete[] reference_sigmas;
reference_sigmas = NULL;
delete[] reference_source_path;
reference_source_path = NULL;
}
return retval;
}
};
int main(int argc, char **argv) {
cudaError_t retval = cudaSuccess;
util::Parameters parameters("test bc");
GUARD_CU(graphio::UseParameters(parameters));
GUARD_CU(app::bc::UseParameters(parameters));
GUARD_CU(app::UseParameters_test(parameters));
GUARD_CU(parameters.Parse_CommandLine(argc, argv));
if (parameters.Get<bool>("help")) {
parameters.Print_Help();
return cudaSuccess;
}
GUARD_CU(parameters.Check_Required());
return app::Switch_Types<app::VERTEXT_U32B | // app::VERTEXT_U64B |
app::SIZET_U32B | // app::SIZET_U64B |
app::VALUET_F32B | app::DIRECTED | app::UNDIRECTED>(
parameters, main_struct());
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
979888b615ca6b20e8329205ad5fde882accb690.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <assert.h>
#include "matmul.h"
#define TILE_WIDTH 32
using namespace std;
void allocateDeviceMemory(void** M, int size)
{
hipError_t err = hipMalloc(M, size);
assert(err==hipSuccess);
}
void deallocateDeviceMemory(void* M)
{
hipError_t err = hipFree(M);
assert(err==hipSuccess);
}
void matmul_ref(const int* const matrixA, const int* const matrixB,
int* const matrixC, const int n) {
// You can assume matrixC is initialized with zero
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
for (int k = 0; k < n; k++)
matrixC[i * n + j] += matrixA[i * n + k] * matrixB[k * n + j];
}
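// Tiled matrix multiply: each thread block computes one TILE_WIDTH x TILE_WIDTH tile of C,
// staging the matching tiles of A and B through shared memory. Note that the tile loop below
// (and the ceil(n/TILE_WIDTH) grid size in matmul_optimized, where the division is integer)
// assumes n is a multiple of TILE_WIDTH.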
__global__ void MatMulKernel(int* d_A, int* d_B, int* d_C, int n){
__shared__ int subTileA[TILE_WIDTH][TILE_WIDTH];
__shared__ int subTileB[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
int value = 0;
if (Col < n && Row < n) {
for (int m = 0; m < n/TILE_WIDTH; ++m){
subTileA[ty][tx] = d_A[Row*n + m*TILE_WIDTH+tx];
subTileB[ty][tx] = d_B[(m*TILE_WIDTH+ty)*n + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k){
value += subTileA[ty][k] * subTileB[k][tx];
}
__syncthreads();
}
d_C[Row*n + Col] = value;
}
}
void matmul_optimized(const int* const matrixA, const int* const matrixB,
int* const matrixC, const int* const d_A, const int* const d_B, int* const d_C, const int n) {
// TODO: Implement your CUDA code
int size = n*n*sizeof(int);
allocateDeviceMemory((void**)&d_A, size);
allocateDeviceMemory((void**)&d_B, size);
allocateDeviceMemory((void**)&d_C, size);
dim3 dimGrid(ceil(n/TILE_WIDTH), ceil(n/TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
hipMemcpy((int *)d_A, matrixA, size, hipMemcpyHostToDevice);
hipMemcpy((int *)d_B, matrixB, size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, (int *)d_A, (int *)d_B, (int *)d_C, n);
hipMemcpy(matrixC, (int *)d_C, size, hipMemcpyDeviceToHost);
}
| 979888b615ca6b20e8329205ad5fde882accb690.cu | #include <stdio.h>
#include <iostream>
#include <chrono>
#include <assert.h>
#include "matmul.h"
#define TILE_WIDTH 32
using namespace std;
void allocateDeviceMemory(void** M, int size)
{
cudaError_t err = cudaMalloc(M, size);
assert(err==cudaSuccess);
}
void deallocateDeviceMemory(void* M)
{
cudaError_t err = cudaFree(M);
assert(err==cudaSuccess);
}
void matmul_ref(const int* const matrixA, const int* const matrixB,
int* const matrixC, const int n) {
// You can assume matrixC is initialized with zero
for (int i = 0; i < n; i++)
for (int j = 0; j < n; j++)
for (int k = 0; k < n; k++)
matrixC[i * n + j] += matrixA[i * n + k] * matrixB[k * n + j];
}
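// Tiled matrix multiply: each thread block computes one TILE_WIDTH x TILE_WIDTH tile of C,
// staging the matching tiles of A and B through shared memory. Note that the tile loop below
// (and the ceil(n/TILE_WIDTH) grid size in matmul_optimized, where the division is integer)
// assumes n is a multiple of TILE_WIDTH.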
__global__ void MatMulKernel(int* d_A, int* d_B, int* d_C, int n){
__shared__ int subTileA[TILE_WIDTH][TILE_WIDTH];
__shared__ int subTileB[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
int value = 0;
if (Col < n && Row < n) {
for (int m = 0; m < n/TILE_WIDTH; ++m){
subTileA[ty][tx] = d_A[Row*n + m*TILE_WIDTH+tx];
subTileB[ty][tx] = d_B[(m*TILE_WIDTH+ty)*n + Col];
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k){
value += subTileA[ty][k] * subTileB[k][tx];
}
__syncthreads();
}
d_C[Row*n + Col] = value;
}
}
void matmul_optimized(const int* const matrixA, const int* const matrixB,
int* const matrixC, const int* const d_A, const int* const d_B, int* const d_C, const int n) {
// TODO: Implement your CUDA code
int size = n*n*sizeof(int);
allocateDeviceMemory((void**)&d_A, size);
allocateDeviceMemory((void**)&d_B, size);
allocateDeviceMemory((void**)&d_C, size);
dim3 dimGrid(ceil(n/TILE_WIDTH), ceil(n/TILE_WIDTH));
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
cudaMemcpy((int *)d_A, matrixA, size, cudaMemcpyHostToDevice);
cudaMemcpy((int *)d_B, matrixB, size, cudaMemcpyHostToDevice);
MatMulKernel<<<dimGrid, dimBlock>>>((int *)d_A, (int *)d_B, (int *)d_C, n);
cudaMemcpy(matrixC, (int *)d_C, size, cudaMemcpyDeviceToHost);
}
|
bdac1ed40e4927fdceab5c8fa87d41ca6dd94074.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void ori_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int iteration
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
const int zRegionIndex = blockIdx.z;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x;
for (int loop = 0; loop < iteration; loop++) {
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y + yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
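// Persistent-thread variant of the kernel above: a fixed grid loops over all logical blocks
// (block_pos += gridDim.x) and decodes the 3D block and thread indices from the flattened
// launch dimensions passed in as arguments.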
__global__ void pers_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int grid_dimension_x,
int grid_dimension_y,
int grid_dimension_z,
int block_dimension_x,
int block_dimension_y,
int block_dimension_z,
int iteration
) {
unsigned int block_pos = blockIdx.x;
int thread_id_x = threadIdx.x / (block_dimension_y * block_dimension_z);
int thread_id_y = (threadIdx.x % (block_dimension_y * block_dimension_z)) / block_dimension_z;
int thread_id_z = (threadIdx.x % (block_dimension_y * block_dimension_z)) % block_dimension_z;
/* thread id */
const int tid = (thread_id_z * block_dimension_y + thread_id_y) * block_dimension_x + thread_id_x;
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
for (;; block_pos += gridDim.x) {
if (block_pos >= grid_dimension_x * grid_dimension_y * grid_dimension_z)
{
return;
}
int block_id_x = block_pos / (grid_dimension_y * grid_dimension_z);
int block_id_y = (block_pos % (grid_dimension_y * grid_dimension_z)) / grid_dimension_z;
int block_id_z = (block_pos % (grid_dimension_y * grid_dimension_z)) % grid_dimension_z;
for (int loop = 0; loop < iteration; loop++) {
int xRegionIndex = block_id_x;
int yRegionIndex = block_id_y;
int zRegionIndex = block_id_z;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*grid_dimension_y + yRegionIndex)*grid_dimension_x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + thread_id_x) * h;
float y = (8 * yRegionIndex + thread_id_y) * h;
float z = (8 * zRegionIndex + thread_id_z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
}
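// Device-function variant in which thread indices are offset by thread_step and
// __syncthreads() is replaced by a PTX named barrier over 128 threads, so only the cutcp
// threads take part in the synchronization (other threads co-resident in the same block
// are left free to do different work).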
__device__ void mix_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int grid_dimension_x,
int grid_dimension_y,
int grid_dimension_z,
int block_dimension_x,
int block_dimension_y,
int block_dimension_z,
int thread_step,
int iteration
) {
unsigned int block_pos = blockIdx.x;
int thread_id_x = (threadIdx.x - thread_step) / (block_dimension_y * block_dimension_z);
int thread_id_y = ((threadIdx.x - thread_step) % (block_dimension_y * block_dimension_z)) / block_dimension_z;
int thread_id_z = ((threadIdx.x - thread_step) % (block_dimension_y * block_dimension_z)) % block_dimension_z;
/* thread id */
const int tid = (thread_id_z * block_dimension_y + thread_id_y) * block_dimension_x + thread_id_x;
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
for (;; block_pos += CUTCP_GRID_DIM) {
if (block_pos >= grid_dimension_x * grid_dimension_y * grid_dimension_z)
{
return;
}
int block_id_x = block_pos / (grid_dimension_y * grid_dimension_z);
int block_id_y = (block_pos % (grid_dimension_y * grid_dimension_z)) / grid_dimension_z;
int block_id_z = (block_pos % (grid_dimension_y * grid_dimension_z)) % grid_dimension_z;
for (int loop = 0; loop < iteration; loop++) {
int xRegionIndex = block_id_x;
int yRegionIndex = block_id_y;
int zRegionIndex = block_id_z;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*grid_dimension_y + yRegionIndex)*grid_dimension_x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + thread_id_x) * h;
float y = (8 * yRegionIndex + thread_id_y) * h;
float z = (8 * zRegionIndex + thread_id_z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
}
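// Same idea, but here the logical block index is offset by block_step
// (block_pos = blockIdx.x - block_step) while thread indices come from threadIdx.x directly;
// synchronization again uses the 128-thread named barrier.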
__device__ void mixblock_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int grid_dimension_x,
int grid_dimension_y,
int grid_dimension_z,
int block_dimension_x,
int block_dimension_y,
int block_dimension_z,
int block_step,
int iteration
) {
unsigned int block_pos = blockIdx.x - block_step;
int thread_id_x = threadIdx.x / (block_dimension_y * block_dimension_z);
int thread_id_y = (threadIdx.x % (block_dimension_y * block_dimension_z)) / block_dimension_z;
int thread_id_z = (threadIdx.x % (block_dimension_y * block_dimension_z)) % block_dimension_z;
/* thread id */
const int tid = (thread_id_z * block_dimension_y + thread_id_y) * block_dimension_x + thread_id_x;
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
for (;; block_pos += CUTCP_GRID_DIM) {
if (block_pos >= grid_dimension_x * grid_dimension_y * grid_dimension_z)
{
return;
}
int block_id_x = block_pos / (grid_dimension_y * grid_dimension_z);
int block_id_y = (block_pos % (grid_dimension_y * grid_dimension_z)) / grid_dimension_z;
int block_id_z = (block_pos % (grid_dimension_y * grid_dimension_z)) % grid_dimension_z;
for (int loop = 0; loop < iteration; loop++) {
int xRegionIndex = block_id_x;
int yRegionIndex = block_id_y;
int zRegionIndex = block_id_z;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*grid_dimension_y + yRegionIndex)*grid_dimension_x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + thread_id_x) * h;
float y = (8 * yRegionIndex + thread_id_y) * h;
float z = (8 * zRegionIndex + thread_id_z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
}
| bdac1ed40e4927fdceab5c8fa87d41ca6dd94074.cu |
__global__ void ori_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int iteration
)
{
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
const int xRegionIndex = blockIdx.x;
const int yRegionIndex = blockIdx.y;
const int zRegionIndex = blockIdx.z;
/* thread id */
const int tid = (threadIdx.z*blockDim.y + threadIdx.y)*blockDim.x + threadIdx.x;
for (int loop = 0; loop < iteration; loop++) {
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*gridDim.y + yRegionIndex)*gridDim.x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + threadIdx.x) * h;
float y = (8 * yRegionIndex + threadIdx.y) * h;
float z = (8 * zRegionIndex + threadIdx.z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
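// Persistent-thread variant of the kernel above: a fixed grid loops over all logical blocks
// (block_pos += gridDim.x) and decodes the 3D block and thread indices from the flattened
// launch dimensions passed in as arguments.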
__global__ void pers_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int grid_dimension_x,
int grid_dimension_y,
int grid_dimension_z,
int block_dimension_x,
int block_dimension_y,
int block_dimension_z,
int iteration
) {
unsigned int block_pos = blockIdx.x;
int thread_id_x = threadIdx.x / (block_dimension_y * block_dimension_z);
int thread_id_y = (threadIdx.x % (block_dimension_y * block_dimension_z)) / block_dimension_z;
int thread_id_z = (threadIdx.x % (block_dimension_y * block_dimension_z)) % block_dimension_z;
/* thread id */
const int tid = (thread_id_z * block_dimension_y + thread_id_y) * block_dimension_x + thread_id_x;
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
for (;; block_pos += gridDim.x) {
if (block_pos >= grid_dimension_x * grid_dimension_y * grid_dimension_z)
{
return;
}
int block_id_x = block_pos / (grid_dimension_y * grid_dimension_z);
int block_id_y = (block_pos % (grid_dimension_y * grid_dimension_z)) / grid_dimension_z;
int block_id_z = (block_pos % (grid_dimension_y * grid_dimension_z)) % grid_dimension_z;
for (int loop = 0; loop < iteration; loop++) {
int xRegionIndex = block_id_x;
int yRegionIndex = block_id_y;
int zRegionIndex = block_id_z;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*grid_dimension_y + yRegionIndex)*grid_dimension_x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + thread_id_x) * h;
float y = (8 * yRegionIndex + thread_id_y) * h;
float z = (8 * zRegionIndex + thread_id_z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
__syncthreads();
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
__syncthreads();
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
}
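// Device-function variant in which thread indices are offset by thread_step and
// __syncthreads() is replaced by a PTX named barrier over 128 threads, so only the cutcp
// threads take part in the synchronization (other threads co-resident in the same block
// are left free to do different work).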
__device__ void mix_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int grid_dimension_x,
int grid_dimension_y,
int grid_dimension_z,
int block_dimension_x,
int block_dimension_y,
int block_dimension_z,
int thread_step,
int iteration
) {
unsigned int block_pos = blockIdx.x;
int thread_id_x = (threadIdx.x - thread_step) / (block_dimension_y * block_dimension_z);
int thread_id_y = ((threadIdx.x - thread_step) % (block_dimension_y * block_dimension_z)) / block_dimension_z;
int thread_id_z = ((threadIdx.x - thread_step) % (block_dimension_y * block_dimension_z)) % block_dimension_z;
/* thread id */
const int tid = (thread_id_z * block_dimension_y + thread_id_y) * block_dimension_x + thread_id_x;
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
for (;; block_pos += CUTCP_GRID_DIM) {
if (block_pos >= grid_dimension_x * grid_dimension_y * grid_dimension_z)
{
return;
}
int block_id_x = block_pos / (grid_dimension_y * grid_dimension_z);
int block_id_y = (block_pos % (grid_dimension_y * grid_dimension_z)) / grid_dimension_z;
int block_id_z = (block_pos % (grid_dimension_y * grid_dimension_z)) % grid_dimension_z;
for (int loop = 0; loop < iteration; loop++) {
int xRegionIndex = block_id_x;
int yRegionIndex = block_id_y;
int zRegionIndex = block_id_z;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*grid_dimension_y + yRegionIndex)*grid_dimension_x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + thread_id_x) * h;
float y = (8 * yRegionIndex + thread_id_y) * h;
float z = (8 * zRegionIndex + thread_id_z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
}
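// Same idea, but here the logical block index is offset by block_step
// (block_pos = blockIdx.x - block_step) while thread indices come from threadIdx.x directly;
// synchronization again uses the 128-thread named barrier.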
__device__ void mixblock_cutcp(
int binDim_x,
int binDim_y,
float4 *binZeroAddr, /* address of atom bins starting at origin */
float h, /* lattice spacing */
float cutoff2, /* square of cutoff distance */
float inv_cutoff2,
float *regionZeroAddr, /* address of lattice regions starting at origin */
int zRegionIndex_t,
int grid_dimension_x,
int grid_dimension_y,
int grid_dimension_z,
int block_dimension_x,
int block_dimension_y,
int block_dimension_z,
int block_step,
int iteration
) {
unsigned int block_pos = blockIdx.x - block_step;
int thread_id_x = threadIdx.x / (block_dimension_y * block_dimension_z);
int thread_id_y = (threadIdx.x % (block_dimension_y * block_dimension_z)) / block_dimension_z;
int thread_id_z = (threadIdx.x % (block_dimension_y * block_dimension_z)) % block_dimension_z;
/* thread id */
const int tid = (thread_id_z * block_dimension_y + thread_id_y) * block_dimension_x + thread_id_x;
__shared__ float AtomBinCache[BIN_CACHE_MAXLEN * BIN_DEPTH * 4];
__shared__ float *myRegionAddr;
__shared__ int3 myBinIndex;
for (;; block_pos += CUTCP_GRID_DIM) {
if (block_pos >= grid_dimension_x * grid_dimension_y * grid_dimension_z)
{
return;
}
int block_id_x = block_pos / (grid_dimension_y * grid_dimension_z);
int block_id_y = (block_pos % (grid_dimension_y * grid_dimension_z)) / grid_dimension_z;
int block_id_z = (block_pos % (grid_dimension_y * grid_dimension_z)) % grid_dimension_z;
for (int loop = 0; loop < iteration; loop++) {
int xRegionIndex = block_id_x;
int yRegionIndex = block_id_y;
int zRegionIndex = block_id_z;
/* neighbor index */
int nbrid;
/* this is the start of the sub-region indexed by tid */
myRegionAddr = regionZeroAddr + ((zRegionIndex*grid_dimension_y + yRegionIndex)*grid_dimension_x + xRegionIndex)*REGION_SIZE;
/* spatial coordinate of this lattice point */
float x = (8 * xRegionIndex + thread_id_x) * h;
float y = (8 * yRegionIndex + thread_id_y) * h;
float z = (8 * zRegionIndex + thread_id_z) * h;
int totalbins = 0;
int numbins;
/* bin number determined by center of region */
myBinIndex.x = (int) floorf((8 * xRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.y = (int) floorf((8 * yRegionIndex + 4) * h * BIN_INVLEN);
myBinIndex.z = (int) floorf((8 * zRegionIndex + 4) * h * BIN_INVLEN);
/* first neighbor in list for me to cache */
nbrid = (tid >> 4);
numbins = BIN_CACHE_MAXLEN;
float energy0 = 0.f;
float energy1 = 0.f;
float energy2 = 0.f;
float energy3 = 0.f;
for (totalbins = 0; totalbins < NbrListLen; totalbins += numbins) {
int bincnt;
/* start of where to write in shared memory */
int startoff = BIN_SIZE * (tid >> 4);
/* each half-warp to cache up to 4 atom bins */
for (bincnt = 0; bincnt < 4 && nbrid < NbrListLen; bincnt++, nbrid += 8) {
int i = myBinIndex.x + NbrList[nbrid].x;
int j = myBinIndex.y + NbrList[nbrid].y;
int k = myBinIndex.z + NbrList[nbrid].z;
/* determine global memory location of atom bin */
float *p_global = ((float *) binZeroAddr) + (((__mul24(k, binDim_y) + j)*binDim_x + i) << BIN_SHIFT);
/* coalesced read from global memory -
* retain same ordering in shared memory for now */
int binIndex = startoff + (bincnt << (3 + BIN_SHIFT));
int tidmask = tid & 15;
AtomBinCache[binIndex + tidmask ] = p_global[tidmask ];
AtomBinCache[binIndex + tidmask+16] = p_global[tidmask+16];
}
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
/* no warp divergence */
if (totalbins + BIN_CACHE_MAXLEN > NbrListLen) {
numbins = NbrListLen - totalbins;
}
int stopbin = (numbins << BIN_SHIFT);
for (bincnt = 0; bincnt < stopbin; bincnt+=BIN_SIZE) {
for (int i = 0; i < BIN_DEPTH; i++) {
int off = bincnt + (i<<2);
float aq = AtomBinCache[off + 3];
if (0.f == aq)
break; /* no more atoms in bin */
float dx = AtomBinCache[off ] - x;
float dz = AtomBinCache[off + 2] - z;
float dxdz2 = dx*dx + dz*dz;
float dy = AtomBinCache[off + 1] - y;
float r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy0 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy1 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy2 += aq * rsqrtf(r2) * s * s;
}
dy -= 2.0f*h;
r2 = dy*dy + dxdz2;
if (r2 < cutoff2) {
float s = (1.f - r2 * inv_cutoff2);
energy3 += aq * rsqrtf(r2) * s * s;
}
} /* end loop over atoms in bin */
} /* end loop over cached atom bins */
// __syncthreads();
asm volatile("bar.sync %0, %1;" : : "r"(0), "r"(128) : "memory");
} /* end loop over neighbor list */
/* store into global memory */
myRegionAddr[(tid>>4)*64 + (tid&15) ] = energy0;
myRegionAddr[(tid>>4)*64 + (tid&15) + 16] = energy1;
myRegionAddr[(tid>>4)*64 + (tid&15) + 32] = energy2;
myRegionAddr[(tid>>4)*64 + (tid&15) + 48] = energy3;
}
}
}
|
52b615befb67eb1568d2aba1de4ccc44a95f1e57.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Equihash solver created by djeZo ([email protected]) for NiceHash
Based on CUDA solver by John Tromp released under MIT license.
Some helper functions taken out of OpenCL solver by Marc Bevand
released under MIT license.
cuda_djezo solver is released by NiceHash (www.nicehash.com) under
GPL 3.0 license. If you don't have a copy, you can obtain one from
https://www.gnu.org/licenses/gpl-3.0.txt
*/
/*
The MIT License (MIT)
Copyright (c) 2016 John Tromp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software, and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/*
The MIT License (MIT)
Copyright (c) 2016 Marc Bevand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software, and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifdef WIN32
#include <Windows.h>
#endif
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include <mutex>
#include "eqcuda.hpp"
#include "sm_32_intrinsics.h"
#define WN 144
#define WK 5
#define NDIGITS (WK+1)
#define DIGITBITS (WN/(NDIGITS))
#define PROOFSIZE (1<<WK)
#define BASE (1<<DIGITBITS)
#define NHASHES (2*BASE)
#define HASHESPERBLAKE (512/WN)
#define HASHOUT (HASHESPERBLAKE*WN/8)
#define NBLOCKS ((NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE)
#define BUCKBITS (DIGITBITS - RB)
#define NBUCKETS (1 << BUCKBITS)
#define BUCKMASK (NBUCKETS - 1)
#define SLOTBITS (RB + 2)
#define SLOTRANGE (1 << SLOTBITS)
#define NSLOTS SM
#define SLOTMASK (SLOTRANGE - 1)
#define NRESTS (1 << RB)
#define RESTMASK (NRESTS - 1)
#define CANTORBITS (2 * SLOTBITS - 2)
#define CANTORMASK ((1 << CANTORBITS) - 1)
#define CANTORMAXSQRT (2 * NSLOTS)
#define RB8_NSLOTS 640
#define RB8_NSLOTS_LD 624
#define FD_THREADS 128
// reduce vstudio warnings (__byte_perm, blockIdx...)
#ifdef __INTELLISENSE__
#include <hip/device_functions.h>
#include <device_launch_parameters.h>
#define __launch_bounds__(max_tpb, min_blocks)
#define __CUDA_ARCH__ 520
uint32_t __byte_perm(uint32_t x, uint32_t y, uint32_t z);
uint32_t __shfl(uint32_t x, uint32_t y, uint32_t z);
uint32_t atomicExch(uint32_t *x, uint32_t y);
uint32_t atomicAdd(uint32_t *x, uint32_t y);
void __syncthreads(void);
void __threadfence(void);
void __threadfence_block(void);
uint32_t __ldg(const uint32_t* address);
uint64_t __ldg(const uint64_t* address);
uint4 __ldca(const uint4 *ptr);
u32 __ldca(const u32 *ptr);
u32 umin(const u32, const u32);
u32 umax(const u32, const u32);
#endif
typedef u32 proof[PROOFSIZE];
struct __align__(32) slot
{
u32 hash[8];
};
struct __align__(16) slotsmall
{
u32 hash[4];
};
struct __align__(8) slottiny
{
u32 hash[2];
};
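// Device-side working state for one Equihash solve: per-round slot buckets (full, small and
// tiny slots), per-bucket slot counters, the host-supplied BLAKE2b state (blake_h), and the
// candidate-solution container.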
template <u32 RB, u32 SM>
struct equi
{
slot round0trees[4096][RB8_NSLOTS];
slot trees[1][NBUCKETS][NSLOTS];
struct
{
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round2trees[NBUCKETS];
struct
{
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round3trees[NBUCKETS];
slotsmall treessmall[4][NBUCKETS][NSLOTS];
slottiny treestiny[1][4096][RB8_NSLOTS_LD];
u32 round4bidandsids[NBUCKETS][NSLOTS];
union
{
u64 blake_h[8];
u32 blake_h32[16];
};
struct
{
u32 nslots8[4096];
u32 nslots0[4096];
u32 nslots[9][NBUCKETS];
scontainerreal srealcont;
} edata;
};
__device__ __constant__ const u64 blake_iv[] =
{
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};
__device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b)
{
return make_uint2(a.x ^ b.x, a.y ^ b.y);
}
__device__ __forceinline__ uint4 operator^ (uint4 a, uint4 b)
{
return make_uint4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
__device__ __forceinline__ uint2 ROR2(const uint2 a, const int offset)
{
uint2 result;
{
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
return result;
}
__device__ __forceinline__ uint2 SWAPUINT2(uint2 value)
{
return make_uint2(value.y, value.x);
}
__device__ __forceinline__ uint2 ROR24(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x2107);
result.y = __byte_perm(a.y, a.x, 0x6543);
return result;
}
__device__ __forceinline__ uint2 ROR16(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x1076);
result.y = __byte_perm(a.y, a.x, 0x5432);
return result;
}
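// BLAKE2b G mixing function on four 64-bit state words; the 32/24/16/63-bit rotations are
// built from the byte-permute and funnel-shift helpers above.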
__device__ __forceinline__ void G2(u64 & a, u64 & b, u64 & c, u64 & d, u64 x, u64 y)
{
a = a + b + x;
((uint2*)&d)[0] = SWAPUINT2(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR24(((uint2*)&b)[0] ^ ((uint2*)&c)[0]);
a = a + b + y;
((uint2*)&d)[0] = ROR16(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR2(((uint2*)&b)[0] ^ ((uint2*)&c)[0], 63U);
}
struct packer_default
{
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (((bucketid << SLOTBITS) | s0) << SLOTBITS) | s1;
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
// BUCKMASK-ed to prevent illegal memory accesses in case of memory errors
return (bid >> (2 * SLOTBITS)) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return bid & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> SLOTBITS) & SLOTMASK;
}
};
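// Alternative index packer: the two slot indices are combined with the Cantor pairing function
// cantor(a,b) = a*(a+1)/2 + b (with a >= b), which fits in CANTORBITS = 2*SLOTBITS - 2 bits and
// therefore leaves two more bits for the bucket id than packer_default.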
struct packer_cantor
{
__device__ __forceinline__ static u32 cantor(const u32 s0, const u32 s1)
{
u32 a = umax(s0, s1);
u32 b = umin(s0, s1);
return a * (a + 1) / 2 + b;
}
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (bucketid << CANTORBITS) | cantor(s0, s1);
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> CANTORBITS) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return ((bid & CANTORMASK) - cantor(0, s1)) & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
u32 k, q, sqr = 8 * (bid & CANTORMASK) + 1;
// this k=sqrt(sqr) computing loop averages 3.4 iterations out of maximum 9
for (k = CANTORMAXSQRT; (q = sqr / k) < k; k = (k + q) / 2);
return ((k - 1) / 2) & SLOTMASK;
}
};
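// Round 0: each thread completes one BLAKE2b compression parameterized by its block index and
// the nonce, then splits the output into two hash strings and scatters each into one of the
// 4096 round-0 buckets selected by 12 bits of the hash.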
template <u32 RB, u32 SM, typename PACKER>
__global__ void digit_first(equi<RB, SM>* eq, u32 nonce)
{
const u32 block = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ u64 hash_h[8];
u32* hash_h32 = (u32*)hash_h;
if (threadIdx.x < 16)
hash_h32[threadIdx.x] = __ldca(&eq->blake_h32[threadIdx.x]);
__syncthreads();
u64 m = (u64)block << 32 | (u64)nonce;
union
{
u64 v[16];
u32 v32[32];
uint4 v128[8];
};
v[0] = hash_h[0];
v[1] = hash_h[1];
v[2] = hash_h[2];
v[3] = hash_h[3];
v[4] = hash_h[4];
v[5] = hash_h[5];
v[6] = hash_h[6];
v[7] = hash_h[7];
v[8] = blake_iv[0];
v[9] = blake_iv[1];
v[10] = blake_iv[2];
v[11] = blake_iv[3];
v[12] = blake_iv[4] ^ (128 + 16);
v[13] = blake_iv[5];
v[14] = blake_iv[6] ^ 0xffffffffffffffff;
v[15] = blake_iv[7];
// mix 1
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 2
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 3
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, m);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 4
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, m);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 5
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, m);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 6
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], m, 0);
// mix 7
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], m, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 8
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, m);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 9
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], m, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 10
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], m, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 11
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 12
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
v[0] ^= hash_h[0] ^ v[8];
v[1] ^= hash_h[1] ^ v[9];
v[2] ^= hash_h[2] ^ v[10];
v[3] ^= hash_h[3] ^ v[11];
v[4] ^= hash_h[4] ^ v[12];
v[5] ^= hash_h[5] ^ v[13];
v32[12] ^= hash_h32[12] ^ v32[28];
u32 bexor = __byte_perm(v32[0], 0, 0x4012); // first 20 bits
u32 bucketid;
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
u32 slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[0], v32[1], 0x1234);
tt.y = __byte_perm(v32[1], v32[2], 0x1234);
tt.z = __byte_perm(v32[2], v32[3], 0x1234);
tt.w = __byte_perm(v32[3], v32[4], 0x1234);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[4], v32[5], 0x1234);
tt.y = __byte_perm(v32[5], v32[6], 0x1234);
tt.z = 0;
tt.w = block << 1;
*(uint4*)(&s->hash[4]) = tt;
}
bexor = __byte_perm(v32[6], 0, 0x0123);
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[6], v32[7], 0x2345);
tt.y = __byte_perm(v32[7], v32[8], 0x2345);
tt.z = __byte_perm(v32[8], v32[9], 0x2345);
tt.w = __byte_perm(v32[9], v32[10], 0x2345);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[10], v32[11], 0x2345);
tt.y = __byte_perm(v32[11], v32[12], 0x2345);
tt.z = 0;
tt.w = (block << 1) + 1;
*(uint4*)(&s->hash[4]) = tt;
}
}
/*
Functions digit_1 to digit_8 work by the same principle:
each thread does 2-3 slot loads (loads are coalesced).
The xorwork of the slots is loaded into shared memory and kept in registers (except for digit_1).
At the same time, the restbits (8 or 9 bits) of the xorwork are used for collisions;
the restbits determine the position in ht.
Pair creation follows. The first one (or two) pairs' xorworks are written to global memory
as soon as possible; the remaining pairs are saved in shared memory (one u32 per pair - two 16-bit indices, see the packing sketch below).
In most cases all threads have one (or two) pairs, so with this trick we offload memory writes a bit in the last step.
In the last step we save the xorwork of the pairs to memory.
*/
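/*
Illustrative sketch (plain-C equivalent) of the pair encoding used throughout digit_1..digit_8
and in digit_last_wdc: two 16-bit slot indices are packed into one u32 and unpacked again with __byte_perm:
pair = __byte_perm(si, prev, 0x1054); // pair = ((si & 0xffff) << 16) | (prev & 0xffff)
i = __byte_perm(pair, 0, 0x4510);     // low half  -> prev
k = __byte_perm(pair, 0, 0x4532);     // high half -> si
*/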
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_1(equi<RB, SM>* eq)
{
__shared__ u16 ht[256][SSM - 1];
__shared__ uint2 lastword1[RB8_NSLOTS];
__shared__ uint4 lastword2[RB8_NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < 256)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots0[bucketid], RB8_NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
uint2 ta[2];
uint4 tb[2];
u32 si[2];
// enable this to make shared mem operations fully safe;
// when disabled it gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slot* pslot1 = eq->round0trees[bucketid] + si[i];
// get xhash
uint4 a1 = *(uint4*)(&pslot1->hash[0]);
uint2 a2 = *(uint2*)(&pslot1->hash[4]);
ta[i].x = a1.x;
ta[i].y = a1.y;
lastword1[si[i]] = ta[i];
tb[i].x = a1.z;
tb[i].y = a1.w;
tb[i].z = a2.x;
tb[i].w = a2.y;
lastword2[si[i]] = tb[i];
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(hr[i]) : "r"(ta[i].x));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
int* pairs = ht_len;
u32 xors[6];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = ta[i] ^ lastword1[p];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[si[i]] ^ lastword2[p];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, si[i], p, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[i] ^ lastword2[k];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, i, k, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_2(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][SSM - 1];
__shared__ u32 lastword1[NSLOTS];
__shared__ uint4 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slot* buck = eq->trees[0][bucketid];
u32 bsize = umin(eq->edata.nslots[1][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 ta[2];
uint4 tt[2];
u32 si[2];
// enable this to make shared mem operations fully safe;
// when disabled it gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
// get slot
const slot* pslot1 = buck + si[i];
uint4 ttx = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = ta[i] = ttx.x;
uint2 tty = *(uint2*)(&pslot1->hash[4]);
tt[i].x = ttx.y;
tt[i].y = ttx.z;
tt[i].z = ttx.w;
tt[i].w = tty.x;
lastword2[si[i]] = tt[i];
hr[i] = tty.y & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[0] = ta[i] ^ lastword1[p];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = tt[i] ^ lastword2[p];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[0] = lastword1[i] ^ lastword1[k];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = lastword2[i] ^ lastword2[k];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_3(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[2][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
u32 ta[2];
// enable this to make shared mem operations fully safe;
// when disabled it gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round2trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round2trees[bucketid].treestiny[si[i]];
tt[i] = *(uint4*)(&xs.hash[0]);
lastword1[si[i]] = tt[i];
ta[i] = xst.hash[0];
lastword2[si[i]] = ta[i];
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[4] = ta[i] ^ lastword2[p];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = tt[i] ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[4] = lastword2[i] ^ lastword2[k];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_4(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[3][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
// enable this to make shared mem operations fully safe;
// when disabled it gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round3trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round3trees[bucketid].treestiny[si[i]];
// get xhash
tt[i] = *(uint4*)(&xs.hash[0]);
lastword[si[i]] = tt[i];
hr[i] = xst.hash[0] & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_5(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slotsmall* buck = eq->treessmall[3][bucketid];
u32 bsize = umin(eq->edata.nslots[4][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
// enable this to make shared mem operations fully safe;
// when disabled it gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword[si[i]] = tt[i];
asm("bfe.u32 %0, %1, 4, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = xors[3];
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[1];
tt.y = xors[2];
tt.z = xors[3];
tt.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = tt;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_6(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint2 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[5][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[2][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = *(uint2*)(&tt[i].x);
lastword2[si[i]] = tt[i].z;
asm("bfe.u32 %0, %1, 16, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
// doing this to save shared memory
int* pairs = ht_len;
__syncthreads();
u32 xors[3];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
u32 pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
xors[2] = lastword2[i] ^ lastword2[k];
if (xors[2] == 0)
continue;
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_7(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[6][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[0][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint4*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].z), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[0];
tt.y = xors[1];
tt.z = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
tt.w = 0;
*(uint4*)(&xs.hash[0]) = tt;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_8(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
next_pair = 0;
pairs_len = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[7][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[1][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint2 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint2*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 8, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot >= RB8_NSLOTS_LD) continue;
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
/*
The last round function is similar to the previous ones but has a different ending.
We use warps to process the final candidates; each warp processes one candidate.
The first two bidandsids (a u32 holding a bucketid and two slot ids) are retrieved by
lane 0 and lane 16, the next four bidandsids by lanes 0, 8, 16 and 24, ... until
all lanes in the warp have bidandsids from round 4. Next, each thread retrieves
16 indices. While doing so, the indices are put into a comparison using atomicExch
to determine whether there are duplicates (tromp's method). At the end, if no
duplicates are found, the candidate solution (all indices) is saved. Note that this
dup-check method is not exact, so CPU dup checking is still needed afterwards.
*/
template <u32 RB, u32 SM, int SSM, u32 FCT, typename PACKER, u32 MAXPAIRS, u32 DUPBITS, u32 W>
__global__ void digit_last_wdc(equi<RB, SM>* eq)
{
__shared__ u8 shared_data[8192];
int* ht_len = (int*)(&shared_data[0]);
int* pairs = ht_len;
u32* lastword = (u32*)(&shared_data[256 * 4]);
u16* ht = (u16*)(&shared_data[256 * 4 + RB8_NSLOTS_LD * 4]);
u32* pairs_len = (u32*)(&shared_data[8188]);
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
#pragma unroll
for (u32 i = 0; i != FCT; ++i)
ht_len[(i * (256 / FCT)) + threadid] = 0;
if (threadid == ((256 / FCT) - 1))
*pairs_len = 0;
slottiny* buck = eq->treestiny[0][bucketid];
u32 bsize = umin(eq->edata.nslots8[bucketid], RB8_NSLOTS_LD);
u32 si[3 * FCT];
u32 hr[3 * FCT];
int pos[3 * FCT];
u32 lw[3 * FCT];
#pragma unroll
for (u32 i = 0; i != (3 * FCT); ++i)
pos[i] = SSM;
__syncthreads();
#pragma unroll
for (u32 i = 0; i != (3 * FCT); ++i)
{
si[i] = i * (256 / FCT) + threadid;
if (si[i] >= bsize) break;
const slottiny* pslot1 = buck + si[i];
// get xhash
uint2 tt = *(uint2*)(&pslot1->hash[0]);
lw[i] = tt.x;
lastword[si[i]] = lw[i];
u32 a;
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(a) : "r"(lw[i]));
hr[i] = a;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1))
ht[hr[i] * (SSM - 1) + pos[i]] = si[i];
}
__syncthreads();
#pragma unroll
for (u32 i = 0; i != (3 * FCT); ++i)
{
if (pos[i] >= SSM) continue;
for (int k = 0; k != pos[i]; ++k)
{
u16 prev = ht[hr[i] * (SSM - 1) + k];
if (lw[i] != lastword[prev]) continue;
u32 pindex = atomicAdd(pairs_len, 1);
if (pindex >= MAXPAIRS) break;
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
__syncthreads();
u32 plen = umin(*pairs_len, 64);
#define CALC_LEVEL(a, b, c, d) { \
u32 plvl = levels[b]; \
u32* bucks = eq->round4bidandsids[PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1]; \
levels[c] = bucks[slot0]; \
}
#define CALC_LEVEL_SMALL(a, b, c, d) { \
u32 plvl = levels[b]; \
slotsmall* bucks = eq->treessmall[a][PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1].hash[d]; \
levels[c] = bucks[slot0].hash[d]; \
}
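/*
The two macros above unwind the binary tree of pairs: lanes 0 and 16 start from the two slots
of a round-8 pair, and each step doubles the number of filled entries in levels[] until all 32
lanes hold one round-4 bidandsid; each lane then walks its own subtree below to recover
16 round-0 leaf indices (ind[0..15]), i.e. 512 indices per candidate.
*/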
u32 lane = threadIdx.x & 0x1f;
u32 par = threadIdx.x >> 5;
u32* levels = (u32*)&pairs[MAXPAIRS + (par << DUPBITS)];
u32* susp = levels;
while (par < plen)
{
int pair = pairs[par];
par += W;
if (lane % 16 == 0)
{
u32 plvl;
if (lane == 0) plvl = buck[__byte_perm(pair, 0, 0x4510)].hash[1];
else plvl = buck[__byte_perm(pair, 0, 0x4532)].hash[1];
slotsmall* bucks = eq->treessmall[1][PACKER::get_bucketid(plvl, RB, SM)];
u32 slot1 = PACKER::get_slot1(plvl, RB, SM);
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM);
levels[lane] = bucks[slot1].hash[2];
levels[lane + 8] = bucks[slot0].hash[2];
}
if (lane % 8 == 0)
CALC_LEVEL_SMALL(0, lane, lane + 4, 3);
if (lane % 4 == 0)
CALC_LEVEL_SMALL(2, lane, lane + 2, 3);
if (lane % 2 == 0)
CALC_LEVEL(0, lane, lane + 1, 4);
u32 ind[16];
u32 f1 = levels[lane];
const slottiny* buck_v4 = &eq->round3trees[PACKER::get_bucketid(f1, RB, SM)].treestiny[0];
const u32 slot1_v4 = PACKER::get_slot1(f1, RB, SM);
const u32 slot0_v4 = PACKER::get_slot0(f1, slot1_v4, RB, SM);
susp[lane] = 0xffffffff;
susp[32 + lane] = 0xffffffff;
#define CHECK_DUP(a) \
__any(atomicExch(&susp[(ind[a] & ((1 << DUPBITS) - 1))], (ind[a] >> DUPBITS)) == (ind[a] >> DUPBITS))
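/*
CHECK_DUP: susp[] is a small per-warp table indexed by the low DUPBITS bits of an index; atomicExch
stores the remaining high bits as a tag and returns the previous tag. If any lane finds its own tag
already present in its slot, that index was already seen and the whole candidate is dropped. A tag
can also be overwritten by an unrelated index sharing the same low bits, so some duplicates slip
through - hence the exact re-check on the CPU with duped().
*/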
u32 f2 = buck_v4[slot1_v4].hash[1];
const slottiny* buck_v3_1 = &eq->round2trees[PACKER::get_bucketid(f2, RB, SM)].treestiny[0];
const u32 slot1_v3_1 = PACKER::get_slot1(f2, RB, SM);
const u32 slot0_v3_1 = PACKER::get_slot0(f2, slot1_v3_1, RB, SM);
susp[64 + lane] = 0xffffffff;
susp[96 + lane] = 0xffffffff;
u32 f0 = buck_v3_1[slot1_v3_1].hash[1];
const slot* buck_v2_1 = eq->trees[0][PACKER::get_bucketid(f0, RB, SM)];
const u32 slot1_v2_1 = PACKER::get_slot1(f0, RB, SM);
const u32 slot0_v2_1 = PACKER::get_slot0(f0, slot1_v2_1, RB, SM);
susp[128 + lane] = 0xffffffff;
susp[160 + lane] = 0xffffffff;
u32 f3 = buck_v2_1[slot1_v2_1].hash[6];
const slot* buck_fin_1 = eq->round0trees[packer_default::get_bucketid(f3, 8, RB8_NSLOTS)];
const u32 slot1_fin_1 = packer_default::get_slot1(f3, 8, RB8_NSLOTS);
const u32 slot0_fin_1 = packer_default::get_slot0(f3, slot1_fin_1, 8, RB8_NSLOTS);
susp[192 + lane] = 0xffffffff;
susp[224 + lane] = 0xffffffff;
ind[0] = buck_fin_1[slot1_fin_1].hash[7];
if (CHECK_DUP(0)) continue;
ind[1] = buck_fin_1[slot0_fin_1].hash[7];
if (CHECK_DUP(1)) continue;
u32 f4 = buck_v2_1[slot0_v2_1].hash[6];
const slot* buck_fin_2 = eq->round0trees[packer_default::get_bucketid(f4, 8, RB8_NSLOTS)];
const u32 slot1_fin_2 = packer_default::get_slot1(f4, 8, RB8_NSLOTS);
const u32 slot0_fin_2 = packer_default::get_slot0(f4, slot1_fin_2, 8, RB8_NSLOTS);
ind[2] = buck_fin_2[slot1_fin_2].hash[7];
if (CHECK_DUP(2)) continue;
ind[3] = buck_fin_2[slot0_fin_2].hash[7];
if (CHECK_DUP(3)) continue;
u32 f5 = buck_v3_1[slot0_v3_1].hash[1];
const slot* buck_v2_2 = eq->trees[0][PACKER::get_bucketid(f5, RB, SM)];
const u32 slot1_v2_2 = PACKER::get_slot1(f5, RB, SM);
const u32 slot0_v2_2 = PACKER::get_slot0(f5, slot1_v2_2, RB, SM);
u32 f6 = buck_v2_2[slot1_v2_2].hash[6];
const slot* buck_fin_3 = eq->round0trees[packer_default::get_bucketid(f6, 8, RB8_NSLOTS)];
const u32 slot1_fin_3 = packer_default::get_slot1(f6, 8, RB8_NSLOTS);
const u32 slot0_fin_3 = packer_default::get_slot0(f6, slot1_fin_3, 8, RB8_NSLOTS);
ind[4] = buck_fin_3[slot1_fin_3].hash[7];
if (CHECK_DUP(4)) continue;
ind[5] = buck_fin_3[slot0_fin_3].hash[7];
if (CHECK_DUP(5)) continue;
u32 f7 = buck_v2_2[slot0_v2_2].hash[6];
const slot* buck_fin_4 = eq->round0trees[packer_default::get_bucketid(f7, 8, RB8_NSLOTS)];
const u32 slot1_fin_4 = packer_default::get_slot1(f7, 8, RB8_NSLOTS);
const u32 slot0_fin_4 = packer_default::get_slot0(f7, slot1_fin_4, 8, RB8_NSLOTS);
ind[6] = buck_fin_4[slot1_fin_4].hash[7];
if (CHECK_DUP(6)) continue;
ind[7] = buck_fin_4[slot0_fin_4].hash[7];
if (CHECK_DUP(7)) continue;
u32 f8 = buck_v4[slot0_v4].hash[1];
const slottiny* buck_v3_2 = &eq->round2trees[PACKER::get_bucketid(f8, RB, SM)].treestiny[0];
const u32 slot1_v3_2 = PACKER::get_slot1(f8, RB, SM);
const u32 slot0_v3_2 = PACKER::get_slot0(f8, slot1_v3_2, RB, SM);
u32 f9 = buck_v3_2[slot1_v3_2].hash[1];
const slot* buck_v2_3 = eq->trees[0][PACKER::get_bucketid(f9, RB, SM)];
const u32 slot1_v2_3 = PACKER::get_slot1(f9, RB, SM);
const u32 slot0_v2_3 = PACKER::get_slot0(f9, slot1_v2_3, RB, SM);
u32 f10 = buck_v2_3[slot1_v2_3].hash[6];
const slot* buck_fin_5 = eq->round0trees[packer_default::get_bucketid(f10, 8, RB8_NSLOTS)];
const u32 slot1_fin_5 = packer_default::get_slot1(f10, 8, RB8_NSLOTS);
const u32 slot0_fin_5 = packer_default::get_slot0(f10, slot1_fin_5, 8, RB8_NSLOTS);
ind[8] = buck_fin_5[slot1_fin_5].hash[7];
if (CHECK_DUP(8)) continue;
ind[9] = buck_fin_5[slot0_fin_5].hash[7];
if (CHECK_DUP(9)) continue;
u32 f11 = buck_v2_3[slot0_v2_3].hash[6];
const slot* buck_fin_6 = eq->round0trees[packer_default::get_bucketid(f11, 8, RB8_NSLOTS)];
const u32 slot1_fin_6 = packer_default::get_slot1(f11, 8, RB8_NSLOTS);
const u32 slot0_fin_6 = packer_default::get_slot0(f11, slot1_fin_6, 8, RB8_NSLOTS);
ind[10] = buck_fin_6[slot1_fin_6].hash[7];
if (CHECK_DUP(10)) continue;
ind[11] = buck_fin_6[slot0_fin_6].hash[7];
if (CHECK_DUP(11)) continue;
u32 f12 = buck_v3_2[slot0_v3_2].hash[1];
const slot* buck_v2_4 = eq->trees[0][PACKER::get_bucketid(f12, RB, SM)];
const u32 slot1_v2_4 = PACKER::get_slot1(f12, RB, SM);
const u32 slot0_v2_4 = PACKER::get_slot0(f12, slot1_v2_4, RB, SM);
u32 f13 = buck_v2_4[slot1_v2_4].hash[6];
const slot* buck_fin_7 = eq->round0trees[packer_default::get_bucketid(f13, 8, RB8_NSLOTS)];
const u32 slot1_fin_7 = packer_default::get_slot1(f13, 8, RB8_NSLOTS);
const u32 slot0_fin_7 = packer_default::get_slot0(f13, slot1_fin_7, 8, RB8_NSLOTS);
ind[12] = buck_fin_7[slot1_fin_7].hash[7];
if (CHECK_DUP(12)) continue;
ind[13] = buck_fin_7[slot0_fin_7].hash[7];
if (CHECK_DUP(13)) continue;
u32 f14 = buck_v2_4[slot0_v2_4].hash[6];
const slot* buck_fin_8 = eq->round0trees[packer_default::get_bucketid(f14, 8, RB8_NSLOTS)];
const u32 slot1_fin_8 = packer_default::get_slot1(f14, 8, RB8_NSLOTS);
const u32 slot0_fin_8 = packer_default::get_slot0(f14, slot1_fin_8, 8, RB8_NSLOTS);
ind[14] = buck_fin_8[slot1_fin_8].hash[7];
if (CHECK_DUP(14)) continue;
ind[15] = buck_fin_8[slot0_fin_8].hash[7];
if (CHECK_DUP(15)) continue;
u32 soli;
if (lane == 0)
{
soli = atomicAdd(&eq->edata.srealcont.nsols, 1);
}
soli = __shfl(soli, 0);
if (soli < MAXREALSOLS)
{
u32 pos = lane << 4;
*(uint4*)(&eq->edata.srealcont.sols[soli][pos]) = *(uint4*)(&ind[0]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 4]) = *(uint4*)(&ind[4]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 8]) = *(uint4*)(&ind[8]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 12]) = *(uint4*)(&ind[12]);
}
}
}
std::mutex dev_init;
int dev_init_done[8] = { 0 };
__host__ int compu32(const void *pa, const void *pb)
{
uint32_t a = *(uint32_t *)pa, b = *(uint32_t *)pb;
return a<b ? -1 : a == b ? 0 : +1;
}
__host__ bool duped(uint32_t* prf)
{
uint32_t sortprf[512];
memcpy(sortprf, prf, sizeof(uint32_t) * 512);
qsort(sortprf, 512, sizeof(uint32_t), &compu32);
for (uint32_t i = 1; i<512; i++)
if (sortprf[i] <= sortprf[i - 1])
return true;
return false;
}
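// duped() is the exact CPU-side check: it sorts a copy of the 512 indices and reports any repeated value.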
__host__ void sort_pair(uint32_t *a, uint32_t len)
{
uint32_t *b = a + len;
uint32_t tmp, need_sorting = 0;
for (uint32_t i = 0; i < len; i++)
if (need_sorting || a[i] > b[i])
{
need_sorting = 1;
tmp = a[i];
a[i] = b[i];
b[i] = tmp;
}
else if (a[i] < b[i])
return;
}
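/*
sort_pair swaps the two adjacent index blocks a[0..len-1] and b[0..len-1] when the first differing
element of a is larger, so every subtree of the solution ends up in the lexicographically ordered
form commonly required by Equihash verifiers.
*/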
__host__ void setheader(blake2b_state *ctx, const char *header, const u32 headerLen, const char* nce, const u32 nonceLen)
{
uint32_t le_N = WN;
uint32_t le_K = WK;
uchar personal[] = "BitcoinZ01230123";
memcpy(personal + 8, &le_N, 4);
memcpy(personal + 12, &le_K, 4);
blake2b_param P[1];
P->digest_length = HASHOUT;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
P->node_offset = 0;
P->node_depth = 0;
P->inner_length = 0;
memset(P->reserved, 0, sizeof(P->reserved));
memset(P->salt, 0, sizeof(P->salt));
memcpy(P->personal, (const uint8_t *)personal, 16);
blake2b_init_param(ctx, P);
blake2b_update(ctx, (const uchar *)header, headerLen);
blake2b_update(ctx, (const uchar *)nce, nonceLen);
}
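/*
setheader initializes blake2b with the Equihash personalization: the 16-byte personal field is
"BitcoinZ" followed by N and K as 32-bit little-endian values, then the block header and nonce
are absorbed into the state.
*/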
#ifdef WIN32
typedef hipError_t(CUDAAPI *dec_cuDeviceGet)(hipDevice_t*, int);
typedef hipError_t(CUDAAPI *dec_cuCtxCreate)(hipCtx_t*, unsigned int, hipDevice_t);
typedef hipError_t(CUDAAPI *dec_cuCtxPushCurrent)(hipCtx_t);
typedef hipError_t(CUDAAPI *dec_cuCtxDestroy)(hipCtx_t);
dec_cuDeviceGet _cuDeviceGet = nullptr;
dec_cuCtxCreate _cuCtxCreate = nullptr;
dec_cuCtxPushCurrent _cuCtxPushCurrent = nullptr;
dec_cuCtxDestroy _cuCtxDestroy = nullptr;
#endif
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::eq_cuda_context(int id)
: device_id(id)
{
solutions = nullptr;
dev_init.lock();
if (!dev_init_done[device_id])
{
// only first thread shall init device
checkCudaErrors(hipSetDevice(device_id));
checkCudaErrors(hipDeviceReset());
checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync));
pctx = nullptr;
}
else
{
// create new context
hipDevice_t dev;
#ifdef WIN32
if (_cuDeviceGet == nullptr)
{
HMODULE hmod = LoadLibraryA("nvcuda.dll");
if (hmod == NULL)
throw std::runtime_error("Failed to load nvcuda.dll");
_cuDeviceGet = (dec_cuDeviceGet)GetProcAddress(hmod, "hipDeviceGet");
if (_cuDeviceGet == nullptr)
throw std::runtime_error("Failed to get hipDeviceGet address");
_cuCtxCreate = (dec_cuCtxCreate)GetProcAddress(hmod, "hipCtxCreate");
if (_cuCtxCreate == nullptr)
throw std::runtime_error("Failed to get hipCtxCreate address");
_cuCtxPushCurrent = (dec_cuCtxPushCurrent)GetProcAddress(hmod, "hipCtxPushCurrent");
if (_cuCtxPushCurrent == nullptr)
throw std::runtime_error("Failed to get cuCtxPushCurrent address");
_cuCtxDestroy = (dec_cuCtxDestroy)GetProcAddress(hmod, "hipCtxDestroy");
if (_cuCtxDestroy == nullptr)
throw std::runtime_error("Failed to get hipCtxDestroy address");
}
checkCudaDriverErrors(_cuDeviceGet(&dev, device_id));
checkCudaDriverErrors(_cuCtxCreate(&pctx, HIP_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(_cuCtxPushCurrent(pctx));
#else
checkCudaDriverErrors(hipDeviceGet(&dev, device_id));
checkCudaDriverErrors(hipCtxCreate(&pctx, HIP_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(cuCtxPushCurrent(pctx));
#endif
}
++dev_init_done[device_id];
dev_init.unlock();
if (hipMalloc((void**)&device_eq, sizeof(equi<RB, SM>)) != hipSuccess)
throw std::runtime_error("CUDA: failed to alloc memory");
solutions = (scontainerreal*)malloc(sizeof(scontainerreal));
}
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ void eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
blake2b_state blake_ctx;
int blocks = NBUCKETS;
setheader(&blake_ctx, tequihash_header, tequihash_header_len, nonce, nonce_len);
// todo: improve
// the djezo solver allows the last 4 bytes of the nonce to be iterated;
// this can be used to create an internal loop - compute the initial blake hash only once, then load 8*8 bytes (blake state h) on the device
// and then just iterate nn++
// less CPU load, one hipMemcpy less -> faster
//u32 nn = *(u32*)&nonce[28];
u32 nn = 0;
checkCudaErrors(hipMemcpy(&device_eq->blake_h, &blake_ctx.h, sizeof(u64) * 8, hipMemcpyHostToDevice));
checkCudaErrors(hipMemset(&device_eq->edata, 0, sizeof(device_eq->edata)));
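// kernel pipeline: blake2b round 0 (digit_first), eight collision rounds (digit_1..digit_8),
// then digit_last_wdc, which pairs the final collisions and does a partial on-GPU duplicate check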
digit_first<RB, SM, PACKER> << <NBLOCKS / FD_THREADS, FD_THREADS >> >(device_eq, nn);
digit_1<RB, SM, SSM, PACKER, 4 * NRESTS, 512> << <4096, 512 >> >(device_eq);
digit_2<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
digit_3<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
if (cancelf()) return;
digit_4<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
digit_5<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
digit_6<RB, SM, SSM - 1, PACKER, 3 * NRESTS> << <blocks, NRESTS >> >(device_eq);
digit_7<RB, SM, SSM - 1, PACKER, 3 * NRESTS> << <blocks, NRESTS >> >(device_eq);
digit_8<RB, SM, SSM - 1, PACKER, 3 * NRESTS> << <blocks, NRESTS >> >(device_eq);
digit_last_wdc<RB, SM, SSM - 3, 2, PACKER, 64, 8, 4> << <4096, 256 / 2 >> >(device_eq);
checkCudaErrors(hipMemcpy(solutions, &device_eq->edata.srealcont, (MAXREALSOLS * (512 * 4)) + 4, hipMemcpyDeviceToHost));
//printf("nsols: %u\n", solutions->nsols);
//if (solutions->nsols > 9)
// printf("missing sol, total: %u\n", solutions->nsols);
for (u32 s = 0; (s < solutions->nsols) && (s < MAXREALSOLS); s++)
{
// remove dups on CPU (dup removal on the GPU is not fully exact and can let some invalid solutions through)
if (duped(solutions->sols[s])) continue;
// perform sort of pairs
for (uint32_t level = 0; level < 9; level++)
for (uint32_t i = 0; i < (1 << 9); i += (2 << level))
sort_pair(&solutions->sols[s][i], 1 << level);
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions->sols[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
}
hashdonef();
}
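// Minimal usage sketch (hypothetical host-side snippet; it assumes CONFIG_MODE_1 is defined by the
// build, and the header/nonce buffers and callback bodies are placeholders, not taken from this file):
//   eq_cuda_context<CONFIG_MODE_1> ctx(0);                 // bind to CUDA device 0
//   ctx.solve(header_bytes, header_len, nonce_bytes, nonce_len,
//             []() { return false; },                       // cancelf: never cancel
//             [](const std::vector<uint32_t>& idx, size_t, const unsigned char*) {}, // solutionf
//             []() {});                                      // hashdonef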
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::~eq_cuda_context()
{
if (solutions)
free(solutions);
hipFree(device_eq);
if (pctx)
{
// non primary thread, destroy context
#ifdef WIN32
checkCudaDriverErrors(_cuCtxDestroy(pctx));
#else
checkCudaDriverErrors(hipCtxDestroy(pctx));
#endif
}
else
{
checkCudaErrors(hipDeviceReset());
dev_init_done[device_id] = 0;
}
}
#ifdef CONFIG_MODE_1
template class eq_cuda_context<CONFIG_MODE_1>;
#endif
#ifdef CONFIG_MODE_2
template class eq_cuda_context<CONFIG_MODE_2>;
#endif
#ifdef CONFIG_MODE_3
template class eq_cuda_context<CONFIG_MODE_3>;
#endif
| 52b615befb67eb1568d2aba1de4ccc44a95f1e57.cu |
/*
Equihash solver created by djeZo ([email protected]) for NiceHash
Based on CUDA solver by John Tromp released under MIT license.
Some helper functions taken out of OpenCL solver by Marc Bevand
released under MIT license.
cuda_djezo solver is released by NiceHash (www.nicehash.com) under
GPL 3.0 license. If you don't have a copy, you can obtain one from
https://www.gnu.org/licenses/gpl-3.0.txt
*/
/*
The MIT License (MIT)
Copyright (c) 2016 John Tromp
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software, and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
/*
The MIT License (MIT)
Copyright (c) 2016 Marc Bevand
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software, and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#ifdef WIN32
#include <Windows.h>
#endif
#include <stdint.h>
#include <string.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdio.h>
#include <assert.h>
#include <functional>
#include <vector>
#include <iostream>
#include <mutex>
#include "eqcuda.hpp"
#include "sm_32_intrinsics.h"
#define WN 144
#define WK 5
#define NDIGITS (WK+1)
#define DIGITBITS (WN/(NDIGITS))
#define PROOFSIZE (1<<WK)
#define BASE (1<<DIGITBITS)
#define NHASHES (2*BASE)
#define HASHESPERBLAKE (512/WN)
#define HASHOUT (HASHESPERBLAKE*WN/8)
#define NBLOCKS ((NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE)
#define BUCKBITS (DIGITBITS - RB)
#define NBUCKETS (1 << BUCKBITS)
#define BUCKMASK (NBUCKETS - 1)
#define SLOTBITS (RB + 2)
#define SLOTRANGE (1 << SLOTBITS)
#define NSLOTS SM
#define SLOTMASK (SLOTRANGE - 1)
#define NRESTS (1 << RB)
#define RESTMASK (NRESTS - 1)
#define CANTORBITS (2 * SLOTBITS - 2)
#define CANTORMASK ((1 << CANTORBITS) - 1)
#define CANTORMAXSQRT (2 * NSLOTS)
#define RB8_NSLOTS 640
#define RB8_NSLOTS_LD 624
#define FD_THREADS 128
// reduce vstudio warnings (__byteperm, blockIdx...)
#ifdef __INTELLISENSE__
#include <device_functions.h>
#include <device_launch_parameters.h>
#define __launch_bounds__(max_tpb, min_blocks)
#define __CUDA_ARCH__ 520
uint32_t __byte_perm(uint32_t x, uint32_t y, uint32_t z);
uint32_t __shfl(uint32_t x, uint32_t y, uint32_t z);
uint32_t atomicExch(uint32_t *x, uint32_t y);
uint32_t atomicAdd(uint32_t *x, uint32_t y);
void __syncthreads(void);
void __threadfence(void);
void __threadfence_block(void);
uint32_t __ldg(const uint32_t* address);
uint64_t __ldg(const uint64_t* address);
uint4 __ldca(const uint4 *ptr);
u32 __ldca(const u32 *ptr);
u32 umin(const u32, const u32);
u32 umax(const u32, const u32);
#endif
typedef u32 proof[PROOFSIZE];
struct __align__(32) slot
{
u32 hash[8];
};
struct __align__(16) slotsmall
{
u32 hash[4];
};
struct __align__(8) slottiny
{
u32 hash[2];
};
template <u32 RB, u32 SM>
struct equi
{
slot round0trees[4096][RB8_NSLOTS];
slot trees[1][NBUCKETS][NSLOTS];
struct
{
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round2trees[NBUCKETS];
struct
{
slotsmall treessmall[NSLOTS];
slottiny treestiny[NSLOTS];
} round3trees[NBUCKETS];
slotsmall treessmall[4][NBUCKETS][NSLOTS];
slottiny treestiny[1][4096][RB8_NSLOTS_LD];
u32 round4bidandsids[NBUCKETS][NSLOTS];
union
{
u64 blake_h[8];
u32 blake_h32[16];
};
struct
{
u32 nslots8[4096];
u32 nslots0[4096];
u32 nslots[9][NBUCKETS];
scontainerreal srealcont;
} edata;
};
__device__ __constant__ const u64 blake_iv[] =
{
0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
0x510e527fade682d1, 0x9b05688c2b3e6c1f,
0x1f83d9abfb41bd6b, 0x5be0cd19137e2179,
};
__device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b)
{
return make_uint2(a.x ^ b.x, a.y ^ b.y);
}
__device__ __forceinline__ uint4 operator^ (uint4 a, uint4 b)
{
return make_uint4(a.x ^ b.x, a.y ^ b.y, a.z ^ b.z, a.w ^ b.w);
}
__device__ __forceinline__ uint2 ROR2(const uint2 a, const int offset)
{
uint2 result;
{
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset));
asm("shf.r.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset));
}
return result;
}
__device__ __forceinline__ uint2 SWAPUINT2(uint2 value)
{
return make_uint2(value.y, value.x);
}
__device__ __forceinline__ uint2 ROR24(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x2107);
result.y = __byte_perm(a.y, a.x, 0x6543);
return result;
}
__device__ __forceinline__ uint2 ROR16(const uint2 a)
{
uint2 result;
result.x = __byte_perm(a.y, a.x, 0x1076);
result.y = __byte_perm(a.y, a.x, 0x5432);
return result;
}
__device__ __forceinline__ void G2(u64 & a, u64 & b, u64 & c, u64 & d, u64 x, u64 y)
{
a = a + b + x;
((uint2*)&d)[0] = SWAPUINT2(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR24(((uint2*)&b)[0] ^ ((uint2*)&c)[0]);
a = a + b + y;
((uint2*)&d)[0] = ROR16(((uint2*)&d)[0] ^ ((uint2*)&a)[0]);
c = c + d;
((uint2*)&b)[0] = ROR2(((uint2*)&b)[0] ^ ((uint2*)&c)[0], 63U);
}
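/*
G2 is one column/diagonal step of the BLAKE2b G mixing function, specialized for this solver:
the rotations by 32, 24, 16 and 63 bits are implemented with byte permutes and a funnel shift,
and only one message word (m = block index || nonce) is ever non-zero, so most x/y arguments
passed to it in digit_first are 0.
*/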
struct packer_default
{
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (((bucketid << SLOTBITS) | s0) << SLOTBITS) | s1;
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
// BUCKMASK-ed to prevent illegal memory accesses in case of memory errors
return (bid >> (2 * SLOTBITS)) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return bid & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> SLOTBITS) & SLOTMASK;
}
};
struct packer_cantor
{
__device__ __forceinline__ static u32 cantor(const u32 s0, const u32 s1)
{
u32 a = umax(s0, s1);
u32 b = umin(s0, s1);
return a * (a + 1) / 2 + b;
}
__device__ __forceinline__ static u32 set_bucketid_and_slots(const u32 bucketid, const u32 s0, const u32 s1, const u32 RB, const u32 SM)
{
return (bucketid << CANTORBITS) | cantor(s0, s1);
}
__device__ __forceinline__ static u32 get_bucketid(const u32 bid, const u32 RB, const u32 SM)
{
return (bid >> CANTORBITS) & BUCKMASK;
}
__device__ __forceinline__ static u32 get_slot0(const u32 bid, const u32 s1, const u32 RB, const u32 SM)
{
return ((bid & CANTORMASK) - cantor(0, s1)) & SLOTMASK;
}
__device__ __forceinline__ static u32 get_slot1(const u32 bid, const u32 RB, const u32 SM)
{
u32 k, q, sqr = 8 * (bid & CANTORMASK) + 1;
// this k=sqrt(sqr) computing loop averages 3.4 iterations out of maximum 9
for (k = CANTORMAXSQRT; (q = sqr / k) < k; k = (k + q) / 2);
return ((k - 1) / 2) & SLOTMASK;
}
};
template <u32 RB, u32 SM, typename PACKER>
__global__ void digit_first(equi<RB, SM>* eq, u32 nonce)
{
const u32 block = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ u64 hash_h[8];
u32* hash_h32 = (u32*)hash_h;
if (threadIdx.x < 16)
hash_h32[threadIdx.x] = __ldca(&eq->blake_h32[threadIdx.x]);
__syncthreads();
u64 m = (u64)block << 32 | (u64)nonce;
union
{
u64 v[16];
u32 v32[32];
uint4 v128[8];
};
v[0] = hash_h[0];
v[1] = hash_h[1];
v[2] = hash_h[2];
v[3] = hash_h[3];
v[4] = hash_h[4];
v[5] = hash_h[5];
v[6] = hash_h[6];
v[7] = hash_h[7];
v[8] = blake_iv[0];
v[9] = blake_iv[1];
v[10] = blake_iv[2];
v[11] = blake_iv[3];
v[12] = blake_iv[4] ^ (128 + 16);
v[13] = blake_iv[5];
v[14] = blake_iv[6] ^ 0xffffffffffffffff;
v[15] = blake_iv[7];
// mix 1
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 2
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 3
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, m);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 4
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, m);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 5
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, m);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 6
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], m, 0);
// mix 7
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], m, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 8
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, m);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 9
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], m, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 10
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], m, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 11
G2(v[0], v[4], v[8], v[12], 0, m);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], 0, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
// mix 12
G2(v[0], v[4], v[8], v[12], 0, 0);
G2(v[1], v[5], v[9], v[13], 0, 0);
G2(v[2], v[6], v[10], v[14], 0, 0);
G2(v[3], v[7], v[11], v[15], 0, 0);
G2(v[0], v[5], v[10], v[15], m, 0);
G2(v[1], v[6], v[11], v[12], 0, 0);
G2(v[2], v[7], v[8], v[13], 0, 0);
G2(v[3], v[4], v[9], v[14], 0, 0);
v[0] ^= hash_h[0] ^ v[8];
v[1] ^= hash_h[1] ^ v[9];
v[2] ^= hash_h[2] ^ v[10];
v[3] ^= hash_h[3] ^ v[11];
v[4] ^= hash_h[4] ^ v[12];
v[5] ^= hash_h[5] ^ v[13];
v32[12] ^= hash_h32[12] ^ v32[28];
u32 bexor = __byte_perm(v32[0], 0, 0x4012); // first 20 bits
u32 bucketid;
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
u32 slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[0], v32[1], 0x1234);
tt.y = __byte_perm(v32[1], v32[2], 0x1234);
tt.z = __byte_perm(v32[2], v32[3], 0x1234);
tt.w = __byte_perm(v32[3], v32[4], 0x1234);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[4], v32[5], 0x1234);
tt.y = __byte_perm(v32[5], v32[6], 0x1234);
tt.z = 0;
tt.w = block << 1;
*(uint4*)(&s->hash[4]) = tt;
}
bexor = __byte_perm(v32[6], 0, 0x0123);
asm("bfe.u32 %0, %1, 12, 12;" : "=r"(bucketid) : "r"(bexor));
slotp = atomicAdd(&eq->edata.nslots0[bucketid], 1);
if (slotp < RB8_NSLOTS)
{
slot* s = &eq->round0trees[bucketid][slotp];
uint4 tt;
tt.x = __byte_perm(v32[6], v32[7], 0x2345);
tt.y = __byte_perm(v32[7], v32[8], 0x2345);
tt.z = __byte_perm(v32[8], v32[9], 0x2345);
tt.w = __byte_perm(v32[9], v32[10], 0x2345);
*(uint4*)(&s->hash[0]) = tt;
tt.x = __byte_perm(v32[10], v32[11], 0x2345);
tt.y = __byte_perm(v32[11], v32[12], 0x2345);
tt.z = 0;
tt.w = (block << 1) + 1;
*(uint4*)(&s->hash[4]) = tt;
}
}
/*
Functions digit_1 to digit_8 work by the same principle;
Each thread does 2-3 slot loads (loads are coalesced).
Xorwork of slots is loaded into shared memory and is kept in registers (except for digit_1).
At the same time, restbits (8 or 9 bits) in xorwork are used for collisions.
Restbits determine the position in ht.
Next comes pair creation. The first one (or two) pairs' xorworks are put into global memory
as soon as possible; the remaining pairs are saved in shared memory (one u32 per pair - 16 bit indices).
In most cases, all threads have one (or two) pairs, so this trick offloads memory writes a bit in the last step.
In the last step we save the xorwork of the pairs in memory.
A simplified sketch of the collision step is given in the comment below.
*/
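/*
Minimal standalone sketch of that collision step (illustrative only, with simplified,
hypothetical names; the real kernels below use per-round slot types and packers).
Launched with 256 threads per block, each thread bins its slot by its restbits into a
small shared hash table; every earlier slot that landed in the same row forms a
candidate pair with it.

__global__ void collision_sketch(const u32* restbits_of_slot, u32 nslots)
{
	__shared__ u16 ht[256][4];    // rows indexed by restbits, up to 4 slot ids per row
	__shared__ int ht_len[256];   // occupancy counter per row
	u32 tid = threadIdx.x;
	if (tid < 256) ht_len[tid] = 0;
	__syncthreads();
	if (tid < nslots)
	{
		u32 rest = restbits_of_slot[tid] & 0xff;   // 8 restbits select the row
		int pos = atomicAdd(&ht_len[rest], 1);     // claim the next free position in that row
		if (pos < 4) ht[rest][pos] = (u16)tid;
		// every slot already stored at ht[rest][0..pos-1] pairs with this one
	}
}
*/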
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_1(equi<RB, SM>* eq)
{
__shared__ u16 ht[256][SSM - 1];
__shared__ uint2 lastword1[RB8_NSLOTS];
__shared__ uint4 lastword2[RB8_NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < 256)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots0[bucketid], RB8_NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
uint2 ta[2];
uint4 tb[2];
u32 si[2];
	// enable this __syncthreads() to make the shared memory operations fully safe;
	// leaving it disabled gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slot* pslot1 = eq->round0trees[bucketid] + si[i];
// get xhash
uint4 a1 = *(uint4*)(&pslot1->hash[0]);
uint2 a2 = *(uint2*)(&pslot1->hash[4]);
ta[i].x = a1.x;
ta[i].y = a1.y;
lastword1[si[i]] = ta[i];
tb[i].x = a1.z;
tb[i].y = a1.w;
tb[i].z = a2.x;
tb[i].w = a2.y;
lastword2[si[i]] = tb[i];
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(hr[i]) : "r"(ta[i].x));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
int* pairs = ht_len;
u32 xors[6];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = ta[i] ^ lastword1[p];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[si[i]] ^ lastword2[p];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, si[i], p, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[1][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[2]) = lastword2[i] ^ lastword2[k];
slot &xs = eq->trees[0][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
uint4 ttx;
ttx.x = xors[5];
ttx.y = xors[0];
ttx.z = packer_default::set_bucketid_and_slots(bucketid, i, k, 8, RB8_NSLOTS);
ttx.w = 0;
*(uint4*)(&xs.hash[4]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_2(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][SSM - 1];
__shared__ u32 lastword1[NSLOTS];
__shared__ uint4 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slot* buck = eq->trees[0][bucketid];
u32 bsize = umin(eq->edata.nslots[1][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 ta[2];
uint4 tt[2];
u32 si[2];
	// enable this __syncthreads() to make the shared memory operations fully safe;
	// leaving it disabled gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
// get slot
const slot* pslot1 = buck + si[i];
uint4 ttx = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = ta[i] = ttx.x;
uint2 tty = *(uint2*)(&pslot1->hash[4]);
tt[i].x = ttx.y;
tt[i].y = ttx.z;
tt[i].z = ttx.w;
tt[i].w = tty.x;
lastword2[si[i]] = tt[i];
hr[i] = tty.y & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[0] = ta[i] ^ lastword1[p];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = tt[i] ^ lastword2[p];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[0] = lastword1[i] ^ lastword1[k];
xorbucketid = xors[0] >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[2][xorbucketid], 1);
if (xorslot < NSLOTS)
{
*(uint4*)(&xors[1]) = lastword2[i] ^ lastword2[k];
slotsmall &xs = eq->round2trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
slottiny &xst = eq->round2trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = xors[4];
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_3(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[2][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
u32 ta[2];
	// enable this __syncthreads() to make the shared memory operations fully safe;
	// leaving it disabled gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round2trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round2trees[bucketid].treestiny[si[i]];
tt[i] = *(uint4*)(&xs.hash[0]);
lastword1[si[i]] = tt[i];
ta[i] = xst.hash[0];
lastword2[si[i]] = ta[i];
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[5];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[4] = ta[i] ^ lastword2[p];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = tt[i] ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
xors[4] = lastword2[i] ^ lastword2[k];
if (xors[4] != 0)
{
*(uint4*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x2107);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[3][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->round3trees[xorbucketid].treessmall[xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[1]);
slottiny &xst = eq->round3trees[xorbucketid].treestiny[xorslot];
uint2 ttx;
ttx.x = bexor;
ttx.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xst.hash[0]) = ttx;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_4(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
u32 bsize = umin(eq->edata.nslots[3][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
	// enable this __syncthreads() to make the shared memory operations fully safe;
	// leaving it disabled gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
slotsmall &xs = eq->round3trees[bucketid].treessmall[si[i]];
slottiny &xst = eq->round3trees[bucketid].treestiny[si[i]];
// get xhash
tt[i] = *(uint4*)(&xs.hash[0]);
lastword[si[i]] = tt[i];
hr[i] = xst.hash[0] & RESTMASK;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(4 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[4][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[3][xorbucketid][xorslot];
*(uint4*)(&xs.hash[0]) = *(uint4*)(&xors[0]);
eq->round4bidandsids[xorbucketid][xorslot] = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS, u32 THREADS>
__global__ void digit_5(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint4 lastword[NSLOTS];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
if (threadid < NRESTS)
ht_len[threadid] = 0;
else if (threadid == (THREADS - 1))
pairs_len = 0;
else if (threadid == (THREADS - 33))
next_pair = 0;
slotsmall* buck = eq->treessmall[3][bucketid];
u32 bsize = umin(eq->edata.nslots[4][bucketid], NSLOTS);
u32 hr[2];
int pos[2];
pos[0] = pos[1] = SSM;
u32 si[2];
uint4 tt[2];
	// enable this __syncthreads() to make the shared memory operations fully safe;
	// leaving it disabled gains some speed, but can rarely cause a crash
//__syncthreads();
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
si[i] = i * THREADS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword[si[i]] = tt[i];
asm("bfe.u32 %0, %1, 4, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[4];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 2; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint4*)(&xors[0]) = tt[i] ^ lastword[p];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = xors[3];
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 1; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
u32 i, k;
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
i = __byte_perm(pair, 0, 0x4510);
k = __byte_perm(pair, 0, 0x4532);
*(uint4*)(&xors[0]) = lastword[i] ^ lastword[k];
if (xors[3] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x1076);
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(bexor), "r"(RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[5][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[2][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[1];
tt.y = xors[2];
tt.z = xors[3];
tt.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = tt;
}
}
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_6(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ uint2 lastword1[NSLOTS];
__shared__ u32 lastword2[NSLOTS];
__shared__ int ht_len[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[5][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[2][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
tt[i] = *(uint4*)(&pslot1->hash[0]);
lastword1[si[i]] = *(uint2*)(&tt[i].x);
lastword2[si[i]] = tt[i].z;
asm("bfe.u32 %0, %1, 16, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
// doing this to save shared memory
int* pairs = ht_len;
__syncthreads();
u32 xors[3];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
xors[2] = tt[i].z ^ lastword2[p];
if (xors[2] != 0)
{
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ lastword1[p];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
u32 pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
xors[2] = lastword2[i] ^ lastword2[k];
if (xors[2] == 0)
continue;
*(uint2*)(&xors[0]) = lastword1[i] ^ lastword1[k];
bexor = __byte_perm(xors[0], xors[1], 0x1076);
xorbucketid = bexor >> (12 + RB);
xorslot = atomicAdd(&eq->edata.nslots[6][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[0][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[1];
ttx.y = xors[2];
ttx.z = bexor;
ttx.w = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint4*)(&xs.hash[0]) = ttx;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_7(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
pairs_len = 0;
next_pair = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[6][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[0][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint4 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint4*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 12, %2;" : "=r"(hr[i]) : "r"(tt[i].z), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot < NSLOTS)
{
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 ttx;
ttx.x = xors[0];
ttx.y = xors[1];
ttx.z = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
ttx.w = 0;
*(uint4*)(&xs.hash[0]) = ttx;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
asm("bfe.u32 %0, %1, %2, %3;" : "=r"(xorbucketid) : "r"(xors[0]), "r"(8 + RB), "r"(BUCKBITS));
xorslot = atomicAdd(&eq->edata.nslots[7][xorbucketid], 1);
if (xorslot >= NSLOTS) continue;
slotsmall &xs = eq->treessmall[1][xorbucketid][xorslot];
uint4 tt;
tt.x = xors[0];
tt.y = xors[1];
tt.z = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
tt.w = 0;
*(uint4*)(&xs.hash[0]) = tt;
}
}
template <u32 RB, u32 SM, int SSM, typename PACKER, u32 MAXPAIRS>
__global__ void digit_8(equi<RB, SM>* eq)
{
__shared__ u16 ht[NRESTS][(SSM - 1)];
__shared__ u32 lastword[NSLOTS][2];
__shared__ int ht_len[NRESTS];
__shared__ int pairs[MAXPAIRS];
__shared__ u32 pairs_len;
__shared__ u32 bsize_sh;
__shared__ u32 next_pair;
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
ht_len[threadid] = 0;
if (threadid == (NRESTS - 1))
{
next_pair = 0;
pairs_len = 0;
}
else if (threadid == (NRESTS - 33))
bsize_sh = umin(eq->edata.nslots[7][bucketid], NSLOTS);
slotsmall* buck = eq->treessmall[1][bucketid];
u32 hr[3];
int pos[3];
pos[0] = pos[1] = pos[2] = SSM;
u32 si[3];
uint2 tt[3];
__syncthreads();
u32 bsize = bsize_sh;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
si[i] = i * NRESTS + threadid;
if (si[i] >= bsize) break;
const slotsmall* pslot1 = buck + si[i];
// get xhash
tt[i] = *(uint2*)(&pslot1->hash[0]);
*(uint2*)(&lastword[si[i]][0]) = *(uint2*)(&tt[i].x);
asm("bfe.u32 %0, %1, 8, %2;" : "=r"(hr[i]) : "r"(tt[i].x), "r"(RB));
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1)) ht[hr[i]][pos[i]] = si[i];
}
__syncthreads();
u32 xors[2];
u32 bexor, xorbucketid, xorslot;
#pragma unroll
for (u32 i = 0; i != 3; ++i)
{
if (pos[i] >= SSM) continue;
if (pos[i] > 0)
{
u16 p = ht[hr[i]][0];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
if (pos[i] > 1)
{
p = ht[hr[i]][1];
*(uint2*)(&xors[0]) = *(uint2*)(&tt[i].x) ^ *(uint2*)(&lastword[p][0]);
if (xors[1] != 0)
{
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot < RB8_NSLOTS_LD)
{
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, si[i], p, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
for (int k = 2; k != pos[i]; ++k)
{
u32 pindex = atomicAdd(&pairs_len, 1);
if (pindex >= MAXPAIRS) break;
u16 prev = ht[hr[i]][k];
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
}
}
__syncthreads();
// process pairs
u32 plen = umin(pairs_len, MAXPAIRS);
for (u32 s = atomicAdd(&next_pair, 1); s < plen; s = atomicAdd(&next_pair, 1))
{
int pair = pairs[s];
u32 i = __byte_perm(pair, 0, 0x4510);
u32 k = __byte_perm(pair, 0, 0x4532);
*(uint2*)(&xors[0]) = *(uint2*)(&lastword[i][0]) ^ *(uint2*)(&lastword[k][0]);
if (xors[1] == 0)
continue;
bexor = __byte_perm(xors[0], xors[1], 0x0765);
xorbucketid = bexor >> (12 + 8);
xorslot = atomicAdd(&eq->edata.nslots8[xorbucketid], 1);
if (xorslot >= RB8_NSLOTS_LD) continue;
slottiny &xs = eq->treestiny[0][xorbucketid][xorslot];
uint2 tt;
tt.x = xors[1];
tt.y = PACKER::set_bucketid_and_slots(bucketid, i, k, RB, SM);
*(uint2*)(&xs.hash[0]) = tt;
}
}
/*
The last round function is similar to the previous ones but has a different ending.
We use warps to process the final candidates; each warp processes one candidate.
The first two bidandsids (a u32 packing a stored bucketid and two slotids) are retrieved by
lanes 0 and 16, the next four bidandsids by lanes 0, 8, 16 and 24, ... until
all lanes in the warp hold bidandsids from round 4. Next, each thread retrieves
16 indices. While doing so, the indices are compared using atomicExch
to detect duplicates (tromp's method). At the end, if no duplicates are found,
the candidate solution (all 512 indices) is saved. Note that this duplicate check
is not exact, so CPU duplicate checking is still needed afterwards; a sketch of the
atomicExch test is given in the comment below.
*/
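/*
Minimal sketch of that atomicExch duplicate test (illustrative only; the real code inlines it
through the CHECK_DUP macro below). Each index is split into a table position (low DUPBITS bits)
and a tag (the remaining bits); a hit means the exact same index was stored before. The test can
miss a duplicate when another index evicts the stored tag from that position first, which is why
the exact host-side check (duped()) still runs afterwards.

__device__ bool likely_dup(u32* susp, u32 index, u32 dupbits)
{
	u32 pos = index & ((1u << dupbits) - 1);            // where the tag is stored
	u32 tag = index >> dupbits;                         // what is stored and compared
	return __any(atomicExch(&susp[pos], tag) == tag);   // true across the warp on a repeat
}
*/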
template <u32 RB, u32 SM, int SSM, u32 FCT, typename PACKER, u32 MAXPAIRS, u32 DUPBITS, u32 W>
__global__ void digit_last_wdc(equi<RB, SM>* eq)
{
__shared__ u8 shared_data[8192];
int* ht_len = (int*)(&shared_data[0]);
int* pairs = ht_len;
u32* lastword = (u32*)(&shared_data[256 * 4]);
u16* ht = (u16*)(&shared_data[256 * 4 + RB8_NSLOTS_LD * 4]);
u32* pairs_len = (u32*)(&shared_data[8188]);
const u32 threadid = threadIdx.x;
const u32 bucketid = blockIdx.x;
// reset hashtable len
#pragma unroll
for (u32 i = 0; i != FCT; ++i)
ht_len[(i * (256 / FCT)) + threadid] = 0;
if (threadid == ((256 / FCT) - 1))
*pairs_len = 0;
slottiny* buck = eq->treestiny[0][bucketid];
u32 bsize = umin(eq->edata.nslots8[bucketid], RB8_NSLOTS_LD);
u32 si[3 * FCT];
u32 hr[3 * FCT];
int pos[3 * FCT];
u32 lw[3 * FCT];
#pragma unroll
for (u32 i = 0; i != (3 * FCT); ++i)
pos[i] = SSM;
__syncthreads();
#pragma unroll
for (u32 i = 0; i != (3 * FCT); ++i)
{
si[i] = i * (256 / FCT) + threadid;
if (si[i] >= bsize) break;
const slottiny* pslot1 = buck + si[i];
// get xhash
uint2 tt = *(uint2*)(&pslot1->hash[0]);
lw[i] = tt.x;
lastword[si[i]] = lw[i];
u32 a;
asm("bfe.u32 %0, %1, 20, 8;" : "=r"(a) : "r"(lw[i]));
hr[i] = a;
pos[i] = atomicAdd(&ht_len[hr[i]], 1);
if (pos[i] < (SSM - 1))
ht[hr[i] * (SSM - 1) + pos[i]] = si[i];
}
__syncthreads();
#pragma unroll
for (u32 i = 0; i != (3 * FCT); ++i)
{
if (pos[i] >= SSM) continue;
for (int k = 0; k != pos[i]; ++k)
{
u16 prev = ht[hr[i] * (SSM - 1) + k];
if (lw[i] != lastword[prev]) continue;
u32 pindex = atomicAdd(pairs_len, 1);
if (pindex >= MAXPAIRS) break;
pairs[pindex] = __byte_perm(si[i], prev, 0x1054);
}
}
__syncthreads();
u32 plen = umin(*pairs_len, 64);
#define CALC_LEVEL(a, b, c, d) { \
u32 plvl = levels[b]; \
u32* bucks = eq->round4bidandsids[PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1]; \
levels[c] = bucks[slot0]; \
}
#define CALC_LEVEL_SMALL(a, b, c, d) { \
u32 plvl = levels[b]; \
slotsmall* bucks = eq->treessmall[a][PACKER::get_bucketid(plvl, RB, SM)]; \
u32 slot1 = PACKER::get_slot1(plvl, RB, SM); \
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM); \
levels[b] = bucks[slot1].hash[d]; \
levels[c] = bucks[slot0].hash[d]; \
}
u32 lane = threadIdx.x & 0x1f;
u32 par = threadIdx.x >> 5;
u32* levels = (u32*)&pairs[MAXPAIRS + (par << DUPBITS)];
u32* susp = levels;
while (par < plen)
{
int pair = pairs[par];
par += W;
if (lane % 16 == 0)
{
u32 plvl;
if (lane == 0) plvl = buck[__byte_perm(pair, 0, 0x4510)].hash[1];
else plvl = buck[__byte_perm(pair, 0, 0x4532)].hash[1];
slotsmall* bucks = eq->treessmall[1][PACKER::get_bucketid(plvl, RB, SM)];
u32 slot1 = PACKER::get_slot1(plvl, RB, SM);
u32 slot0 = PACKER::get_slot0(plvl, slot1, RB, SM);
levels[lane] = bucks[slot1].hash[2];
levels[lane + 8] = bucks[slot0].hash[2];
}
if (lane % 8 == 0)
CALC_LEVEL_SMALL(0, lane, lane + 4, 3);
if (lane % 4 == 0)
CALC_LEVEL_SMALL(2, lane, lane + 2, 3);
if (lane % 2 == 0)
CALC_LEVEL(0, lane, lane + 1, 4);
u32 ind[16];
u32 f1 = levels[lane];
const slottiny* buck_v4 = &eq->round3trees[PACKER::get_bucketid(f1, RB, SM)].treestiny[0];
const u32 slot1_v4 = PACKER::get_slot1(f1, RB, SM);
const u32 slot0_v4 = PACKER::get_slot0(f1, slot1_v4, RB, SM);
susp[lane] = 0xffffffff;
susp[32 + lane] = 0xffffffff;
#define CHECK_DUP(a) \
__any(atomicExch(&susp[(ind[a] & ((1 << DUPBITS) - 1))], (ind[a] >> DUPBITS)) == (ind[a] >> DUPBITS))
u32 f2 = buck_v4[slot1_v4].hash[1];
const slottiny* buck_v3_1 = &eq->round2trees[PACKER::get_bucketid(f2, RB, SM)].treestiny[0];
const u32 slot1_v3_1 = PACKER::get_slot1(f2, RB, SM);
const u32 slot0_v3_1 = PACKER::get_slot0(f2, slot1_v3_1, RB, SM);
susp[64 + lane] = 0xffffffff;
susp[96 + lane] = 0xffffffff;
u32 f0 = buck_v3_1[slot1_v3_1].hash[1];
const slot* buck_v2_1 = eq->trees[0][PACKER::get_bucketid(f0, RB, SM)];
const u32 slot1_v2_1 = PACKER::get_slot1(f0, RB, SM);
const u32 slot0_v2_1 = PACKER::get_slot0(f0, slot1_v2_1, RB, SM);
susp[128 + lane] = 0xffffffff;
susp[160 + lane] = 0xffffffff;
u32 f3 = buck_v2_1[slot1_v2_1].hash[6];
const slot* buck_fin_1 = eq->round0trees[packer_default::get_bucketid(f3, 8, RB8_NSLOTS)];
const u32 slot1_fin_1 = packer_default::get_slot1(f3, 8, RB8_NSLOTS);
const u32 slot0_fin_1 = packer_default::get_slot0(f3, slot1_fin_1, 8, RB8_NSLOTS);
susp[192 + lane] = 0xffffffff;
susp[224 + lane] = 0xffffffff;
ind[0] = buck_fin_1[slot1_fin_1].hash[7];
if (CHECK_DUP(0)) continue;
ind[1] = buck_fin_1[slot0_fin_1].hash[7];
if (CHECK_DUP(1)) continue;
u32 f4 = buck_v2_1[slot0_v2_1].hash[6];
const slot* buck_fin_2 = eq->round0trees[packer_default::get_bucketid(f4, 8, RB8_NSLOTS)];
const u32 slot1_fin_2 = packer_default::get_slot1(f4, 8, RB8_NSLOTS);
const u32 slot0_fin_2 = packer_default::get_slot0(f4, slot1_fin_2, 8, RB8_NSLOTS);
ind[2] = buck_fin_2[slot1_fin_2].hash[7];
if (CHECK_DUP(2)) continue;
ind[3] = buck_fin_2[slot0_fin_2].hash[7];
if (CHECK_DUP(3)) continue;
u32 f5 = buck_v3_1[slot0_v3_1].hash[1];
const slot* buck_v2_2 = eq->trees[0][PACKER::get_bucketid(f5, RB, SM)];
const u32 slot1_v2_2 = PACKER::get_slot1(f5, RB, SM);
const u32 slot0_v2_2 = PACKER::get_slot0(f5, slot1_v2_2, RB, SM);
u32 f6 = buck_v2_2[slot1_v2_2].hash[6];
const slot* buck_fin_3 = eq->round0trees[packer_default::get_bucketid(f6, 8, RB8_NSLOTS)];
const u32 slot1_fin_3 = packer_default::get_slot1(f6, 8, RB8_NSLOTS);
const u32 slot0_fin_3 = packer_default::get_slot0(f6, slot1_fin_3, 8, RB8_NSLOTS);
ind[4] = buck_fin_3[slot1_fin_3].hash[7];
if (CHECK_DUP(4)) continue;
ind[5] = buck_fin_3[slot0_fin_3].hash[7];
if (CHECK_DUP(5)) continue;
u32 f7 = buck_v2_2[slot0_v2_2].hash[6];
const slot* buck_fin_4 = eq->round0trees[packer_default::get_bucketid(f7, 8, RB8_NSLOTS)];
const u32 slot1_fin_4 = packer_default::get_slot1(f7, 8, RB8_NSLOTS);
const u32 slot0_fin_4 = packer_default::get_slot0(f7, slot1_fin_4, 8, RB8_NSLOTS);
ind[6] = buck_fin_4[slot1_fin_4].hash[7];
if (CHECK_DUP(6)) continue;
ind[7] = buck_fin_4[slot0_fin_4].hash[7];
if (CHECK_DUP(7)) continue;
u32 f8 = buck_v4[slot0_v4].hash[1];
const slottiny* buck_v3_2 = &eq->round2trees[PACKER::get_bucketid(f8, RB, SM)].treestiny[0];
const u32 slot1_v3_2 = PACKER::get_slot1(f8, RB, SM);
const u32 slot0_v3_2 = PACKER::get_slot0(f8, slot1_v3_2, RB, SM);
u32 f9 = buck_v3_2[slot1_v3_2].hash[1];
const slot* buck_v2_3 = eq->trees[0][PACKER::get_bucketid(f9, RB, SM)];
const u32 slot1_v2_3 = PACKER::get_slot1(f9, RB, SM);
const u32 slot0_v2_3 = PACKER::get_slot0(f9, slot1_v2_3, RB, SM);
u32 f10 = buck_v2_3[slot1_v2_3].hash[6];
const slot* buck_fin_5 = eq->round0trees[packer_default::get_bucketid(f10, 8, RB8_NSLOTS)];
const u32 slot1_fin_5 = packer_default::get_slot1(f10, 8, RB8_NSLOTS);
const u32 slot0_fin_5 = packer_default::get_slot0(f10, slot1_fin_5, 8, RB8_NSLOTS);
ind[8] = buck_fin_5[slot1_fin_5].hash[7];
if (CHECK_DUP(8)) continue;
ind[9] = buck_fin_5[slot0_fin_5].hash[7];
if (CHECK_DUP(9)) continue;
u32 f11 = buck_v2_3[slot0_v2_3].hash[6];
const slot* buck_fin_6 = eq->round0trees[packer_default::get_bucketid(f11, 8, RB8_NSLOTS)];
const u32 slot1_fin_6 = packer_default::get_slot1(f11, 8, RB8_NSLOTS);
const u32 slot0_fin_6 = packer_default::get_slot0(f11, slot1_fin_6, 8, RB8_NSLOTS);
ind[10] = buck_fin_6[slot1_fin_6].hash[7];
if (CHECK_DUP(10)) continue;
ind[11] = buck_fin_6[slot0_fin_6].hash[7];
if (CHECK_DUP(11)) continue;
u32 f12 = buck_v3_2[slot0_v3_2].hash[1];
const slot* buck_v2_4 = eq->trees[0][PACKER::get_bucketid(f12, RB, SM)];
const u32 slot1_v2_4 = PACKER::get_slot1(f12, RB, SM);
const u32 slot0_v2_4 = PACKER::get_slot0(f12, slot1_v2_4, RB, SM);
u32 f13 = buck_v2_4[slot1_v2_4].hash[6];
const slot* buck_fin_7 = eq->round0trees[packer_default::get_bucketid(f13, 8, RB8_NSLOTS)];
const u32 slot1_fin_7 = packer_default::get_slot1(f13, 8, RB8_NSLOTS);
const u32 slot0_fin_7 = packer_default::get_slot0(f13, slot1_fin_7, 8, RB8_NSLOTS);
ind[12] = buck_fin_7[slot1_fin_7].hash[7];
if (CHECK_DUP(12)) continue;
ind[13] = buck_fin_7[slot0_fin_7].hash[7];
if (CHECK_DUP(13)) continue;
u32 f14 = buck_v2_4[slot0_v2_4].hash[6];
const slot* buck_fin_8 = eq->round0trees[packer_default::get_bucketid(f14, 8, RB8_NSLOTS)];
const u32 slot1_fin_8 = packer_default::get_slot1(f14, 8, RB8_NSLOTS);
const u32 slot0_fin_8 = packer_default::get_slot0(f14, slot1_fin_8, 8, RB8_NSLOTS);
ind[14] = buck_fin_8[slot1_fin_8].hash[7];
if (CHECK_DUP(14)) continue;
ind[15] = buck_fin_8[slot0_fin_8].hash[7];
if (CHECK_DUP(15)) continue;
u32 soli;
if (lane == 0)
{
soli = atomicAdd(&eq->edata.srealcont.nsols, 1);
}
soli = __shfl(soli, 0);
if (soli < MAXREALSOLS)
{
u32 pos = lane << 4;
*(uint4*)(&eq->edata.srealcont.sols[soli][pos]) = *(uint4*)(&ind[0]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 4]) = *(uint4*)(&ind[4]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 8]) = *(uint4*)(&ind[8]);
*(uint4*)(&eq->edata.srealcont.sols[soli][pos + 12]) = *(uint4*)(&ind[12]);
}
}
}
std::mutex dev_init;
int dev_init_done[8] = { 0 };
__host__ int compu32(const void *pa, const void *pb)
{
uint32_t a = *(uint32_t *)pa, b = *(uint32_t *)pb;
return a<b ? -1 : a == b ? 0 : +1;
}
__host__ bool duped(uint32_t* prf)
{
uint32_t sortprf[512];
memcpy(sortprf, prf, sizeof(uint32_t) * 512);
qsort(sortprf, 512, sizeof(uint32_t), &compu32);
for (uint32_t i = 1; i<512; i++)
if (sortprf[i] <= sortprf[i - 1])
return true;
return false;
}
__host__ void sort_pair(uint32_t *a, uint32_t len)
{
uint32_t *b = a + len;
uint32_t tmp, need_sorting = 0;
for (uint32_t i = 0; i < len; i++)
if (need_sorting || a[i] > b[i])
{
need_sorting = 1;
tmp = a[i];
a[i] = b[i];
b[i] = tmp;
}
else if (a[i] < b[i])
return;
}
__host__ void setheader(blake2b_state *ctx, const char *header, const u32 headerLen, const char* nce, const u32 nonceLen)
{
uint32_t le_N = WN;
uint32_t le_K = WK;
uchar personal[] = "BitcoinZ01230123";
memcpy(personal + 8, &le_N, 4);
memcpy(personal + 12, &le_K, 4);
blake2b_param P[1];
P->digest_length = HASHOUT;
P->key_length = 0;
P->fanout = 1;
P->depth = 1;
P->leaf_length = 0;
P->node_offset = 0;
P->node_depth = 0;
P->inner_length = 0;
memset(P->reserved, 0, sizeof(P->reserved));
memset(P->salt, 0, sizeof(P->salt));
memcpy(P->personal, (const uint8_t *)personal, 16);
blake2b_init_param(ctx, P);
blake2b_update(ctx, (const uchar *)header, headerLen);
blake2b_update(ctx, (const uchar *)nce, nonceLen);
}
#ifdef WIN32
typedef CUresult(CUDAAPI *dec_cuDeviceGet)(CUdevice*, int);
typedef CUresult(CUDAAPI *dec_cuCtxCreate)(CUcontext*, unsigned int, CUdevice);
typedef CUresult(CUDAAPI *dec_cuCtxPushCurrent)(CUcontext);
typedef CUresult(CUDAAPI *dec_cuCtxDestroy)(CUcontext);
dec_cuDeviceGet _cuDeviceGet = nullptr;
dec_cuCtxCreate _cuCtxCreate = nullptr;
dec_cuCtxPushCurrent _cuCtxPushCurrent = nullptr;
dec_cuCtxDestroy _cuCtxDestroy = nullptr;
#endif
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::eq_cuda_context(int id)
: device_id(id)
{
solutions = nullptr;
dev_init.lock();
if (!dev_init_done[device_id])
{
// only first thread shall init device
checkCudaErrors(cudaSetDevice(device_id));
checkCudaErrors(cudaDeviceReset());
checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync));
pctx = nullptr;
}
else
{
// create new context
CUdevice dev;
#ifdef WIN32
if (_cuDeviceGet == nullptr)
{
HMODULE hmod = LoadLibraryA("nvcuda.dll");
if (hmod == NULL)
throw std::runtime_error("Failed to load nvcuda.dll");
_cuDeviceGet = (dec_cuDeviceGet)GetProcAddress(hmod, "cuDeviceGet");
if (_cuDeviceGet == nullptr)
throw std::runtime_error("Failed to get cuDeviceGet address");
_cuCtxCreate = (dec_cuCtxCreate)GetProcAddress(hmod, "cuCtxCreate_v2");
if (_cuCtxCreate == nullptr)
throw std::runtime_error("Failed to get cuCtxCreate address");
_cuCtxPushCurrent = (dec_cuCtxPushCurrent)GetProcAddress(hmod, "cuCtxPushCurrent_v2");
if (_cuCtxPushCurrent == nullptr)
throw std::runtime_error("Failed to get cuCtxPushCurrent address");
_cuCtxDestroy = (dec_cuCtxDestroy)GetProcAddress(hmod, "cuCtxDestroy_v2");
if (_cuCtxDestroy == nullptr)
throw std::runtime_error("Failed to get cuCtxDestroy address");
}
checkCudaDriverErrors(_cuDeviceGet(&dev, device_id));
checkCudaDriverErrors(_cuCtxCreate(&pctx, CU_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(_cuCtxPushCurrent(pctx));
#else
checkCudaDriverErrors(cuDeviceGet(&dev, device_id));
checkCudaDriverErrors(cuCtxCreate(&pctx, CU_CTX_SCHED_BLOCKING_SYNC, dev));
checkCudaDriverErrors(cuCtxPushCurrent(pctx));
#endif
}
++dev_init_done[device_id];
dev_init.unlock();
if (cudaMalloc((void**)&device_eq, sizeof(equi<RB, SM>)) != cudaSuccess)
throw std::runtime_error("CUDA: failed to alloc memory");
solutions = (scontainerreal*)malloc(sizeof(scontainerreal));
}
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ void eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::solve(const char *tequihash_header,
unsigned int tequihash_header_len,
const char* nonce,
unsigned int nonce_len,
std::function<bool()> cancelf,
std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf,
std::function<void(void)> hashdonef)
{
blake2b_state blake_ctx;
int blocks = NBUCKETS;
setheader(&blake_ctx, tequihash_header, tequihash_header_len, nonce, nonce_len);
	// todo: improve
	// the djezo solver allows the last 4 bytes of the nonce to be iterated
	// this can be used to create an internal loop - compute the initial blake hash only once,
	// then load the 8*8 bytes of blake state h on the device once and just iterate nn++
	// less CPU load and one cudaMemcpy fewer -> faster
	// a sketch of that loop is given in the comment below
	//u32 nn = *(u32*)&nonce[28];
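	// Minimal sketch of that internal loop (hypothetical, not wired up here; 'iterations' is an
	// assumed host-side count). The blake state is uploaded once and only the 4-byte counter
	// changes per iteration:
	//
	//   checkCudaErrors(cudaMemcpy(&device_eq->blake_h, &blake_ctx.h, sizeof(u64) * 8, cudaMemcpyHostToDevice));
	//   for (u32 nn2 = 0; nn2 < iterations && !cancelf(); nn2++)
	//   {
	//       checkCudaErrors(cudaMemset(&device_eq->edata, 0, sizeof(device_eq->edata)));
	//       digit_first<RB, SM, PACKER> << <NBLOCKS / FD_THREADS, FD_THREADS >> >(device_eq, nn2);
	//       // ... run digit_1 .. digit_last_wdc and copy out solutions, exactly as below ...
	//   }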
u32 nn = 0;
checkCudaErrors(cudaMemcpy(&device_eq->blake_h, &blake_ctx.h, sizeof(u64) * 8, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemset(&device_eq->edata, 0, sizeof(device_eq->edata)));
digit_first<RB, SM, PACKER> << <NBLOCKS / FD_THREADS, FD_THREADS >> >(device_eq, nn);
digit_1<RB, SM, SSM, PACKER, 4 * NRESTS, 512> << <4096, 512 >> >(device_eq);
digit_2<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
digit_3<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
if (cancelf()) return;
digit_4<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
digit_5<RB, SM, SSM, PACKER, 4 * NRESTS, THREADS> << <blocks, THREADS >> >(device_eq);
digit_6<RB, SM, SSM - 1, PACKER, 3 * NRESTS> << <blocks, NRESTS >> >(device_eq);
digit_7<RB, SM, SSM - 1, PACKER, 3 * NRESTS> << <blocks, NRESTS >> >(device_eq);
digit_8<RB, SM, SSM - 1, PACKER, 3 * NRESTS> << <blocks, NRESTS >> >(device_eq);
digit_last_wdc<RB, SM, SSM - 3, 2, PACKER, 64, 8, 4> << <4096, 256 / 2 >> >(device_eq);
checkCudaErrors(cudaMemcpy(solutions, &device_eq->edata.srealcont, (MAXREALSOLS * (512 * 4)) + 4, cudaMemcpyDeviceToHost));
//printf("nsols: %u\n", solutions->nsols);
//if (solutions->nsols > 9)
// printf("missing sol, total: %u\n", solutions->nsols);
for (u32 s = 0; (s < solutions->nsols) && (s < MAXREALSOLS); s++)
{
// remove dups on CPU (dup removal on GPU is not fully exact and can pass on some invalid solutions)
if (duped(solutions->sols[s])) continue;
// perform sort of pairs
for (uint32_t level = 0; level < 9; level++)
for (uint32_t i = 0; i < (1 << 9); i += (2 << level))
sort_pair(&solutions->sols[s][i], 1 << level);
std::vector<uint32_t> index_vector(PROOFSIZE);
for (u32 i = 0; i < PROOFSIZE; i++) {
index_vector[i] = solutions->sols[s][i];
}
solutionf(index_vector, DIGITBITS, nullptr);
}
hashdonef();
}
template <u32 RB, u32 SM, u32 SSM, u32 THREADS, typename PACKER>
__host__ eq_cuda_context<RB, SM, SSM, THREADS, PACKER>::~eq_cuda_context()
{
if (solutions)
free(solutions);
cudaFree(device_eq);
if (pctx)
{
// non primary thread, destroy context
#ifdef WIN32
checkCudaDriverErrors(_cuCtxDestroy(pctx));
#else
checkCudaDriverErrors(cuCtxDestroy(pctx));
#endif
}
else
{
checkCudaErrors(cudaDeviceReset());
dev_init_done[device_id] = 0;
}
}
#ifdef CONFIG_MODE_1
template class eq_cuda_context<CONFIG_MODE_1>;
#endif
#ifdef CONFIG_MODE_2
template class eq_cuda_context<CONFIG_MODE_2>;
#endif
#ifdef CONFIG_MODE_3
template class eq_cuda_context<CONFIG_MODE_3>;
#endif
|
c4aeab4f81105f33b8bb399e4b3b3c6af2ebb130.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sumArraysZeroCopy(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
} | c4aeab4f81105f33b8bb399e4b3b3c6af2ebb130.cu | #include "includes.h"
__global__ void sumArraysZeroCopy(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
} |
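/*
A minimal host-side usage sketch for the zero-copy kernel above (an illustrative assumption,
not part of the original file): the arrays live in mapped, page-locked host memory and the
kernel reads them through device pointers obtained with cudaHostGetDevicePointer, so no
explicit cudaMemcpy of the data is needed.

	int n = 1 << 20;
	size_t bytes = n * sizeof(float);
	float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
	cudaSetDeviceFlags(cudaDeviceMapHost);                    // must precede context creation
	cudaHostAlloc((void**)&h_A, bytes, cudaHostAllocMapped);
	cudaHostAlloc((void**)&h_B, bytes, cudaHostAllocMapped);
	cudaHostAlloc((void**)&h_C, bytes, cudaHostAllocMapped);
	cudaHostGetDevicePointer((void**)&d_A, h_A, 0);
	cudaHostGetDevicePointer((void**)&d_B, h_B, 0);
	cudaHostGetDevicePointer((void**)&d_C, h_C, 0);
	sumArraysZeroCopy<<<(n + 255) / 256, 256>>>(d_A, d_B, d_C, n);
	cudaDeviceSynchronize();
*/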
2773f664feb76a317815b9649c50a1ecf5f3ff19.hip | // !!! This is a file automatically generated by hipify!!!
#include "Particles.h"
#include "Alloc.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define TPB 64
/** allocate particle arrays */
void particle_allocate(struct parameters* param, struct particles* part, int is)
{
// set species ID
part->species_ID = is;
// number of particles
part->nop = param->np[is];
// maximum number of particles
part->npmax = param->npMax[is];
// choose a different number of mover iterations for ions and electrons
if (param->qom[is] < 0){ //electrons
part->NiterMover = param->NiterMover;
part->n_sub_cycles = param->n_sub_cycles;
} else { // ions: only one iteration
part->NiterMover = 1;
part->n_sub_cycles = 1;
}
// particles per cell
part->npcelx = param->npcelx[is];
part->npcely = param->npcely[is];
part->npcelz = param->npcelz[is];
part->npcel = part->npcelx*part->npcely*part->npcelz;
// cast it to required precision
part->qom = (FPpart) param->qom[is];
long npmax = part->npmax;
// initialize drift and thermal velocities
// drift
part->u0 = (FPpart) param->u0[is];
part->v0 = (FPpart) param->v0[is];
part->w0 = (FPpart) param->w0[is];
// thermal
part->uth = (FPpart) param->uth[is];
part->vth = (FPpart) param->vth[is];
part->wth = (FPpart) param->wth[is];
//////////////////////////////
/// ALLOCATION PARTICLE ARRAYS
//////////////////////////////
part->x = new FPpart[npmax];
part->y = new FPpart[npmax];
part->z = new FPpart[npmax];
// allocate velocity
part->u = new FPpart[npmax];
part->v = new FPpart[npmax];
part->w = new FPpart[npmax];
// allocate charge = q * statistical weight
part->q = new FPinterp[npmax];
}
/** deallocate */
void particle_deallocate(struct particles* part)
{
// deallocate particle variables
delete[] part->x;
delete[] part->y;
delete[] part->z;
delete[] part->u;
delete[] part->v;
delete[] part->w;
delete[] part->q;
}
/** particle mover */
int mover_PC(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "*** MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
FPpart omdtsq, denom, ut, vt, wt, udotb;
// local (to the particle) electric and magnetic field
FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
// interpolation densities
int ix,iy,iz;
FPfield weight[2][2][2];
FPfield xi[2], eta[2], zeta[2];
// intermediate particle position and velocity
FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// move each particle with new fields
for (int i=0; i < part->nop; i++){
xptilde = part->x[i];
yptilde = part->y[i];
zptilde = part->z[i];
// calculate the average velocity iteratively
for(int innter=0; innter < part->NiterMover; innter++){
// interpolation G-->P
ix = 2 + int((part->x[i] - grd->xStart)*grd->invdx);
iy = 2 + int((part->y[i] - grd->yStart)*grd->invdy);
iz = 2 + int((part->z[i] - grd->zStart)*grd->invdz);
// calculate weights
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
// set to zero local electric and magnetic field
Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++){
Exl += weight[ii][jj][kk]*field->Ex[ix- ii][iy -jj][iz- kk];
Eyl += weight[ii][jj][kk]*field->Ey[ix- ii][iy -jj][iz- kk];
Ezl += weight[ii][jj][kk]*field->Ez[ix- ii][iy -jj][iz -kk];
Bxl += weight[ii][jj][kk]*field->Bxn[ix- ii][iy -jj][iz -kk];
Byl += weight[ii][jj][kk]*field->Byn[ix- ii][iy -jj][iz -kk];
Bzl += weight[ii][jj][kk]*field->Bzn[ix- ii][iy -jj][iz -kk];
}
// end interpolation
omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
denom = 1.0/(1.0 + omdtsq);
// solve the position equation
ut= part->u[i] + qomdt2*Exl;
vt= part->v[i] + qomdt2*Eyl;
wt= part->w[i] + qomdt2*Ezl;
udotb = ut*Bxl + vt*Byl + wt*Bzl;
// solve the velocity equation
uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
// update position
part->x[i] = xptilde + uptilde*dto2;
part->y[i] = yptilde + vptilde*dto2;
part->z[i] = zptilde + wptilde*dto2;
} // end of iteration
// update the final position and velocity
part->u[i]= 2.0*uptilde - part->u[i];
part->v[i]= 2.0*vptilde - part->v[i];
part->w[i]= 2.0*wptilde - part->w[i];
part->x[i] = xptilde + uptilde*dt_sub_cycling;
part->y[i] = yptilde + vptilde*dt_sub_cycling;
part->z[i] = zptilde + wptilde*dt_sub_cycling;
//////////
//////////
////////// BC
// X-DIRECTION: BC particles
if (part->x[i] > grd->Lx){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] - grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = 2*grd->Lx - part->x[i];
}
}
if (part->x[i] < 0){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] + grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = -part->x[i];
}
}
// Y-DIRECTION: BC particles
if (part->y[i] > grd->Ly){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] - grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = 2*grd->Ly - part->y[i];
}
}
if (part->y[i] < 0){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] + grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = -part->y[i];
}
}
// Z-DIRECTION: BC particles
if (part->z[i] > grd->Lz){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] - grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = 2*grd->Lz - part->z[i];
}
}
if (part->z[i] < 0){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] + grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = -part->z[i];
}
}
        } // end of one particle
    } // end of subcycling
    return(0); // exit successfully
} // end of the mover
/** Interpolation Particle --> Grid: This is for species */
void interpP2G(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
// arrays needed for interpolation
FPpart weight[2][2][2];
FPpart temp[2][2][2];
FPpart xi[2], eta[2], zeta[2];
// index of the cell
int ix, iy, iz;
for (register long long i = 0; i < part->nop; i++) {
// determine cell: can we change to int()? is it faster?
ix = 2 + int (floor((part->x[i] - grd->xStart) * grd->invdx));
iy = 2 + int (floor((part->y[i] - grd->yStart) * grd->invdy));
iz = 2 + int (floor((part->z[i] - grd->zStart) * grd->invdz));
// distances from node
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
// calculate the weights for different nodes
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = part->q[i] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
//////////////////////////
// add charge density
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->rhon[ix - ii][iy - jj][iz - kk] += weight[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add current density - Jx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->Jx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add current density - Jy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->Jy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add current density - Jz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->Jz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add pressure pxx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->u[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pxx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add pressure pxy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->v[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pxy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pxz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->w[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pxz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pyy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->v[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pyy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pyz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->w[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pyz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pzz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * part->w[i] * weight[ii][jj][kk];
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++)
ids->pzz[ix -ii][iy -jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
}
}
/** particle kernel */
__global__ void single_particle_kernel(FPpart* x, FPpart* y, FPpart* z, FPpart* u, FPpart* v, FPpart* w, FPinterp* q, FPfield* XN_flat, FPfield* YN_flat, FPfield* ZN_flat, int nxn, int nyn, int nzn, double xStart, double yStart, double zStart, FPfield invdx, FPfield invdy, FPfield invdz, double Lx, double Ly, double Lz, FPfield invVOL, FPfield* Ex_flat, FPfield* Ey_flat, FPfield* Ez_flat, FPfield* Bxn_flat, FPfield* Byn_flat, FPfield* Bzn_flat, bool PERIODICX, bool PERIODICY, bool PERIODICZ, FPpart dt_sub_cycling, FPpart dto2, FPpart qomdt2, int NiterMover, int npmax)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int flat_idx = 0;
    if(idx >= npmax) // indices run 0..npmax-1, so use >= to avoid an out-of-bounds access
{
return;
}
FPpart omdtsq, denom, ut, vt, wt, udotb;
// local (to the particle) electric and magnetic field
FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
// interpolation densities
int ix,iy,iz;
FPfield weight[2][2][2];
FPfield xi[2], eta[2], zeta[2];
// intermediate particle position and velocity
FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
xptilde = x[idx];
yptilde = y[idx];
zptilde = z[idx];
// calculate the average velocity iteratively
for(int innter=0; innter < NiterMover; innter++){
// interpolation G-->P
ix = 2 + int((x[idx] - xStart)*invdx);
iy = 2 + int((y[idx] - yStart)*invdy);
iz = 2 + int((z[idx] - zStart)*invdz);
// calculate weights
flat_idx = get_idx(ix-1, iy, iz, nyn, nzn);
xi[0] = x[idx] - XN_flat[flat_idx];
flat_idx = get_idx(ix, iy-1, iz, nyn, nzn);
eta[0] = y[idx] - YN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz-1, nyn, nzn);
zeta[0] = z[idx] - ZN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz, nyn, nzn);
xi[1] = XN_flat[flat_idx] - x[idx];
eta[1] = YN_flat[flat_idx] - y[idx];
zeta[1] = ZN_flat[flat_idx] - z[idx];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * invVOL;
// set to zero local electric and magnetic field
Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++){
flat_idx = get_idx(ix-ii, iy-jj, iz-kk, nyn, nzn);
Exl += weight[ii][jj][kk]*Ex_flat[flat_idx];
Eyl += weight[ii][jj][kk]*Ey_flat[flat_idx];
Ezl += weight[ii][jj][kk]*Ez_flat[flat_idx];
Bxl += weight[ii][jj][kk]*Bxn_flat[flat_idx];
Byl += weight[ii][jj][kk]*Byn_flat[flat_idx];
Bzl += weight[ii][jj][kk]*Bzn_flat[flat_idx];
} // end interpolation
omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
denom = 1.0/(1.0 + omdtsq);
// solve the position equation
ut= u[idx] + qomdt2*Exl;
vt= v[idx] + qomdt2*Eyl;
wt= w[idx] + qomdt2*Ezl;
udotb = ut*Bxl + vt*Byl + wt*Bzl;
// solve the velocity equation
uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
// update position
x[idx] = xptilde + uptilde*dto2;
y[idx] = yptilde + vptilde*dto2;
z[idx] = zptilde + wptilde*dto2;
} // end of iteration
// update the final position and velocity
u[idx]= 2.0*uptilde - u[idx];
v[idx]= 2.0*vptilde - v[idx];
w[idx]= 2.0*wptilde - w[idx];
x[idx] = xptilde + uptilde*dt_sub_cycling;
y[idx] = yptilde + vptilde*dt_sub_cycling;
z[idx] = zptilde + wptilde*dt_sub_cycling;
//////////
//////////
////////// BC
// X-DIRECTION: BC particles
if (x[idx] > Lx){
if (PERIODICX==true){ // PERIODIC
x[idx] = x[idx] - Lx;
} else { // REFLECTING BC
u[idx] = -u[idx];
x[idx] = 2*Lx - x[idx];
}
}
if (x[idx] < 0){
if (PERIODICX==true){ // PERIODIC
x[idx] = x[idx] + Lx;
} else { // REFLECTING BC
u[idx] = -u[idx];
x[idx] = -x[idx];
}
}
// Y-DIRECTION: BC particles
if (y[idx] > Ly){
if (PERIODICY==true){ // PERIODIC
y[idx] = y[idx] - Ly;
} else { // REFLECTING BC
v[idx] = -v[idx];
y[idx] = 2*Ly - y[idx];
}
}
if (y[idx] < 0){
if (PERIODICY==true){ // PERIODIC
y[idx] = y[idx] + Ly;
} else { // REFLECTING BC
v[idx] = -v[idx];
y[idx] = -y[idx];
}
}
// Z-DIRECTION: BC particles
if (z[idx] > Lz){
if (PERIODICZ==true){ // PERIODIC
z[idx] = z[idx] - Lz;
} else { // REFLECTING BC
w[idx] = -w[idx];
z[idx] = 2*Lz - z[idx];
}
}
if (z[idx] < 0){
if (PERIODICZ==true){ // PERIODIC
z[idx] = z[idx] + Lz;
} else { // REFLECTING BC
w[idx] = -w[idx];
z[idx] = -z[idx];
}
}
}
/** particle mover for GPU*/
int mover_GPU_basic(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "***GPU MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
// allocate memory for variables on device
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp *q_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL, *Ex_flat_dev = NULL, *Ey_flat_dev = NULL, *Ez_flat_dev = NULL, *Bxn_flat_dev = NULL, *Byn_flat_dev = NULL, *Bzn_flat_dev = NULL;
hipMalloc(&x_dev, part->npmax * sizeof(FPpart));
hipMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&y_dev, part->npmax * sizeof(FPpart));
hipMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&z_dev, part->npmax * sizeof(FPpart));
hipMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&u_dev, part->npmax * sizeof(FPpart));
hipMemcpy(u_dev, part->u, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&v_dev, part->npmax * sizeof(FPpart));
hipMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&w_dev, part->npmax * sizeof(FPpart));
hipMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&q_dev, part->npmax * sizeof(FPinterp));
hipMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), hipMemcpyHostToDevice);
hipMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Ex_flat_dev, field->Ex_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Ey_flat_dev, field->Ey_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Ez_flat_dev, field->Ez_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Bxn_flat_dev, field->Bxn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Byn_flat_dev, field->Byn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Bzn_flat_dev, field->Bzn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
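// At this point all particle arrays (npmax elements each) and the flattened
// node-centred grid/field arrays (nxn*nyn*nzn elements each) are resident on the
// device; they are copied once and reused by every sub-cycle below.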
std::cout<<"Before loop"<<std::endl;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// Call GPU kernel
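// Launch configuration: ceil(npmax/TPB) blocks of TPB threads, one thread per
// particle slot; part->nop is passed as the bound, so any threads beyond the
// number of active particles return at the guard inside the kernel.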
hipLaunchKernelGGL(( single_particle_kernel), dim3((part->npmax + TPB - 1)/TPB), dim3(TPB), 0, 0, x_dev, y_dev, z_dev,u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->Lx, grd->Ly, grd->Lz, grd->invVOL, Ex_flat_dev, Ey_flat_dev, Ez_flat_dev, Bxn_flat_dev, Byn_flat_dev, Bzn_flat_dev, param->PERIODICX, param->PERIODICY, param->PERIODICZ, dt_sub_cycling, dto2, qomdt2, part->NiterMover, part->nop);
hipDeviceSynchronize();
} // end of subcycling
// copy memory back to CPU (only the parts that have been modified inside the kernel)
hipMemcpy(part->x, x_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->y, y_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->z, z_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->u, u_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->v, v_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->w, w_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(field->Ex_flat, Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Ey_flat, Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Ez_flat, Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Bxn_flat, Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Byn_flat, Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Bzn_flat, Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
// clean up
hipFree(x_dev);
hipFree(y_dev);
hipFree(z_dev);
hipFree(u_dev);
hipFree(v_dev);
hipFree(w_dev);
hipFree(q_dev);
hipFree(XN_flat_dev);
hipFree(YN_flat_dev);
hipFree(ZN_flat_dev);
hipFree(Ex_flat_dev);
hipFree(Ey_flat_dev);
hipFree(Ez_flat_dev);
hipFree(Bxn_flat_dev);
hipFree(Byn_flat_dev);
hipFree(Bzn_flat_dev);
return(0);
}
__global__ void interP2G_kernel( FPpart* x, FPpart* y, FPpart* z, FPpart* u, FPpart* v, FPpart* w, FPinterp* q, FPfield* XN_flat, FPfield* YN_flat, FPfield* ZN_flat, int nxn, int nyn, int nzn, double xStart, double yStart, double zStart, FPfield invdx, FPfield invdy, FPfield invdz, FPfield invVOL, FPinterp* Jx_flat, FPinterp* Jy_flat, FPinterp *Jz_flat, FPinterp *rhon_flat, FPinterp* pxx_flat, FPinterp* pxy_flat, FPinterp* pxz_flat, FPinterp* pyy_flat, FPinterp* pyz_flat, FPinterp* pzz_flat, int npmax)
{
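// One thread per particle: each particle deposits its charge, current and pressure
// moments onto the 8 surrounding grid nodes. atomicAdd is required because
// particles handled by different threads share nodes and would otherwise race on
// the same entries of the flat moment arrays.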
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= npmax)
{
return;
}
// arrays needed for interpolation
FPpart weight[2][2][2];
FPpart temp[2][2][2];
FPpart xi[2], eta[2], zeta[2];
// 3-D index of the cell
int ix, iy, iz, flat_idx;
ix = 2 + int (floor((x[idx] - xStart) * invdx));
iy = 2 + int (floor((y[idx] - yStart) * invdy));
iz = 2 + int (floor((z[idx] - zStart) * invdz));
// distances from node
flat_idx = get_idx(ix-1, iy, iz, nyn, nzn);
xi[0] = x[idx] - XN_flat[flat_idx];
flat_idx = get_idx(ix, iy-1, iz, nyn, nzn);
eta[0] = y[idx] - YN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz-1, nyn, nzn);
zeta[0] = z[idx] - ZN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz, nyn, nzn);
xi[1] = XN_flat[flat_idx] - x[idx];
eta[1] = YN_flat[flat_idx] - y[idx];
zeta[1] = ZN_flat[flat_idx] - z[idx];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
// calculate the weights for different nodes
weight[ii][jj][kk] = q[idx] * xi[ii] * eta[jj] * zeta[kk] * invVOL;
//////////////////////////
// add charge density
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&rhon_flat[flat_idx], weight[ii][jj][kk] * invVOL);
}
////////////////////////////
// add current density - Jx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&Jx_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add current density - Jy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = v[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&Jy_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add current density - Jz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = w[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&Jz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add pressure pxx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * u[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pxx_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add pressure pxy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * v[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pxy_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pxz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * w[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pxz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pyy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = v[idx] * v[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pyy_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pyz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = v[idx] * w[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pyz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pzz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = w[idx] * w[idx] * weight[ii][jj][kk];
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pzz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
}
void interpP2G_GPU_basic(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
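// Host wrapper for the deposition kernel: stage the particles, the node coordinates
// and the current moment arrays on the device, run one kernel over all particles,
// then copy the accumulated moments back into the interpDensSpecies buffers.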
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp * q_dev = NULL, *Jx_flat_dev = NULL, *Jy_flat_dev = NULL, *Jz_flat_dev = NULL, *rhon_flat_dev = NULL, *pxx_flat_dev = NULL, *pxy_flat_dev = NULL, *pxz_flat_dev = NULL, *pyy_flat_dev = NULL, *pyz_flat_dev = NULL, *pzz_flat_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL;
hipMalloc(&x_dev, part->npmax * sizeof(FPpart));
hipMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&y_dev, part->npmax * sizeof(FPpart));
hipMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&z_dev, part->npmax * sizeof(FPpart));
hipMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&u_dev, part->npmax * sizeof(FPpart));
hipMemcpy(u_dev, part->u, part->npmax* sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&v_dev, part->npmax * sizeof(FPpart));
hipMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&w_dev, part->npmax * sizeof(FPpart));
hipMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&q_dev, part->npmax * sizeof(FPinterp));
hipMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), hipMemcpyHostToDevice);
hipMalloc(&Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(Jx_flat_dev, ids->Jx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(Jy_flat_dev, ids->Jy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(Jz_flat_dev, ids->Jz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(rhon_flat_dev, ids->rhon_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pxx_flat_dev, ids->pxx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pxy_flat_dev, ids->pxy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pxz_flat_dev, ids->pxz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pyy_flat_dev, ids->pyy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pyz_flat_dev, ids->pyz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pzz_flat_dev, ids->pzz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( interP2G_kernel), dim3((part->npmax + TPB - 1)/TPB), dim3(TPB), 0, 0, x_dev, y_dev, z_dev, u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->invVOL, Jx_flat_dev, Jy_flat_dev, Jz_flat_dev, rhon_flat_dev, pxx_flat_dev , pxy_flat_dev, pxz_flat_dev, pyy_flat_dev, pyz_flat_dev, pzz_flat_dev, part->nop);
hipDeviceSynchronize();
// copy memory back to CPU (only the parts that have been modified inside the kernel)
hipMemcpy(ids->Jx_flat, Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->Jy_flat, Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->Jz_flat, Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->rhon_flat, rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pxx_flat, pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pxy_flat, pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pxz_flat, pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pyy_flat, pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pyz_flat, pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pzz_flat, pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
// clean up
hipFree(x_dev);
hipFree(y_dev);
hipFree(z_dev);
hipFree(u_dev);
hipFree(v_dev);
hipFree(w_dev);
hipFree(q_dev);
hipFree(XN_flat_dev);
hipFree(YN_flat_dev);
hipFree(ZN_flat_dev);
hipFree(Jx_flat_dev);
hipFree(Jy_flat_dev);
hipFree(Jz_flat_dev);
hipFree(rhon_flat_dev);
hipFree(pxx_flat_dev);
hipFree(pxy_flat_dev);
hipFree(pxz_flat_dev);
hipFree(pyy_flat_dev);
hipFree(pyz_flat_dev);
hipFree(pzz_flat_dev);
}
size_t queryFreeMemoryOnGPU(void)
{
size_t free_byte ;
size_t total_byte ;
hipError_t cuda_status = hipMemGetInfo( &free_byte, &total_byte ) ;
if ( hipSuccess != cuda_status ){
printf("Error: hipMemGetInfo fails, %s \n", hipGetErrorString(cuda_status) );
exit(1);
}
return free_byte; // amount of free memory on GPU in bytes
}
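// Minimal illustrative helper (not called anywhere in this file): one way the
// free-memory query above could be used to size particle batches for the
// commented-out batch mover below. The 0.5 safety factor and the per-particle
// footprint (six FPpart components plus one FPinterp charge) are assumptions made
// for this sketch, not values taken from the original code.
inline long particlesFittingOnGPU(long npmax)
{
size_t free_bytes = queryFreeMemoryOnGPU();
size_t bytes_per_particle = 6 * sizeof(FPpart) + sizeof(FPinterp); // x, y, z, u, v, w + q
long fit = (long) ((0.5 * free_bytes) / bytes_per_particle);
return fit < npmax ? fit : npmax;
}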
/** particle mover for GPU with batching -- left commented out: it still references split_index and MAX_GPU_PARTICILES, which are not defined in this file, and the per-batch buffers (d_x ... d_w) are allocated but not yet wired into the kernel launch
int mover_GPU_batch(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "***GPU MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
// allocate memory for variables on device
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp *q_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL, *Ex_flat_dev = NULL, *Ey_flat_dev = NULL, *Ez_flat_dev = NULL, *Bxn_flat_dev = NULL, *Byn_flat_dev, *Bzn_flat_dev = NULL;
size_t free_bytes = 0;
int batch_number = 0;
free_bytes = queryFreeMemoryOnGPU();
const long int to = split_index + MAX_GPU_PARTICILES - 1 < part->npmax - 1 ? split_index + MAX_GPU_PARTICILES - 1 : part->npmax - 1;
const int n_particles = to - split_index + 1;
size_t batch_size = (to - split_index + 1) * sizeof(FPpart);
hipMalloc(&x_dev, part->npmax * sizeof(FPpart));
hipMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&y_dev, part->npmax * sizeof(FPpart));
hipMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&z_dev, part->npmax * sizeof(FPpart));
hipMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&u_dev, part->npmax * sizeof(FPpart));
hipMemcpy(u_dev, part->u, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&v_dev, part->npmax * sizeof(FPpart));
hipMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&w_dev, part->npmax * sizeof(FPpart));
hipMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&q_dev, part->npmax * sizeof(FPinterp));
hipMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), hipMemcpyHostToDevice);
hipMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Ex_flat_dev, field->Ex_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Ey_flat_dev, field->Ey_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Ez_flat_dev, field->Ez_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Bxn_flat_dev, field->Bxn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Byn_flat_dev, field->Byn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(Bzn_flat_dev, field->Bzn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
do
{
const long int to = split_index + MAX_GPU_PARTICILES - 1 < part->npmax - 1 ? split_index + MAX_GPU_PARTICILES - 1 : part->npmax - 1;
const int n_particles = to - split_index + 1;
size_t batch_size = (to - split_index + 1) * sizeof(FPpart);
FPpart *d_x, *d_y, *d_z, *d_u, *d_v, *d_w;
hipMalloc(&d_x, batch_size);
hipMalloc(&d_y, batch_size);
hipMalloc(&d_z, batch_size);
hipMalloc(&d_u, batch_size);
hipMalloc(&d_v, batch_size);
hipMalloc(&d_w, batch_size);
//particles
hipMemcpy(d_x, part->x+split_index, batch_size, hipMemcpyHostToDevice);
hipMemcpy(d_y, part->y+split_index, batch_size, hipMemcpyHostToDevice);
hipMemcpy(d_z, part->z+split_index, batch_size, hipMemcpyHostToDevice);
hipMemcpy(d_u, part->u+split_index, batch_size, hipMemcpyHostToDevice);
hipMemcpy(d_v, part->v+split_index, batch_size, hipMemcpyHostToDevice);
hipMemcpy(d_w, part->w+split_index, batch_size, hipMemcpyHostToDevice);
std::cout<<"Before loop"<<std::endl;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// Call GPU kernel
hipLaunchKernelGGL(( single_particle_kernel), dim3((part->npmax + TPB - 1)/TPB), dim3(TPB), 0, 0, x_dev, y_dev, z_dev,u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->Lx, grd->Ly, grd->Lz, grd->invVOL, Ex_flat_dev, Ey_flat_dev, Ez_flat_dev, Bxn_flat_dev, Byn_flat_dev, Bzn_flat_dev, param->PERIODICX, param->PERIODICY, param->PERIODICZ, dt_sub_cycling, dto2, qomdt2, part->NiterMover, part->nop);
hipDeviceSynchronize();
} // end of one particle
// copy memory back to CPU (only the parts that have been modified inside the kernel)
hipMemcpy(part->x, x_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->y, y_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->z, z_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->u, u_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->v, v_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(part->w, w_dev, part->npmax * sizeof(FPpart), hipMemcpyDeviceToHost);
hipMemcpy(field->Ex_flat, Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Ey_flat, Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Ez_flat, Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Bxn_flat, Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Byn_flat, Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
hipMemcpy(field->Bzn_flat, Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyDeviceToHost);
// clean up
hipFree(x_dev);
hipFree(y_dev);
hipFree(z_dev);
hipFree(u_dev);
hipFree(v_dev);
hipFree(w_dev);
hipFree(XN_flat_dev);
hipFree(YN_flat_dev);
hipFree(ZN_flat_dev);
hipFree(Ex_flat_dev);
hipFree(Ey_flat_dev);
hipFree(Ez_flat_dev);
hipFree(Bxn_flat_dev);
hipFree(Byn_flat_dev);
hipFree(Bzn_flat_dev);
return(0);
}
void interpP2G_GPU_batch(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp * q_dev = NULL, *Jx_flat_dev = NULL, *Jy_flat_dev = NULL, *Jz_flat_dev = NULL, *rhon_flat_dev = NULL, *pxx_flat_dev = NULL, *pxy_flat_dev = NULL, *pxz_flat_dev = NULL, *pyy_flat_dev = NULL, *pyz_flat_dev = NULL, *pzz_flat_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL;
hipMalloc(&x_dev, part->npmax * sizeof(FPpart));
hipMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&y_dev, part->npmax * sizeof(FPpart));
hipMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&z_dev, part->npmax * sizeof(FPpart));
hipMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&u_dev, part->npmax * sizeof(FPpart));
hipMemcpy(u_dev, part->u, part->npmax* sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&v_dev, part->npmax * sizeof(FPpart));
hipMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&w_dev, part->npmax * sizeof(FPpart));
hipMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), hipMemcpyHostToDevice);
hipMalloc(&q_dev, part->npmax * sizeof(FPinterp));
hipMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), hipMemcpyHostToDevice);
hipMalloc(&Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(Jx_flat_dev, ids->Jx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(Jy_flat_dev, ids->Jy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(Jz_flat_dev, ids->Jz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(rhon_flat_dev, ids->rhon_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pxx_flat_dev, ids->pxx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pxy_flat_dev, ids->pxy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pxz_flat_dev, ids->pxz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pyy_flat_dev, ids->pyy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pyz_flat_dev, ids->pyz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
hipMemcpy(pzz_flat_dev, ids->pzz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
hipMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( interP2G_kernel), dim3((part->npmax + TPB - 1)/TPB), dim3(TPB), 0, 0, x_dev, y_dev, z_dev, u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->invVOL, Jx_flat_dev, Jy_flat_dev, Jz_flat_dev, rhon_flat_dev, pxx_flat_dev , pxy_flat_dev, pxz_flat_dev, pyy_flat_dev, pyz_flat_dev, pzz_flat_dev, part->nop);
hipDeviceSynchronize();
// copy memory back to CPU (only the parts that have been modified inside the kernel)
hipMemcpy(ids->Jx_flat, Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->Jy_flat, Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->Jz_flat, Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->rhon_flat, rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pxx_flat, pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pxy_flat, pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pxz_flat, pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pyy_flat, pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pyz_flat, pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
hipMemcpy(ids->pzz_flat, pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), hipMemcpyDeviceToHost);
// clean up
hipFree(x_dev);
hipFree(y_dev);
hipFree(z_dev);
hipFree(u_dev);
hipFree(v_dev);
hipFree(w_dev);
hipFree(q_dev);
hipFree(XN_flat_dev);
hipFree(YN_flat_dev);
hipFree(ZN_flat_dev);
hipFree(rhon_flat_dev);
hipFree(pxx_flat_dev);
hipFree(pxy_flat_dev);
hipFree(pxz_flat_dev);
hipFree(pyy_flat_dev);
hipFree(pyz_flat_dev);
hipFree(pzz_flat_dev);
}
*/
| 2773f664feb76a317815b9649c50a1ecf5f3ff19.cu | #include "Particles.h"
#include "Alloc.h"
#include <cuda.h>
#include <cuda_runtime.h>
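// TPB: number of CUDA threads per block used for every kernel launch in this file.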
#define TPB 64
/** allocate particle arrays */
void particle_allocate(struct parameters* param, struct particles* part, int is)
{
// set species ID
part->species_ID = is;
// number of particles
part->nop = param->np[is];
// maximum number of particles
part->npmax = param->npMax[is];
// choose a different number of mover iterations for ions and electrons
if (param->qom[is] < 0){ //electrons
part->NiterMover = param->NiterMover;
part->n_sub_cycles = param->n_sub_cycles;
} else { // ions: only one iteration
part->NiterMover = 1;
part->n_sub_cycles = 1;
}
// particles per cell
part->npcelx = param->npcelx[is];
part->npcely = param->npcely[is];
part->npcelz = param->npcelz[is];
part->npcel = part->npcelx*part->npcely*part->npcelz;
// cast it to required precision
part->qom = (FPpart) param->qom[is];
long npmax = part->npmax;
// initialize drift and thermal velocities
// drift
part->u0 = (FPpart) param->u0[is];
part->v0 = (FPpart) param->v0[is];
part->w0 = (FPpart) param->w0[is];
// thermal
part->uth = (FPpart) param->uth[is];
part->vth = (FPpart) param->vth[is];
part->wth = (FPpart) param->wth[is];
//////////////////////////////
/// ALLOCATION PARTICLE ARRAYS
//////////////////////////////
part->x = new FPpart[npmax];
part->y = new FPpart[npmax];
part->z = new FPpart[npmax];
// allocate velocity
part->u = new FPpart[npmax];
part->v = new FPpart[npmax];
part->w = new FPpart[npmax];
// allocate charge = q * statistical weight
part->q = new FPinterp[npmax];
}
/** deallocate */
void particle_deallocate(struct particles* part)
{
// deallocate particle variables
delete[] part->x;
delete[] part->y;
delete[] part->z;
delete[] part->u;
delete[] part->v;
delete[] part->w;
delete[] part->q;
}
/** particle mover */
int mover_PC(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "*** MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
FPpart omdtsq, denom, ut, vt, wt, udotb;
// local (to the particle) electric and magnetic field
FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
// interpolation densities
int ix,iy,iz;
FPfield weight[2][2][2];
FPfield xi[2], eta[2], zeta[2];
// intermediate particle position and velocity
FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// move each particle with new fields
for (int i=0; i < part->nop; i++){
xptilde = part->x[i];
yptilde = part->y[i];
zptilde = part->z[i];
// calculate the average velocity iteratively
for(int innter=0; innter < part->NiterMover; innter++){
// interpolation G-->P
ix = 2 + int((part->x[i] - grd->xStart)*grd->invdx);
iy = 2 + int((part->y[i] - grd->yStart)*grd->invdy);
iz = 2 + int((part->z[i] - grd->zStart)*grd->invdz);
// calculate weights
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
// set to zero local electric and magnetic field
Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++){
Exl += weight[ii][jj][kk]*field->Ex[ix- ii][iy -jj][iz- kk];
Eyl += weight[ii][jj][kk]*field->Ey[ix- ii][iy -jj][iz- kk];
Ezl += weight[ii][jj][kk]*field->Ez[ix- ii][iy -jj][iz -kk];
Bxl += weight[ii][jj][kk]*field->Bxn[ix- ii][iy -jj][iz -kk];
Byl += weight[ii][jj][kk]*field->Byn[ix- ii][iy -jj][iz -kk];
Bzl += weight[ii][jj][kk]*field->Bzn[ix- ii][iy -jj][iz -kk];
}
// end interpolation
omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
denom = 1.0/(1.0 + omdtsq);
// solve the position equation
ut= part->u[i] + qomdt2*Exl;
vt= part->v[i] + qomdt2*Eyl;
wt= part->w[i] + qomdt2*Ezl;
udotb = ut*Bxl + vt*Byl + wt*Bzl;
// solve the velocity equation
uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
// update position
part->x[i] = xptilde + uptilde*dto2;
part->y[i] = yptilde + vptilde*dto2;
part->z[i] = zptilde + wptilde*dto2;
} // end of iteration
// update the final position and velocity
part->u[i]= 2.0*uptilde - part->u[i];
part->v[i]= 2.0*vptilde - part->v[i];
part->w[i]= 2.0*wptilde - part->w[i];
part->x[i] = xptilde + uptilde*dt_sub_cycling;
part->y[i] = yptilde + vptilde*dt_sub_cycling;
part->z[i] = zptilde + wptilde*dt_sub_cycling;
//////////
//////////
////////// BC
// X-DIRECTION: BC particles
if (part->x[i] > grd->Lx){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] - grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = 2*grd->Lx - part->x[i];
}
}
if (part->x[i] < 0){
if (param->PERIODICX==true){ // PERIODIC
part->x[i] = part->x[i] + grd->Lx;
} else { // REFLECTING BC
part->u[i] = -part->u[i];
part->x[i] = -part->x[i];
}
}
// Y-DIRECTION: BC particles
if (part->y[i] > grd->Ly){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] - grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = 2*grd->Ly - part->y[i];
}
}
if (part->y[i] < 0){
if (param->PERIODICY==true){ // PERIODIC
part->y[i] = part->y[i] + grd->Ly;
} else { // REFLECTING BC
part->v[i] = -part->v[i];
part->y[i] = -part->y[i];
}
}
// Z-DIRECTION: BC particles
if (part->z[i] > grd->Lz){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] - grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = 2*grd->Lz - part->z[i];
}
}
if (part->z[i] < 0){
if (param->PERIODICZ==true){ // PERIODIC
part->z[i] = part->z[i] + grd->Lz;
} else { // REFLECTING BC
part->w[i] = -part->w[i];
part->z[i] = -part->z[i];
}
}
} // end of subcycling
} // end of one particle
return(0); // exit succcesfully
} // end of the mover
/** Interpolation Particle --> Grid: This is for species */
void interpP2G(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
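// Serial reference implementation of the particle-to-grid deposition; the GPU
// version (interP2G_kernel) accumulates the same moments but needs atomicAdd
// because many particles are processed concurrently.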
// arrays needed for interpolation
FPpart weight[2][2][2];
FPpart temp[2][2][2];
FPpart xi[2], eta[2], zeta[2];
// index of the cell
int ix, iy, iz;
for (register long long i = 0; i < part->nop; i++) {
// determine cell: can we change to int()? is it faster?
ix = 2 + int (floor((part->x[i] - grd->xStart) * grd->invdx));
iy = 2 + int (floor((part->y[i] - grd->yStart) * grd->invdy));
iz = 2 + int (floor((part->z[i] - grd->zStart) * grd->invdz));
// distances from node
xi[0] = part->x[i] - grd->XN[ix - 1][iy][iz];
eta[0] = part->y[i] - grd->YN[ix][iy - 1][iz];
zeta[0] = part->z[i] - grd->ZN[ix][iy][iz - 1];
xi[1] = grd->XN[ix][iy][iz] - part->x[i];
eta[1] = grd->YN[ix][iy][iz] - part->y[i];
zeta[1] = grd->ZN[ix][iy][iz] - part->z[i];
// calculate the weights for different nodes
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = part->q[i] * xi[ii] * eta[jj] * zeta[kk] * grd->invVOL;
//////////////////////////
// add charge density
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->rhon[ix - ii][iy - jj][iz - kk] += weight[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add current density - Jx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->Jx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add current density - Jy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->Jy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add current density - Jz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->Jz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add pressure pxx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->u[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pxx[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
////////////////////////////
// add pressure pxy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->v[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pxy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pxz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->u[i] * part->w[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pxz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pyy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->v[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pyy[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pyz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->v[i] * part->w[i] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
ids->pyz[ix - ii][iy - jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
/////////////////////////////
// add pressure pzz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = part->w[i] * part->w[i] * weight[ii][jj][kk];
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++)
ids->pzz[ix -ii][iy -jj][iz - kk] += temp[ii][jj][kk] * grd->invVOL;
}
}
/** particle kernel */
__global__ void single_particle_kernel(FPpart* x, FPpart* y, FPpart* z, FPpart* u, FPpart* v, FPpart* w, FPinterp* q, FPfield* XN_flat, FPfield* YN_flat, FPfield* ZN_flat, int nxn, int nyn, int nzn, double xStart, double yStart, double zStart, FPfield invdx, FPfield invdy, FPfield invdz, double Lx, double Ly, double Lz, FPfield invVOL, FPfield* Ex_flat, FPfield* Ey_flat, FPfield* Ez_flat, FPfield* Bxn_flat, FPfield* Byn_flat, FPfield* Bzn_flat, bool PERIODICX, bool PERIODICY, bool PERIODICZ, FPpart dt_sub_cycling, FPpart dto2, FPpart qomdt2, int NiterMover, int npmax)
{
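// One CUDA thread per particle; the host loop in mover_GPU_basic drives the
// sub-cycling, and get_idx() is assumed to implement row-major (x,y,z) flattening.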
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int flat_idx = 0;
if(idx >= npmax)
{
return;
}
FPpart omdtsq, denom, ut, vt, wt, udotb;
// local (to the particle) electric and magnetic field
FPfield Exl=0.0, Eyl=0.0, Ezl=0.0, Bxl=0.0, Byl=0.0, Bzl=0.0;
// interpolation densities
int ix,iy,iz;
FPfield weight[2][2][2];
FPfield xi[2], eta[2], zeta[2];
// intermediate particle position and velocity
FPpart xptilde, yptilde, zptilde, uptilde, vptilde, wptilde;
xptilde = x[idx];
yptilde = y[idx];
zptilde = z[idx];
// calculate the average velocity iteratively
for(int innter=0; innter < NiterMover; innter++){
// interpolation G-->P
ix = 2 + int((x[idx] - xStart)*invdx);
iy = 2 + int((y[idx] - yStart)*invdy);
iz = 2 + int((z[idx] - zStart)*invdz);
// calculate weights
flat_idx = get_idx(ix-1, iy, iz, nyn, nzn);
xi[0] = x[idx] - XN_flat[flat_idx];
flat_idx = get_idx(ix, iy-1, iz, nyn, nzn);
eta[0] = y[idx] - YN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz-1, nyn, nzn);
zeta[0] = z[idx] - ZN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz, nyn, nzn);
xi[1] = XN_flat[flat_idx] - x[idx];
eta[1] = YN_flat[flat_idx] - y[idx];
zeta[1] = ZN_flat[flat_idx] - z[idx];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
weight[ii][jj][kk] = xi[ii] * eta[jj] * zeta[kk] * invVOL;
// set to zero local electric and magnetic field
Exl=0.0, Eyl = 0.0, Ezl = 0.0, Bxl = 0.0, Byl = 0.0, Bzl = 0.0;
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++){
flat_idx = get_idx(ix-ii, iy-jj, iz-kk, nyn, nzn);
Exl += weight[ii][jj][kk]*Ex_flat[flat_idx];
Eyl += weight[ii][jj][kk]*Ey_flat[flat_idx];
Ezl += weight[ii][jj][kk]*Ez_flat[flat_idx];
Bxl += weight[ii][jj][kk]*Bxn_flat[flat_idx];
Byl += weight[ii][jj][kk]*Byn_flat[flat_idx];
Bzl += weight[ii][jj][kk]*Bzn_flat[flat_idx];
} // end interpolation
omdtsq = qomdt2*qomdt2*(Bxl*Bxl+Byl*Byl+Bzl*Bzl);
denom = 1.0/(1.0 + omdtsq);
// solve the position equation
ut= u[idx] + qomdt2*Exl;
vt= v[idx] + qomdt2*Eyl;
wt= w[idx] + qomdt2*Ezl;
udotb = ut*Bxl + vt*Byl + wt*Bzl;
// solve the velocity equation
uptilde = (ut+qomdt2*(vt*Bzl -wt*Byl + qomdt2*udotb*Bxl))*denom;
vptilde = (vt+qomdt2*(wt*Bxl -ut*Bzl + qomdt2*udotb*Byl))*denom;
wptilde = (wt+qomdt2*(ut*Byl -vt*Bxl + qomdt2*udotb*Bzl))*denom;
// update position
x[idx] = xptilde + uptilde*dto2;
y[idx] = yptilde + vptilde*dto2;
z[idx] = zptilde + wptilde*dto2;
} // end of iteration
// update the final position and velocity
u[idx]= 2.0*uptilde - u[idx];
v[idx]= 2.0*vptilde - v[idx];
w[idx]= 2.0*wptilde - w[idx];
x[idx] = xptilde + uptilde*dt_sub_cycling;
y[idx] = yptilde + vptilde*dt_sub_cycling;
z[idx] = zptilde + wptilde*dt_sub_cycling;
//////////
//////////
////////// BC
// X-DIRECTION: BC particles
if (x[idx] > Lx){
if (PERIODICX==true){ // PERIODIC
x[idx] = x[idx] - Lx;
} else { // REFLECTING BC
u[idx] = -u[idx];
x[idx] = 2*Lx - x[idx];
}
}
if (x[idx] < 0){
if (PERIODICX==true){ // PERIODIC
x[idx] = x[idx] + Lx;
} else { // REFLECTING BC
u[idx] = -u[idx];
x[idx] = -x[idx];
}
}
// Y-DIRECTION: BC particles
if (y[idx] > Ly){
if (PERIODICY==true){ // PERIODIC
y[idx] = y[idx] - Ly;
} else { // REFLECTING BC
v[idx] = -v[idx];
y[idx] = 2*Ly - y[idx];
}
}
if (y[idx] < 0){
if (PERIODICY==true){ // PERIODIC
y[idx] = y[idx] + Ly;
} else { // REFLECTING BC
v[idx] = -v[idx];
y[idx] = -y[idx];
}
}
// Z-DIRECTION: BC particles
if (z[idx] > Lz){
if (PERIODICZ==true){ // PERIODIC
z[idx] = z[idx] - Lz;
} else { // REFLECTING BC
w[idx] = -w[idx];
z[idx] = 2*Lz - z[idx];
}
}
if (z[idx] < 0){
if (PERIODICZ==true){ // PERIODIC
z[idx] = z[idx] + Lz;
} else { // REFLECTING BC
w[idx] = -w[idx];
z[idx] = -z[idx];
}
}
}
/** particle mover for GPU*/
int mover_GPU_basic(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "***GPU MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
// allocate memory for variables on device
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp *q_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL, *Ex_flat_dev = NULL, *Ey_flat_dev = NULL, *Ez_flat_dev = NULL, *Bxn_flat_dev = NULL, *Byn_flat_dev = NULL, *Bzn_flat_dev = NULL;
cudaMalloc(&x_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&y_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&z_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&u_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(u_dev, part->u, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&v_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&w_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&q_dev, part->npmax * sizeof(FPinterp));
cudaMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), cudaMemcpyHostToDevice);
cudaMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Ex_flat_dev, field->Ex_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Ey_flat_dev, field->Ey_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Ez_flat_dev, field->Ez_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Bxn_flat_dev, field->Bxn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Byn_flat_dev, field->Byn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Bzn_flat_dev, field->Bzn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
std::cout<<"Before loop"<<std::endl;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// Call GPU kernel
single_particle_kernel<<<(part->npmax + TPB - 1)/TPB, TPB>>>(x_dev, y_dev, z_dev,u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->Lx, grd->Ly, grd->Lz, grd->invVOL, Ex_flat_dev, Ey_flat_dev, Ez_flat_dev, Bxn_flat_dev, Byn_flat_dev, Bzn_flat_dev, param->PERIODICX, param->PERIODICY, param->PERIODICZ, dt_sub_cycling, dto2, qomdt2, part->NiterMover, part->nop);
cudaDeviceSynchronize();
} // end of subcycling
// copy memory back to CPU (only the parts that have been modified inside the kernel)
cudaMemcpy(part->x, x_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->y, y_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->z, z_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->u, u_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->v, v_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->w, w_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Ex_flat, Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Ey_flat, Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Ez_flat, Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Bxn_flat, Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Byn_flat, Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Bzn_flat, Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
// clean up
cudaFree(x_dev);
cudaFree(y_dev);
cudaFree(z_dev);
cudaFree(u_dev);
cudaFree(v_dev);
cudaFree(w_dev);
cudaFree(q_dev);
cudaFree(XN_flat_dev);
cudaFree(YN_flat_dev);
cudaFree(ZN_flat_dev);
cudaFree(Ex_flat_dev);
cudaFree(Ey_flat_dev);
cudaFree(Ez_flat_dev);
cudaFree(Bxn_flat_dev);
cudaFree(Byn_flat_dev);
cudaFree(Bzn_flat_dev);
return(0);
}
__global__ void interP2G_kernel( FPpart* x, FPpart* y, FPpart* z, FPpart* u, FPpart* v, FPpart* w, FPinterp* q, FPfield* XN_flat, FPfield* YN_flat, FPfield* ZN_flat, int nxn, int nyn, int nzn, double xStart, double yStart, double zStart, FPfield invdx, FPfield invdy, FPfield invdz, FPfield invVOL, FPinterp* Jx_flat, FPinterp* Jy_flat, FPinterp *Jz_flat, FPinterp *rhon_flat, FPinterp* pxx_flat, FPinterp* pxy_flat, FPinterp* pxz_flat, FPinterp* pyy_flat, FPinterp* pyz_flat, FPinterp* pzz_flat, int npmax)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if(idx >= npmax)
{
return;
}
// arrays needed for interpolation
FPpart weight[2][2][2];
FPpart temp[2][2][2];
FPpart xi[2], eta[2], zeta[2];
// 3-D index of the cell
int ix, iy, iz, flat_idx;
ix = 2 + int (floor((x[idx] - xStart) * invdx));
iy = 2 + int (floor((y[idx] - yStart) * invdy));
iz = 2 + int (floor((z[idx] - zStart) * invdz));
// distances from node
flat_idx = get_idx(ix-1, iy, iz, nyn, nzn);
xi[0] = x[idx] - XN_flat[flat_idx];
flat_idx = get_idx(ix, iy-1, iz, nyn, nzn);
eta[0] = y[idx] - YN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz-1, nyn, nzn);
zeta[0] = z[idx] - ZN_flat[flat_idx];
flat_idx = get_idx(ix, iy, iz, nyn, nzn);
xi[1] = XN_flat[flat_idx] - x[idx];
eta[1] = YN_flat[flat_idx] - y[idx];
zeta[1] = ZN_flat[flat_idx] - z[idx];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
// calculate the weights for different nodes
weight[ii][jj][kk] = q[idx] * xi[ii] * eta[jj] * zeta[kk] * invVOL;
//////////////////////////
// add charge density
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&rhon_flat[flat_idx], weight[ii][jj][kk] * invVOL);
}
////////////////////////////
// add current density - Jx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&Jx_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add current density - Jy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = v[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&Jy_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add current density - Jz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = w[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&Jz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add pressure pxx
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * u[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pxx_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
////////////////////////////
// add pressure pxy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * v[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pxy_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pxz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = u[idx] * w[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pxz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pyy
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = v[idx] * v[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pyy_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pyz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = v[idx] * w[idx] * weight[ii][jj][kk];
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pyz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
/////////////////////////////
// add pressure pzz
for (int ii = 0; ii < 2; ii++)
for (int jj = 0; jj < 2; jj++)
for (int kk = 0; kk < 2; kk++)
temp[ii][jj][kk] = w[idx] * w[idx] * weight[ii][jj][kk];
for (int ii=0; ii < 2; ii++)
for (int jj=0; jj < 2; jj++)
for(int kk=0; kk < 2; kk++) {
flat_idx = get_idx(ix - ii, iy - jj, iz - kk, nyn, nzn);
atomicAdd(&pzz_flat[flat_idx], temp[ii][jj][kk] * invVOL);
}
}
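// Host wrapper around interP2G_kernel: copies particles, grid coordinates and moment
// arrays to the device, launches the kernel over all particles, and copies the
// accumulated moments back into ids.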
void interpP2G_GPU_basic(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp * q_dev = NULL, *Jx_flat_dev = NULL, *Jy_flat_dev = NULL, *Jz_flat_dev = NULL, *rhon_flat_dev = NULL, *pxx_flat_dev = NULL, *pxy_flat_dev = NULL, *pxz_flat_dev = NULL, *pyy_flat_dev = NULL, *pyz_flat_dev = NULL, *pzz_flat_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL;
cudaMalloc(&x_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&y_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&z_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&u_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(u_dev, part->u, part->npmax* sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&v_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&w_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&q_dev, part->npmax * sizeof(FPinterp));
cudaMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), cudaMemcpyHostToDevice);
cudaMalloc(&Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(Jx_flat_dev, ids->Jx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(Jy_flat_dev, ids->Jy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(Jz_flat_dev, ids->Jz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(rhon_flat_dev, ids->rhon_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pxx_flat_dev, ids->pxx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pxy_flat_dev, ids->pxy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pxz_flat_dev, ids->pxz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pyy_flat_dev, ids->pyy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pyz_flat_dev, ids->pyz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pzz_flat_dev, ids->pzz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
interP2G_kernel<<<(part->npmax + TPB - 1)/TPB, TPB>>>( x_dev, y_dev, z_dev, u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->invVOL, Jx_flat_dev, Jy_flat_dev, Jz_flat_dev, rhon_flat_dev, pxx_flat_dev , pxy_flat_dev, pxz_flat_dev, pyy_flat_dev, pyz_flat_dev, pzz_flat_dev, part->nop);
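// note: the launch grid is sized from part->npmax while the kernel receives part->nop,
// so any surplus threads exit immediately at the bounds check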
cudaDeviceSynchronize();
// copy memory back to CPU (only the parts that have been modified inside the kernel)
cudaMemcpy(ids->Jx_flat, Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->Jy_flat, Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->Jz_flat, Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->rhon_flat, rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pxx_flat, pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pxy_flat, pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pxz_flat, pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pyy_flat, pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pyz_flat, pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pzz_flat, pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
// clean up
cudaFree(x_dev);
cudaFree(y_dev);
cudaFree(z_dev);
cudaFree(u_dev);
cudaFree(v_dev);
cudaFree(w_dev);
cudaFree(q_dev);
cudaFree(XN_flat_dev);
cudaFree(YN_flat_dev);
cudaFree(ZN_flat_dev);
cudaFree(rhon_flat_dev);
cudaFree(pxx_flat_dev);
cudaFree(pxy_flat_dev);
cudaFree(pxz_flat_dev);
cudaFree(pyy_flat_dev);
cudaFree(pyz_flat_dev);
cudaFree(pzz_flat_dev);
}
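// Returns the amount of free device memory in bytes; intended for sizing particle
// batches (see the disabled batch implementations below).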
size_t queryFreeMemoryOnGPU(void)
{
size_t free_byte;
size_t total_byte;
cudaError_t cuda_status = cudaMemGetInfo(&free_byte, &total_byte);
if ( cudaSuccess != cuda_status ){
printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
exit(1);
}
return free_byte; // return the amount of free memory on the GPU in bytes
}
/* particle mover for GPU with batching (unfinished; this comment block also disables interpP2G_GPU_batch below)
int mover_GPU_batch(struct particles* part, struct EMfield* field, struct grid* grd, struct parameters* param)
{
// print species and subcycling
std::cout << "***GPU MOVER with SUBCYCLYING "<< param->n_sub_cycles << " - species " << part->species_ID << " ***" << std::endl;
// auxiliary variables
FPpart dt_sub_cycling = (FPpart) param->dt/((double) part->n_sub_cycles);
FPpart dto2 = .5*dt_sub_cycling, qomdt2 = part->qom*dto2/param->c;
// allocate memory for variables on device
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp *q_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL, *Ex_flat_dev = NULL, *Ey_flat_dev = NULL, *Ez_flat_dev = NULL, *Bxn_flat_dev = NULL, *Byn_flat_dev, *Bzn_flat_dev = NULL;
size_t free_bytes = 0;
int batch_number = 0;
free_bytes = queryFreeMemoryOnGPU();
const long int to = split_index + MAX_GPU_PARTICILES - 1 < part->npmax - 1 ? split_index + MAX_GPU_PARTICILES - 1 : part->npmax - 1;
const int n_particles = to - split_index + 1;
size_t batch_size = (to - split_index + 1) * sizeof(FPpart);
cudaMalloc(&x_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&y_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&z_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&u_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(u_dev, part->u, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&v_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&w_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&q_dev, part->npmax * sizeof(FPinterp));
cudaMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), cudaMemcpyHostToDevice);
cudaMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Ex_flat_dev, field->Ex_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Ey_flat_dev, field->Ey_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Ez_flat_dev, field->Ez_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Bxn_flat_dev, field->Bxn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Byn_flat_dev, field->Byn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(Bzn_flat_dev, field->Bzn_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
do
{
const long int to = split_index + MAX_GPU_PARTICILES - 1 < part->npmax - 1 ? split_index + MAX_GPU_PARTICILES - 1 : part->npmax - 1;
const int n_particles = to - split_index + 1;
size_t batch_size = (to - split_index + 1) * sizeof(FPpart);
FPpart *d_x, *d_y, *d_z, *d_u, *d_v, *d_w;
cudaMalloc(&d_x, batch_size);
cudaMalloc(&d_y, batch_size);
cudaMalloc(&d_z, batch_size);
cudaMalloc(&d_u, batch_size);
cudaMalloc(&d_v, batch_size);
cudaMalloc(&d_w, batch_size);
//particles
cudaMemcpy(d_x, part->x+split_index, batch_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, part->y+split_index, batch_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_z, part->z+split_index, batch_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_u, part->u+split_index, batch_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_v, part->v+split_index, batch_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_w, part->w+split_index, batch_size, cudaMemcpyHostToDevice);
std::cout<<"Before loop"<<std::endl;
// start subcycling
for (int i_sub=0; i_sub < part->n_sub_cycles; i_sub++){
// Call GPU kernel
single_particle_kernel<<<(part->npmax + TPB - 1)/TPB, TPB>>>(x_dev, y_dev, z_dev,u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->Lx, grd->Ly, grd->Lz, grd->invVOL, Ex_flat_dev, Ey_flat_dev, Ez_flat_dev, Bxn_flat_dev, Byn_flat_dev, Bzn_flat_dev, param->PERIODICX, param->PERIODICY, param->PERIODICZ, dt_sub_cycling, dto2, qomdt2, part->NiterMover, part->nop);
cudaDeviceSynchronize();
} // end of one particle
// copy memory back to CPU (only the parts that have been modified inside the kernel)
cudaMemcpy(part->x, x_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->y, y_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->z, z_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->u, u_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->v, v_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(part->w, w_dev, part->npmax * sizeof(FPpart), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Ex_flat, Ex_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Ey_flat, Ey_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Ez_flat, Ez_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Bxn_flat, Bxn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Byn_flat, Byn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
cudaMemcpy(field->Bzn_flat, Bzn_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyDeviceToHost);
// clean up
cudaFree(x_dev);
cudaFree(y_dev);
cudaFree(z_dev);
cudaFree(u_dev);
cudaFree(v_dev);
cudaFree(w_dev);
cudaFree(XN_flat_dev);
cudaFree(YN_flat_dev);
cudaFree(ZN_flat_dev);
cudaFree(Ex_flat_dev);
cudaFree(Ey_flat_dev);
cudaFree(Ez_flat_dev);
cudaFree(Bxn_flat_dev);
cudaFree(Byn_flat_dev);
cudaFree(Bzn_flat_dev);
return(0);
}
void interpP2G_GPU_batch(struct particles* part, struct interpDensSpecies* ids, struct grid* grd)
{
FPpart *x_dev = NULL, *y_dev = NULL, *z_dev = NULL, *u_dev = NULL, *v_dev = NULL, *w_dev = NULL;
FPinterp * q_dev = NULL, *Jx_flat_dev = NULL, *Jy_flat_dev = NULL, *Jz_flat_dev = NULL, *rhon_flat_dev = NULL, *pxx_flat_dev = NULL, *pxy_flat_dev = NULL, *pxz_flat_dev = NULL, *pyy_flat_dev = NULL, *pyz_flat_dev = NULL, *pzz_flat_dev = NULL;
FPfield *XN_flat_dev = NULL, *YN_flat_dev = NULL, *ZN_flat_dev = NULL;
cudaMalloc(&x_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(x_dev, part->x, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&y_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(y_dev, part->y, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&z_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(z_dev, part->z, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&u_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(u_dev, part->u, part->npmax* sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&v_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(v_dev, part->v, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&w_dev, part->npmax * sizeof(FPpart));
cudaMemcpy(w_dev, part->w, part->npmax * sizeof(FPpart), cudaMemcpyHostToDevice);
cudaMalloc(&q_dev, part->npmax * sizeof(FPinterp));
cudaMemcpy(q_dev, part->q, part->npmax * sizeof(FPinterp), cudaMemcpyHostToDevice);
cudaMalloc(&Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(Jx_flat_dev, ids->Jx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(Jy_flat_dev, ids->Jy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(Jz_flat_dev, ids->Jz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(rhon_flat_dev, ids->rhon_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pxx_flat_dev, ids->pxx_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pxy_flat_dev, ids->pxy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pxz_flat_dev, ids->pxz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pyy_flat_dev, ids->pyy_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pyz_flat_dev, ids->pyz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp));
cudaMemcpy(pzz_flat_dev, ids->pzz_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&XN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(XN_flat_dev, grd->XN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&YN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(YN_flat_dev, grd->YN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
cudaMalloc(&ZN_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield));
cudaMemcpy(ZN_flat_dev, grd->ZN_flat, grd->nxn * grd->nyn * grd->nzn * sizeof(FPfield), cudaMemcpyHostToDevice);
interP2G_kernel<<<(part->npmax + TPB - 1)/TPB, TPB>>>( x_dev, y_dev, z_dev, u_dev, v_dev, w_dev, q_dev, XN_flat_dev, YN_flat_dev, ZN_flat_dev, grd->nxn, grd->nyn, grd->nzn, grd->xStart, grd->yStart, grd->zStart, grd->invdx, grd->invdy, grd->invdz, grd->invVOL, Jx_flat_dev, Jy_flat_dev, Jz_flat_dev, rhon_flat_dev, pxx_flat_dev , pxy_flat_dev, pxz_flat_dev, pyy_flat_dev, pyz_flat_dev, pzz_flat_dev, part->nop);
cudaDeviceSynchronize();
// copy memory back to CPU (only the parts that have been modified inside the kernel)
cudaMemcpy(ids->Jx_flat, Jx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->Jy_flat, Jy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->Jz_flat, Jz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->rhon_flat, rhon_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pxx_flat, pxx_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pxy_flat, pxy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pxz_flat, pxz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pyy_flat, pyy_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pyz_flat, pyz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
cudaMemcpy(ids->pzz_flat, pzz_flat_dev, grd->nxn * grd->nyn * grd->nzn * sizeof(FPinterp), cudaMemcpyDeviceToHost);
// clean up
cudaFree(x_dev);
cudaFree(y_dev);
cudaFree(z_dev);
cudaFree(u_dev);
cudaFree(v_dev);
cudaFree(w_dev);
cudaFree(q_dev);
cudaFree(XN_flat_dev);
cudaFree(YN_flat_dev);
cudaFree(ZN_flat_dev);
cudaFree(rhon_flat_dev);
cudaFree(pxx_flat_dev);
cudaFree(pxy_flat_dev);
cudaFree(pxz_flat_dev);
cudaFree(pyy_flat_dev);
cudaFree(pyz_flat_dev);
cudaFree(pzz_flat_dev);
}
*/
|
6d19ab04fa2a756bdf3a1c0916e4d0950b9af676.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include "box2d1r-256-10-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
hipError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != hipSuccess) { \
fprintf(stderr, "CUDA error: %s\n", hipGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == hipSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(hipGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(hipMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(hipMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyHostToDevice));
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
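// Temporal blocking: each kernel0_N launch advances the stencil N time steps; the main
// loop below uses depth-10 tiles, and the branches that follow handle the remaining
// (timestep % 10) steps, apparently arranged so the final result lands in the expected
// half of the double-buffered dev_A.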
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
hipLaunchKernelGGL(( kernel0_10), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_1), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_2), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_3), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_4), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_5), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_6), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_7), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_8), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
hipLaunchKernelGGL(( kernel0_9), dim3(k0_dimGrid), dim3(k0_dimBlock), 0, 0, dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
hipDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(hipMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), hipMemcpyDeviceToHost));
}
cudaCheckReturn(hipFree(dev_A));
}
}
else {
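// reference CPU implementation: 3x3 weighted box stencil applied between the two
// time planes of A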
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-1][j-1] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i-1][j+1] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i+1][j-1] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+1][j+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
| 6d19ab04fa2a756bdf3a1c0916e4d0950b9af676.cu | #include <assert.h>
#include <stdio.h>
#include "box2d1r-256-10-128_kernel.hu"
#define BENCH_DIM 2
#define BENCH_FPP 17
#define BENCH_RAD 1
#include "common.h"
double kernel_stencil(SB_TYPE *A1, int compsize, int timestep, bool scop)
{
double start_time = sb_time(), end_time = 0.0;
int dimsize = compsize + BENCH_RAD * 2;
SB_TYPE (*A)[dimsize][dimsize] = (SB_TYPE (*)[dimsize][dimsize])A1;
if (scop) {
if (dimsize >= 3 && timestep >= 1) {
#define cudaCheckReturn(ret) \
do { \
cudaError_t cudaCheckReturn_e = (ret); \
if (cudaCheckReturn_e != cudaSuccess) { \
fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(cudaCheckReturn_e)); \
fflush(stderr); \
} \
assert(cudaCheckReturn_e == cudaSuccess); \
} while(0)
#define cudaCheckKernel() \
do { \
cudaCheckReturn(cudaGetLastError()); \
} while(0)
double *dev_A;
cudaCheckReturn(cudaMalloc((void **) &dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double)));
{
cudaCheckReturn(cudaMemcpy(dev_A, A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyHostToDevice));
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_START_INSTRUMENTS;
#endif
}
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
AN5D_TYPE c0;
AN5D_TYPE __side0LenMax;
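// Temporal blocking: each kernel0_N launch advances the stencil N time steps; the main
// loop below uses depth-10 tiles, and the branches that follow handle the remaining
// (timestep % 10) steps, apparently arranged so the final result lands in the expected
// half of the double-buffered dev_A.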
{
const AN5D_TYPE __side0Len = 10;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 236;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
AN5D_TYPE __c0Padr = (__c0Len % 2) != (((__c0Len + __side0Len - 1) / __side0Len) % 2) && __c0Len % __side0Len < 2 ? 1 : 0;
__side0LenMax = __side0Len;
for (c0 = __c0Pad; c0 < __c0Pad + __c0Len / __side0Len - __c0Padr; c0 += 1)
{
kernel0_10<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
if ((__c0Len % 2) != (((__c0Len + __side0LenMax - 1) / __side0LenMax) % 2))
{
if (__c0Len % __side0LenMax == 0)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 1)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 2)
{
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 3)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 4)
{
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 5)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 6)
{
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 7)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 8)
{
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
else if (__c0Len % __side0LenMax == 9)
{
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
c0 += 1;
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
else if (__c0Len % __side0LenMax)
{
if (__c0Len % __side0LenMax == 1)
{
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 254;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_1<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 2)
{
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 252;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_2<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 3)
{
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 250;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_3<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 4)
{
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 248;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_4<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 5)
{
const AN5D_TYPE __side0Len = 5;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 246;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_5<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 6)
{
const AN5D_TYPE __side0Len = 6;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 244;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_6<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 7)
{
const AN5D_TYPE __side0Len = 7;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 242;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_7<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 8)
{
const AN5D_TYPE __side0Len = 8;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 240;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_8<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
else if (__c0Len % __side0LenMax == 9)
{
const AN5D_TYPE __side0Len = 9;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 238;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
assert((__side1Len >= 2 * __side0Len * __halo1) && (__c1Len % __side1Len == 0 || __c1Len % __side1Len >= 2 * __side0Len * __halo1) && "[AN5D ERROR] Too short stream");
dim3 k0_dimBlock(__blockSize, 1, 1);
dim3 k0_dimGrid(1 * ((__c1Len + __side1Len - 1) / __side1Len) * ((__c2Len + __side2Len - 1) / __side2Len), 1, 1);
kernel0_9<<<k0_dimGrid, k0_dimBlock>>> (dev_A, dimsize, timestep, c0);
}
}
}
cudaCheckKernel();
{
#ifdef STENCILBENCH
cudaDeviceSynchronize();
SB_STOP_INSTRUMENTS;
#endif
cudaCheckReturn(cudaMemcpy(A, dev_A, (size_t)(2) * (size_t)(dimsize) * (size_t)(dimsize) * sizeof(double), cudaMemcpyDeviceToHost));
}
cudaCheckReturn(cudaFree(dev_A));
}
}
else {
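// Host fallback: the same 3x3 weighted stencil, double-buffered between the two
// planes of A via t%2 / (t+1)%2 and parallelized over rows with OpenMP.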
for (int t = 0; t < timestep; t++)
#pragma omp parallel for
for (int i = BENCH_RAD; i < dimsize - BENCH_RAD; i++)
for (int j = BENCH_RAD; j < dimsize - BENCH_RAD; j++)
A[(t+1)%2][i][j] =
0.09371f * A[t%2][i-1][j-1] + 0.09374f * A[t%2][i-1][j] + 0.09376f * A[t%2][i-1][j+1] +
0.09372f * A[t%2][i][j-1] + 0.25001f * A[t%2][i][j] + 0.09377f * A[t%2][i][j+1] +
0.09373f * A[t%2][i+1][j-1] + 0.09375f * A[t%2][i+1][j] + 0.09378f * A[t%2][i+1][j+1];
}
return (((end_time != 0.0) ? end_time : sb_time()) - start_time);
}
|
f8220d8be1ead643627cc307d6f993b4904934ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
zswapdblk_batched_kernel( int nb, int n_mod_nb,
magmaDoubleComplex **dA_array, int ldda, int inca,
magmaDoubleComplex **dB_array, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int batchid = blockIdx.z;
magmaDoubleComplex *dA = dA_array[batchid];
magmaDoubleComplex *dB = dB_array[batchid];
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
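// Each thread now points at row tx of diagonal block bx, i.e. element
// ( bx*nb*inca + tx, bx*nb ) of the matrix: the column offset contributes
// bx*nb*ldda and the row offset bx*nb*inca, hence the bx*nb*(ldda + inca) term.
// E.g. nb = 32, inca = 1, bx = 2 swaps the block starting at dA(64, 64).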
magmaDoubleComplex tmp;
if (bx < gridDim.x-1)
{
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
else
{
for( int i = 0; i < n_mod_nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
}
/**
Purpose
-------
zswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = ceil(n/nb) blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dA, dimension (ldda,n)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of each array dA.
ldda >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dB, dimension (lddb,n)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of each array dB.
lddb >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zswapdblk_batched_q(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex **dA_array, magma_int_t ldda, magma_int_t inca,
magmaDoubleComplex **dB_array, magma_int_t lddb, magma_int_t incb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t nblocks = magma_ceildiv( n, nb );
magma_int_t n_mod_nb = n % nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (n_mod_nb == 0) nblocks += 1; // a dummy thread block for cleanup code
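// (When nb divides n evenly there is no partial block, but the kernel always treats
//  the last block as the n_mod_nb cleanup block; the extra dummy block then simply
//  loops zero times while every real block takes the full-nb path.)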
dim3 dimGrid(nblocks, 1, batchCount);
dim3 dimBlock(nb);
if ( nblocks > 0 ) {
hipLaunchKernelGGL(( zswapdblk_batched_kernel), dim3(dimGrid), dim3(dimBlock), 0, queue ,
nb, n_mod_nb, dA_array, ldda, inca,
dB_array, lddb, incb );
}
}
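/*
   Usage sketch (illustrative only; the device pointer arrays d_A_array/d_B_array,
   ldda and the queue are assumed to have been set up by the caller):

       // swap the ceil(n/nb) diagonal nb-by-nb blocks of every matrix pair
       magmablas_zswapdblk_batched_q( n, nb,
                                      d_A_array, ldda, 1,
                                      d_B_array, ldda, 1,
                                      batchCount, queue );
*/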
/**
@see magmablas_zswapdblk_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zswapdblk_batched(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex **dA_array, magma_int_t ldda, magma_int_t inca,
magmaDoubleComplex **dB_array, magma_int_t lddb, magma_int_t incb,
magma_int_t batchCount)
{
magmablas_zswapdblk_batched_q( n, nb, dA_array, ldda, inca, dB_array, lddb, incb, batchCount, magma_stream );
}
| f8220d8be1ead643627cc307d6f993b4904934ba.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> s d c
*/
#include "common_magma.h"
/*********************************************************/
/*
* Swap diagonal blocks of two matrices.
* Each thread block swaps one diagonal block.
* Each thread iterates across one row of the block.
*/
__global__ void
zswapdblk_batched_kernel( int nb, int n_mod_nb,
magmaDoubleComplex **dA_array, int ldda, int inca,
magmaDoubleComplex **dB_array, int lddb, int incb )
{
const int tx = threadIdx.x;
const int bx = blockIdx.x;
const int batchid = blockIdx.z;
magmaDoubleComplex *dA = dA_array[batchid];
magmaDoubleComplex *dB = dB_array[batchid];
dA += tx + bx * nb * (ldda + inca);
dB += tx + bx * nb * (lddb + incb);
magmaDoubleComplex tmp;
if (bx < gridDim.x-1)
{
#pragma unroll
for( int i = 0; i < nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
else
{
for( int i = 0; i < n_mod_nb; i++ ) {
tmp = dA[i*ldda];
dA[i*ldda] = dB[i*lddb];
dB[i*lddb] = tmp;
}
}
}
/**
Purpose
-------
zswapdblk swaps diagonal blocks of size nb x nb between matrices
dA and dB on the GPU. It swaps nblocks = ceil(n/nb) blocks.
For i = 1 .. nblocks, submatrices
dA( i*nb*inca, i*nb ) and
dB( i*nb*incb, i*nb ) are swapped.
Arguments
---------
@param[in]
n INTEGER
The number of columns of the matrices dA and dB. N >= 0.
@param[in]
nb INTEGER
The size of diagonal blocks.
NB > 0 and NB <= maximum threads per CUDA block (512 or 1024).
@param[in,out]
dA_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dA, dimension (ldda,n)
The matrix dA.
@param[in]
ldda INTEGER
The leading dimension of each array dA.
ldda >= (nblocks - 1)*nb*inca + nb.
@param[in]
inca INTEGER
The row increment between diagonal blocks of dA. inca >= 0. For example,
inca = 1 means blocks are stored on the diagonal at dA(i*nb, i*nb),
inca = 0 means blocks are stored side-by-side at dA(0, i*nb).
@param[in,out]
dB_array Array of pointers, dimension (batchCount).
Each is a COMPLEX_16 array dB, dimension (lddb,n)
The matrix dB.
@param[in]
lddb INTEGER
The leading dimension of each array dB.
lddb >= (nblocks - 1)*nb*incb + nb.
@param[in]
incb INTEGER
The row increment between diagonal blocks of dB. incb >= 0. See inca.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zswapdblk_batched_q(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex **dA_array, magma_int_t ldda, magma_int_t inca,
magmaDoubleComplex **dB_array, magma_int_t lddb, magma_int_t incb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t nblocks = magma_ceildiv( n, nb );
magma_int_t n_mod_nb = n % nb;
magma_int_t info = 0;
if (n < 0) {
info = -1;
} else if (nb < 1 || nb > 1024) {
info = -2;
} else if (ldda < (nblocks-1)*nb*inca + nb) {
info = -4;
} else if (inca < 0) {
info = -5;
} else if (lddb < (nblocks-1)*nb*incb + nb) {
info = -7;
} else if (incb < 0) {
info = -8;
}
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (n_mod_nb == 0) nblocks += 1; // a dummy thread block for cleanup code
dim3 dimGrid(nblocks, 1, batchCount);
dim3 dimBlock(nb);
if ( nblocks > 0 ) {
zswapdblk_batched_kernel<<< dimGrid, dimBlock, 0, queue >>>
( nb, n_mod_nb, dA_array, ldda, inca,
dB_array, lddb, incb );
}
}
/**
@see magmablas_zswapdblk_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zswapdblk_batched(
magma_int_t n, magma_int_t nb,
magmaDoubleComplex **dA_array, magma_int_t ldda, magma_int_t inca,
magmaDoubleComplex **dB_array, magma_int_t lddb, magma_int_t incb,
magma_int_t batchCount)
{
magmablas_zswapdblk_batched_q( n, nb, dA_array, ldda, inca, dB_array, lddb, incb, batchCount, magma_stream );
}
|
5b77b90f2ca1260cf8ec38fd79bbc0dac96aff1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2019-2022 XGBoost contributors
*/
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include "../common/categorical.h"
#include "../common/hist_util.cuh"
#include "../common/random.h"
#include "./ellpack_page.cuh"
#include "device_adapter_hip.cuh"
#include "gradient_index.h"
#include "xgboost/data.h"
namespace xgboost {
EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {}
EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param)
: impl_{new EllpackPageImpl(dmat, param)} {}
EllpackPage::~EllpackPage() = default;
EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
size_t EllpackPage::Size() const { return impl_->Size(); }
void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
// Bin each input data entry, store the bin indices in compressed form.
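// Informally, the ELLPACK layout gives every row exactly `row_stride` slots; slot
// `ifeature` of row `irow` lands at index (irow + base_row) * row_stride + ifeature
// in the compressed buffer, and unused trailing slots keep `null_gidx_value`.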
__global__ void CompressBinEllpackKernel(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistogramCuts::cut_values_
const uint32_t* __restrict__ cut_ptrs, // HistogramCuts::cut_ptrs_
common::Span<FeatureType const> feature_types,
size_t base_row, // batch_row_begin
size_t n_rows,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride) {
return;
}
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float* feature_cuts = &cuts[cut_ptrs[feature]];
int ncuts = cut_ptrs[feature + 1] - cut_ptrs[feature];
bool is_cat = common::IsCat(feature_types, feature);
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
if (is_cat) {
auto it = dh::MakeTransformIterator<int>(
feature_cuts, [](float v) { return common::AsCat(v); });
bin = thrust::lower_bound(thrust::seq, it, it + ncuts, common::AsCat(fvalue)) - it;
} else {
bin = thrust::upper_bound(thrust::seq, feature_cuts, feature_cuts + ncuts,
fvalue) -
feature_cuts;
}
if (bin >= ncuts) {
bin = ncuts - 1;
}
// Add the number of bins in previous features.
bin += cut_ptrs[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
// Construct an ELLPACK matrix with the given number of empty rows.
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
bool is_dense, size_t row_stride,
size_t n_rows)
: is_dense(is_dense),
cuts_(std::move(cuts)),
row_stride(row_stride),
n_rows(n_rows) {
monitor_.Init("ellpack_page");
dh::safe_cuda(hipSetDevice(device));
monitor_.Start("InitCompressedData");
InitCompressedData(device);
monitor_.Stop("InitCompressedData");
}
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
const SparsePage &page, bool is_dense,
size_t row_stride,
common::Span<FeatureType const> feature_types)
: cuts_(std::move(cuts)), is_dense(is_dense), n_rows(page.Size()),
row_stride(row_stride) {
this->InitCompressedData(device);
this->CreateHistIndices(device, page, feature_types);
}
// Construct an ELLPACK matrix in memory.
EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param)
: is_dense(dmat->IsDense()) {
monitor_.Init("ellpack_page");
dh::safe_cuda(hipSetDevice(param.gpu_id));
n_rows = dmat->Info().num_row_;
monitor_.Start("Quantiles");
// Create the quantile sketches for the dmatrix and initialize HistogramCuts.
row_stride = GetRowStride(dmat);
cuts_ = common::DeviceSketch(param.gpu_id, dmat, param.max_bin);
monitor_.Stop("Quantiles");
monitor_.Start("InitCompressedData");
this->InitCompressedData(param.gpu_id);
monitor_.Stop("InitCompressedData");
dmat->Info().feature_types.SetDevice(param.gpu_id);
auto ft = dmat->Info().feature_types.ConstDeviceSpan();
monitor_.Start("BinningCompression");
CHECK(dmat->SingleColBlock());
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
CreateHistIndices(param.gpu_id, batch, ft);
}
monitor_.Stop("BinningCompression");
}
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
const common::CompressedBufferWriter& writer,
AdapterBatchT batch,
EllpackDeviceAccessor accessor,
common::Span<FeatureType const> feature_types,
const data::IsValidFunctor& is_valid)
: d_buffer(buffer),
writer(writer),
batch(std::move(batch)),
accessor(std::move(accessor)),
feature_types(std::move(feature_types)),
is_valid(is_valid) {}
common::CompressedByteT* d_buffer;
common::CompressedBufferWriter writer;
AdapterBatchT batch;
EllpackDeviceAccessor accessor;
common::Span<FeatureType const> feature_types;
data::IsValidFunctor is_valid;
using Tuple = thrust::tuple<size_t, size_t, size_t>;
__device__ size_t operator()(Tuple out) {
auto e = batch.GetElement(out.get<2>());
if (is_valid(e)) {
// -1 because the scan is inclusive
size_t output_position =
accessor.row_stride * e.row_idx + out.get<1>() - 1;
uint32_t bin_idx = 0;
if (common::IsCat(feature_types, e.column_idx)) {
bin_idx = accessor.SearchBin<true>(e.value, e.column_idx);
} else {
bin_idx = accessor.SearchBin<false>(e.value, e.column_idx);
}
writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
}
return 0;
}
};
template <typename Tuple>
struct TupleScanOp {
__device__ Tuple operator()(Tuple a, Tuple b) {
// Key equal
if (a.template get<0>() == b.template get<0>()) {
b.template get<1>() += a.template get<1>();
return b;
}
// Not equal
return b;
}
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
template <typename AdapterBatchT>
void CopyDataToEllpack(const AdapterBatchT &batch,
common::Span<FeatureType const> feature_types,
EllpackPageImpl *dst, int device_idx, float missing) {
// Some witchcraft happens here
// The goal is to copy valid elements out of the input to an ELLPACK matrix
// with a given row stride, using no extra working memory. Standard stream
// compaction needs to be modified to do this, so we manually define a
// segmented stream compaction via operators on an inclusive scan. The output
// of this inclusive scan is fed to a custom function which works out the
// correct output position
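// A tiny worked example of the idea (values are illustrative): for input elements
// with row indices   [0, 0, 0, 1, 1]
// and validity flags [1, 0, 1, 1, 1],
// the segmented inclusive scan keyed on the row index yields [1, 1, 2, 1, 2];
// a valid element is then written to slot row_stride * row_idx + scan - 1,
// i.e. slots 0 and 1 of row 0 and slots 0 and 1 of row 1, skipping the invalid one.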
auto counting = thrust::make_counting_iterator(0llu);
data::IsValidFunctor is_valid(missing);
auto key_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) {
return batch.GetElement(idx).row_idx;
});
auto value_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) -> size_t {
return is_valid(batch.GetElement(idx));
});
auto key_value_index_iter = thrust::make_zip_iterator(
thrust::make_tuple(key_iter, value_iter, counting));
// Tuple[0] = The row index of the input, used as a key to define segments
// Tuple[1] = Scanned flags of valid elements for each row
// Tuple[2] = The index in the input data
using Tuple = thrust::tuple<size_t, size_t, size_t>;
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
// We redirect the scan output into this functor to do the actual writing
WriteCompressedEllpackFunctor<AdapterBatchT> functor(
d_compressed_buffer, writer, batch, device_accessor, feature_types,
is_valid);
dh::TypedDiscard<Tuple> discard;
thrust::transform_output_iterator<
WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
out(discard, functor);
// Go one level down into hipcub::DeviceScan API to set OffsetT as 64 bit
// So we don't crash on n > 2^31
size_t temp_storage_bytes = 0;
using DispatchScan =
cub::DispatchScan<decltype(key_value_index_iter), decltype(out),
TupleScanOp<Tuple>, cub::NullType, int64_t>;
DispatchScan::Dispatch(nullptr, temp_storage_bytes, key_value_index_iter, out,
TupleScanOp<Tuple>(), cub::NullType(), batch.Size(),
nullptr, false);
dh::TemporaryArray<char> temp_storage(temp_storage_bytes);
DispatchScan::Dispatch(temp_storage.data().get(), temp_storage_bytes,
key_value_index_iter, out, TupleScanOp<Tuple>(),
cub::NullType(), batch.Size(), nullptr, false);
}
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
common::Span<size_t> row_counts) {
// Write the null values
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
auto row_stride = dst->row_stride;
dh::LaunchN(row_stride * dst->n_rows, [=] __device__(size_t idx) {
// For some reason this variable got captured as const
auto writer_non_const = writer;
size_t row_idx = idx / row_stride;
size_t row_offset = idx % row_stride;
if (row_offset >= row_counts[row_idx]) {
writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
device_accessor.NullValue(), idx);
}
});
}
template <typename AdapterBatch>
EllpackPageImpl::EllpackPageImpl(AdapterBatch batch, float missing, int device, bool is_dense,
common::Span<size_t> row_counts_span,
common::Span<FeatureType const> feature_types, size_t row_stride,
size_t n_rows, common::HistogramCuts const& cuts) {
dh::safe_cuda(hipSetDevice(device));
*this = EllpackPageImpl(device, cuts, is_dense, row_stride, n_rows);
CopyDataToEllpack(batch, feature_types, this, device, missing);
WriteNullValues(this, device, row_counts_span);
}
#define ELLPACK_BATCH_SPECIALIZE(__BATCH_T) \
template EllpackPageImpl::EllpackPageImpl( \
__BATCH_T batch, float missing, int device, bool is_dense, \
common::Span<size_t> row_counts_span, common::Span<FeatureType const> feature_types, \
size_t row_stride, size_t n_rows, common::HistogramCuts const& cuts);
ELLPACK_BATCH_SPECIALIZE(data::CudfAdapterBatch)
ELLPACK_BATCH_SPECIALIZE(data::CupyAdapterBatch)
namespace {
void CopyGHistToEllpack(GHistIndexMatrix const& page, common::Span<size_t const> d_row_ptr,
size_t row_stride, common::CompressedByteT* d_compressed_buffer,
size_t null) {
dh::device_vector<uint8_t> data(page.index.begin(), page.index.end());
auto d_data = dh::ToSpan(data);
dh::device_vector<size_t> csc_indptr(page.index.Offset(),
page.index.Offset() + page.index.OffsetSize());
auto d_csc_indptr = dh::ToSpan(csc_indptr);
auto bin_type = page.index.GetBinTypeSize();
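// The CPU GHistIndexMatrix stores bin indices with the narrowest width that fits
// (1, 2 or 4 bytes); DispatchBinType below reinterprets d_data with the matching
// integer type before re-encoding each entry into the ELLPACK buffer.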
common::CompressedBufferWriter writer{page.cut.TotalBins() + 1}; // +1 for null value
dh::LaunchN(row_stride * page.Size(), [=] __device__(size_t idx) mutable {
auto ridx = idx / row_stride;
auto ifeature = idx % row_stride;
auto r_begin = d_row_ptr[ridx];
auto r_end = d_row_ptr[ridx + 1];
size_t r_size = r_end - r_begin;
if (ifeature >= r_size) {
writer.AtomicWriteSymbol(d_compressed_buffer, null, idx);
return;
}
size_t offset = 0;
if (!d_csc_indptr.empty()) {
// is dense, ifeature is the actual feature index.
offset = d_csc_indptr[ifeature];
}
common::cuda::DispatchBinType(bin_type, [&](auto t) {
using T = decltype(t);
auto ptr = reinterpret_cast<T const*>(d_data.data());
auto bin_idx = ptr[r_begin + ifeature] + offset;
writer.AtomicWriteSymbol(d_compressed_buffer, bin_idx, idx);
});
});
}
} // anonymous namespace
EllpackPageImpl::EllpackPageImpl(Context const* ctx, GHistIndexMatrix const& page,
common::Span<FeatureType const> ft)
: is_dense{page.IsDense()}, base_rowid{page.base_rowid}, n_rows{page.Size()}, cuts_{page.cut} {
auto it = common::MakeIndexTransformIter(
[&](size_t i) { return page.row_ptr[i + 1] - page.row_ptr[i]; });
row_stride = *std::max_element(it, it + page.Size());
CHECK_GE(ctx->gpu_id, 0);
monitor_.Start("InitCompressedData");
InitCompressedData(ctx->gpu_id);
monitor_.Stop("InitCompressedData");
// copy gidx
common::CompressedByteT* d_compressed_buffer = gidx_buffer.DevicePointer();
dh::device_vector<size_t> row_ptr(page.row_ptr);
auto d_row_ptr = dh::ToSpan(row_ptr);
auto accessor = this->GetDeviceAccessor(ctx->gpu_id, ft);
auto null = accessor.NullValue();
CopyGHistToEllpack(page, d_row_ptr, row_stride, d_compressed_buffer, null);
}
// A functor that copies the data from one EllpackPage to another.
struct CopyPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
// The number of elements to skip.
size_t offset;
CopyPage(EllpackPageImpl *dst, EllpackPageImpl const *src, size_t offset)
: cbw{dst->NumSymbols()}, dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
offset(offset) {}
__device__ void operator()(size_t element_id) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id],
element_id + offset);
}
};
// Copy the data from the given EllpackPage to the current page.
size_t EllpackPageImpl::Copy(int device, EllpackPageImpl const *page,
size_t offset) {
monitor_.Start("Copy");
size_t num_elements = page->n_rows * page->row_stride;
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_GE(n_rows * row_stride, offset + num_elements);
if (page == this) {
LOG(FATAL) << "Concatenating the same Ellpack.";
return this->n_rows * this->row_stride;
}
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(num_elements, CopyPage(this, page, offset));
monitor_.Stop("Copy");
return num_elements;
}
// A functor that compacts the rows from one EllpackPage into another.
struct CompactPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
/*! \brief An array that maps the rows from the full DMatrix to the compacted
* page.
*
* The total size is the number of rows in the original, uncompacted DMatrix.
* Elements are the row ids in the compacted page. Rows not needed are set to
* SIZE_MAX.
*
* An example compacting 16 rows to 8 rows:
* [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6,
* SIZE_MAX, 7, SIZE_MAX, SIZE_MAX]
*/
common::Span<size_t> row_indexes;
size_t base_rowid;
size_t row_stride;
CompactPage(EllpackPageImpl* dst, EllpackPageImpl const* src,
common::Span<size_t> row_indexes)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
row_indexes(row_indexes),
base_rowid{src->base_rowid},
row_stride{src->row_stride} {}
__device__ void operator()(size_t row_id) {
size_t src_row = base_rowid + row_id;
size_t dst_row = row_indexes[src_row];
if (dst_row == SIZE_MAX) return;
size_t dst_offset = dst_row * row_stride;
size_t src_offset = row_id * row_stride;
for (size_t j = 0; j < row_stride; j++) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j],
dst_offset + j);
}
}
};
// Compacts the data from the given EllpackPage into the current page.
void EllpackPageImpl::Compact(int device, EllpackPageImpl const* page,
common::Span<size_t> row_indexes) {
monitor_.Start("Compact");
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_LE(page->base_rowid + page->n_rows, row_indexes.size());
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(page->n_rows, CompactPage(this, page, row_indexes));
monitor_.Stop("Compact");
}
// Initialize the buffer to stored compressed features.
void EllpackPageImpl::InitCompressedData(int device) {
size_t num_symbols = NumSymbols();
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
gidx_buffer.SetDevice(device);
// Don't call fill unnecessarily
if (gidx_buffer.Size() == 0) {
gidx_buffer.Resize(compressed_size_bytes, 0);
} else {
gidx_buffer.Resize(compressed_size_bytes, 0);
thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0);
}
}
// Compress a CSR page into ELLPACK.
void EllpackPageImpl::CreateHistIndices(int device,
const SparsePage& row_batch,
common::Span<FeatureType const> feature_types) {
if (row_batch.Size() == 0) return;
unsigned int null_gidx_value = NumSymbols() - 1;
const auto& offset_vec = row_batch.offset.ConstHostVector();
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
::min(dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(row_batch.Size()));
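// Heuristic: stage at most ~1/16 of total device memory worth of Entry structs per
// batch (gpu_batch_nrows * row_stride * sizeof(Entry) <= TotalMemory / 16), and
// never more rows than the page actually has.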
size_t gpu_nbatches = common::DivRoundUp(row_batch.Size(), gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end =
::min((gpu_batch + 1) * gpu_batch_nrows, row_batch.Size());
size_t batch_nrows = batch_row_end - batch_row_begin;
const auto ent_cnt_begin = offset_vec[batch_row_begin];
const auto ent_cnt_end = offset_vec[batch_row_end];
/*! \brief row offset in SparsePage (the input data). */
dh::device_vector<size_t> row_ptrs(batch_nrows + 1);
thrust::copy(offset_vec.data() + batch_row_begin,
offset_vec.data() + batch_row_end + 1, row_ptrs.begin());
// number of entries in this batch.
size_t n_entries = ent_cnt_end - ent_cnt_begin;
dh::device_vector<Entry> entries_d(n_entries);
// copy data entries to device.
if (row_batch.data.DeviceCanRead()) {
auto const& d_data = row_batch.data.ConstDeviceSpan();
dh::safe_cuda(hipMemcpyAsync(
entries_d.data().get(), d_data.data() + ent_cnt_begin,
n_entries * sizeof(Entry), hipMemcpyDefault));
} else {
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector();
dh::safe_cuda(hipMemcpyAsync(
entries_d.data().get(), data_vec.data() + ent_cnt_begin,
n_entries * sizeof(Entry), hipMemcpyDefault));
}
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x),
common::DivRoundUp(row_stride, block3.y), 1);
auto device_accessor = GetDeviceAccessor(device);
dh::LaunchKernel {grid3, block3}(
CompressBinEllpackKernel, common::CompressedBufferWriter(NumSymbols()),
gidx_buffer.DevicePointer(), row_ptrs.data().get(),
entries_d.data().get(), device_accessor.gidx_fvalue_map.data(),
device_accessor.feature_segments.data(), feature_types,
batch_row_begin, batch_nrows, row_stride,
null_gidx_value);
}
}
// Return the number of rows contained in this page.
size_t EllpackPageImpl::Size() const { return n_rows; }
// Return the memory cost for storing the compressed features.
size_t EllpackPageImpl::MemCostBytes(size_t num_rows, size_t row_stride,
const common::HistogramCuts& cuts) {
// Required buffer size for storing data matrix in ELLPack format.
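// Roughly: each of the row_stride * num_rows slots is packed with about
// ceil(log2(TotalBins() + 1)) bits -- e.g. 256 bins plus the null symbol need
// 9 bits per slot -- plus some alignment overhead added by CalculateBufferSize.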
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * num_rows,
cuts.TotalBins() + 1);
return compressed_size_bytes;
}
EllpackDeviceAccessor EllpackPageImpl::GetDeviceAccessor(
int device, common::Span<FeatureType const> feature_types) const {
gidx_buffer.SetDevice(device);
return {device,
cuts_,
is_dense,
row_stride,
base_rowid,
n_rows,
common::CompressedIterator<uint32_t>(gidx_buffer.ConstDevicePointer(),
NumSymbols()),
feature_types};
}
} // namespace xgboost
| 5b77b90f2ca1260cf8ec38fd79bbc0dac96aff1c.cu | /*!
* Copyright 2019-2022 XGBoost contributors
*/
#include <thrust/iterator/discard_iterator.h>
#include <thrust/iterator/transform_output_iterator.h>
#include "../common/categorical.h"
#include "../common/hist_util.cuh"
#include "../common/random.h"
#include "./ellpack_page.cuh"
#include "device_adapter.cuh"
#include "gradient_index.h"
#include "xgboost/data.h"
namespace xgboost {
EllpackPage::EllpackPage() : impl_{new EllpackPageImpl()} {}
EllpackPage::EllpackPage(DMatrix* dmat, const BatchParam& param)
: impl_{new EllpackPageImpl(dmat, param)} {}
EllpackPage::~EllpackPage() = default;
EllpackPage::EllpackPage(EllpackPage&& that) { std::swap(impl_, that.impl_); }
size_t EllpackPage::Size() const { return impl_->Size(); }
void EllpackPage::SetBaseRowId(size_t row_id) { impl_->SetBaseRowId(row_id); }
// Bin each input data entry, store the bin indices in compressed form.
__global__ void CompressBinEllpackKernel(
common::CompressedBufferWriter wr,
common::CompressedByteT* __restrict__ buffer, // gidx_buffer
const size_t* __restrict__ row_ptrs, // row offset of input data
const Entry* __restrict__ entries, // One batch of input data
const float* __restrict__ cuts, // HistogramCuts::cut_values_
const uint32_t* __restrict__ cut_ptrs, // HistogramCuts::cut_ptrs_
common::Span<FeatureType const> feature_types,
size_t base_row, // batch_row_begin
size_t n_rows,
size_t row_stride,
unsigned int null_gidx_value) {
size_t irow = threadIdx.x + blockIdx.x * blockDim.x;
int ifeature = threadIdx.y + blockIdx.y * blockDim.y;
if (irow >= n_rows || ifeature >= row_stride) {
return;
}
int row_length = static_cast<int>(row_ptrs[irow + 1] - row_ptrs[irow]);
unsigned int bin = null_gidx_value;
if (ifeature < row_length) {
Entry entry = entries[row_ptrs[irow] - row_ptrs[0] + ifeature];
int feature = entry.index;
float fvalue = entry.fvalue;
// {feature_cuts, ncuts} forms the array of cuts of `feature'.
const float* feature_cuts = &cuts[cut_ptrs[feature]];
int ncuts = cut_ptrs[feature + 1] - cut_ptrs[feature];
bool is_cat = common::IsCat(feature_types, feature);
// Assigning the bin in current entry.
// S.t.: fvalue < feature_cuts[bin]
if (is_cat) {
auto it = dh::MakeTransformIterator<int>(
feature_cuts, [](float v) { return common::AsCat(v); });
bin = thrust::lower_bound(thrust::seq, it, it + ncuts, common::AsCat(fvalue)) - it;
} else {
bin = thrust::upper_bound(thrust::seq, feature_cuts, feature_cuts + ncuts,
fvalue) -
feature_cuts;
}
if (bin >= ncuts) {
bin = ncuts - 1;
}
// Add the number of bins in previous features.
bin += cut_ptrs[feature];
}
// Write to gidx buffer.
wr.AtomicWriteSymbol(buffer, bin, (irow + base_row) * row_stride + ifeature);
}
// Construct an ELLPACK matrix with the given number of empty rows.
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
bool is_dense, size_t row_stride,
size_t n_rows)
: is_dense(is_dense),
cuts_(std::move(cuts)),
row_stride(row_stride),
n_rows(n_rows) {
monitor_.Init("ellpack_page");
dh::safe_cuda(cudaSetDevice(device));
monitor_.Start("InitCompressedData");
InitCompressedData(device);
monitor_.Stop("InitCompressedData");
}
EllpackPageImpl::EllpackPageImpl(int device, common::HistogramCuts cuts,
const SparsePage &page, bool is_dense,
size_t row_stride,
common::Span<FeatureType const> feature_types)
: cuts_(std::move(cuts)), is_dense(is_dense), n_rows(page.Size()),
row_stride(row_stride) {
this->InitCompressedData(device);
this->CreateHistIndices(device, page, feature_types);
}
// Construct an ELLPACK matrix in memory.
EllpackPageImpl::EllpackPageImpl(DMatrix* dmat, const BatchParam& param)
: is_dense(dmat->IsDense()) {
monitor_.Init("ellpack_page");
dh::safe_cuda(cudaSetDevice(param.gpu_id));
n_rows = dmat->Info().num_row_;
monitor_.Start("Quantiles");
// Create the quantile sketches for the dmatrix and initialize HistogramCuts.
row_stride = GetRowStride(dmat);
cuts_ = common::DeviceSketch(param.gpu_id, dmat, param.max_bin);
monitor_.Stop("Quantiles");
monitor_.Start("InitCompressedData");
this->InitCompressedData(param.gpu_id);
monitor_.Stop("InitCompressedData");
dmat->Info().feature_types.SetDevice(param.gpu_id);
auto ft = dmat->Info().feature_types.ConstDeviceSpan();
monitor_.Start("BinningCompression");
CHECK(dmat->SingleColBlock());
for (const auto& batch : dmat->GetBatches<SparsePage>()) {
CreateHistIndices(param.gpu_id, batch, ft);
}
monitor_.Stop("BinningCompression");
}
template <typename AdapterBatchT>
struct WriteCompressedEllpackFunctor {
WriteCompressedEllpackFunctor(common::CompressedByteT* buffer,
const common::CompressedBufferWriter& writer,
AdapterBatchT batch,
EllpackDeviceAccessor accessor,
common::Span<FeatureType const> feature_types,
const data::IsValidFunctor& is_valid)
: d_buffer(buffer),
writer(writer),
batch(std::move(batch)),
accessor(std::move(accessor)),
feature_types(std::move(feature_types)),
is_valid(is_valid) {}
common::CompressedByteT* d_buffer;
common::CompressedBufferWriter writer;
AdapterBatchT batch;
EllpackDeviceAccessor accessor;
common::Span<FeatureType const> feature_types;
data::IsValidFunctor is_valid;
using Tuple = thrust::tuple<size_t, size_t, size_t>;
__device__ size_t operator()(Tuple out) {
auto e = batch.GetElement(out.get<2>());
if (is_valid(e)) {
// -1 because the scan is inclusive
size_t output_position =
accessor.row_stride * e.row_idx + out.get<1>() - 1;
uint32_t bin_idx = 0;
if (common::IsCat(feature_types, e.column_idx)) {
bin_idx = accessor.SearchBin<true>(e.value, e.column_idx);
} else {
bin_idx = accessor.SearchBin<false>(e.value, e.column_idx);
}
writer.AtomicWriteSymbol(d_buffer, bin_idx, output_position);
}
return 0;
}
};
template <typename Tuple>
struct TupleScanOp {
__device__ Tuple operator()(Tuple a, Tuple b) {
// Key equal
if (a.template get<0>() == b.template get<0>()) {
b.template get<1>() += a.template get<1>();
return b;
}
// Not equal
return b;
}
};
// Here the data is already correctly ordered and simply needs to be compacted
// to remove missing data
template <typename AdapterBatchT>
void CopyDataToEllpack(const AdapterBatchT &batch,
common::Span<FeatureType const> feature_types,
EllpackPageImpl *dst, int device_idx, float missing) {
// Some witchcraft happens here
// The goal is to copy valid elements out of the input to an ELLPACK matrix
// with a given row stride, using no extra working memory. Standard stream
// compaction needs to be modified to do this, so we manually define a
// segmented stream compaction via operators on an inclusive scan. The output
// of this inclusive scan is fed to a custom function which works out the
// correct output position
auto counting = thrust::make_counting_iterator(0llu);
data::IsValidFunctor is_valid(missing);
auto key_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) {
return batch.GetElement(idx).row_idx;
});
auto value_iter = dh::MakeTransformIterator<size_t>(
counting,
[=] __device__(size_t idx) -> size_t {
return is_valid(batch.GetElement(idx));
});
auto key_value_index_iter = thrust::make_zip_iterator(
thrust::make_tuple(key_iter, value_iter, counting));
// Tuple[0] = The row index of the input, used as a key to define segments
// Tuple[1] = Scanned flags of valid elements for each row
// Tuple[2] = The index in the input data
using Tuple = thrust::tuple<size_t, size_t, size_t>;
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
// We redirect the scan output into this functor to do the actual writing
WriteCompressedEllpackFunctor<AdapterBatchT> functor(
d_compressed_buffer, writer, batch, device_accessor, feature_types,
is_valid);
dh::TypedDiscard<Tuple> discard;
thrust::transform_output_iterator<
WriteCompressedEllpackFunctor<AdapterBatchT>, decltype(discard)>
out(discard, functor);
// Go one level down into cub::DeviceScan API to set OffsetT as 64 bit
// So we don't crash on n > 2^31
size_t temp_storage_bytes = 0;
using DispatchScan =
cub::DispatchScan<decltype(key_value_index_iter), decltype(out),
TupleScanOp<Tuple>, cub::NullType, int64_t>;
DispatchScan::Dispatch(nullptr, temp_storage_bytes, key_value_index_iter, out,
TupleScanOp<Tuple>(), cub::NullType(), batch.Size(),
nullptr, false);
dh::TemporaryArray<char> temp_storage(temp_storage_bytes);
DispatchScan::Dispatch(temp_storage.data().get(), temp_storage_bytes,
key_value_index_iter, out, TupleScanOp<Tuple>(),
cub::NullType(), batch.Size(), nullptr, false);
}
void WriteNullValues(EllpackPageImpl* dst, int device_idx,
common::Span<size_t> row_counts) {
// Write the null values
auto device_accessor = dst->GetDeviceAccessor(device_idx);
common::CompressedBufferWriter writer(device_accessor.NumSymbols());
auto d_compressed_buffer = dst->gidx_buffer.DevicePointer();
auto row_stride = dst->row_stride;
dh::LaunchN(row_stride * dst->n_rows, [=] __device__(size_t idx) {
// For some reason this variable got captured as const
auto writer_non_const = writer;
size_t row_idx = idx / row_stride;
size_t row_offset = idx % row_stride;
if (row_offset >= row_counts[row_idx]) {
writer_non_const.AtomicWriteSymbol(d_compressed_buffer,
device_accessor.NullValue(), idx);
}
});
}
template <typename AdapterBatch>
EllpackPageImpl::EllpackPageImpl(AdapterBatch batch, float missing, int device, bool is_dense,
common::Span<size_t> row_counts_span,
common::Span<FeatureType const> feature_types, size_t row_stride,
size_t n_rows, common::HistogramCuts const& cuts) {
dh::safe_cuda(cudaSetDevice(device));
*this = EllpackPageImpl(device, cuts, is_dense, row_stride, n_rows);
CopyDataToEllpack(batch, feature_types, this, device, missing);
WriteNullValues(this, device, row_counts_span);
}
#define ELLPACK_BATCH_SPECIALIZE(__BATCH_T) \
template EllpackPageImpl::EllpackPageImpl( \
__BATCH_T batch, float missing, int device, bool is_dense, \
common::Span<size_t> row_counts_span, common::Span<FeatureType const> feature_types, \
size_t row_stride, size_t n_rows, common::HistogramCuts const& cuts);
ELLPACK_BATCH_SPECIALIZE(data::CudfAdapterBatch)
ELLPACK_BATCH_SPECIALIZE(data::CupyAdapterBatch)
namespace {
void CopyGHistToEllpack(GHistIndexMatrix const& page, common::Span<size_t const> d_row_ptr,
size_t row_stride, common::CompressedByteT* d_compressed_buffer,
size_t null) {
dh::device_vector<uint8_t> data(page.index.begin(), page.index.end());
auto d_data = dh::ToSpan(data);
dh::device_vector<size_t> csc_indptr(page.index.Offset(),
page.index.Offset() + page.index.OffsetSize());
auto d_csc_indptr = dh::ToSpan(csc_indptr);
auto bin_type = page.index.GetBinTypeSize();
common::CompressedBufferWriter writer{page.cut.TotalBins() + 1}; // +1 for null value
dh::LaunchN(row_stride * page.Size(), [=] __device__(size_t idx) mutable {
auto ridx = idx / row_stride;
auto ifeature = idx % row_stride;
auto r_begin = d_row_ptr[ridx];
auto r_end = d_row_ptr[ridx + 1];
size_t r_size = r_end - r_begin;
if (ifeature >= r_size) {
writer.AtomicWriteSymbol(d_compressed_buffer, null, idx);
return;
}
size_t offset = 0;
if (!d_csc_indptr.empty()) {
// is dense, ifeature is the actual feature index.
offset = d_csc_indptr[ifeature];
}
common::cuda::DispatchBinType(bin_type, [&](auto t) {
using T = decltype(t);
auto ptr = reinterpret_cast<T const*>(d_data.data());
auto bin_idx = ptr[r_begin + ifeature] + offset;
writer.AtomicWriteSymbol(d_compressed_buffer, bin_idx, idx);
});
});
}
} // anonymous namespace
EllpackPageImpl::EllpackPageImpl(Context const* ctx, GHistIndexMatrix const& page,
common::Span<FeatureType const> ft)
: is_dense{page.IsDense()}, base_rowid{page.base_rowid}, n_rows{page.Size()}, cuts_{page.cut} {
auto it = common::MakeIndexTransformIter(
[&](size_t i) { return page.row_ptr[i + 1] - page.row_ptr[i]; });
row_stride = *std::max_element(it, it + page.Size());
CHECK_GE(ctx->gpu_id, 0);
monitor_.Start("InitCompressedData");
InitCompressedData(ctx->gpu_id);
monitor_.Stop("InitCompressedData");
// copy gidx
common::CompressedByteT* d_compressed_buffer = gidx_buffer.DevicePointer();
dh::device_vector<size_t> row_ptr(page.row_ptr);
auto d_row_ptr = dh::ToSpan(row_ptr);
auto accessor = this->GetDeviceAccessor(ctx->gpu_id, ft);
auto null = accessor.NullValue();
CopyGHistToEllpack(page, d_row_ptr, row_stride, d_compressed_buffer, null);
}
// A functor that copies the data from one EllpackPage to another.
struct CopyPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
// The number of elements to skip.
size_t offset;
CopyPage(EllpackPageImpl *dst, EllpackPageImpl const *src, size_t offset)
: cbw{dst->NumSymbols()}, dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
offset(offset) {}
__device__ void operator()(size_t element_id) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[element_id],
element_id + offset);
}
};
// Copy the data from the given EllpackPage to the current page.
size_t EllpackPageImpl::Copy(int device, EllpackPageImpl const *page,
size_t offset) {
monitor_.Start("Copy");
size_t num_elements = page->n_rows * page->row_stride;
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_GE(n_rows * row_stride, offset + num_elements);
if (page == this) {
LOG(FATAL) << "Concatenating the same Ellpack.";
return this->n_rows * this->row_stride;
}
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(num_elements, CopyPage(this, page, offset));
monitor_.Stop("Copy");
return num_elements;
}
// A functor that compacts the rows from one EllpackPage into another.
struct CompactPage {
common::CompressedBufferWriter cbw;
common::CompressedByteT* dst_data_d;
common::CompressedIterator<uint32_t> src_iterator_d;
/*! \brief An array that maps the rows from the full DMatrix to the compacted
* page.
*
* The total size is the number of rows in the original, uncompacted DMatrix.
* Elements are the row ids in the compacted page. Rows not needed are set to
* SIZE_MAX.
*
* An example compacting 16 rows to 8 rows:
* [SIZE_MAX, 0, 1, SIZE_MAX, SIZE_MAX, 2, SIZE_MAX, 3, 4, 5, SIZE_MAX, 6,
* SIZE_MAX, 7, SIZE_MAX, SIZE_MAX]
*/
common::Span<size_t> row_indexes;
size_t base_rowid;
size_t row_stride;
CompactPage(EllpackPageImpl* dst, EllpackPageImpl const* src,
common::Span<size_t> row_indexes)
: cbw{dst->NumSymbols()},
dst_data_d{dst->gidx_buffer.DevicePointer()},
src_iterator_d{src->gidx_buffer.DevicePointer(), src->NumSymbols()},
row_indexes(row_indexes),
base_rowid{src->base_rowid},
row_stride{src->row_stride} {}
__device__ void operator()(size_t row_id) {
size_t src_row = base_rowid + row_id;
size_t dst_row = row_indexes[src_row];
if (dst_row == SIZE_MAX) return;
size_t dst_offset = dst_row * row_stride;
size_t src_offset = row_id * row_stride;
for (size_t j = 0; j < row_stride; j++) {
cbw.AtomicWriteSymbol(dst_data_d, src_iterator_d[src_offset + j],
dst_offset + j);
}
}
};
// Compacts the data from the given EllpackPage into the current page.
void EllpackPageImpl::Compact(int device, EllpackPageImpl const* page,
common::Span<size_t> row_indexes) {
monitor_.Start("Compact");
CHECK_EQ(row_stride, page->row_stride);
CHECK_EQ(NumSymbols(), page->NumSymbols());
CHECK_LE(page->base_rowid + page->n_rows, row_indexes.size());
gidx_buffer.SetDevice(device);
page->gidx_buffer.SetDevice(device);
dh::LaunchN(page->n_rows, CompactPage(this, page, row_indexes));
monitor_.Stop("Compact");
}
// Initialize the buffer to store compressed features.
void EllpackPageImpl::InitCompressedData(int device) {
size_t num_symbols = NumSymbols();
// Required buffer size for storing data matrix in ELLPack format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * n_rows,
num_symbols);
gidx_buffer.SetDevice(device);
// Don't call fill unnecessarily
if (gidx_buffer.Size() == 0) {
gidx_buffer.Resize(compressed_size_bytes, 0);
} else {
gidx_buffer.Resize(compressed_size_bytes, 0);
thrust::fill(dh::tbegin(gidx_buffer), dh::tend(gidx_buffer), 0);
}
}
// Compress a CSR page into ELLPACK.
void EllpackPageImpl::CreateHistIndices(int device,
const SparsePage& row_batch,
common::Span<FeatureType const> feature_types) {
if (row_batch.Size() == 0) return;
unsigned int null_gidx_value = NumSymbols() - 1;
const auto& offset_vec = row_batch.offset.ConstHostVector();
// bin and compress entries in batches of rows
size_t gpu_batch_nrows =
std::min(dh::TotalMemory(device) / (16 * row_stride * sizeof(Entry)),
static_cast<size_t>(row_batch.Size()));
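  // Illustrative arithmetic (assuming sizeof(Entry) == 8 bytes): with 8 GiB of
  // device memory and row_stride == 256, the cap above is
  // 8*2^30 / (16 * 256 * 8) == 262144 rows per batch, so very large pages are
  // compressed over several kernel launches in the loop below.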
size_t gpu_nbatches = common::DivRoundUp(row_batch.Size(), gpu_batch_nrows);
for (size_t gpu_batch = 0; gpu_batch < gpu_nbatches; ++gpu_batch) {
size_t batch_row_begin = gpu_batch * gpu_batch_nrows;
size_t batch_row_end =
std::min((gpu_batch + 1) * gpu_batch_nrows, row_batch.Size());
size_t batch_nrows = batch_row_end - batch_row_begin;
const auto ent_cnt_begin = offset_vec[batch_row_begin];
const auto ent_cnt_end = offset_vec[batch_row_end];
/*! \brief row offset in SparsePage (the input data). */
dh::device_vector<size_t> row_ptrs(batch_nrows + 1);
thrust::copy(offset_vec.data() + batch_row_begin,
offset_vec.data() + batch_row_end + 1, row_ptrs.begin());
// number of entries in this batch.
size_t n_entries = ent_cnt_end - ent_cnt_begin;
dh::device_vector<Entry> entries_d(n_entries);
// copy data entries to device.
if (row_batch.data.DeviceCanRead()) {
auto const& d_data = row_batch.data.ConstDeviceSpan();
dh::safe_cuda(cudaMemcpyAsync(
entries_d.data().get(), d_data.data() + ent_cnt_begin,
n_entries * sizeof(Entry), cudaMemcpyDefault));
} else {
const std::vector<Entry>& data_vec = row_batch.data.ConstHostVector();
dh::safe_cuda(cudaMemcpyAsync(
entries_d.data().get(), data_vec.data() + ent_cnt_begin,
n_entries * sizeof(Entry), cudaMemcpyDefault));
}
const dim3 block3(32, 8, 1); // 256 threads
const dim3 grid3(common::DivRoundUp(batch_nrows, block3.x),
common::DivRoundUp(row_stride, block3.y), 1);
auto device_accessor = GetDeviceAccessor(device);
dh::LaunchKernel {grid3, block3}(
CompressBinEllpackKernel, common::CompressedBufferWriter(NumSymbols()),
gidx_buffer.DevicePointer(), row_ptrs.data().get(),
entries_d.data().get(), device_accessor.gidx_fvalue_map.data(),
device_accessor.feature_segments.data(), feature_types,
batch_row_begin, batch_nrows, row_stride,
null_gidx_value);
}
}
// Return the number of rows contained in this page.
size_t EllpackPageImpl::Size() const { return n_rows; }
// Return the memory cost for storing the compressed features.
size_t EllpackPageImpl::MemCostBytes(size_t num_rows, size_t row_stride,
const common::HistogramCuts& cuts) {
// Required buffer size for storing data matrix in ELLPACK format.
size_t compressed_size_bytes =
common::CompressedBufferWriter::CalculateBufferSize(row_stride * num_rows,
cuts.TotalBins() + 1);
return compressed_size_bytes;
}
EllpackDeviceAccessor EllpackPageImpl::GetDeviceAccessor(
int device, common::Span<FeatureType const> feature_types) const {
gidx_buffer.SetDevice(device);
return {device,
cuts_,
is_dense,
row_stride,
base_rowid,
n_rows,
common::CompressedIterator<uint32_t>(gidx_buffer.ConstDevicePointer(),
NumSymbols()),
feature_types};
}
} // namespace xgboost
|
2761b79251ca8fcbe91041d42817fe029746e906.hip | // !!! This is a file automatically generated by hipify!!!
/*! \file fitmultimain.cu
* \author Fang Huang
* \date October 10, 2010
* \brief This file contains the mexFunction() call and relevant matlab
* interface code.
*/
/*
*float Floating point number. 4bytes +/- 3.4e +/- 38 (~7 digits)
*/
#include <windows.h>
#pragma comment(lib, "kernel32.lib")
#include <math.h>
#include "mex.h"
#include <stdlib.h>
#include <stdio.h>
#include "image_operation.h"
#include "filter.h"
#include "definitions.h"
void maxfilter(const float * data , float * result, const int fsz , const int xsz, const int ysz, const int tsz);
void cudasafe( hipError_t err, char* str, int lineNumber);
void CUDAERROR(const char *instr,int lineNumber);
void CUDAERROR(const char *instr,int lineNumber) {
hipError_t errornum;
const char *str;
if (errornum=hipGetLastError()) {
//reset all cuda devices
int deviceCount = 0;
int ii = 0;
cudasafe(hipGetDeviceCount(&deviceCount),"hipGetDeviceCount",__LINE__ ); //query number of GPUs
for (ii = 0; ii< deviceCount;ii++) {
hipSetDevice(ii);
hipDeviceReset();
}
str=hipGetErrorString(errornum);
//mexPrintf("gpuGaussNDmle(line %i): %s in %s\n",lineNumber, str, instr);
hipDeviceReset();
mexErrMsgIdAndTxt("gpuGaussNDmle:cudaFail","gpuGaussNDmle(line %i): %s in %s\n",lineNumber, str, instr);
exit(1); // might not stop matlab
}
}
void cudasafe( hipError_t err, char* str, int lineNumber)
{
if (err != hipSuccess)
{
//reset all cuda devices
int deviceCount = 0;
int ii = 0;
cudasafe(hipGetDeviceCount(&deviceCount),"hipGetDeviceCount",__LINE__ ); //query number of GPUs
for (ii = 0; ii< deviceCount;ii++) {
hipSetDevice(ii);
hipDeviceReset();
}
mexErrMsgIdAndTxt("gpuGaussNDmle:cudaFail","%s failed with error code %i at line %d\n",str,err, lineNumber);
exit(1); // might not stop matlab
}
}
bool mxIsScalar(const mxArray *array_ptr)
{
/*!
* \brief Check that the passed in mxArray is in fact a single number.
* \param array_ptr pointer to the array to check.
* \return bool
*/
const mwSize *size;
const size_t dims = mxGetNumberOfDimensions(array_ptr);
size = mxGetDimensions(array_ptr);
if (dims != 2) return false;
if (size[0] != 1) return false;
if (size[1] != 1) return false;
return true;
}
void printTiming(char *var, LARGE_INTEGER start) {
/*!
* \brief This returns the runtime information to Matlab using the high precision timer.
* \param var Name of the variable to assign the timing info to.
* \param start The starting time for this event.
*/
LARGE_INTEGER freq, stop;
char timing[100];
QueryPerformanceFrequency( &freq );
QueryPerformanceCounter(&stop);
sprintf_s(timing,(size_t) 100,"%s=%f", var, (double)(stop.QuadPart-start.QuadPart)/freq.QuadPart);
mexEvalString(timing);
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
/*!
* \brief Entry point in the code for Matlab. Equivalent to main().
* \param nlhs number of left hand mxArrays to return
* \param plhs array of pointers to the output mxArrays
* \param nrhs number of input mxArrays
* \param prhs array of pointers to the input mxArrays.
*/
int ii=0,jj=0,kk=0,tt=0,candicc=0,candi=0; //!< various counters used throughout the code
const mwSize *pSizeA=0;
mwSize outsize[2],outsize2[2];
const float *indata=0;
int boxsz=0;
float psfsigma=0;
int fitnum=0;
float Nave=0;
float *unif1=0,*unif2=0,*unif=0; //!< uniform-filtered copies of the data
float *maxf=0; //!< maximum filtered copy of the subregion
float *candx=0,*candy=0,*candz=0,*left=0,*top=0,*in_sub=0;
//float *out;
float threshold;
size_t Adims,data_bytes;
float *xx,*yy,*nn,*x,*y,*n,*bb,*div,testx,testy,testn;
float *CRLBarray,*covariance;
float *SuperResim;
float resolution;
float pixelsz;
float zoom;
float Nsigmafit=0;
//fun with timing
LARGE_INTEGER t1, t2, t3, t4;
printf("This code developed development by Lidke Lab at UNM. \nThe author(Fang Huang, Keith Lidke, and Lidke Lab's other group member)\nreserve all rights of using this code.\nReferece: Simultaneous multiple-emitter fitting for single molecule super-resolution imaging, \nFang Huang, Samantha L. Schwartz, Jason M. Byars, and Keith A. Lidke,\nBiomedical Optics Express, Vol. 2, Issue 5, pp. 1377-1393 (2011)\n" );
printf("Start Fitting...\n" );
//mexEvalString("pause(0.1)");
if (nrhs != 9) {// 0 1 2 3 4 5 6 7 8
mexErrMsgIdAndTxt("MATLAB:WrongNumberOfInputs", "This function needs 10 inputs: 3D-image(x,y,frame,matlab matrix,single),PSFsigma(in pixels),initial estimated intensity,fit_type(1-5),pvaluethreshold (0.05),resolution(nm),pixelsz(nm),zm,boxsz");
}
Adims=mxGetNumberOfDimensions(prhs[0]);
if (mxGetClassID(prhs[0])!=mxSINGLE_CLASS)
mexErrMsgIdAndTxt("Multifit:WrongPrecision","3D-Data must be comprised of single floats! Please use single(data) to convert into correct input data type\n");
if (Adims != 3)
mexErrMsgIdAndTxt("Multifit:WrongDimensions", "Only 3D data set can be fit by multi fluorophore fitting. X by Y by Frame");
pSizeA = mxGetDimensions(prhs[0]);
const int data_xsz=(int) pSizeA[0];
const int data_ysz=(int) pSizeA[1];
const int data_frame=(int) pSizeA[2];
if (data_xsz <16 || data_ysz <16)
mexErrMsgTxt("3D-Data must be at least 16 by 16 by frame for this fitting.");
if (!mxIsNumeric(prhs[1]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "psfsigma must be a numeric value\n");
if (!mxIsNumeric(prhs[2]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "estimated must be a numeric value\n");
if (!mxIsNumeric(prhs[3]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "fit_type must be a numeric value\n");
if (!mxIsNumeric(prhs[4]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "threshold must be a numeric value\n");
if (!mxIsNumeric(prhs[5]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "resolution must be a numeric value\n");
if (!mxIsNumeric(prhs[6]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "pixel must be a numeric value\n");
if (!mxIsNumeric(prhs[7]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "zoom must be a numeric value\n");
if (!mxIsScalar(prhs[1]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","psfsigma must be scalar\n");
if (!mxIsScalar(prhs[2]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","estimated intensity must be scalar\n");
if (!mxIsScalar(prhs[3]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","fit_type must be scalar\n");
if (!mxIsScalar(prhs[4]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","threshold must be scalar\n");
if (!mxIsScalar(prhs[5]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","resolution must be scalar\n");
if (!mxIsScalar(prhs[6]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","pixel size must be scalar\n");
if (!mxIsScalar(prhs[7]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","zoom must be scalar\n");
indata=(float *)mxGetData(prhs[0]);
psfsigma=(float)mxGetScalar(prhs[1]);
Nave=(float)mxGetScalar(prhs[2]);
fitnum=(int)mxGetScalar(prhs[3]);
threshold=(float)mxGetScalar(prhs[4]);
resolution=(float)mxGetScalar(prhs[5]);
pixelsz=(float)mxGetScalar(prhs[6]);
zoom=(float)mxGetScalar(prhs[7]);
boxsz=(int)mxGetScalar(prhs[8]);
//Nsigmafit=(float)mxGetScalar(prhs[9]);
Nsigmafit=0.00001; // fixed intensity for position initial estimates; this restriction is lifted on the 2nd round of intensity/position fitting.
if (psfsigma<=0.2f) {
mexErrMsgIdAndTxt("Multifit:InputOutofRange","PSFsigma must be greater than 0.2.\nThe camera pixel size is too large." );
}
if (psfsigma>4.0f) {
mexErrMsgIdAndTxt("Multifit:InputOutofRange","PSFsigma should be less than 4.0.\nPlease check your sigma value." );
}
if ((fitnum<1)||(fitnum>8))
mexErrMsgTxt("Fitting model number out of range. Fitnum has to be within the range of 1-8.");
//if (threshold<=0 || threshold > 1000)
// mexErrMsgIdAndTxt("Multifit:InputOutofRange","NLLR threshold is out of range. Please use any number between 0.0001-1000");
if (resolution<=0) {
resolution=20;
mexErrMsgIdAndTxt("Multifit:InputOutofRange","resolution must be greater than 0. Default value is 20(nm).\n" );
}
if (pixelsz<=0) {
pixelsz=106;
mexErrMsgIdAndTxt("Multifit:InputOutofRange","Pixel Size must be greater than 0. Default value is 106(nm).\n" );
}
//What is smallest possible value?
if (zoom<=0) {
zoom=20;
mexErrMsgIdAndTxt("Multifit:InputOutofRange","Zoom must be greater than 0. Default value is 30.\n" );
}
if (zoom - floor(zoom) > 0.000001f) {
zoom = ceil(zoom);
mexWarnMsgIdAndTxt("Multifit:InputOutofRange","Zoom should be an integer. Rounding up to next whole number value.\n");
}
data_bytes=data_xsz*data_ysz*data_frame*sizeof(float);
indata=(float *)mxGetData(prhs[0]);
data_bytes=((float) pSizeA[0]*pSizeA[1]*pSizeA[2])*sizeof(float);
// uniform and max filters are used to find candidate positions around which sub-region boxes are cut.
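// Reading of the detection scheme below (a sketch, not original documentation):
// unif1 - unif2 is a difference of two box (uniform) filters, i.e. a crude
// band-pass that removes the local background, and the max filter over a
// (boxsz-1)-sized window marks local maxima; a pixel whose band-passed value is
// both ~the local maximum and above minI becomes a candidate centre for a
// boxsz x boxsz sub-region cut later by MakeSubregions.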
const int unifsz=(int) round(psfsigma*2+1);
const int maxfsz=(int) (boxsz-1);
unif1=(float*) malloc (data_bytes); //need to be freed.
memset(unif1,0,data_bytes);
unif2=(float*) malloc (data_bytes); //need to be freed.
memset(unif2,0,data_bytes);
unif=(float*) malloc (data_bytes); //need to be freed.
memset(unif,0,data_bytes);
maxf=(float*) malloc (data_bytes); //need to be freed.
memset(maxf,0,data_bytes);
QueryPerformanceCounter(&t1);
unifilter(indata , unif1 , unifsz,data_xsz,data_ysz,data_frame);
unifilter(indata , unif2 , 2*unifsz,data_xsz,data_ysz,data_frame);
//int off = 0;
/*for(tt=0;tt<data_frame;tt++) {
for(jj=0;jj<data_ysz;jj++) {
for(ii=0;ii<data_xsz;ii++) {
off = tt*data_xsz*data_ysz+data_xsz*jj+ii;
unif[tt*data_xsz*data_ysz+data_xsz*jj+ii]=unif1[off]-unif2[off];
}
}
}*/
for (ii=0; ii<data_frame*data_ysz*data_xsz; ii++)
unif[ii]=unif1[ii]-unif2[ii];
free(unif1);
free(unif2);
maxfilter(unif , maxf , maxfsz,data_xsz,data_ysz,data_frame);
float minI=Nave/2/pi/2/psfsigma/psfsigma/3;
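// Note on the threshold above (an interpretation, not documented in the source):
// the peak pixel of a 2-D Gaussian spot containing Nave photons is roughly
// Nave/(2*pi*psfsigma^2); the extra factors of 2 and 3 appear to lower that
// estimate so dimmer candidates still pass the unif[kk] >= minI test below.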
//minI=25;
candicc=0;
//find number of candidates for malloc
/*for(tt=0;tt<data_frame;tt++)
for(ii=0;ii<data_xsz;ii++)
for(jj=0;jj<data_ysz;jj++)
{
kk=tt*data_xsz*data_ysz+data_xsz*jj+ii;
if (((0.999*maxf[kk])<=unif[kk]) && (unif[kk]>=minI))
candicc++;
}*/
for (ii=0; ii<data_frame*data_ysz*data_xsz; ii++)
if (((0.999*maxf[ii])<=unif[ii]) && (unif[ii]>=minI))
candicc++;
//alloc memory
candx=(float*) malloc (candicc*sizeof(float));//cand x, need to be freed.
memset(candx,0,candicc*sizeof(float));
candy=(float*) malloc (candicc*sizeof(float));//cand y, need to be freed.
memset(candy,0,candicc*sizeof(float));
candz=(float*) malloc (candicc*sizeof(float));//cand z, need to be freed.
memset(candz,0,candicc*sizeof(float));
candi=0;
//store candidates
for(tt=0;tt<data_frame;tt++)
for(jj=0;jj<data_ysz;jj++)
for(ii=0;ii<data_xsz;ii++)
{
kk=tt*data_xsz*data_ysz+data_xsz*jj+ii;
if (((0.999f*maxf[kk])<=unif[kk]) && (unif[kk]>=minI))
{
candx[candi]=(float) ii;//try
candy[candi]=(float) jj;
candz[candi]=(float) tt;
candi++;
}
}
free(unif);
free(maxf);
// Cut subregions around found candidates
//const int boxsz=5*psfsigma+1;
left=(float*) malloc (candicc*sizeof(float));
memset(left,0,candicc*sizeof(float));// left for cut sub regions, need to be freed
top=(float*) malloc (candicc*sizeof(float)); // top for cut sub regions, need to be freed
memset(top,0,candicc*sizeof(float));
in_sub=(float*) malloc (boxsz*boxsz*candicc*sizeof(float));
memset(in_sub,0,boxsz*boxsz*candicc*sizeof(float));
//test
/* outsize2[0]=boxsz;
outsize2[1]=boxsz;
outsize2[2]=candicc;
plhs[1]= mxCreateNumericArray(Adims ,outsize2, mxSINGLE_CLASS, mxREAL);
out=(float *)mxGetData(plhs[1]); */
MakeSubregions(candicc, indata, data_xsz, data_ysz, data_frame, candx,candy, candz, (float) boxsz, in_sub,left,top);
free(candx);
free(candy);
printTiming("t1",t1);
//Fitting started
x=(float*) malloc (fitnum*candicc*sizeof(float));//x, need to be freed.
memset(x,0,fitnum*candicc*sizeof(float));
y=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(y,0,fitnum*candicc*sizeof(float));
n=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(n,0,fitnum*candicc*sizeof(float));
bb=(float*) malloc (candicc*sizeof(float));//bg, need to be freed.
memset(bb,0,candicc*sizeof(float));
div=(float*) malloc (candicc*sizeof(float));//bg, need to be freed.
memset(div,0,candicc*sizeof(float));
//const float Nsigmafit=0.0001;
const int iterations=50;
/*float *out3;
mwSize outsize3[2];
outsize3[0]=candicc;
outsize3[1]=1;
//outsize3[2]=candicc;
plhs[3]= mxCreateNumericArray(2 ,outsize3, mxSINGLE_CLASS, mxREAL);
out3=(float *)mxGetData(plhs[3]);
*/
QueryPerformanceCounter(&t2);
GPUmultifit(candicc,in_sub,boxsz,psfsigma,iterations,fitnum,Nave,Nsigmafit,threshold,x,y,n,bb,div);
printTiming("t2",t2);
QueryPerformanceCounter(&t3);
mexEvalString("s3=tic");
float *bbb,testb;
// Boundary limitation
xx=(float*) malloc (fitnum*candicc*sizeof(float));//x, need to be freed.
memset(xx,0,fitnum*candicc*sizeof(float));
yy=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(yy,0,fitnum*candicc*sizeof(float));
nn=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(nn,0,fitnum*candicc*sizeof(float));
bbb=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(bbb,0,fitnum*candicc*sizeof(float));
/*float *out3;
mwSize outsize3[2];
outsize3[0]=fitnum;
outsize3[1]=candicc;
//outsize3[2]=candicc;
plhs[3]= mxCreateNumericArray(2 ,outsize3, mxSINGLE_CLASS, mxREAL);
out3=(float *)mxGetData(plhs[3]);
*/
int *fittot;
fittot=(int*) malloc(candicc*sizeof(int));
memset(fittot,0,candicc*sizeof(int));
for(ii=0;ii<candicc;ii++)
{
kk=0;
for(jj=0;jj<fitnum;jj++)
{
if ( (abs(x[fitnum*ii+jj])>0.001) && (abs(x[fitnum*ii+jj])<100) )
fittot[ii]++;
//out3[ii*fitnum+jj]=x[ii*fitnum+jj];//testx
testx=x[ii*fitnum+jj];
testy=y[ii*fitnum+jj];
testn=n[ii*fitnum+jj];
testb=bb[ii];
if (true)
{
xx[ii*fitnum+kk]=testx;//testx
yy[ii*fitnum+kk]=testy;
nn[ii*fitnum+kk]=testn;
bbb[ii*fitnum+kk]=testb;
kk++;
}
}
}
free(x);
free(y);
free(n);
free(bb);
//Calculate CRLB
CRLBarray=(float*) malloc ((fitnum*2+1)*candicc*sizeof(float));//x, need to be freed.
memset(CRLBarray,0,(fitnum*2+1)*candicc*sizeof(float));
covariance=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(covariance,0,fitnum*candicc*sizeof(float));
//test
GPUCRLB(candicc, boxsz, psfsigma,fitnum, xx, yy, nn, bbb, Nave, Nsigmafit,CRLBarray,covariance);
int *fittmp;
fittmp=(int*) malloc(candicc*sizeof(int));
memset(fittmp,0,candicc*sizeof(int));
float LAlim=resolution/pixelsz/1.7f;
// filtering and assembling according to CRLB
int recontot=0;
for(ii=0;ii<candicc;ii++){
for(jj=0;jj<fitnum;jj++){
if ( (abs(xx[fitnum*ii+jj])>0.001) && (abs(xx[fitnum*ii+jj])<100) ) {
fittmp[ii]++;
}
if ((xx[fitnum*ii+jj]>0.01f)&&(xx[fitnum*ii+jj]<(boxsz-1.01f))&&
(yy[fitnum*ii+jj]>0.01f)&&(yy[fitnum*ii+jj]<(boxsz-1.01f))&&
(nn[fitnum*ii+jj]>0)&&(CRLBarray[(fitnum*2+1)*ii+jj]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj]>0.0001f)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]>0.0001f))
recontot++;
}
}
float *xbf=(float*) malloc(recontot*sizeof(float));
memset(xbf,0,recontot*sizeof(float));
float *bbf=(float*) malloc(recontot*sizeof(float));
memset(bbf,0,recontot*sizeof(float));
float *ybf=(float*) malloc(recontot*sizeof(float));
memset(ybf,0,recontot*sizeof(float));
float *nbf=(float*) malloc(recontot*sizeof(float));
memset(nbf,0,recontot*sizeof(float));
float *tbf=(float*) malloc(recontot*sizeof(float));
memset(tbf,0,recontot*sizeof(float));
float *uncerxbf=(float*) malloc(recontot*sizeof(float));
memset(uncerxbf,0,recontot*sizeof(float));
float *uncerybf=(float*) malloc(recontot*sizeof(float));
memset(uncerybf,0,recontot*sizeof(float));
float *uncerbgbf=(float*) malloc(recontot*sizeof(float));
memset(uncerbgbf,0,recontot*sizeof(float));
float *covbf=(float*) malloc(recontot*sizeof(float));
memset(covbf,0,recontot*sizeof(float));
float *NLLRbf=(float*) malloc(recontot*sizeof(float));
memset(NLLRbf,0,recontot*sizeof(float));
float *fitNbf=(float*) malloc(recontot*sizeof(float));
memset(fitNbf,0,recontot*sizeof(float));
float *topbf=(float*) malloc(recontot*sizeof(float));
memset(topbf,0,recontot*sizeof(float));
float *leftbf=(float*) malloc(recontot*sizeof(float));
memset(leftbf,0,recontot*sizeof(float));
/* outsize2[0]=2*fitnum+1;
outsize2[1]=candicc;
plhs[1]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
out=(float *)mxGetData(plhs[1]); */
//adding x,y,t,crlbx,crlby,crlb-bg,NLLR.
//test
//float *cov;
//plhs[4]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
//cov=(float *)mxGetData(plhs[4]);
//test-end
// collect coordinates that pass the CRLB filters, before the duplicate-removal step below.
int reconii=0;
for(ii=0;ii<candicc;ii++){
for(jj=0;jj<fitnum;jj++){
if ((xx[fitnum*ii+jj]>0.01f)&&(xx[fitnum*ii+jj]<(boxsz-1.01f))&&
(yy[fitnum*ii+jj]>0.01f)&&(yy[fitnum*ii+jj]<(boxsz-1.01f))&&
(nn[fitnum*ii+jj]>0)&&(CRLBarray[(fitnum*2+1)*ii+jj]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj]>0.0001f)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]>0.0001f)
&&(covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1]<1)
&&(covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1]>-1))
{
//reconX[reconii]=zoom*(xx[fitnum*ii+jj]+left[ii]);
//reconY[reconii]=zoom*(yy[fitnum*ii+jj]+top[ii]);
//output section
xbf[reconii]=xx[fitnum*ii+jj]+left[ii];
topbf[reconii]=top[ii];
leftbf[reconii]=left[ii];
ybf[reconii]=yy[fitnum*ii+jj]+top[ii];
bbf[reconii]=bbb[fitnum*ii+jj];
tbf[reconii]=candz[ii];
uncerxbf[reconii]=CRLBarray[(fitnum*2+1)*ii+jj];
uncerybf[reconii]=CRLBarray[(fitnum*2+1)*ii+jj+1];
nbf[reconii]=nn[fitnum*ii+jj];
uncerbgbf[reconii]=CRLBarray[(fitnum*2+1)*ii+fittmp[ii]*2];
covbf[reconii]=covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1];
NLLRbf[reconii]=div[ii];
fitNbf[reconii]=(float) fittot[ii];
//reconN[reconii]=nn[fitnum*ii+jj];
//reconLAx[reconii]=zoom*CRLBarray[(fitnum*2+1)*ii+jj];
////crlbx[reconii]=CRLBarray[(fitnum*2+1)*ii+jj];
//reconLAy[reconii]=zoom*CRLBarray[(fitnum*2+1)*ii+jj+1];
//reconcov[reconii]=covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1];
////cov[reconii]=covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1];
reconii++;
}
}
}
// filter out repeated (duplicate) localizations.
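// Within each frame, if two surviving localizations from different sub-regions
// lie closer than the combined (quadrature-summed) x/y uncertainty of the less
// precise one, the less precise localization (larger uncerx+uncery) is zeroed
// out so only the better estimate survives.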
int tstart=0;
int framereg=0;
int modindex=0;
float dis=-10;
for(ii=0;ii<reconii;ii++)
{
framereg=(int) tbf[ii];
if(xbf[ii]!=0)
{
for(jj=tstart;jj<reconii;jj++)
{
if(xbf[ii]==0) break;
if(tbf[jj]!=framereg)
{
if(ii==jj)
tstart=jj+1;
break;
}
if((xbf[jj]!=0)&&(ii!=jj)
&&(!((topbf[ii]==topbf[jj])&&(leftbf[ii]==leftbf[jj]))))
{
modindex=((uncerxbf[ii]+uncerybf[ii])<(uncerxbf[jj]+uncerybf[jj]))?jj:ii;
dis=sqrt(pow(xbf[ii]-xbf[jj],2)+pow(ybf[ii]-ybf[jj],2));
if (dis<=1*sqrt(pow(uncerxbf[modindex],2)+pow(uncerybf[modindex],2)))
{
xbf[modindex]=0;
ybf[modindex]=0;
}
}
}
}
}
// counting procedure.
int indextot=0;
for(ii=0;ii<reconii;ii++){
if(xbf[ii]!=0)
{
indextot=indextot+1;
}
}
// allocation of reconstruction candidates
float *reconX=(float*) malloc(indextot*sizeof(float));
memset(reconX,0,indextot*sizeof(float));
float *reconY=(float*) malloc(indextot*sizeof(float));
memset(reconY,0,indextot*sizeof(float));
float *reconN=(float*) malloc(indextot*sizeof(float));
memset(reconN,0,indextot*sizeof(float));
float *reconLAx=(float*) malloc(indextot*sizeof(float));
memset(reconLAx,0,indextot*sizeof(float));
float *reconLAy=(float*) malloc(indextot*sizeof(float));
memset(reconLAy,0,indextot*sizeof(float));
float *reconcov=(float*) malloc(indextot*sizeof(float));
memset(reconcov,0,indextot*sizeof(float));
// allocation of output candidates
float * xout,* yout,* tout,* uncerxout,* unceryout;
float *uncerbgout;
float * covout,* NLLRout,*fitNout,*bout, *nout;
outsize2[0]=indextot;
outsize2[1]=1;
plhs[1]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
xout=(float *)mxGetData(plhs[1]);
plhs[2]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
yout=(float *)mxGetData(plhs[2]);
plhs[3]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
tout=(float *)mxGetData(plhs[3]);
plhs[4]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
uncerxout=(float *)mxGetData(plhs[4]);
plhs[5]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
unceryout=(float *)mxGetData(plhs[5]);
plhs[6]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
uncerbgout=(float *)mxGetData(plhs[6]);
plhs[7]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
covout=(float *)mxGetData(plhs[7]);
plhs[8]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
NLLRout=(float *)mxGetData(plhs[8]);
plhs[9]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
fitNout=(float*)mxGetData(plhs[9]);
plhs[10]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
bout=(float *)mxGetData(plhs[10]);
plhs[11]=mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
nout=(float *)mxGetData(plhs[11]);
//record coordinates for output and reconstruction
int recdx=0;
for(ii=0;ii<reconii;ii++)
{
if(xbf[ii]!=0)
{ //record
//reconstruction candidates
reconX[recdx]=xbf[ii]*zoom;
reconY[recdx]=ybf[ii]*zoom;
reconN[recdx]=nbf[ii];
reconLAx[recdx]=uncerxbf[ii]*zoom;
reconLAy[recdx]=uncerybf[ii]*zoom;
reconcov[recdx]=covbf[ii];
//output candidates
xout[recdx]=xbf[ii];
yout[recdx]=ybf[ii];
tout[recdx]=tbf[ii];
uncerxout[recdx]=uncerxbf[ii];
unceryout[recdx]=uncerybf[ii];
uncerbgout[recdx]=uncerbgbf[ii];
covout[recdx]=covbf[ii];
NLLRout[recdx]=NLLRbf[ii];
fitNout[recdx]=fitNbf[ii];
bout[recdx]=bbf[ii];
nout[recdx]=nbf[ii];
recdx++;
}
}
printTiming("t3",t3);
QueryPerformanceCounter(&t4);
//reconstruction!
outsize[0]=(int) floor(data_xsz*zoom);
outsize[1]=(int) floor(data_ysz*zoom);
plhs[0]= mxCreateNumericArray(2, outsize, mxSINGLE_CLASS, mxREAL);
SuperResim=(float *)mxGetData(plhs[0]);
int xsz=(int) outsize[0];
int ysz=(int) outsize[1];
/*outsize2[0]=20;
outsize2[1]=20;
outsize2[2]=reconii;
plhs[1]= mxCreateNumericArray(Adims ,outsize2, mxSINGLE_CLASS, mxREAL);
out=(float *)mxGetData(plhs[1]);*/
GPUgenerateblobs(indextot, xsz, ysz,reconX,reconY, reconN, reconLAx,reconLAy,reconcov,SuperResim);
printTiming("t4", t4);
free(CRLBarray);
free(covariance);
free(reconX);
free(reconY);
free(reconN);
free(reconLAx);
free(reconLAy);
free(reconcov);
free(left);
free(top);
free(in_sub);
free(bbb);
free(div);
free(xx);
free(yy);
free(nn);
free(fittot);
free(fittmp);
free(xbf);
free(ybf);
free(tbf);
free(nbf);
free(bbf);
free(uncerxbf);
free(uncerybf);
free(uncerbgbf);
free(covbf);
free(NLLRbf);
free(fitNbf);
} | 2761b79251ca8fcbe91041d42817fe029746e906.cu | /*! \file fitmultimain.cu
* \author Fang Huang
* \date October 10, 2010
* \brief This file contains the mexFunction() call and relevant matlab
* interface code.
*/
/*
*float Floating point number. 4bytes +/- 3.4e +/- 38 (~7 digits)
*/
#include <windows.h>
#pragma comment(lib, "kernel32.lib")
#include <math.h>
#include "mex.h"
#include <stdlib.h>
#include <stdio.h>
#include "image_operation.h"
#include "filter.h"
#include "definitions.h"
void maxfilter(const float * data , float * result, const int fsz , const int xsz, const int ysz, const int tsz);
void cudasafe( cudaError_t err, char* str, int lineNumber);
void CUDAERROR(const char *instr,int lineNumber);
void CUDAERROR(const char *instr,int lineNumber) {
cudaError_t errornum;
const char *str;
if (errornum=cudaGetLastError()) {
//reset all cuda devices
int deviceCount = 0;
int ii = 0;
cudasafe(cudaGetDeviceCount(&deviceCount),"cudaGetDeviceCount",__LINE__ ); //query number of GPUs
for (ii = 0; ii< deviceCount;ii++) {
cudaSetDevice(ii);
cudaDeviceReset();
}
str=cudaGetErrorString(errornum);
//mexPrintf("gpuGaussNDmle(line %i): %s in %s\n",lineNumber, str, instr);
cudaDeviceReset();
mexErrMsgIdAndTxt("gpuGaussNDmle:cudaFail","gpuGaussNDmle(line %i): %s in %s\n",lineNumber, str, instr);
exit(1); // might not stop matlab
}
}
void cudasafe( cudaError_t err, char* str, int lineNumber)
{
if (err != cudaSuccess)
{
//reset all cuda devices
int deviceCount = 0;
int ii = 0;
cudasafe(cudaGetDeviceCount(&deviceCount),"cudaGetDeviceCount",__LINE__ ); //query number of GPUs
for (ii = 0; ii< deviceCount;ii++) {
cudaSetDevice(ii);
cudaDeviceReset();
}
mexErrMsgIdAndTxt("gpuGaussNDmle:cudaFail","%s failed with error code %i at line %d\n",str,err, lineNumber);
exit(1); // might not stop matlab
}
}
bool mxIsScalar(const mxArray *array_ptr)
{
/*!
* \brief Check that the passed in mxArray is in fact a single number.
* \param array_ptr pointer to the array to check.
* \return bool
*/
const mwSize *size;
const size_t dims = mxGetNumberOfDimensions(array_ptr);
size = mxGetDimensions(array_ptr);
if (dims != 2) return false;
if (size[0] != 1) return false;
if (size[1] != 1) return false;
return true;
}
void printTiming(char *var, LARGE_INTEGER start) {
/*!
* \brief This returns the runtime information to Matlab using the high precision timer.
* \param var Name of the variable to assign the timing info to.
* \param start The starting time for this event.
*/
LARGE_INTEGER freq, stop;
char timing[100];
QueryPerformanceFrequency( &freq );
QueryPerformanceCounter(&stop);
sprintf_s(timing,(size_t) 100,"%s=%f", var, (double)(stop.QuadPart-start.QuadPart)/freq.QuadPart);
mexEvalString(timing);
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
/*!
* \brief Entry point in the code for Matlab. Equivalent to main().
* \param nlhs number of left hand mxArrays to return
* \param plhs array of pointers to the output mxArrays
* \param nrhs number of input mxArrays
* \param prhs array of pointers to the input mxArrays.
*/
int ii=0,jj=0,kk=0,tt=0,candicc=0,candi=0; //!< various counters used throughout the code
const mwSize *pSizeA=0;
mwSize outsize[2],outsize2[2];
const float *indata=0;
int boxsz=0;
float psfsigma=0;
int fitnum=0;
float Nave=0;
float *unif1=0,*unif2=0,*unif=0; //!< uniform-filtered copies of the data
float *maxf=0; //!< maximum filtered copy of the subregion
float *candx=0,*candy=0,*candz=0,*left=0,*top=0,*in_sub=0;
//float *out;
float threshold;
size_t Adims,data_bytes;
float *xx,*yy,*nn,*x,*y,*n,*bb,*div,testx,testy,testn;
float *CRLBarray,*covariance;
float *SuperResim;
float resolution;
float pixelsz;
float zoom;
float Nsigmafit=0;
//fun with timing
LARGE_INTEGER t1, t2, t3, t4;
printf("This code developed development by Lidke Lab at UNM. \nThe author(Fang Huang, Keith Lidke, and Lidke Lab's other group member)\nreserve all rights of using this code.\nReferece: Simultaneous multiple-emitter fitting for single molecule super-resolution imaging, \nFang Huang, Samantha L. Schwartz, Jason M. Byars, and Keith A. Lidke,\nBiomedical Optics Express, Vol. 2, Issue 5, pp. 1377-1393 (2011)\n" );
printf("Start Fitting...\n" );
//mexEvalString("pause(0.1)");
if (nrhs != 9) {// 0 1 2 3 4 5 6 7 8
mexErrMsgIdAndTxt("MATLAB:WrongNumberOfInputs", "This function needs 10 inputs: 3D-image(x,y,frame,matlab matrix,single),PSFsigma(in pixels),initial estimated intensity,fit_type(1-5),pvaluethreshold (0.05),resolution(nm),pixelsz(nm),zm,boxsz");
}
Adims=mxGetNumberOfDimensions(prhs[0]);
if (mxGetClassID(prhs[0])!=mxSINGLE_CLASS)
mexErrMsgIdAndTxt("Multifit:WrongPrecision","3D-Data must be comprised of single floats! Please use single(data) to convert into correct input data type\n");
if (Adims != 3)
mexErrMsgIdAndTxt("Multifit:WrongDimensions", "Only 3D data set can be fit by multi fluorophore fitting. X by Y by Frame");
pSizeA = mxGetDimensions(prhs[0]);
const int data_xsz=(int) pSizeA[0];
const int data_ysz=(int) pSizeA[1];
const int data_frame=(int) pSizeA[2];
if (data_xsz <16 || data_ysz <16)
mexErrMsgTxt("3D-Data must be at least 16 by 16 by frame for this fitting.");
if (!mxIsNumeric(prhs[1]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "psfsigma must be a numeric value\n");
if (!mxIsNumeric(prhs[2]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "estimated must be a numeric value\n");
if (!mxIsNumeric(prhs[3]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "fit_type must be a numeric value\n");
if (!mxIsNumeric(prhs[4]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "threshold must be a numeric value\n");
if (!mxIsNumeric(prhs[5]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "resolution must be a numeric value\n");
if (!mxIsNumeric(prhs[6]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "pixel must be a numeric value\n");
if (!mxIsNumeric(prhs[7]))
mexErrMsgIdAndTxt("Multifit:NotNumeric", "zoom must be a numeric value\n");
if (!mxIsScalar(prhs[1]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","psfsigma must be scalar\n");
if (!mxIsScalar(prhs[2]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","estimated intensity must be scalar\n");
if (!mxIsScalar(prhs[3]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","fit_type must be scalar\n");
if (!mxIsScalar(prhs[4]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","threshold must be scalar\n");
if (!mxIsScalar(prhs[5]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","resolution must be scalar\n");
if (!mxIsScalar(prhs[6]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","pixel size must be scalar\n");
if (!mxIsScalar(prhs[7]))
mexErrMsgIdAndTxt("Multifit:WrongDimensions","zoom must be scalar\n");
indata=(float *)mxGetData(prhs[0]);
psfsigma=(float)mxGetScalar(prhs[1]);
Nave=(float)mxGetScalar(prhs[2]);
fitnum=(int)mxGetScalar(prhs[3]);
threshold=(float)mxGetScalar(prhs[4]);
resolution=(float)mxGetScalar(prhs[5]);
pixelsz=(float)mxGetScalar(prhs[6]);
zoom=(float)mxGetScalar(prhs[7]);
boxsz=(int)mxGetScalar(prhs[8]);
//Nsigmafit=(float)mxGetScalar(prhs[9]);
Nsigmafit=0.00001; // fixed intensity for position initial estimates; this restriction is lifted on the 2nd round of intensity/position fitting.
if (psfsigma<=0.2f) {
mexErrMsgIdAndTxt("Multifit:InputOutofRange","PSFsigma must be greater than 0.2.\nThe camera pixel size is too large." );
}
if (psfsigma>4.0f) {
mexErrMsgIdAndTxt("Multifit:InputOutofRange","PSFsigma should be less than 4.0.\nPlease check your sigma value." );
}
if ((fitnum<1)||(fitnum>8))
mexErrMsgTxt("Fitting model number out of range. Fitnum has to be within the range of 1-8.");
//if (threshold<=0 || threshold > 1000)
// mexErrMsgIdAndTxt("Multifit:InputOutofRange","NLLR threshold is out of range. Please use any number between 0.0001-1000");
if (resolution<=0) {
resolution=20;
mexErrMsgIdAndTxt("Multifit:InputOutofRange","resolution must be greater than 0. Default value is 20(nm).\n" );
}
if (pixelsz<=0) {
pixelsz=106;
mexErrMsgIdAndTxt("Multifit:InputOutofRange","Pixel Size must be greater than 0. Default value is 106(nm).\n" );
}
//What is smallest possible value?
if (zoom<=0) {
zoom=20;
mexErrMsgIdAndTxt("Multifit:InputOutofRange","Zoom must be greater than 0. Default value is 30.\n" );
}
if (zoom - floor(zoom) > 0.000001f) {
zoom = ceil(zoom);
mexWarnMsgIdAndTxt("Multifit:InputOutofRange","Zoom should be an integer. Rounding up to next whole number value.\n");
}
data_bytes=data_xsz*data_ysz*data_frame*sizeof(float);
indata=(float *)mxGetData(prhs[0]);
data_bytes=((float) pSizeA[0]*pSizeA[1]*pSizeA[2])*sizeof(float);
// uniform and max filters are used to find candidate positions around which sub-region boxes are cut.
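// Reading of the detection scheme below (a sketch, not original documentation):
// unif1 - unif2 is a difference of two box (uniform) filters, i.e. a crude
// band-pass that removes the local background, and the max filter over a
// (boxsz-1)-sized window marks local maxima; a pixel whose band-passed value is
// both ~the local maximum and above minI becomes a candidate centre for a
// boxsz x boxsz sub-region cut later by MakeSubregions.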
const int unifsz=(int) round(psfsigma*2+1);
const int maxfsz=(int) (boxsz-1);
unif1=(float*) malloc (data_bytes); //need to be freed.
memset(unif1,0,data_bytes);
unif2=(float*) malloc (data_bytes); //need to be freed.
memset(unif2,0,data_bytes);
unif=(float*) malloc (data_bytes); //need to be freed.
memset(unif,0,data_bytes);
maxf=(float*) malloc (data_bytes); //need to be freed.
memset(maxf,0,data_bytes);
QueryPerformanceCounter(&t1);
unifilter(indata , unif1 , unifsz,data_xsz,data_ysz,data_frame);
unifilter(indata , unif2 , 2*unifsz,data_xsz,data_ysz,data_frame);
//int off = 0;
/*for(tt=0;tt<data_frame;tt++) {
for(jj=0;jj<data_ysz;jj++) {
for(ii=0;ii<data_xsz;ii++) {
off = tt*data_xsz*data_ysz+data_xsz*jj+ii;
unif[tt*data_xsz*data_ysz+data_xsz*jj+ii]=unif1[off]-unif2[off];
}
}
}*/
for (ii=0; ii<data_frame*data_ysz*data_xsz; ii++)
unif[ii]=unif1[ii]-unif2[ii];
free(unif1);
free(unif2);
maxfilter(unif , maxf , maxfsz,data_xsz,data_ysz,data_frame);
float minI=Nave/2/pi/2/psfsigma/psfsigma/3;
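// Note on the threshold above (an interpretation, not documented in the source):
// the peak pixel of a 2-D Gaussian spot containing Nave photons is roughly
// Nave/(2*pi*psfsigma^2); the extra factors of 2 and 3 appear to lower that
// estimate so dimmer candidates still pass the unif[kk] >= minI test below.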
//minI=25;
candicc=0;
//find number of candidates for malloc
/*for(tt=0;tt<data_frame;tt++)
for(ii=0;ii<data_xsz;ii++)
for(jj=0;jj<data_ysz;jj++)
{
kk=tt*data_xsz*data_ysz+data_xsz*jj+ii;
if (((0.999*maxf[kk])<=unif[kk]) && (unif[kk]>=minI))
candicc++;
}*/
for (ii=0; ii<data_frame*data_ysz*data_xsz; ii++)
if (((0.999*maxf[ii])<=unif[ii]) && (unif[ii]>=minI))
candicc++;
//alloc memory
candx=(float*) malloc (candicc*sizeof(float));//cand x, need to be freed.
memset(candx,0,candicc*sizeof(float));
candy=(float*) malloc (candicc*sizeof(float));//cand y, need to be freed.
memset(candy,0,candicc*sizeof(float));
candz=(float*) malloc (candicc*sizeof(float));//cand z, need to be freed.
memset(candz,0,candicc*sizeof(float));
candi=0;
//store candidates
for(tt=0;tt<data_frame;tt++)
for(jj=0;jj<data_ysz;jj++)
for(ii=0;ii<data_xsz;ii++)
{
kk=tt*data_xsz*data_ysz+data_xsz*jj+ii;
if (((0.999f*maxf[kk])<=unif[kk]) && (unif[kk]>=minI))
{
candx[candi]=(float) ii;//try
candy[candi]=(float) jj;
candz[candi]=(float) tt;
candi++;
}
}
free(unif);
free(maxf);
// Cut subregions around found candidates
//const int boxsz=5*psfsigma+1;
left=(float*) malloc (candicc*sizeof(float));
memset(left,0,candicc*sizeof(float));// left for cut sub regions, need to be freed
top=(float*) malloc (candicc*sizeof(float)); // top for cut sub regions, need to be freed
memset(top,0,candicc*sizeof(float));
in_sub=(float*) malloc (boxsz*boxsz*candicc*sizeof(float));
memset(in_sub,0,boxsz*boxsz*candicc*sizeof(float));
//test
/* outsize2[0]=boxsz;
outsize2[1]=boxsz;
outsize2[2]=candicc;
plhs[1]= mxCreateNumericArray(Adims ,outsize2, mxSINGLE_CLASS, mxREAL);
out=(float *)mxGetData(plhs[1]); */
MakeSubregions(candicc, indata, data_xsz, data_ysz, data_frame, candx,candy, candz, (float) boxsz, in_sub,left,top);
free(candx);
free(candy);
printTiming("t1",t1);
//Fitting started
x=(float*) malloc (fitnum*candicc*sizeof(float));//x, need to be freed.
memset(x,0,fitnum*candicc*sizeof(float));
y=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(y,0,fitnum*candicc*sizeof(float));
n=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(n,0,fitnum*candicc*sizeof(float));
bb=(float*) malloc (candicc*sizeof(float));//bg, need to be freed.
memset(bb,0,candicc*sizeof(float));
div=(float*) malloc (candicc*sizeof(float));//bg, need to be freed.
memset(div,0,candicc*sizeof(float));
//const float Nsigmafit=0.0001;
const int iterations=50;
/*float *out3;
mwSize outsize3[2];
outsize3[0]=candicc;
outsize3[1]=1;
//outsize3[2]=candicc;
plhs[3]= mxCreateNumericArray(2 ,outsize3, mxSINGLE_CLASS, mxREAL);
out3=(float *)mxGetData(plhs[3]);
*/
QueryPerformanceCounter(&t2);
GPUmultifit(candicc,in_sub,boxsz,psfsigma,iterations,fitnum,Nave,Nsigmafit,threshold,x,y,n,bb,div);
printTiming("t2",t2);
QueryPerformanceCounter(&t3);
mexEvalString("s3=tic");
float *bbb,testb;
// Boundary limitation
xx=(float*) malloc (fitnum*candicc*sizeof(float));//x, need to be freed.
memset(xx,0,fitnum*candicc*sizeof(float));
yy=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(yy,0,fitnum*candicc*sizeof(float));
nn=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(nn,0,fitnum*candicc*sizeof(float));
bbb=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(bbb,0,fitnum*candicc*sizeof(float));
/*float *out3;
mwSize outsize3[2];
outsize3[0]=fitnum;
outsize3[1]=candicc;
//outsize3[2]=candicc;
plhs[3]= mxCreateNumericArray(2 ,outsize3, mxSINGLE_CLASS, mxREAL);
out3=(float *)mxGetData(plhs[3]);
*/
int *fittot;
fittot=(int*) malloc(candicc*sizeof(int));
memset(fittot,0,candicc*sizeof(int));
for(ii=0;ii<candicc;ii++)
{
kk=0;
for(jj=0;jj<fitnum;jj++)
{
if ( (abs(x[fitnum*ii+jj])>0.001) && (abs(x[fitnum*ii+jj])<100) )
fittot[ii]++;
//out3[ii*fitnum+jj]=x[ii*fitnum+jj];//testx
testx=x[ii*fitnum+jj];
testy=y[ii*fitnum+jj];
testn=n[ii*fitnum+jj];
testb=bb[ii];
if (true)
{
xx[ii*fitnum+kk]=testx;//testx
yy[ii*fitnum+kk]=testy;
nn[ii*fitnum+kk]=testn;
bbb[ii*fitnum+kk]=testb;
kk++;
}
}
}
free(x);
free(y);
free(n);
free(bb);
//Calculate CRLB
CRLBarray=(float*) malloc ((fitnum*2+1)*candicc*sizeof(float));//x, need to be freed.
memset(CRLBarray,0,(fitnum*2+1)*candicc*sizeof(float));
covariance=(float*) malloc (fitnum*candicc*sizeof(float));//y, need to be freed.
memset(covariance,0,fitnum*candicc*sizeof(float));
//test
GPUCRLB(candicc, boxsz, psfsigma,fitnum, xx, yy, nn, bbb, Nave, Nsigmafit,CRLBarray,covariance);
int *fittmp;
fittmp=(int*) malloc(candicc*sizeof(int));
memset(fittmp,0,candicc*sizeof(int));
float LAlim=resolution/pixelsz/1.7f;
// filtering and assembling according to CRLB
int recontot=0;
for(ii=0;ii<candicc;ii++){
for(jj=0;jj<fitnum;jj++){
if ( (abs(xx[fitnum*ii+jj])>0.001) && (abs(xx[fitnum*ii+jj])<100) ) {
fittmp[ii]++;
}
if ((xx[fitnum*ii+jj]>0.01f)&&(xx[fitnum*ii+jj]<(boxsz-1.01f))&&
(yy[fitnum*ii+jj]>0.01f)&&(yy[fitnum*ii+jj]<(boxsz-1.01f))&&
(nn[fitnum*ii+jj]>0)&&(CRLBarray[(fitnum*2+1)*ii+jj]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj]>0.0001f)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]>0.0001f))
recontot++;
}
}
float *xbf=(float*) malloc(recontot*sizeof(float));
memset(xbf,0,recontot*sizeof(float));
float *bbf=(float*) malloc(recontot*sizeof(float));
memset(bbf,0,recontot*sizeof(float));
float *ybf=(float*) malloc(recontot*sizeof(float));
memset(ybf,0,recontot*sizeof(float));
float *nbf=(float*) malloc(recontot*sizeof(float));
memset(nbf,0,recontot*sizeof(float));
float *tbf=(float*) malloc(recontot*sizeof(float));
memset(tbf,0,recontot*sizeof(float));
float *uncerxbf=(float*) malloc(recontot*sizeof(float));
memset(uncerxbf,0,recontot*sizeof(float));
float *uncerybf=(float*) malloc(recontot*sizeof(float));
memset(uncerybf,0,recontot*sizeof(float));
float *uncerbgbf=(float*) malloc(recontot*sizeof(float));
memset(uncerbgbf,0,recontot*sizeof(float));
float *covbf=(float*) malloc(recontot*sizeof(float));
memset(covbf,0,recontot*sizeof(float));
float *NLLRbf=(float*) malloc(recontot*sizeof(float));
memset(NLLRbf,0,recontot*sizeof(float));
float *fitNbf=(float*) malloc(recontot*sizeof(float));
memset(fitNbf,0,recontot*sizeof(float));
float *topbf=(float*) malloc(recontot*sizeof(float));
memset(topbf,0,recontot*sizeof(float));
float *leftbf=(float*) malloc(recontot*sizeof(float));
memset(leftbf,0,recontot*sizeof(float));
/* outsize2[0]=2*fitnum+1;
outsize2[1]=candicc;
plhs[1]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
out=(float *)mxGetData(plhs[1]); */
//adding x,y,t,crlbx,crlby,crlb-bg,NLLR.
//test
//float *cov;
//plhs[4]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
//cov=(float *)mxGetData(plhs[4]);
//test-end
// collect coordinates that pass the CRLB filters, before the duplicate-removal step below.
int reconii=0;
for(ii=0;ii<candicc;ii++){
for(jj=0;jj<fitnum;jj++){
if ((xx[fitnum*ii+jj]>0.01f)&&(xx[fitnum*ii+jj]<(boxsz-1.01f))&&
(yy[fitnum*ii+jj]>0.01f)&&(yy[fitnum*ii+jj]<(boxsz-1.01f))&&
(nn[fitnum*ii+jj]>0)&&(CRLBarray[(fitnum*2+1)*ii+jj]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj]>0.0001f)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]<LAlim)&&
(CRLBarray[(fitnum*2+1)*ii+jj+1]>0.0001f)
&&(covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1]<1)
&&(covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1]>-1))
{
//reconX[reconii]=zoom*(xx[fitnum*ii+jj]+left[ii]);
//reconY[reconii]=zoom*(yy[fitnum*ii+jj]+top[ii]);
//output section
xbf[reconii]=xx[fitnum*ii+jj]+left[ii];
topbf[reconii]=top[ii];
leftbf[reconii]=left[ii];
ybf[reconii]=yy[fitnum*ii+jj]+top[ii];
bbf[reconii]=bbb[fitnum*ii+jj];
tbf[reconii]=candz[ii];
uncerxbf[reconii]=CRLBarray[(fitnum*2+1)*ii+jj];
uncerybf[reconii]=CRLBarray[(fitnum*2+1)*ii+jj+1];
nbf[reconii]=nn[fitnum*ii+jj];
uncerbgbf[reconii]=CRLBarray[(fitnum*2+1)*ii+fittmp[ii]*2];
covbf[reconii]=covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1];
NLLRbf[reconii]=div[ii];
fitNbf[reconii]=(float) fittot[ii];
//reconN[reconii]=nn[fitnum*ii+jj];
//reconLAx[reconii]=zoom*CRLBarray[(fitnum*2+1)*ii+jj];
////crlbx[reconii]=CRLBarray[(fitnum*2+1)*ii+jj];
//reconLAy[reconii]=zoom*CRLBarray[(fitnum*2+1)*ii+jj+1];
//reconcov[reconii]=covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1];
////cov[reconii]=covariance[fitnum*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj]/CRLBarray[(fitnum*2+1)*ii+jj+1];
reconii++;
}
}
}
// filter out repeated (duplicate) localizations.
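// Within each frame, if two surviving localizations from different sub-regions
// lie closer than the combined (quadrature-summed) x/y uncertainty of the less
// precise one, the less precise localization (larger uncerx+uncery) is zeroed
// out so only the better estimate survives.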
int tstart=0;
int framereg=0;
int modindex=0;
float dis=-10;
for(ii=0;ii<reconii;ii++)
{
framereg=(int) tbf[ii];
if(xbf[ii]!=0)
{
for(jj=tstart;jj<reconii;jj++)
{
if(xbf[ii]==0) break;
if(tbf[jj]!=framereg)
{
if(ii==jj)
tstart=jj+1;
break;
}
if((xbf[jj]!=0)&&(ii!=jj)
&&(!((topbf[ii]==topbf[jj])&&(leftbf[ii]==leftbf[jj]))))
{
modindex=((uncerxbf[ii]+uncerybf[ii])<(uncerxbf[jj]+uncerybf[jj]))?jj:ii;
dis=sqrt(pow(xbf[ii]-xbf[jj],2)+pow(ybf[ii]-ybf[jj],2));
if (dis<=1*sqrt(pow(uncerxbf[modindex],2)+pow(uncerybf[modindex],2)))
{
xbf[modindex]=0;
ybf[modindex]=0;
}
}
}
}
}
// counting procedure.
int indextot=0;
for(ii=0;ii<reconii;ii++){
if(xbf[ii]!=0)
{
indextot=indextot+1;
}
}
// allocation of reconstruction candidates
float *reconX=(float*) malloc(indextot*sizeof(float));
memset(reconX,0,indextot*sizeof(float));
float *reconY=(float*) malloc(indextot*sizeof(float));
memset(reconY,0,indextot*sizeof(float));
float *reconN=(float*) malloc(indextot*sizeof(float));
memset(reconN,0,indextot*sizeof(float));
float *reconLAx=(float*) malloc(indextot*sizeof(float));
memset(reconLAx,0,indextot*sizeof(float));
float *reconLAy=(float*) malloc(indextot*sizeof(float));
memset(reconLAy,0,indextot*sizeof(float));
float *reconcov=(float*) malloc(indextot*sizeof(float));
memset(reconcov,0,indextot*sizeof(float));
// allocation of output candidates
float * xout,* yout,* tout,* uncerxout,* unceryout;
float *uncerbgout;
float * covout,* NLLRout,*fitNout,*bout, *nout;
outsize2[0]=indextot;
outsize2[1]=1;
plhs[1]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
xout=(float *)mxGetData(plhs[1]);
plhs[2]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
yout=(float *)mxGetData(plhs[2]);
plhs[3]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
tout=(float *)mxGetData(plhs[3]);
plhs[4]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
uncerxout=(float *)mxGetData(plhs[4]);
plhs[5]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
unceryout=(float *)mxGetData(plhs[5]);
plhs[6]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
uncerbgout=(float *)mxGetData(plhs[6]);
plhs[7]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
covout=(float *)mxGetData(plhs[7]);
plhs[8]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
NLLRout=(float *)mxGetData(plhs[8]);
plhs[9]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
fitNout=(float*)mxGetData(plhs[9]);
plhs[10]= mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
bout=(float *)mxGetData(plhs[10]);
plhs[11]=mxCreateNumericArray(2 ,outsize2, mxSINGLE_CLASS, mxREAL);
nout=(float *)mxGetData(plhs[11]);
//record coordinates for output and reconstruction
int recdx=0;
for(ii=0;ii<reconii;ii++)
{
if(xbf[ii]!=0)
{ //record
//reconstruction candidates
reconX[recdx]=xbf[ii]*zoom;
reconY[recdx]=ybf[ii]*zoom;
reconN[recdx]=nbf[ii];
reconLAx[recdx]=uncerxbf[ii]*zoom;
reconLAy[recdx]=uncerybf[ii]*zoom;
reconcov[recdx]=covbf[ii];
//output candidates
xout[recdx]=xbf[ii];
yout[recdx]=ybf[ii];
tout[recdx]=tbf[ii];
uncerxout[recdx]=uncerxbf[ii];
unceryout[recdx]=uncerybf[ii];
uncerbgout[recdx]=uncerbgbf[ii];
covout[recdx]=covbf[ii];
NLLRout[recdx]=NLLRbf[ii];
fitNout[recdx]=fitNbf[ii];
bout[recdx]=bbf[ii];
nout[recdx]=nbf[ii];
recdx++;
}
}
printTiming("t3",t3);
QueryPerformanceCounter(&t4);
//reconstruction!
outsize[0]=(int) floor(data_xsz*zoom);
outsize[1]=(int) floor(data_ysz*zoom);
plhs[0]= mxCreateNumericArray(2, outsize, mxSINGLE_CLASS, mxREAL);
SuperResim=(float *)mxGetData(plhs[0]);
int xsz=(int) outsize[0];
int ysz=(int) outsize[1];
/*outsize2[0]=20;
outsize2[1]=20;
outsize2[2]=reconii;
plhs[1]= mxCreateNumericArray(Adims ,outsize2, mxSINGLE_CLASS, mxREAL);
out=(float *)mxGetData(plhs[1]);*/
GPUgenerateblobs(indextot, xsz, ysz,reconX,reconY, reconN, reconLAx,reconLAy,reconcov,SuperResim);
printTiming("t4", t4);
free(CRLBarray);
free(covariance);
free(reconX);
free(reconY);
free(reconN);
free(reconLAx);
free(reconLAy);
free(reconcov);
free(left);
free(top);
free(in_sub);
free(bbb);
free(div);
free(xx);
free(yy);
free(nn);
free(fittot);
free(fittmp);
free(xbf);
free(ybf);
free(tbf);
free(nbf);
free(bbf);
free(uncerxbf);
free(uncerybf);
free(uncerbgbf);
free(covbf);
free(NLLRbf);
free(fitNbf);
} |
0bb7840aeaf2832685034a9bf14a33ecf36cc551.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "normal_cuda/normal_cuda.hpp"
#include <vector>
#include <iostream>
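// Classical Jacobi eigenvalue iteration for a symmetric n x n matrix stored
// row-major in a. The matrix is diagonalized in place (the eigenvalues are left
// on its diagonal) while the accumulated rotations are collected in v, pairing
// eigenvector k of v with the eigenvalue left at a[k*n+k]. Iteration stops when
// the sum of off-diagonal magnitudes falls below eps or after iter_max sweeps;
// the return value is the number of iterations performed. (Summary added from
// reading the code below; not part of the original documentation.)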
__device__ int eigenJacobiMethod(float *a, float *v, int n, float eps = 1e-8, int iter_max = 100)
{
float *bim, *bjm;
float bii, bij, bjj, bji;
bim = new float[n];
bjm = new float[n];
for(int i = 0; i < n; ++i){
for(int j = 0; j < n; ++j){
v[i*n+j] = (i == j) ? 1.0 : 0.0;
}
}
int cnt = 0;
for(;;){
int i, j;
float x = 0.0;
for(int ia = 0; ia < n; ++ia){
for(int ja = 0; ja < n; ++ja){
int idx = ia*n+ja;
if(ia != ja && fabs(a[idx]) > x){
i = ia;
j = ja;
x = fabs(a[idx]);
}
}
}
float aii = a[i*n+i];
float ajj = a[j*n+j];
float aij = a[i*n+j];
float alpha, beta;
alpha = (aii-ajj)/2.0;
beta = sqrt(alpha*alpha+aij*aij);
float st, ct;
ct = sqrt((1.0+fabs(alpha)/beta)/2.0); // cos(theta), from the half-angle formula
st = (((aii-ajj) >= 0.0) ? 1.0 : -1.0)*aij/(2.0*beta*ct); // sin(theta)
// A = PAP : apply the rotation to rows and columns i, j
for(int m = 0; m < n; ++m){
if(m == i || m == j) continue;
float aim = a[i*n+m];
float ajm = a[j*n+m];
bim[m] = aim*ct+ajm*st;
bjm[m] = -aim*st+ajm*ct;
}
bii = aii*ct*ct+2.0*aij*ct*st+ajj*st*st;
bij = 0.0;
bjj = aii*st*st-2.0*aij*ct*st+ajj*ct*ct;
bji = 0.0;
for(int m = 0; m < n; ++m){
a[i*n+m] = a[m*n+i] = bim[m];
a[j*n+m] = a[m*n+j] = bjm[m];
}
a[i*n+i] = bii;
a[i*n+j] = bij;
a[j*n+j] = bjj;
a[j*n+i] = bji;
// V = PV : accumulate the rotation into the eigenvector matrix
for(int m = 0; m < n; ++m){
float vmi = v[m*n+i];
float vmj = v[m*n+j];
bim[m] = vmi*ct+vmj*st;
bjm[m] = -vmi*st+vmj*ct;
}
for(int m = 0; m < n; ++m){
v[m*n+i] = bim[m];
v[m*n+j] = bjm[m];
}
float e = 0.0;
for(int ja = 0; ja < n; ++ja){
for(int ia = 0; ia < n; ++ia){
if(ia != ja){
e += fabs(a[ja*n+ia]);
}
}
}
if(e < eps) break;
cnt++;
if(cnt > iter_max) break;
}
delete [] bim;
delete [] bjm;
return cnt;
}
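// covarianceGPU: 3x3 sample covariance of `point_size` xyz points laid out as
// [x0,y0,z0, x1,y1,z1, ...]:
//   S = (1/N) * sum_i (p_i - mean) * (p_i - mean)^T   (normalised by N, not N-1)
// The six unique elements sxx..szz are accumulated below by a single thread.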
__global__ void covarianceGPU(float* neighbor_points,float* matrix,int point_size) {
// compute the mean of the neighbor points
float x_average=0,y_average=0,z_average=0;
for(int i=0;i<point_size*3;i+=3){
x_average+=neighbor_points[i];
y_average+=neighbor_points[i+1];
z_average+=neighbor_points[i+2];
}
x_average/=point_size;
y_average/=point_size;
z_average/=point_size;
// compute the covariance elements
float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0;
for(int i=0;i<point_size*3;i+=3){
sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average);
syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average);
szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average);
sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average);
sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average);
syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average);
}
sxx/=point_size;
syy/=point_size;
szz/=point_size;
sxy/=point_size;
sxz/=point_size;
syz/=point_size;
// write the 3x3 covariance matrix (row-major)
matrix[0]=sxx;matrix[1]=sxy;matrix[2]=sxz;
matrix[3]=sxy;matrix[4]=syy;matrix[5]=syz;
matrix[6]=sxz;matrix[7]=syz;matrix[8]=szz;
}
__global__ void eigenGPU(float* neighbor_points,float* eigen_vector,float* eigen_value,int point_size) {
// compute the mean of the neighbor points
float x_average=0,y_average=0,z_average=0;
for(int i=0;i<point_size*3;i+=3){
x_average+=neighbor_points[i];
y_average+=neighbor_points[i+1];
z_average+=neighbor_points[i+2];
}
x_average/=point_size;
y_average/=point_size;
z_average/=point_size;
// compute the covariance elements
float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0;
for(int i=0;i<point_size*3;i+=3){
sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average);
syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average);
szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average);
sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average);
sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average);
syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average);
}
sxx/=point_size;
syy/=point_size;
szz/=point_size;
sxy/=point_size;
sxz/=point_size;
syz/=point_size;
// assemble the covariance matrix
float a[3*3]={
sxx,sxy,sxz,
sxy,syy,syz,
sxz,syz,szz,
};
// eigen decomposition (Jacobi method)
eigenJacobiMethod(a, eigen_vector, 3);
eigen_value[0]=a[0];
eigen_value[1]=a[4];
eigen_value[2]=a[8];
}
__global__ void normalGPU(float* neighbor_points,float* normal_vecotr,int point_size) {
// compute the mean of the neighbor points
float x_average=0,y_average=0,z_average=0;
for(int i=0;i<point_size*3;i+=3){
x_average+=neighbor_points[i];
y_average+=neighbor_points[i+1];
z_average+=neighbor_points[i+2];
}
x_average/=point_size;
y_average/=point_size;
z_average/=point_size;
// compute the covariance elements
float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0;
for(int i=0;i<point_size*3;i+=3){
sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average);
syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average);
szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average);
sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average);
sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average);
syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average);
}
sxx/=point_size;
syy/=point_size;
szz/=point_size;
sxy/=point_size;
sxz/=point_size;
syz/=point_size;
// assemble the covariance matrix
float a[3*3]={
sxx,sxy,sxz,
sxy,syy,syz,
sxz,syz,szz,
};
// printf(" %f ,%f ,%f \ncovariance matrix = %f ,%f ,%f \n %f ,%f ,%f \n\n",sxx,sxy,sxz,sxy,syy,syz,sxz,syz,szz);
// eigen decomposition (Jacobi method)
float eigen_vector[3 * 3];
eigenJacobiMethod(a, eigen_vector, 3);
// printf(" %f ,%f ,%f \neigen vector = %f ,%f ,%f \n %f ,%f ,%f \n\n",eigen_vector[0],eigen_vector[1],eigen_vector[2],eigen_vector[3],eigen_vector[4],eigen_vector[5],eigen_vector[6],eigen_vector[7],eigen_vector[8]);
float eigen_value[3];
eigen_value[0]=a[0];
eigen_value[1]=a[4];
eigen_value[2]=a[8];
int min_eigen_axis=0;
float min_eigen_value=eigen_value[0];
for(int i=1;i<3;i++){
if(eigen_value[i]<min_eigen_value){
min_eigen_value=eigen_value[i];
min_eigen_axis=i;
}
}
normal_vecotr[0]=eigen_vector[min_eigen_axis+0];
normal_vecotr[1]=eigen_vector[min_eigen_axis+3];
normal_vecotr[2]=eigen_vector[min_eigen_axis+6];
// printf("normals = %f, %f, %f\n\n\n\n",normal_vecotr[0],normal_vecotr[1],normal_vecotr[2]);
}
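// The host wrappers below copy one neighbourhood to the device, launch the kernel with a
// single block of one thread, and copy the result back, so each call runs serially on the GPU.
// A minimal usage sketch (hypothetical input, just to illustrate the calling convention):
//   std::vector<std::vector<float>> pts = {{0,0,0},{1,0,0},{0,1,0},{0,0,1}};
//   float C[3][3];
//   covariance(pts, C); // C now holds the 3x3 covariance of pts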
extern void covariance(std::vector<std::vector<float>> neighbor_points,float matrix[3][3]){
// variable declarations
std::vector<float> h_neighbor_points(neighbor_points.size() * 3);
std::vector<float> h_matrix(3 * 3);
float *d_neighbor_points, *d_matrix;
// allocate device memory
hipMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float));
hipMalloc((void **)&d_matrix, 3 * 3 * sizeof(float));
// flatten the points into a contiguous array
int k=0;
for(int i=0;i<neighbor_points.size();i++){
for(int j=0;j<3;j++){
h_neighbor_points[k]=neighbor_points[i][j];
k++;
}
}
// copy to the device and launch the kernel
hipMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( covarianceGPU), dim3(1), dim3(1), 0, 0, d_neighbor_points,d_matrix,neighbor_points.size());
// copy the result back to the host
hipMemcpy(&h_matrix[0], d_matrix, 3 * 3 * sizeof(float), hipMemcpyDeviceToHost);
// reshape into the output matrix
k=0;
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
matrix[i][j]=h_matrix[k];
k++;
}
}
// free device memory
hipFree(d_neighbor_points);
hipFree(d_matrix);
}
extern void eigen(std::vector<std::vector<float>> neighbor_points,float eigen_vector[3][3],float eigen_value[3]){
// variable declarations
std::vector<float> h_neighbor_points(neighbor_points.size() * 3);
std::vector<float> h_eigen_vector(3 * 3);
std::vector<float> h_eigen_value(3);
float *d_neighbor_points, *d_eigen_vector, *d_eigen_value;
// allocate device memory
hipMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float));
hipMalloc((void **)&d_eigen_vector, 3 * 3 * sizeof(float));
hipMalloc((void **)&d_eigen_value, 3 * sizeof(float));
// flatten the points into a contiguous array
int k=0;
for(int i=0;i<neighbor_points.size();i++){
for(int j=0;j<3;j++){
h_neighbor_points[k]=neighbor_points[i][j];
k++;
}
}
// copy to the device and launch the kernel
hipMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( eigenGPU), dim3(1), dim3(1), 0, 0, d_neighbor_points,d_eigen_vector,d_eigen_value,neighbor_points.size());
// copy the results back to the host
hipMemcpy(&h_eigen_vector[0], d_eigen_vector, 3 * 3 * sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&h_eigen_value[0], d_eigen_value, 3 * sizeof(float), hipMemcpyDeviceToHost);
// reshape into the output matrices
k=0;
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
eigen_vector[i][j]=h_eigen_vector[k];
eigen_value[i]=h_eigen_value[i];
k++;
}
}
// free device memory
hipFree(d_neighbor_points);
hipFree(d_eigen_vector);
hipFree(d_eigen_value);
}
extern void normal(std::vector<std::vector<float>> neighbor_points,float normal_vecotr[3]){
// variable declarations
std::vector<float> h_neighbor_points(neighbor_points.size() * 3);
std::vector<float> h_normal_vector(3);
float *d_neighbor_points, *d_normal_vecotr;
// allocate device memory
hipMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float));
hipMalloc((void **)&d_normal_vecotr, 3 * sizeof(float));
// flatten the points into a contiguous array
int k=0;
for(int i=0;i<neighbor_points.size();i++){
for(int j=0;j<3;j++){
h_neighbor_points[k]=neighbor_points[i][j];
k++;
}
}
// copy to the device and launch the kernel
hipMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( normalGPU), dim3(1), dim3(1), 0, 0, d_neighbor_points,d_normal_vecotr,neighbor_points.size());
// copy the result back to the host
hipMemcpy(&h_normal_vector[0], d_normal_vecotr, 3 * sizeof(float), hipMemcpyDeviceToHost);
// copy into the output vector
for(int i=0;i<3;i++){
normal_vecotr[i]=h_normal_vector[i];
}
// free device memory
hipFree(d_neighbor_points);
hipFree(d_normal_vecotr);
} | 0bb7840aeaf2832685034a9bf14a33ecf36cc551.cu | #include <stdio.h>
#include "normal_cuda/normal_cuda.hpp"
#include <vector>
#include <iostream>
__device__ int eigenJacobiMethod(float *a, float *v, int n, float eps = 1e-8, int iter_max = 100)
{
float *bim, *bjm;
float bii, bij, bjj, bji;
bim = new float[n];
bjm = new float[n];
for(int i = 0; i < n; ++i){
for(int j = 0; j < n; ++j){
v[i*n+j] = (i == j) ? 1.0 : 0.0;
}
}
int cnt = 0;
for(;;){
int i, j;
float x = 0.0;
for(int ia = 0; ia < n; ++ia){
for(int ja = 0; ja < n; ++ja){
int idx = ia*n+ja;
if(ia != ja && fabs(a[idx]) > x){
i = ia;
j = ja;
x = fabs(a[idx]);
}
}
}
float aii = a[i*n+i];
float ajj = a[j*n+j];
float aij = a[i*n+j];
float alpha, beta;
alpha = (aii-ajj)/2.0;
beta = sqrt(alpha*alpha+aij*aij);
float st, ct;
ct = sqrt((1.0+fabs(alpha)/beta)/2.0); // cosθ (half-angle formula)
st = (((aii-ajj) >= 0.0) ? 1.0 : -1.0)*aij/(2.0*beta*ct); // sinθ
// A = PAP : apply the rotation to rows and columns i, j
for(int m = 0; m < n; ++m){
if(m == i || m == j) continue;
float aim = a[i*n+m];
float ajm = a[j*n+m];
bim[m] = aim*ct+ajm*st;
bjm[m] = -aim*st+ajm*ct;
}
bii = aii*ct*ct+2.0*aij*ct*st+ajj*st*st;
bij = 0.0;
bjj = aii*st*st-2.0*aij*ct*st+ajj*ct*ct;
bji = 0.0;
for(int m = 0; m < n; ++m){
a[i*n+m] = a[m*n+i] = bim[m];
a[j*n+m] = a[m*n+j] = bjm[m];
}
a[i*n+i] = bii;
a[i*n+j] = bij;
a[j*n+j] = bjj;
a[j*n+i] = bji;
// V = PV : accumulate the rotation into the eigenvector matrix
for(int m = 0; m < n; ++m){
float vmi = v[m*n+i];
float vmj = v[m*n+j];
bim[m] = vmi*ct+vmj*st;
bjm[m] = -vmi*st+vmj*ct;
}
for(int m = 0; m < n; ++m){
v[m*n+i] = bim[m];
v[m*n+j] = bjm[m];
}
float e = 0.0;
for(int ja = 0; ja < n; ++ja){
for(int ia = 0; ia < n; ++ia){
if(ia != ja){
e += fabs(a[ja*n+ia]);
}
}
}
if(e < eps) break;
cnt++;
if(cnt > iter_max) break;
}
delete [] bim;
delete [] bjm;
return cnt;
}
__global__ void covarianceGPU(float* neighbor_points,float* matrix,int point_size) {
// compute the mean of the neighbor points
float x_average=0,y_average=0,z_average=0;
for(int i=0;i<point_size*3;i+=3){
x_average+=neighbor_points[i];
y_average+=neighbor_points[i+1];
z_average+=neighbor_points[i+2];
}
x_average/=point_size;
y_average/=point_size;
z_average/=point_size;
// compute the covariance elements
float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0;
for(int i=0;i<point_size*3;i+=3){
sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average);
syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average);
szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average);
sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average);
sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average);
syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average);
}
sxx/=point_size;
syy/=point_size;
szz/=point_size;
sxy/=point_size;
sxz/=point_size;
syz/=point_size;
// write the 3x3 covariance matrix (row-major)
matrix[0]=sxx;matrix[1]=sxy;matrix[2]=sxz;
matrix[3]=sxy;matrix[4]=syy;matrix[5]=syz;
matrix[6]=sxz;matrix[7]=syz;matrix[8]=szz;
}
__global__ void eigenGPU(float* neighbor_points,float* eigen_vector,float* eigen_value,int point_size) {
// compute the mean of the neighbor points
float x_average=0,y_average=0,z_average=0;
for(int i=0;i<point_size*3;i+=3){
x_average+=neighbor_points[i];
y_average+=neighbor_points[i+1];
z_average+=neighbor_points[i+2];
}
x_average/=point_size;
y_average/=point_size;
z_average/=point_size;
// compute the covariance elements
float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0;
for(int i=0;i<point_size*3;i+=3){
sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average);
syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average);
szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average);
sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average);
sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average);
syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average);
}
sxx/=point_size;
syy/=point_size;
szz/=point_size;
sxy/=point_size;
sxz/=point_size;
syz/=point_size;
// assemble the covariance matrix
float a[3*3]={
sxx,sxy,sxz,
sxy,syy,syz,
sxz,syz,szz,
};
// eigen decomposition (Jacobi method)
eigenJacobiMethod(a, eigen_vector, 3);
eigen_value[0]=a[0];
eigen_value[1]=a[4];
eigen_value[2]=a[8];
}
__global__ void normalGPU(float* neighbor_points,float* normal_vecotr,int point_size) {
//平均計算
float x_average=0,y_average=0,z_average=0;
for(int i=0;i<point_size*3;i+=3){
x_average+=neighbor_points[i];
y_average+=neighbor_points[i+1];
z_average+=neighbor_points[i+2];
}
x_average/=point_size;
y_average/=point_size;
z_average/=point_size;
//要素計算
float sxx=0,sxy=0,sxz=0,syy=0,syz=0,szz=0;
for(int i=0;i<point_size*3;i+=3){
sxx+=(neighbor_points[i]-x_average)*(neighbor_points[i]-x_average);
syy+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+1]-y_average);
szz+=(neighbor_points[i+2]-z_average)*(neighbor_points[i+2]-z_average);
sxy+=(neighbor_points[i]-x_average)*(neighbor_points[i+1]-y_average);
sxz+=(neighbor_points[i]-x_average)*(neighbor_points[i+2]-z_average);
syz+=(neighbor_points[i+1]-y_average)*(neighbor_points[i+2]-z_average);
}
sxx/=point_size;
syy/=point_size;
szz/=point_size;
sxy/=point_size;
sxz/=point_size;
syz/=point_size;
// assemble the covariance matrix
float a[3*3]={
sxx,sxy,sxz,
sxy,syy,syz,
sxz,syz,szz,
};
// printf(" %f ,%f ,%f \ncovariance matrix = %f ,%f ,%f \n %f ,%f ,%f \n\n",sxx,sxy,sxz,sxy,syy,syz,sxz,syz,szz);
// eigen decomposition (Jacobi method)
float eigen_vector[3 * 3];
eigenJacobiMethod(a, eigen_vector, 3);
// printf(" %f ,%f ,%f \neigen vector = %f ,%f ,%f \n %f ,%f ,%f \n\n",eigen_vector[0],eigen_vector[1],eigen_vector[2],eigen_vector[3],eigen_vector[4],eigen_vector[5],eigen_vector[6],eigen_vector[7],eigen_vector[8]);
float eigen_value[3];
eigen_value[0]=a[0];
eigen_value[1]=a[4];
eigen_value[2]=a[8];
int min_eigen_axis=0;
float min_eigen_value=eigen_value[0];
for(int i=1;i<3;i++){
if(eigen_value[i]<min_eigen_value){
min_eigen_value=eigen_value[i];
min_eigen_axis=i;
}
}
normal_vecotr[0]=eigen_vector[min_eigen_axis+0];
normal_vecotr[1]=eigen_vector[min_eigen_axis+3];
normal_vecotr[2]=eigen_vector[min_eigen_axis+6];
// printf("normals = %f, %f, %f\n\n\n\n",normal_vecotr[0],normal_vecotr[1],normal_vecotr[2]);
}
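// Note: the normal produced by normalGPU is the eigenvector belonging to the smallest
// eigenvalue of the local covariance matrix (standard PCA normal estimation); its sign is
// not disambiguated here, so callers that need a consistent orientation must flip it themselves.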
extern void covariance(std::vector<std::vector<float>> neighbor_points,float matrix[3][3]){
// variable declarations
std::vector<float> h_neighbor_points(neighbor_points.size() * 3);
std::vector<float> h_matrix(3 * 3);
float *d_neighbor_points, *d_matrix;
// allocate device memory
cudaMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float));
cudaMalloc((void **)&d_matrix, 3 * 3 * sizeof(float));
// flatten the points into a contiguous array
int k=0;
for(int i=0;i<neighbor_points.size();i++){
for(int j=0;j<3;j++){
h_neighbor_points[k]=neighbor_points[i][j];
k++;
}
}
// copy to the device and launch the kernel
cudaMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), cudaMemcpyHostToDevice);
covarianceGPU<<<1, 1>>>(d_neighbor_points,d_matrix,neighbor_points.size());
// copy the result back to the host
cudaMemcpy(&h_matrix[0], d_matrix, 3 * 3 * sizeof(float), cudaMemcpyDeviceToHost);
// reshape into the output matrix
k=0;
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
matrix[i][j]=h_matrix[k];
k++;
}
}
// free device memory
cudaFree(d_neighbor_points);
cudaFree(d_matrix);
}
extern void eigen(std::vector<std::vector<float>> neighbor_points,float eigen_vector[3][3],float eigen_value[3]){
// variable declarations
std::vector<float> h_neighbor_points(neighbor_points.size() * 3);
std::vector<float> h_eigen_vector(3 * 3);
std::vector<float> h_eigen_value(3);
float *d_neighbor_points, *d_eigen_vector, *d_eigen_value;
// allocate device memory
cudaMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float));
cudaMalloc((void **)&d_eigen_vector, 3 * 3 * sizeof(float));
cudaMalloc((void **)&d_eigen_value, 3 * sizeof(float));
// flatten the points into a contiguous array
int k=0;
for(int i=0;i<neighbor_points.size();i++){
for(int j=0;j<3;j++){
h_neighbor_points[k]=neighbor_points[i][j];
k++;
}
}
// copy to the device and launch the kernel
cudaMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), cudaMemcpyHostToDevice);
eigenGPU<<<1, 1>>>(d_neighbor_points,d_eigen_vector,d_eigen_value,neighbor_points.size());
// copy the results back to the host
cudaMemcpy(&h_eigen_vector[0], d_eigen_vector, 3 * 3 * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&h_eigen_value[0], d_eigen_value, 3 * sizeof(float), cudaMemcpyDeviceToHost);
// reshape into the output matrices
k=0;
for(int i=0;i<3;i++){
for(int j=0;j<3;j++){
eigen_vector[i][j]=h_eigen_vector[k];
eigen_value[i]=h_eigen_value[i];
k++;
}
}
// free device memory
cudaFree(d_neighbor_points);
cudaFree(d_eigen_vector);
cudaFree(d_eigen_value);
}
extern void normal(std::vector<std::vector<float>> neighbor_points,float normal_vecotr[3]){
// variable declarations
std::vector<float> h_neighbor_points(neighbor_points.size() * 3);
std::vector<float> h_normal_vector(3);
float *d_neighbor_points, *d_normal_vecotr;
// allocate device memory
cudaMalloc((void **)&d_neighbor_points, neighbor_points.size() * 3 * sizeof(float));
cudaMalloc((void **)&d_normal_vecotr, 3 * sizeof(float));
// flatten the points into a contiguous array
int k=0;
for(int i=0;i<neighbor_points.size();i++){
for(int j=0;j<3;j++){
h_neighbor_points[k]=neighbor_points[i][j];
k++;
}
}
// copy to the device and launch the kernel
cudaMemcpy(d_neighbor_points, &h_neighbor_points[0], neighbor_points.size() * 3 * sizeof(float), cudaMemcpyHostToDevice);
normalGPU<<<1, 1>>>(d_neighbor_points,d_normal_vecotr,neighbor_points.size());
// copy the result back to the host
cudaMemcpy(&h_normal_vector[0], d_normal_vecotr, 3 * sizeof(float), cudaMemcpyDeviceToHost);
// copy into the output vector
for(int i=0;i<3;i++){
normal_vecotr[i]=h_normal_vector[i];
}
// free device memory
cudaFree(d_neighbor_points);
cudaFree(d_normal_vecotr);
} |
65d3238395632f01d7fa6a0fa192938179b669f5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* nvmatrix_kernel.cu
*
* Created on: 21-Jan-2009
* Author: Alex Krizhevsky ([email protected])
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "nvmatrix_kernel.cuh"
__global__ void kExp(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = __expf(gData[i]);
}
__global__ void kLogistic1(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = (1 + tanhf(gData[i] / 2)) / 2;
}
__global__ void kLogistic2(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 1 / (1 + expf(-gData[i]));
}
__global__ void kLog(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = __logf(gData[i]);
}
__global__ void kSquare(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = gData[i] * gData[i];
}
__global__ void kSqrt(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = sqrtf(gData[i]);
}
__global__ void kZero(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 0;
}
__global__ void kReciprocal(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 1 / gData[i];
}
__global__ void kSubtractFromScalar(float* gData, float scalar, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = scalar - gData[i];
}
__global__ void kAddScalar(float* gData, float scalar, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = scalar + gData[i];
}
__global__ void kBiggerThanScalar(float* gData, float scalar, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = gData[i] > scalar;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kBinarizeProbs(unsigned int* rndMults, unsigned long long* rndWords, float *gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = gData[i] > (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
#define PI 3.1415926535897932f
/*
* TODO: modify to take mean/stdev
*/
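/*
 * The two kernels below use the Box-Muller transform: from independent uniforms
 * u1, u2 in (0,1],
 *   R = sqrt(-2*ln(u1)),  T = 2*pi*u2,  z0 = R*cos(T),  z1 = R*sin(T)
 * are two independent standard-normal samples, which are then scaled by `stdev`.
 */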
__global__ void kAddGaussianNoise(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float stdev, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] += stdev * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] += stdev * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
/*
* TODO: modify to take mean/stdev
*/
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, const float stdev, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = stdev * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = stdev * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kBiggerThan(float* gMat1, float* gMat2, float* gMatTarget, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < numElements)
gMatTarget[idx] = gMat1[idx] > gMat2[idx];
}
__global__ void kCopy(float* srcStart, float* destStart, unsigned int copyWidth, unsigned int jumpWidth, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < numElements)
destStart[(idx / copyWidth) * jumpWidth + idx % copyWidth] = srcStart[(idx / copyWidth) * jumpWidth + idx % copyWidth];
}
__device__ inline int getTransArrayIndex(unsigned int width, unsigned int height, unsigned int i) {
return height * (i % width) + i / width;
}
/*
* like above but assumes destination is transposed.
* note that this is not efficient because there will be
* memory transactions that are not coalesced.
*/
__global__ void kCopyToTransDestSlow(float* srcStart, float* destStart, unsigned int srcCopyWidth,
unsigned int srcJumpWidth, unsigned int destJumpHeight, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < numElements)
destStart[getTransArrayIndex(srcCopyWidth, destJumpHeight, idx)] = srcStart[(idx / srcCopyWidth) * srcJumpWidth + idx % srcCopyWidth];
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kCopyToTransDestFast(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcCopyHeight,
unsigned int srcJumpSize, unsigned int destJumpSize) {
// const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
// const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
// if(idxX < srcCopyWidth && idxY < srcCopyHeight) {
const unsigned int srcReadIdx = (blockIdx.y * blockDim.y + threadIdx.y) * srcJumpSize + blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int destWriteIdx = (blockIdx.x * blockDim.x + threadIdx.y) * destJumpSize + blockIdx.y * blockDim.y + threadIdx.x;
__shared__ float smem[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE + 1];
smem[threadIdx.x][threadIdx.y] = srcStart[srcReadIdx];
__syncthreads();
destStart[destWriteIdx] = smem[threadIdx.y][threadIdx.x];
// }
}
__global__ void kAdd(float* a, float* b, float* dest,
unsigned int numEls, float scaleA, float scaleB) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scaleA * a[i] + scaleB * b[i];
}
}
/*
* a not transposed, b transposed.
* slow because reads not coalesced :(
*/
__global__ void kAddTransSlow(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int numEls, float scaleA, float scaleB) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scaleA * a[i] + scaleB * b[getTransArrayIndex(width, height, i)];
}
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kAddTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int bJumpWidth, float scaleA, float scaleB) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = scaleA * a[idx] + scaleB * smem[threadIdx.y][threadIdx.x];
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kMultTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int bJumpWidth) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = a[idx] * smem[threadIdx.y][threadIdx.x];
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kDivideTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int bJumpWidth) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = __fdividef(a[idx], smem[threadIdx.y][threadIdx.x]);
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = __fdividef(a[i], b[i]);
}
}
__global__ void kTranspose(float* a, float* dest, int width, int height) {
const int bx = blockIdx.x * blockDim.x;
const int by = blockIdx.y * blockDim.y;
const int tx = bx + threadIdx.x;
const int ty = by + threadIdx.y;
// unsigned int idx = ty * width + tx;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
if (tx < width && ty < height) {
smem[threadIdx.y][threadIdx.x] = a[ty * width + tx];
}
__syncthreads();
if (by + threadIdx.x < height && threadIdx.y + bx < width) {
// idx = height * (blockIdx.x * blockDim.x + threadIdx.y) + blockIdx.y * blockDim.y + threadIdx.x;
dest[(bx + threadIdx.y) * height + by + threadIdx.x] = smem[threadIdx.x][threadIdx.y];
}
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kSquaredDiffTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int bJumpWidth) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = (a[idx] - smem[threadIdx.y][threadIdx.x]) * (a[idx] - smem[threadIdx.y][threadIdx.x]);
}
__global__ void kSquaredDiff(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = (a[i] - b[i]) * (a[i] - b[i]);
}
}
__global__ void kAdd3(float* a, const float* b, const float* c, const unsigned int numEls,
const float scaleA, const float scaleB, const float scaleC) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
a[i] = scaleA * a[i] + scaleB * b[i] + scaleC * c[i];
}
}
__global__ void kTile(float* src, float* tgt, unsigned int srcWidth, unsigned int srcHeight, unsigned int tgtWidth, unsigned int tgtHeight) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int numEls = tgtWidth * tgtHeight;
for (unsigned int i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
const unsigned int y = i / tgtWidth;
const unsigned int x = i % tgtWidth;
const unsigned int srcY = y % srcHeight;
const unsigned int srcX = x % srcWidth;
tgt[i] = src[srcY * srcWidth + srcX];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width,
unsigned int height, float scaleVec) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + scaleVec * vec[i % width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, const unsigned int width,
const unsigned int height, const float scaleVec) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + scaleVec * vec[i / width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width,unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kDivideByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width,unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = __fdividef(mat[i], vec[i % width]);
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kDivideByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = __fdividef(mat[i], vec[i / width]);
}
}
/*
* Bad when there are few columns. But if there are a few thousand columns, you can't really
* go any faster than this because all the reads are coalesced and processor utilization is maximal.
*/
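/*
 * Each thread owns one column of the row-major matrix and walks down its rows, so the
 * threads of a warp read consecutive addresses at every step and the loads coalesce.
 * The launch configuration is not shown in this file; presumably the caller supplies on
 * the order of width / blockDim.x blocks so that every column gets a thread.
 */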
__global__ void kDumbSumCols(float* mat, float* vec, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
mat += idx;
if (idx < width) {
float sum = 0;
for (int j = 0; j < height; j++) {
sum += *mat;
mat += width;
}
vec[idx] = sum;
}
}
__global__ void kDumbMaxCols(float* mat, float* vec, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
mat += idx;
if (idx < width) {
float mx = *mat;
mat += width;
for (int j = 1; j < height; j++) {
mx = myMax(*mat, mx);
mat += width;
}
vec[idx] = mx;
}
}
| 65d3238395632f01d7fa6a0fa192938179b669f5.cu | /*
* nvmatrix_kernel.cu
*
* Created on: 21-Jan-2009
* Author: Alex Krizhevsky ([email protected])
*/
#include <stdio.h>
#include <cuda_runtime.h>
#include "nvmatrix_kernel.cuh"
__global__ void kExp(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = __expf(gData[i]);
}
__global__ void kLogistic1(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = (1 + tanhf(gData[i] / 2)) / 2;
}
__global__ void kLogistic2(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 1 / (1 + expf(-gData[i]));
}
__global__ void kLog(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = __logf(gData[i]);
}
__global__ void kSquare(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = gData[i] * gData[i];
}
__global__ void kSqrt(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = sqrtf(gData[i]);
}
__global__ void kZero(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 0;
}
__global__ void kReciprocal(float* gData, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = 1 / gData[i];
}
__global__ void kSubtractFromScalar(float* gData, float scalar, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = scalar - gData[i];
}
__global__ void kAddScalar(float* gData, float scalar, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = scalar + gData[i];
}
__global__ void kBiggerThanScalar(float* gData, float scalar, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = gData[i] > scalar;
}
__global__ void kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
__global__ void kBinarizeProbs(unsigned int* rndMults, unsigned long long* rndWords, float *gData, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
gData[i] = gData[i] > (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
}
rndWords[idx] = rndWord;
}
#define PI 3.1415926535897932f
/*
* TODO: modify to take mean/stdev
*/
__global__ void kAddGaussianNoise(unsigned int* rndMults, unsigned long long* rndWords, float* gData, float stdev, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] += stdev * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] += stdev * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
/*
* TODO: modify to take mean/stdev
*/
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, const float stdev, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long rndWord = rndWords[idx];
const unsigned int rndMult = rndMults[idx];
float rnd1, rnd2, R, T;
for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
T = 2 * PI * rnd2;
R = sqrtf(-2 * __logf(rnd1));
gData[i] = stdev * R * __cosf(T);
if (i + NUM_RND_STREAMS < numElements)
gData[i + NUM_RND_STREAMS] = stdev * R * __sinf(T);
}
rndWords[idx] = rndWord;
}
__global__ void kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
// The initial x is the seed and the initial carry is 1
unsigned long long rndWord = ((unsigned long long)seed << 32) + 1;
const unsigned int rndMult = rndMults[idx];
/*
* Run the chain for a few steps so that all the streams have a chance
* to differentiate. They start out generating similar random numbers
* because all the multipliers are similar.
*/
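    /*
     * Each stream is a multiply-with-carry generator: treating LOW_BITS(rndWord) as the
     * state x and HIGH_BITS(rndWord) as the carry c (macros defined in the header), one
     * step computes
     *   x' = (rndMult*x + c) mod 2^32,  c' = (rndMult*x + c) >> 32,
     * i.e. exactly the packed update rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord).
     */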
for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) {
rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
}
rndWords[idx] = rndWord;
}
__global__ void kBiggerThan(float* gMat1, float* gMat2, float* gMatTarget, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < numElements)
gMatTarget[idx] = gMat1[idx] > gMat2[idx];
}
__global__ void kCopy(float* srcStart, float* destStart, unsigned int copyWidth, unsigned int jumpWidth, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < numElements)
destStart[(idx / copyWidth) * jumpWidth + idx % copyWidth] = srcStart[(idx / copyWidth) * jumpWidth + idx % copyWidth];
}
__device__ inline int getTransArrayIndex(unsigned int width, unsigned int height, unsigned int i) {
return height * (i % width) + i / width;
}
/*
* like above but assumes destination is transposed.
* note that this is not efficient because there will be
* memory transactions that are not coalesced.
*/
__global__ void kCopyToTransDestSlow(float* srcStart, float* destStart, unsigned int srcCopyWidth,
unsigned int srcJumpWidth, unsigned int destJumpHeight, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < numElements)
destStart[getTransArrayIndex(srcCopyWidth, destJumpHeight, idx)] = srcStart[(idx / srcCopyWidth) * srcJumpWidth + idx % srcCopyWidth];
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kCopyToTransDestFast(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcCopyHeight,
unsigned int srcJumpSize, unsigned int destJumpSize) {
// const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
// const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
// if(idxX < srcCopyWidth && idxY < srcCopyHeight) {
const unsigned int srcReadIdx = (blockIdx.y * blockDim.y + threadIdx.y) * srcJumpSize + blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int destWriteIdx = (blockIdx.x * blockDim.x + threadIdx.y) * destJumpSize + blockIdx.y * blockDim.y + threadIdx.x;
__shared__ float smem[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE + 1];
smem[threadIdx.x][threadIdx.y] = srcStart[srcReadIdx];
__syncthreads();
destStart[destWriteIdx] = smem[threadIdx.y][threadIdx.x];
// }
}
__global__ void kAdd(float* a, float* b, float* dest,
unsigned int numEls, float scaleA, float scaleB) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scaleA * a[i] + scaleB * b[i];
}
}
/*
* a not transposed, b transposed.
* slow because reads not coalesced :(
*/
__global__ void kAddTransSlow(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int numEls, float scaleA, float scaleB) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = scaleA * a[i] + scaleB * b[getTransArrayIndex(width, height, i)];
}
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kAddTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int bJumpWidth, float scaleA, float scaleB) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = scaleA * a[idx] + scaleB * smem[threadIdx.y][threadIdx.x];
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kMultTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int bJumpWidth) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = a[idx] * smem[threadIdx.y][threadIdx.x];
}
__global__ void kMult(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = a[i] * b[i];
}
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kDivideTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int height,
unsigned int bJumpWidth) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = __fdividef(a[idx], smem[threadIdx.y][threadIdx.x]);
}
__global__ void kDivide(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int idx = blockIdx.y * height + blockIdx.x * blockDim.x + threadIdx.y*blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = __fdividef(a[i], b[i]);
}
}
__global__ void kTranspose(float* a, float* dest, int width, int height) {
const int bx = blockIdx.x * blockDim.x;
const int by = blockIdx.y * blockDim.y;
const int tx = bx + threadIdx.x;
const int ty = by + threadIdx.y;
// unsigned int idx = ty * width + tx;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
if (tx < width && ty < height) {
smem[threadIdx.y][threadIdx.x] = a[ty * width + tx];
}
__syncthreads();
if (by + threadIdx.x < height && threadIdx.y + bx < width) {
// idx = height * (blockIdx.x * blockDim.x + threadIdx.y) + blockIdx.y * blockDim.y + threadIdx.x;
dest[(bx + threadIdx.y) * height + by + threadIdx.x] = smem[threadIdx.x][threadIdx.y];
}
}
/*
* a not transposed, b transposed.
* coalesced reads and writes, no bank conflicts cause of the +1.
*/
__global__ void kSquaredDiffTransFast(float* a, float* b, float* dest, unsigned int width, unsigned int bJumpWidth) {
const unsigned int idxY = blockIdx.y * blockDim.y + threadIdx.y;
const unsigned int idxX = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int idx = idxY * width + idxX;
__shared__ float smem[ADD_BLOCK_SIZE][ADD_BLOCK_SIZE + 1];
const unsigned int bBlockReadStart = blockDim.x * blockIdx.x * bJumpWidth + blockIdx.y * blockDim.y;
smem[threadIdx.x][threadIdx.y] = b[bBlockReadStart + threadIdx.y * bJumpWidth + threadIdx.x];
__syncthreads();
dest[idx] = (a[idx] - smem[threadIdx.y][threadIdx.x]) * (a[idx] - smem[threadIdx.y][threadIdx.x]);
}
__global__ void kSquaredDiff(float* a, float* b, float* dest, unsigned int numEls) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
dest[i] = (a[i] - b[i]) * (a[i] - b[i]);
}
}
__global__ void kAdd3(float* a, const float* b, const float* c, const unsigned int numEls,
const float scaleA, const float scaleB, const float scaleC) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < numEls; i += numThreads) {
a[i] = scaleA * a[i] + scaleB * b[i] + scaleC * c[i];
}
}
__global__ void kTile(float* src, float* tgt, unsigned int srcWidth, unsigned int srcHeight, unsigned int tgtWidth, unsigned int tgtHeight) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int numEls = tgtWidth * tgtHeight;
for (unsigned int i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
const unsigned int y = i / tgtWidth;
const unsigned int x = i % tgtWidth;
const unsigned int srcY = y % srcHeight;
const unsigned int srcX = x % srcWidth;
tgt[i] = src[srcY * srcWidth + srcX];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width,
unsigned int height, float scaleVec) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + scaleVec * vec[i % width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kAddColVector(float* mat, float* vec, float* tgtMat, const unsigned int width,
const unsigned int height, const float scaleVec) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] + scaleVec * vec[i / width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width,unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i % width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = mat[i] * vec[i / width];
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kDivideByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width,unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = __fdividef(mat[i], vec[i % width]);
}
}
/*
* Matrix in ROW-MAJOR order!
*/
__global__ void kDivideByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
tgtMat[i] = __fdividef(mat[i], vec[i / width]);
}
}
/*
* Bad when there are few columns. But if there are a few thousand columns, you can't really
* go any faster than this because all the reads are coalesced and processor utilization is maximal.
*/
__global__ void kDumbSumCols(float* mat, float* vec, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
mat += idx;
if (idx < width) {
float sum = 0;
for (int j = 0; j < height; j++) {
sum += *mat;
mat += width;
}
vec[idx] = sum;
}
}
__global__ void kDumbMaxCols(float* mat, float* vec, unsigned int width, unsigned int height) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
mat += idx;
if (idx < width) {
float mx = *mat;
mat += width;
for (int j = 1; j < height; j++) {
mx = myMax(*mat, mx);
mat += width;
}
vec[idx] = mx;
}
}
|
40de8a29fffdb76c13fcf807ae057b58db98415a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/hip/HIPContext.h>
#include <c10/util/Exception.h>
#include <c10/macros/Macros.h>
#include <THH/THHDeviceUtils.cuh>
#include <THH/THHTensorMathReduce.cuh>
#include <THH/THHReduceApplyUtils.cuh>
#include <ATen/hip/cub.cuh>
#include <ATen/native/hip/EmbeddingBackwardKernel.cuh>
#include <ATen/native/hip/SortingCommon.cuh>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int BLOCKDIMY = 16;
#else
static const int BLOCKDIMY = 32;
#endif
template
<typename scalar_t,
typename accscalar_t,
typename index_t>
__global__ void embedding_backward_feature_kernel
(index_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y);
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
int batch_end = batch_start + blockDim.x*blockDim.y < n ?
batch_start + blockDim.x*blockDim.y : n;
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
(batch_end - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffsll(matchmask) - 1;
#else
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
#endif
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
#ifdef __HIP_PLATFORM_HCC__
first_remaining_peer = __ffsll(matchmask) - 1;
#else
first_remaining_peer = __ffs(matchmask) - 1;
#endif
my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t, typename index_t>
__global__ void embedding_backward_kernel(
index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
index_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
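// With scale_grad_by_freq the count tensor holds how often each index occurs,
// so each gradient contribution is scaled by 1/count before accumulation.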
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void renorm_kernel(
scalar_t* weights, index_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int64_t dim,
int64_t weights_stride0, int64_t weights_stride1) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * weights_stride0;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += ::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = ::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i * weights_stride1] *= factor;
}
}
}
} // anonymous namespace
template<typename index_t>
void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count);
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices_,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices_, "indices", 1);
checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt});
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto indices = indices_.contiguous();
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
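// Fast path for small index counts without frequency scaling: a single kernel accumulates
// into grad_weight, serializing duplicate indices via warp leaders for determinism.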
if (num_indices <= 3072 && !scale_grad_by_freq) {
auto indices_contig = indices.contiguous();
auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
int64_t stride = grad_weight.stride(0);
dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE));
dim3 block(C10_WARP_SIZE, BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
grad.scalar_type(),
"embedding_backward",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () {
hipLaunchKernelGGL(( embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t>)
, dim3(grid),
dim3(block),
sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY,
stream,
indices_contig.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(),
static_cast<int>(num_indices),
static_cast<int64_t>(stride),
static_cast<int>(padding_idx));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor count;
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () {
auto range = at::arange(num_indices, indices.options());
int64_t nbits = cuda::cub::get_num_bits(num_weights);
cuda::cub::sort_pairs(
indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(),
range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(),
num_indices, false/*, 0, nbits*/);
if (scale_grad_by_freq) {
count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count);
}
});
return embedding_backward_cuda_kernel(grad, orig_indices,
sorted_indices, count, num_weights, padding_idx);
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () {
auto num_indices = indices.numel();
auto indices_contig = std::get<0>(indices.sort()).contiguous();
auto unique_indices = at::empty(indices.numel(), indices.options());
auto num_unique_indices = at::empty({}, indices.options().dtype(kLong));
cuda::cub::unique(
indices_contig.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
num_unique_indices.data_ptr<int64_t>(),
num_indices
);
dim3 grid = num_unique_indices.item<int64_t>();
dim3 block = 128;
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_renorm_cuda_", [&] {
using accscalar_t = acc_type<scalar_t, true>;
hipLaunchKernelGGL(( renorm_kernel), dim3(grid), dim3(block), 128 * sizeof(accscalar_t), stream,
self.data_ptr<scalar_t>(),
unique_indices.data_ptr<index_t>(),
static_cast<accscalar_t>(max_norm),
static_cast<accscalar_t>(norm_type),
dim, self.stride(0), self.stride(1));
C10_HIP_KERNEL_LAUNCH_CHECK();
});
});
return self;
}
}} // namespace at::native
| 40de8a29fffdb76c13fcf807ae057b58db98415a.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/util/Exception.h>
#include <c10/macros/Macros.h>
#include <THC/THCDeviceUtils.cuh>
#include <THC/THCTensorMathReduce.cuh>
#include <THC/THCReduceApplyUtils.cuh>
#include <ATen/cuda/cub.cuh>
#include <ATen/native/cuda/EmbeddingBackwardKernel.cuh>
#include <ATen/native/cuda/SortingCommon.cuh>
namespace at { namespace native {
namespace {
#ifdef __HIP_PLATFORM_HCC__
static const int BLOCKDIMY = 16;
#else
static const int BLOCKDIMY = 32;
#endif
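// BLOCKDIMY is halved on HIP, presumably because the wavefront size (C10_WARP_SIZE) is 64 there,
// which keeps the block at C10_WARP_SIZE * BLOCKDIMY <= 1024 threads.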
template
<typename scalar_t,
typename accscalar_t,
typename index_t>
__global__ void embedding_backward_feature_kernel
(index_t* indices,
const scalar_t* __restrict__ grad,
scalar_t* __restrict__ grad_weight,
int n, // OK to pass as int, we don't expect 2 billion+ samples in one shot
int64_t stride,
int padding_idx)
{
extern __shared__ char buf[];
accscalar_t* smem = (accscalar_t*)buf;
accscalar_t* my_s = smem + C10_WARP_SIZE*threadIdx.y;
int* indices_batch = (int*)(buf + sizeof(accscalar_t)*C10_WARP_SIZE*blockDim.y);
const int s = (int)stride; // OK to make int, we don't expect 2 billion+ embedding row size
const int f = threadIdx.x + blockIdx.x*blockDim.x; // feature_dim
for(int batch_start = 0; batch_start < n; batch_start += blockDim.x*blockDim.y)
{
// Entire block cooperates to load a batch of 1024 indices to process
int tid = threadIdx.x + threadIdx.y*blockDim.x;
if(batch_start + tid < n)
indices_batch[tid] = (int)indices[batch_start + tid];
int batch_end = batch_start + blockDim.x*blockDim.y < n ?
batch_start + blockDim.x*blockDim.y : n;
// Loop over the batch of <= 1024 loaded indices in chunks of blockDim.y = 32
for(int chunk_start = batch_start; chunk_start < batch_end; chunk_start += blockDim.y)
{
// This does double duty: it makes sure indices_batch is ready, and it makes sure match-group
// leaders are done with their accumulates before other warps start loading again.
__syncthreads();
int n_this_chunk = (batch_end - chunk_start) < blockDim.y ?
(batch_end - chunk_start) : blockDim.y;
int src_row = chunk_start + threadIdx.y;
int dst_row = indices_batch[src_row - batch_start]; // This warp's target row in grad_weight
// All warps load their smem segments with incoming grad data
if(src_row < n && f < s && dst_row != padding_idx)
my_s[threadIdx.x] = static_cast<accscalar_t>(grad[src_row*stride + f]);
__syncthreads();
// To ensure determinism, we can't just have each warp add its grad data to its dst_row.
// We need to check if any other warps pulled grad data targeting dst_row.
// If so, we elect the first warp in each matching group as the leader.
// Each leader warp serializes the accumulates targeting dst_row in shared memory,
// then finishes by adding the accumulated buffer to dst_row in grad_weight.
if(dst_row != padding_idx && src_row < n) // Per-warp exit condition, safe with ballot_sync
{
int match_found_this_thread =
(dst_row == indices_batch[chunk_start - batch_start + threadIdx.x]);
if(threadIdx.x >= n_this_chunk)
match_found_this_thread = 0;
#ifdef __HIP_PLATFORM_HCC__
unsigned long long int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffsll(matchmask) - 1;
#else
unsigned int matchmask = WARP_BALLOT(match_found_this_thread);
int first_remaining_peer = __ffs(matchmask) - 1;
#endif
if(threadIdx.y == first_remaining_peer) // Nominate lowest-indexed warp as the leader
{
matchmask ^= (1 << first_remaining_peer);
while(matchmask)
{
#ifdef __HIP_PLATFORM_HCC__
first_remaining_peer = __ffsll(matchmask) - 1;
#else
first_remaining_peer = __ffs(matchmask) - 1;
#endif
my_s[threadIdx.x] += smem[threadIdx.x + C10_WARP_SIZE*first_remaining_peer];
matchmask ^= (1 << first_remaining_peer);
}
if(f < s)
grad_weight[dst_row*stride + f] += static_cast<scalar_t>(my_s[threadIdx.x]);
}
}
}
}
}
template <typename scalar_t, typename index_t>
__global__ void embedding_backward_kernel(
index_t* input, index_t* indices, scalar_t* grad_output, scalar_t* grad_weight,
index_t* count, int64_t numel, int64_t stride, int padding_idx) {
using accscalar_t = acc_type<scalar_t, true>;
int idx = blockIdx.x * 4 + threadIdx.y;
// Each warp is responsible for an input into the LookupTable.
// If the preceding input has the same value as this input, then the warp
// exits immediately. The warp also processes subsequent inputs with the
// same value.
//
// Input Warp
// 1 <warp 1>
// 1 <warp 1> (<warp 2> exits without doing any work)
// 5 <warp 3>
// 8 <warp 4>
// Number of values processed by each thread (grain size)
const int SZ = 4;
if (idx < numel
&& (idx == 0 || input[idx] != input[idx - 1])
&& input[idx] != padding_idx) {
do {
const int start_feature = threadIdx.x + blockIdx.y * blockDim.x * SZ;
const int weight_row = ((int) input[idx]) * stride;
const int grad_row = ((int) indices[idx]) * stride;
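// With scale_grad_by_freq the count tensor holds how often each index occurs,
// so each gradient contribution is scaled by 1/count before accumulation.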
const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0;
accscalar_t gradient[SZ];
accscalar_t weight[SZ];
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
gradient[ii] = static_cast<accscalar_t>(grad_output[grad_row + feature_dim]);
weight[ii] = static_cast<accscalar_t>(grad_weight[weight_row + feature_dim]);
}
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
weight[ii] += gradient[ii] * scale;
}
#pragma unroll
for (int ii = 0; ii < SZ; ii++) {
int feature_dim = start_feature + ii * C10_WARP_SIZE;
if (feature_dim < stride) {
grad_weight[weight_row + feature_dim] = static_cast<scalar_t>(weight[ii]);
}
}
idx++;
} while (idx < numel && input[idx] == input[idx - 1]);
}
}
/* Calculate norms of the rows of weight_ptr given by idx_ptr and capture them in norms */
template <typename scalar_t, typename accscalar_t, typename index_t>
__global__ void renorm_kernel(
scalar_t* weights, index_t* indices, accscalar_t max_norm,
accscalar_t norm_type, int64_t dim,
int64_t weights_stride0, int64_t weights_stride1) {
// Some casting hacks since dynamic shared memory and templates don't work together:
extern __shared__ unsigned char smem[];
auto sdata = reinterpret_cast<accscalar_t*>(smem);
int tid = threadIdx.x;
int base_index = indices[blockIdx.x] * weights_stride0;
accscalar_t v = 0;
for (int i = tid; i < dim; i += blockDim.x) {
auto x = static_cast<accscalar_t>(weights[base_index + i * weights_stride1]);
if (norm_type == 1) {
v += std::abs(x);
} else if (norm_type == 2) {
v += x * x;
} else {
v += std::pow(x, norm_type);
}
}
using Op = ReduceAdd<accscalar_t>;
v = reduceBlock<accscalar_t>(sdata, blockDim.x, v, Op(), 0);
if (tid == 0) {
sdata[0] = std::pow(v, static_cast<accscalar_t>(1.0 / norm_type));
}
__syncthreads();
// now we renormalize the blocks that need it
if (sdata[0] > max_norm) {
auto factor = static_cast<scalar_t>(max_norm / (sdata[0] + 1e-7));
for (int i = tid; i < dim; i += blockDim.x) {
weights[base_index + i * weights_stride1] *= factor;
}
}
}
} // anonymous namespace
template<typename index_t>
void embedding_dense_backward_cuda_scan(Tensor &sorted_indices, Tensor &count);
Tensor embedding_dense_backward_cuda(const Tensor & grad_, const Tensor & indices_,
int64_t num_weights, int64_t padding_idx,
bool scale_grad_by_freq) {
auto grad_arg = TensorArg(grad_, "grad", 1);
auto indices_arg = TensorArg(indices_, "indices", 1);
checkScalarTypes("embedding_backward", indices_arg, {kLong, kInt});
checkSameGPU("embedding_backward", grad_arg, indices_arg);
auto indices = indices_.contiguous();
auto num_indices = indices.numel();
auto grad = grad_.contiguous().view({num_indices, grad_.size(-1)});
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
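// Fast path for small index counts without frequency scaling: a single kernel accumulates
// into grad_weight, serializing duplicate indices via warp leaders for determinism.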
if (num_indices <= 3072 && !scale_grad_by_freq) {
auto indices_contig = indices.contiguous();
auto grad_weight = at::zeros({num_weights, grad_.size(-1)}, grad_.options());
int64_t stride = grad_weight.stride(0);
dim3 grid(THCCeilDiv(stride, (int64_t)C10_WARP_SIZE));
dim3 block(C10_WARP_SIZE, BLOCKDIMY);
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half, at::ScalarType::BFloat16,
grad.scalar_type(),
"embedding_backward",
[&]
{
using accscalar_t = acc_type<scalar_t, true>;
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () {
embedding_backward_feature_kernel<scalar_t, accscalar_t, index_t>
<<<grid,
block,
sizeof(accscalar_t)*C10_WARP_SIZE*BLOCKDIMY + sizeof(int)*C10_WARP_SIZE*BLOCKDIMY,
stream>>>
(indices_contig.data_ptr<index_t>(),
grad.data_ptr<scalar_t>(),
grad_weight.data_ptr<scalar_t>(),
static_cast<int>(num_indices),
static_cast<int64_t>(stride),
static_cast<int>(padding_idx));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return grad_weight;
}
auto sorted_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
auto orig_indices = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
Tensor count;
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_dense_backward_cuda", [&] () {
auto range = at::arange(num_indices, indices.options());
int64_t nbits = cuda::cub::get_num_bits(num_weights);
cuda::cub::sort_pairs(
indices.data_ptr<index_t>(), sorted_indices.data_ptr<index_t>(),
range.data_ptr<index_t>(), orig_indices.data_ptr<index_t>(),
num_indices, false/*, 0, nbits*/);
if (scale_grad_by_freq) {
count = at::empty_like(indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
embedding_dense_backward_cuda_scan<index_t>(sorted_indices, count);
}
});
return embedding_backward_cuda_kernel(grad, orig_indices,
sorted_indices, count, num_weights, padding_idx);
}
Tensor & embedding_renorm_cuda_(Tensor & self, const Tensor & indices,
double max_norm, double norm_type) {
auto self_arg = TensorArg(self, "self", 1);
auto indices_arg = TensorArg(indices, "indices", 1);
checkDim("embedding_renorm_", self_arg, 2);
checkSameGPU("embedding_renorm", self_arg, indices_arg);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
AT_DISPATCH_INDEX_TYPES(indices.scalar_type(), "embedding_renorm_cuda_", [&] () {
auto num_indices = indices.numel();
auto indices_contig = std::get<0>(indices.sort()).contiguous();
auto unique_indices = at::empty(indices.numel(), indices.options());
auto num_unique_indices = at::empty({}, indices.options().dtype(kLong));
cuda::cub::unique(
indices_contig.data_ptr<index_t>(),
unique_indices.data_ptr<index_t>(),
num_unique_indices.data_ptr<int64_t>(),
num_indices
);
dim3 grid = num_unique_indices.item<int64_t>();
dim3 block = 128;
int dim = self.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "embedding_renorm_cuda_", [&] {
using accscalar_t = acc_type<scalar_t, true>;
renorm_kernel<<<grid, block, 128 * sizeof(accscalar_t), stream>>>(
self.data_ptr<scalar_t>(),
unique_indices.data_ptr<index_t>(),
static_cast<accscalar_t>(max_norm),
static_cast<accscalar_t>(norm_type),
dim, self.stride(0), self.stride(1));
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
});
return self;
}
}} // namespace at::native
|
88b5ad55826518aa83802b42d1f7a525a8b6371c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "hip_runtime.h"
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
// declare texture reference for 1D float texture
#ifdef USE_TEXTURES
texture<float4, 1, hipReadModeElementType> tex;
texture<float4, 1, hipReadModeElementType> txt;
#endif
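/*
 * sortElem: sorts the four components of a float4 into ascending order
 * with a small branch-free comparator network.
 */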
__device__ float4 sortElem(float4 r) {
float4 nr;
nr.x = (r.x > r.y) ? r.y : r.x;
nr.y = (r.y > r.x) ? r.y : r.x;
nr.z = (r.z > r.w) ? r.w : r.z;
nr.w = (r.w > r.z) ? r.w : r.z;
r.x = (nr.x > nr.z) ? nr.z : nr.x;
r.y = (nr.y > nr.w) ? nr.w : nr.y;
r.z = (nr.z > nr.x) ? nr.z : nr.x;
r.w = (nr.w > nr.y) ? nr.w : nr.y;
nr.x = r.x;
nr.y = (r.y > r.z) ? r.z : r.y;
nr.z = (r.z > r.y) ? r.z : r.y;
nr.w = r.w;
return nr;
}
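/*
 * getLowest/getHighest: given two float4s that are each sorted ascending, pair the
 * elements of one against the reversed elements of the other and keep the per-slot
 * minima (resp. maxima) -- effectively the split step of a bitonic merge; the results
 * are then re-sorted with sortElem.
 */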
__device__ float4 getLowest(float4 a, float4 b)
{
//float4 na;
a.x = (a.x < b.w) ? a.x : b.w;
a.y = (a.y < b.z) ? a.y : b.z;
a.z = (a.z < b.y) ? a.z : b.y;
a.w = (a.w < b.x) ? a.w : b.x;
return a;
}
__device__ float4 getHighest(float4 a, float4 b)
{
b.x = (a.w >= b.x) ? a.w : b.x;
b.y = (a.z >= b.y) ? a.z : b.y;
b.z = (a.y >= b.z) ? a.y : b.z;
b.w = (a.x >= b.w) ? a.x : b.w;
return b;
}
#ifdef MEMCPYTOSYMBOL
__constant__ int constStartAddr[DIVISIONS + 1];
__constant__ int finalStartAddr[DIVISIONS + 1];
__constant__ int nullElems[DIVISIONS];
#endif
__global__ void
#ifndef USE_TEXTURES
mergeSortFirst(hipLaunchParm lp, float4 * origList,float4 *result, int listsize)
#else
mergeSortFirst(hipLaunchParm lp, float4 *result, int listsize)
#endif
{
// Block index
int bx = hipBlockIdx_x;
// Thread index
//int tx = hipThreadIdx_x;
if(bx*hipBlockDim_x + hipThreadIdx_x < listsize/4){
#ifndef USE_TEXTURES
float4 r = origList[(int)(bx*hipBlockDim_x + hipThreadIdx_x)];
#else
float4 r = tex1Dfetch(tex, (int)(bx*hipBlockDim_x + hipThreadIdx_x));
#endif
result[bx * hipBlockDim_x + hipThreadIdx_x] = sortElem(r);
}
}
__global__ void
#ifndef USE_TEXTURES
mergeSortPass(hipLaunchParm lp, float4 *origList,float4 *result, int nrElems, int threadsPerDiv
#else
mergeSortPass(hipLaunchParm lp, float4 *result, int nrElems, int threadsPerDiv
#endif
#ifdef MEMCPYTOSYMBOL
)
#else
,int *constStartAddr)
#endif
{
int tid = (hipBlockIdx_x * hipBlockDim_x) + hipThreadIdx_x;
// The division to work on
int division = tid / threadsPerDiv;
if(division >= DIVISIONS) return;
// The block within the division
int int_tid = tid - division * threadsPerDiv;
int Astart = constStartAddr[division] + int_tid * nrElems;
int Bstart = Astart + nrElems/2;
float4 *resStart = &(result[Astart]);
if(Astart >= constStartAddr[division + 1])
return;
if(Bstart >= constStartAddr[division + 1]){
for(int i=0; i<(constStartAddr[division + 1] - Astart); i++)
{
#ifndef USE_TEXTURES
resStart[i] = origList[Astart + i];
#else
resStart[i] = tex1Dfetch(tex, Astart + i);
#endif
}
return;
}
int aidx = 0;
int bidx = 0;
int outidx = 0;
float4 a, b;
#ifndef USE_TEXTURES
a = origList[Astart + aidx];
b = origList[Bstart + bidx];
#else
a = tex1Dfetch(tex, Astart + aidx);
b = tex1Dfetch(tex, Bstart + bidx);
#endif
while(true)//aidx < nrElems/2)// || (bidx < nrElems/2 && (Bstart + bidx < constEndAddr[division])))
{
/**
* For some reason, it's faster to do the texture fetches here than
* after the merge
*/
#ifndef USE_TEXTURES
float4 nextA = origList[ Astart + aidx + 1];
float4 nextB = origList[Bstart + bidx + 1];
#else
float4 nextA = tex1Dfetch(tex, Astart + aidx + 1);
float4 nextB = tex1Dfetch(tex, Bstart + bidx + 1);
#endif
float4 na = getLowest(a,b);
float4 nb = getHighest(a,b);
a = sortElem(na);
b = sortElem(nb);
// Now, a contains the lowest four elements, sorted
resStart[outidx++] = a;
bool elemsLeftInA;
bool elemsLeftInB;
elemsLeftInA = (aidx + 1 < nrElems/2); // Astart + aidx + 1 is always less than division border
elemsLeftInB = (bidx + 1 < nrElems/2) && (Bstart + bidx + 1 < constStartAddr[division + 1]);
if(elemsLeftInA){
if(elemsLeftInB){
if(nextA.x < nextB.x) { aidx += 1; a = nextA; }
else { bidx += 1; a = nextB; }
}
else {
aidx += 1; a = nextA;
}
}
else {
if(elemsLeftInB){
bidx += 1; a = nextB;
}
else {
break;
}
}
}
resStart[outidx++] = b;
}
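/*
 * mergepack: final compaction pass; copies each division's sorted values into the
 * packed output, skipping the null padding elements recorded in nullElems.
 */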
__global__ void
mergepack(hipLaunchParm lp, float *orig, float *result
#ifdef MEMCPYTOSYMBOL
)
#else
,int *constStartAddr, unsigned int *finalStartAddr, int *nullElems)
#endif
{
int idx1 = (hipBlockIdx_x * hipBlockDim_x) + hipThreadIdx_x;
int division = hipBlockIdx_y;
if((finalStartAddr[division] + idx1) >= finalStartAddr[division + 1]) return;
result[finalStartAddr[division] + idx1] = orig[constStartAddr[division]*4 + nullElems[division] + idx1];
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 88b5ad55826518aa83802b42d1f7a525a8b6371c.cu | #include "hip_runtime.h"
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
// declare texture reference for 1D float texture
#ifdef USE_TEXTURES
texture<float4, 1, hipReadModeElementType> tex;
texture<float4, 1, hipReadModeElementType> txt;
#endif
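/*
 * sortElem: sorts the four components of a float4 into ascending order
 * with a small branch-free comparator network.
 */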
__device__ float4 sortElem(float4 r) {
float4 nr;
nr.x = (r.x > r.y) ? r.y : r.x;
nr.y = (r.y > r.x) ? r.y : r.x;
nr.z = (r.z > r.w) ? r.w : r.z;
nr.w = (r.w > r.z) ? r.w : r.z;
r.x = (nr.x > nr.z) ? nr.z : nr.x;
r.y = (nr.y > nr.w) ? nr.w : nr.y;
r.z = (nr.z > nr.x) ? nr.z : nr.x;
r.w = (nr.w > nr.y) ? nr.w : nr.y;
nr.x = r.x;
nr.y = (r.y > r.z) ? r.z : r.y;
nr.z = (r.z > r.y) ? r.z : r.y;
nr.w = r.w;
return nr;
}
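/*
 * getLowest/getHighest: given two float4s that are each sorted ascending, pair the
 * elements of one against the reversed elements of the other and keep the per-slot
 * minima (resp. maxima) -- effectively the split step of a bitonic merge; the results
 * are then re-sorted with sortElem.
 */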
__device__ float4 getLowest(float4 a, float4 b)
{
//float4 na;
a.x = (a.x < b.w) ? a.x : b.w;
a.y = (a.y < b.z) ? a.y : b.z;
a.z = (a.z < b.y) ? a.z : b.y;
a.w = (a.w < b.x) ? a.w : b.x;
return a;
}
__device__ float4 getHighest(float4 a, float4 b)
{
b.x = (a.w >= b.x) ? a.w : b.x;
b.y = (a.z >= b.y) ? a.z : b.y;
b.z = (a.y >= b.z) ? a.y : b.z;
b.w = (a.x >= b.w) ? a.x : b.w;
return b;
}
#ifdef MEMCPYTOSYMBOL
__constant__ int constStartAddr[DIVISIONS + 1];
__constant__ int finalStartAddr[DIVISIONS + 1];
__constant__ int nullElems[DIVISIONS];
#endif
__global__ void
#ifndef USE_TEXTURES
mergeSortFirst(hipLaunchParm lp, float4 * origList,float4 *result, int listsize)
#else
mergeSortFirst(hipLaunchParm lp, float4 *result, int listsize)
#endif
{
// Block index
int bx = hipBlockIdx_x;
// Thread index
//int tx = hipThreadIdx_x;
if(bx*hipBlockDim_x + hipThreadIdx_x < listsize/4){
#ifndef USE_TEXTURES
float4 r = origList[(int)(bx*hipBlockDim_x + hipThreadIdx_x)];
#else
float4 r = tex1Dfetch(tex, (int)(bx*hipBlockDim_x + hipThreadIdx_x));
#endif
result[bx * hipBlockDim_x + hipThreadIdx_x] = sortElem(r);
}
}
__global__ void
#ifndef USE_TEXTURES
mergeSortPass(hipLaunchParm lp, float4 *origList,float4 *result, int nrElems, int threadsPerDiv
#else
mergeSortPass(hipLaunchParm lp, float4 *result, int nrElems, int threadsPerDiv
#endif
#ifdef MEMCPYTOSYMBOL
)
#else
,int *constStartAddr)
#endif
{
int tid = (hipBlockIdx_x * hipBlockDim_x) + hipThreadIdx_x;
// The division to work on
int division = tid / threadsPerDiv;
if(division >= DIVISIONS) return;
// The block within the division
int int_tid = tid - division * threadsPerDiv;
int Astart = constStartAddr[division] + int_tid * nrElems;
int Bstart = Astart + nrElems/2;
float4 *resStart = &(result[Astart]);
if(Astart >= constStartAddr[division + 1])
return;
if(Bstart >= constStartAddr[division + 1]){
for(int i=0; i<(constStartAddr[division + 1] - Astart); i++)
{
#ifndef USE_TEXTURES
resStart[i] = origList[Astart + i];
#else
resStart[i] = tex1Dfetch(tex, Astart + i);
#endif
}
return;
}
int aidx = 0;
int bidx = 0;
int outidx = 0;
float4 a, b;
#ifndef USE_TEXTURES
a = origList[Astart + aidx];
b = origList[Bstart + bidx];
#else
a = tex1Dfetch(tex, Astart + aidx);
b = tex1Dfetch(tex, Bstart + bidx);
#endif
while(true)//aidx < nrElems/2)// || (bidx < nrElems/2 && (Bstart + bidx < constEndAddr[division])))
{
/**
* For some reason, it's faster to do the texture fetches here than
* after the merge
*/
#ifndef USE_TEXTURES
float4 nextA = origList[ Astart + aidx + 1];
float4 nextB = origList[Bstart + bidx + 1];
#else
float4 nextA = tex1Dfetch(tex, Astart + aidx + 1);
float4 nextB = tex1Dfetch(tex, Bstart + bidx + 1);
#endif
float4 na = getLowest(a,b);
float4 nb = getHighest(a,b);
a = sortElem(na);
b = sortElem(nb);
// Now, a contains the lowest four elements, sorted
resStart[outidx++] = a;
bool elemsLeftInA;
bool elemsLeftInB;
elemsLeftInA = (aidx + 1 < nrElems/2); // Astart + aidx + 1 is always less than division border
elemsLeftInB = (bidx + 1 < nrElems/2) && (Bstart + bidx + 1 < constStartAddr[division + 1]);
if(elemsLeftInA){
if(elemsLeftInB){
if(nextA.x < nextB.x) { aidx += 1; a = nextA; }
else { bidx += 1; a = nextB; }
}
else {
aidx += 1; a = nextA;
}
}
else {
if(elemsLeftInB){
bidx += 1; a = nextB;
}
else {
break;
}
}
}
resStart[outidx++] = b;
}
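/*
 * mergepack: final compaction pass; copies each division's sorted values into the
 * packed output, skipping the null padding elements recorded in nullElems.
 */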
__global__ void
mergepack(hipLaunchParm lp, float *orig, float *result
#ifdef MEMCPYTOSYMBOL
)
#else
,int *constStartAddr, unsigned int *finalStartAddr, int *nullElems)
#endif
{
int idx1 = (hipBlockIdx_x * hipBlockDim_x) + hipThreadIdx_x;
int division = hipBlockIdx_y;
if((finalStartAddr[division] + idx1) >= finalStartAddr[division + 1]) return;
result[finalStartAddr[division] + idx1] = orig[constStartAddr[division]*4 + nullElems[division] + idx1];
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
8b3909ac4cdcd560a9843fc00a4690d800ddca87.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "cutil.h"
#include <iostream>
#include <iomanip>
#include <hiprand/hiprand.h>
#include <ctime>
#define FLAG_PRINT_SPINS 0
#define FLAG_ENERGY 0
#define T_START 2.30
#define T_FACTOR 0.001
#define T_END 2.20
#define GLOBAL_ITERATIONS 10
#define RANDOM_A 1664525
#define RANDOM_B 1013904223
#define BLOCK_SIZE 64
const unsigned int N=4*BLOCK_SIZE*BLOCK_SIZE;
const unsigned int n=2*BLOCK_SIZE;
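// The lattice is n x n (= 2*BLOCK_SIZE x 2*BLOCK_SIZE) spins, N sites in total:
// each block handles two adjacent rows and each thread a 2x2 cell of spins.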
/****
*
* Function declaration
*
*/
void calc(int argc,char** argv);
__global__ void device_function_main(int*,int*,int*,float,bool);
/****
*
* Main function
*
*/
int main(int argc,char** argv) {
calc(argc,argv);
}
/****
*
* Calc
*
*/
void calc(int argc,char** argv) {
std::cout << " -----------------------------------------------------------------------" << std::endl;
std::cout <<" *" << std::endl;
std::cout <<" * GPU accelerated Monte Carlo simulation of the 2D Ising model" << std::endl;
std::cout <<" *" << std::endl;
std::cout <<" ----------------------------- Ising model ----------------------------- " << std::endl;
std::cout <<" Number of Spins: " << N << std::endl;
std::cout <<" Start Temperature: " << T_START <<std::endl;
std::cout <<" Decreasing Factor: " << T_FACTOR <<std::endl;
std::cout <<" Final Temperature: " << T_END <<std::endl;
std::cout <<" Global Iterations: " << GLOBAL_ITERATIONS << std::endl;
//Init
CUT_DEVICE_INIT(argc,argv);
srand48(23);
//Allocate and init host memory for output arrays
int num_entries=0;
for(double t=T_START; t>=T_END; t=t-T_FACTOR) num_entries++;
unsigned int mem_out_size=sizeof(float)*num_entries;
float* h_T=(float*) malloc(mem_out_size);
float* h_E=(float*) malloc(mem_out_size);
float* h_U=(float*) malloc(mem_out_size);
unsigned int mem_ref_out_size=sizeof(double)*num_entries;
double* h_ref_E=(double*) malloc(mem_ref_out_size);
double* h_ref_U=(double*) malloc(mem_ref_out_size);
num_entries=0;
for(double t=T_START; t>=T_END; t=t-T_FACTOR) {
h_T[num_entries]=t;
num_entries++;
}
//Allocate and init host memory for simulation arrays
unsigned int mem_size=sizeof(int)*N;
unsigned int mem_size_random=sizeof(int)*BLOCK_SIZE*BLOCK_SIZE;
int* h_random_data=(int*) malloc(mem_size_random);
int* h_S=(int*) malloc(mem_size);
unsigned int mem_size_out=sizeof(int)*BLOCK_SIZE;
int* h_out=(int*) malloc(mem_size_out);
h_random_data[0]=1;
for(int i=1;i<BLOCK_SIZE*BLOCK_SIZE;i++) {
h_random_data[i]=16807*h_random_data[i-1];
}
for(int i=0;i<N;i++) {
if(drand48()>0.5) h_S[i]=-1;
else h_S[i]=1;
}
//Create and start timer
float gpu_sum=0;
unsigned int timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Allocate device memory for arrays
int* d_random_data;
int* d_S;
int* d_out;
CUDA_SAFE_CALL(hipMalloc((void**) &d_random_data,mem_size_random));
CUDA_SAFE_CALL(hipMalloc((void**) &d_S,mem_size));
CUDA_SAFE_CALL(hipMalloc((void**) &d_out,mem_size_out));
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_malloc=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_malloc;
std::cout <<"\n --------------------------------- GPU --------------------------------- \n" <<std::endl;
std::cout <<" Processing time on GPU for allocating:"<< std::setprecision(2) << gpu_dt_malloc << "(ms)" <<std::endl;
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Create and start timer
timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Copy host memory to device and create mirror of d_S
CUDA_SAFE_CALL(hipMemcpy(d_random_data,h_random_data,mem_size_random,hipMemcpyHostToDevice));
CUDA_SAFE_CALL(hipMemcpy(d_S,h_S,mem_size,hipMemcpyHostToDevice));
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_mem=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_mem;
printf(" Processing time on GPU for memory transfer: %f (ms) \n",gpu_dt_mem);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(hipMemcpy(h_S,d_S,mem_size,hipMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
printf("\n");
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Calc energy
num_entries=0;
dim3 threads(BLOCK_SIZE);
dim3 grid(BLOCK_SIZE);
FILE *fp = fopen("gpu_T_U.dat","w");
for(float t=T_START;t>=T_END;t=t-T_FACTOR) {
double avg_H=0;
double avg_H_2=0;
double avg_H_4=0;
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;global_iteration++) {
hipLaunchKernelGGL(( device_function_main), dim3(grid),dim3(threads), 0, 0, d_S,d_out,d_random_data,t,true);
hipLaunchKernelGGL(( device_function_main), dim3(grid),dim3(threads), 0, 0, d_S,d_out,d_random_data,t,false);
CUDA_SAFE_CALL(hipMemcpy(h_out,d_out,mem_size_out,hipMemcpyDeviceToHost));
int energy_sum=0;
for(int i=0;i<BLOCK_SIZE;i++) energy_sum+=h_out[i];
avg_H+=(float)energy_sum/N;
avg_H_2+=pow((float)energy_sum/N,2);
avg_H_4+=pow((float)energy_sum/N,4);
// printf("%f\n",(float)energy_sum/N);
}
h_E[num_entries]=avg_H/GLOBAL_ITERATIONS;
h_U[num_entries]=1.0-((avg_H_4/GLOBAL_ITERATIONS)/(3*pow(avg_H_2/GLOBAL_ITERATIONS,2)));
//h_U[num_entries]=0.5*(3-(avg_H_4/GLOBAL_ITERATIONS)/(3*pow(avg_H_2/GLOBAL_ITERATIONS,2)));
//printf("%f %f %f\n",h_T[num_entries],h_E[num_entries],h_U[num_entries]);
std::cout << h_T[num_entries] << " " << h_E[num_entries] << " " << h_U[num_entries] << std::endl;
fprintf(fp,"%f %f\n",h_T[num_entries],h_U[num_entries]);
printf("\n");
num_entries++;
}
fclose(fp);
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_main=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_main;
std::cout << " Processing time on GPU for main function: "<< std::setprecision(2) <<std::fixed << gpu_dt_main << "(ms)" << std::endl;
std::cout <<" Total processing time on GPU:"<< std::setprecision(2) <<std::fixed << gpu_sum << "(ms)" << std::endl;
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Check kernel execution
CUT_CHECK_ERROR("Kernel execution failed");
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(hipMemcpy(h_S,d_S,mem_size,hipMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Reference solution
//cpu_function(h_ref_E, h_ref_U, h_S);
//Print spins
if(FLAG_PRINT_SPINS) {
printf("\n");
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+1 ");
else printf("-1 ");
}
printf("\n");
}
}
//Stop and destroy timer
CUDA_SAFE_CALL(hipDeviceSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float cpu_sum=cutGetTimerValue(timer);
printf("\n --------------------------------- CPU --------------------------------- \n");
printf(" Total processing time on CPU: %f (ms) \n",cpu_sum);
CUT_SAFE_CALL(cutDeleteTimer(timer));
printf("\n Speedup: %fX \n\n",(cpu_sum/gpu_sum));
//Cleaning memory
free(h_T);
free(h_U);
free(h_E);
free(h_ref_E);
free(h_ref_U);
free(h_random_data);
free(h_S);
free(h_out);
CUDA_SAFE_CALL(hipFree(d_random_data));
CUDA_SAFE_CALL(hipFree(d_S));
CUDA_SAFE_CALL(hipFree(d_out));
}
/****
*
* Device function main
*
*/
__global__ void device_function_main(int* S,int* out,int* R,float t,bool flag) {
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
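// Precompute the Metropolis acceptance probabilities for the only positive energy changes
// possible on the square lattice (dH = 4 and dH = 8); dH <= 0 is always accepted.
// Later, r*4.656612e-10 (about 1/2^31) maps the integer LCG state to a uniform number in [0,1].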
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
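// flag selects the sublattice: true updates the top-left and bottom-right spins of each
// thread's 2x2 cell, false the top-right and bottom-left, so neighbouring spins are
// never updated in the same pass (checkerboard decomposition).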
if(flag) {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0) { //Top
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0) { //Top
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
if(!flag) {
//For reduction shared memory array r is used
if(FLAG_ENERGY) {
//Calc energy
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
}
__syncthreads();
}
else {
//Calc magnetisation
dH=S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
__syncthreads();
}
//Save partial results back to shared memory in new structure
r[threadIdx.x]=dH;
//Reduction on GPU
for(unsigned int dx=1;dx<BLOCK_SIZE;dx*=2) {
if(threadIdx.x%(2*dx)==0) {
r[threadIdx.x]+=r[threadIdx.x+dx];
}
__syncthreads();
}
//Save in out
if(threadIdx.x==0) out[blockIdx.x]=r[0];
}
}
| 8b3909ac4cdcd560a9843fc00a4690d800ddca87.cu | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include "cutil.h"
#include <iostream>
#include <iomanip>
#include <curand.h>
#include <ctime>
#define FLAG_PRINT_SPINS 0
#define FLAG_ENERGY 0
#define T_START 2.30
#define T_FACTOR 0.001
#define T_END 2.20
#define GLOBAL_ITERATIONS 10
#define RANDOM_A 1664525
#define RANDOM_B 1013904223
#define BLOCK_SIZE 64
const unsigned int N=4*BLOCK_SIZE*BLOCK_SIZE;
const unsigned int n=2*BLOCK_SIZE;
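// The lattice is n x n (= 2*BLOCK_SIZE x 2*BLOCK_SIZE) spins, N sites in total:
// each block handles two adjacent rows and each thread a 2x2 cell of spins.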
/****
*
* Function declaration
*
*/
void calc(int argc,char** argv);
__global__ void device_function_main(int*,int*,int*,float,bool);
/****
*
* Main function
*
*/
int main(int argc,char** argv) {
calc(argc,argv);
}
/****
*
* Calc
*
*/
void calc(int argc,char** argv) {
std::cout << " -----------------------------------------------------------------------" << std::endl;
std::cout <<" *" << std::endl;
std::cout <<" * GPU accelerated Monte Carlo simulation of the 2D Ising model" << std::endl;
std::cout <<" *" << std::endl;
std::cout <<" ----------------------------- Ising model ----------------------------- " << std::endl;
std::cout <<" Number of Spins: " << N << std::endl;
std::cout <<" Start Temperature: " << T_START <<std::endl;
std::cout <<" Decreasing Factor: " << T_FACTOR <<std::endl;
std::cout <<" Final Temperature: " << T_END <<std::endl;
std::cout <<" Global Iterations: " << GLOBAL_ITERATIONS << std::endl;
//Init
CUT_DEVICE_INIT(argc,argv);
srand48(23);
//Allocate and init host memory for output arrays
int num_entries=0;
for(double t=T_START; t>=T_END; t=t-T_FACTOR) num_entries++;
unsigned int mem_out_size=sizeof(float)*num_entries;
float* h_T=(float*) malloc(mem_out_size);
float* h_E=(float*) malloc(mem_out_size);
float* h_U=(float*) malloc(mem_out_size);
unsigned int mem_ref_out_size=sizeof(double)*num_entries;
double* h_ref_E=(double*) malloc(mem_ref_out_size);
double* h_ref_U=(double*) malloc(mem_ref_out_size);
num_entries=0;
for(double t=T_START; t>=T_END; t=t-T_FACTOR) {
h_T[num_entries]=t;
num_entries++;
}
//Allocate and init host memory for simulation arrays
unsigned int mem_size=sizeof(int)*N;
unsigned int mem_size_random=sizeof(int)*BLOCK_SIZE*BLOCK_SIZE;
int* h_random_data=(int*) malloc(mem_size_random);
int* h_S=(int*) malloc(mem_size);
unsigned int mem_size_out=sizeof(int)*BLOCK_SIZE;
int* h_out=(int*) malloc(mem_size_out);
h_random_data[0]=1;
for(int i=1;i<BLOCK_SIZE*BLOCK_SIZE;i++) {
h_random_data[i]=16807*h_random_data[i-1];
}
for(int i=0;i<N;i++) {
if(drand48()>0.5) h_S[i]=-1;
else h_S[i]=1;
}
//Create and start timer
float gpu_sum=0;
unsigned int timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Allocate device memory for arrays
int* d_random_data;
int* d_S;
int* d_out;
CUDA_SAFE_CALL(cudaMalloc((void**) &d_random_data,mem_size_random));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_S,mem_size));
CUDA_SAFE_CALL(cudaMalloc((void**) &d_out,mem_size_out));
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_malloc=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_malloc;
std::cout <<"\n --------------------------------- GPU --------------------------------- \n" <<std::endl;
std::cout <<" Processing time on GPU for allocating:"<< std::setprecision(2) << gpu_dt_malloc << "(ms)" <<std::endl;
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Create and start timer
timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Copy host memory to device and create mirror of d_S
CUDA_SAFE_CALL(cudaMemcpy(d_random_data,h_random_data,mem_size_random,cudaMemcpyHostToDevice));
CUDA_SAFE_CALL(cudaMemcpy(d_S,h_S,mem_size,cudaMemcpyHostToDevice));
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_mem=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_mem;
printf(" Processing time on GPU for memory transfer: %f (ms) \n",gpu_dt_mem);
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(cudaMemcpy(h_S,d_S,mem_size,cudaMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
printf("\n");
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Calc energy
num_entries=0;
dim3 threads(BLOCK_SIZE);
dim3 grid(BLOCK_SIZE);
FILE *fp = fopen("gpu_T_U.dat","w");
for(float t=T_START;t>=T_END;t=t-T_FACTOR) {
double avg_H=0;
double avg_H_2=0;
double avg_H_4=0;
for(int global_iteration=0;global_iteration<GLOBAL_ITERATIONS;global_iteration++) {
device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,true);
device_function_main<<<grid,threads>>>(d_S,d_out,d_random_data,t,false);
CUDA_SAFE_CALL(cudaMemcpy(h_out,d_out,mem_size_out,cudaMemcpyDeviceToHost));
int energy_sum=0;
for(int i=0;i<BLOCK_SIZE;i++) energy_sum+=h_out[i];
avg_H+=(float)energy_sum/N;
avg_H_2+=pow((float)energy_sum/N,2);
avg_H_4+=pow((float)energy_sum/N,4);
// printf("%f\n",(float)energy_sum/N);
}
h_E[num_entries]=avg_H/GLOBAL_ITERATIONS;
h_U[num_entries]=1.0-((avg_H_4/GLOBAL_ITERATIONS)/(3*pow(avg_H_2/GLOBAL_ITERATIONS,2)));
//h_U[num_entries]=0.5*(3-(avg_H_4/GLOBAL_ITERATIONS)/(3*pow(avg_H_2/GLOBAL_ITERATIONS,2)));
//printf("%f %f %f\n",h_T[num_entries],h_E[num_entries],h_U[num_entries]);
std::cout << h_T[num_entries] << " " << h_E[num_entries] << " " << h_U[num_entries] << std::endl;
fprintf(fp,"%f %f\n",h_T[num_entries],h_U[num_entries]);
printf("\n");
num_entries++;
}
fclose(fp);
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float gpu_dt_main=cutGetTimerValue(timer);
gpu_sum+=gpu_dt_main;
std::cout << " Processing time on GPU for main function: "<< std::setprecision(2) <<std::fixed << gpu_dt_main << "(ms)" << std::endl;
std::cout <<" Total processing time on GPU:"<< std::setprecision(2) <<std::fixed << gpu_sum << "(ms)" << std::endl;
CUT_SAFE_CALL(cutDeleteTimer(timer));
//Check kernel execution
CUT_CHECK_ERROR("Kernel execution failed");
//Print spins
if(FLAG_PRINT_SPINS) {
CUDA_SAFE_CALL(cudaMemcpy(h_S,d_S,mem_size,cudaMemcpyDeviceToHost));
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+ ");
else printf("- ");
}
printf("\n");
}
}
//Create and start timer
timer=0;
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutCreateTimer(&timer));
CUT_SAFE_CALL(cutStartTimer(timer));
//Reference solution
//cpu_function(h_ref_E, h_ref_U, h_S);
//Print spins
if(FLAG_PRINT_SPINS) {
printf("\n");
for(int i=0;i<BLOCK_SIZE*2;i++) {
for(int j=0;j<BLOCK_SIZE*2;j++) {
if(h_S[i*BLOCK_SIZE*2+j]>0) printf("+1 ");
else printf("-1 ");
}
printf("\n");
}
}
//Stop and destroy timer
CUDA_SAFE_CALL(cudaThreadSynchronize());
CUT_SAFE_CALL(cutStopTimer(timer));
float cpu_sum=cutGetTimerValue(timer);
printf("\n --------------------------------- CPU --------------------------------- \n");
printf(" Total processing time on CPU: %f (ms) \n",cpu_sum);
CUT_SAFE_CALL(cutDeleteTimer(timer));
printf("\n Speedup: %fX \n\n",(cpu_sum/gpu_sum));
//Cleaning memory
free(h_T);
free(h_U);
free(h_E);
free(h_ref_E);
free(h_ref_U);
free(h_random_data);
free(h_S);
free(h_out);
CUDA_SAFE_CALL(cudaFree(d_random_data));
CUDA_SAFE_CALL(cudaFree(d_S));
CUDA_SAFE_CALL(cudaFree(d_out));
}
/****
*
* Device function main
*
*/
__global__ void device_function_main(int* S,int* out,int* R,float t,bool flag) {
//Energy variable
int dH=0;
float exp_dH_4=exp(-(4.0)/t);
float exp_dH_8=exp(-(8.0)/t);
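// Precompute the Metropolis acceptance probabilities for the only positive energy changes
// possible on the square lattice (dH = 4 and dH = 8); dH <= 0 is always accepted.
// Later, r*4.656612e-10 (about 1/2^31) maps the integer LCG state to a uniform number in [0,1].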
//Allocate shared memory
__shared__ int r[BLOCK_SIZE];
//Load random data
r[threadIdx.x]=R[threadIdx.x+BLOCK_SIZE*blockIdx.x];
__syncthreads();
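// flag selects the sublattice: true updates the top-left and bottom-right spins of each
// thread's 2x2 cell, false the top-right and bottom-left, so neighbouring spins are
// never updated in the same pass (checkerboard decomposition).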
if(flag) {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top left
if(blockIdx.x==0) { //Top
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x]*(
S[2*threadIdx.x+1]+
S[2*threadIdx.x-1]+
S[2*threadIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom right
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
__syncthreads();
}
else {
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update top right
if(blockIdx.x==0) { //Top
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1]*(
S[2*threadIdx.x+2]+
S[2*threadIdx.x]+
S[2*threadIdx.x+1+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+N-2*BLOCK_SIZE]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2-2*BLOCK_SIZE]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
else {
dH=2*S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]+
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x-2*BLOCK_SIZE]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
}
else {
S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]=-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x];
}
//Create new random numbers
r[threadIdx.x]=RANDOM_A*r[threadIdx.x]+RANDOM_B;
//Spin update bottom left
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
else {
if(threadIdx.x==0) { //Left
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
else {
dH=2*S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE+1]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE-1]+
S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)]+
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]);
}
}
if(dH==4) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_4) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else if(dH==8) {
if(fabs(r[threadIdx.x]*4.656612e-10)<exp_dH_8) {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
else {
S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
}
}
//Transfer random data back to global memory
R[threadIdx.x+BLOCK_SIZE*blockIdx.x]=r[threadIdx.x];
if(!flag) {
//For reduction shared memory array r is used
if(FLAG_ENERGY) {
//Calc energy
if(blockIdx.x==BLOCK_SIZE-1) { //Bottom
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1]);
}
}
else {
if(threadIdx.x==BLOCK_SIZE-1) { //Right
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1-2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
else {
dH=-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]*(S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+1]+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE])
-S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+1+2*BLOCK_SIZE]+S[2*threadIdx.x+4*BLOCK_SIZE*(blockIdx.x+1)])
-S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]*(S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2+2*BLOCK_SIZE]+S[2*threadIdx.x+1+4*BLOCK_SIZE*(blockIdx.x+1)]);
}
}
__syncthreads();
}
else {
//Calc magnetisation
dH=S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x]
+S[2*threadIdx.x+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE]
+S[2*threadIdx.x+1+4*BLOCK_SIZE*blockIdx.x+2*BLOCK_SIZE];
__syncthreads();
}
//Save partial results back to shared memory in new structure
r[threadIdx.x]=dH;
//Reduction on GPU
for(unsigned int dx=1;dx<BLOCK_SIZE;dx*=2) {
if(threadIdx.x%(2*dx)==0) {
r[threadIdx.x]+=r[threadIdx.x+dx];
}
__syncthreads();
}
//Save in out
if(threadIdx.x==0) out[blockIdx.x]=r[0];
}
}
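/****
*
* Note on the random numbers used above (an illustrative sketch, not part of
* the original kernel): r[] is advanced with a linear congruential generator,
* r = RANDOM_A*r + RANDOM_B (constants assumed to be defined earlier in this
* file), and the factor 4.656612e-10 is roughly 1/2^31, so
* fabs(r[threadIdx.x]*4.656612e-10) maps the signed 32-bit state onto a
* pseudo-uniform value in [0,1) for the Metropolis test. A minimal host-side
* illustration of the same acceptance step:
*
* int r = seed;
* r = RANDOM_A*r + RANDOM_B; //advance the LCG state
* float u = fabsf(r*4.656612e-10f); //scale into [0,1)
* bool accept = (u < expf(-dH/t)); //Metropolis acceptance for dH > 0
*
*/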
|
4d3615e2cd75563ec9f53b1c21d981ae5a13d0ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*-----------
*
* matrixMulGlobal.cu
*
* This is the source file for matrix multiplication with global memory only.
*
* This kernel is from NVIDIA CUDA samples. reduction_kernel.cu.
*
* streamsOptBenchmark/reduction_kernel.cu
*
* By Hao Li
*
*------------
*/
/*
Parallel reduction kernels
*/
#include <stdio.h>
// #include "structs.h"
// #include "functions.h"
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
struct SharedMemory
{
__device__ inline operator float *()
{
extern __shared__ float __smem[];
return (float *)__smem;
}
__device__ inline operator const float *() const
{
extern __shared__ float __smem[];
return (float *)__smem;
}
};
// // specialize for double to avoid unaligned memory
// // access compile errors
// template<>
// struct SharedMemory<double>
// {
// __device__ inline operator double *()
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
// __device__ inline operator const double *() const
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
// };
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
__global__ void reduce0(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
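/* Worked illustration of the divergence issue described above (a sketch,
   assuming blockDim.x = 256): at s = 1 only even-numbered threads take the
   branch, so each 32-thread warp does useful work in only 16 lanes; by s = 16
   every warp has exactly one active lane, yet all eight warps must still be
   scheduled for a single addition each. */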
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
__global__ void reduce1(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
__global__ void reduce2(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
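/* Worked example of the sequential addressing above (a sketch, for a tiny
   blockDim.x = 8): at s = 4 threads 0..3 compute sdata[0]+=sdata[4] ...
   sdata[3]+=sdata[7]; at s = 2 threads 0..1 fold sdata[2..3] into sdata[0..1];
   at s = 1 thread 0 adds sdata[1] into sdata[0]. The active threads are always
   a contiguous prefix, so whole warps retire early and consecutive lanes touch
   consecutive shared-memory addresses, avoiding bank conflicts. */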
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
__global__ void reduce3(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
int mySum = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
// void reduce(int size, int threads, int blocks,
// int whichKernel, int *d_idata, int *d_odata)
// {
// dim3 dimBlock(threads, 1, 1);
// dim3 dimGrid(blocks, 1, 1);
// // when there is only one warp per block, we need to allocate two warps
// // worth of shared memory so that we don't index shared memory out of bounds
// int smemSize = (threads <= 32) ? 2 * threads * sizeof(int) : threads * sizeof(int);
// // choose which of the optimized versions of reduction to launch
// switch (whichKernel)
// {
// case 0:
// reduce0<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 1:
// reduce1<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 2:
// reduce2<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 3:
// reduce3<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// }
// }
// int main(int argc, char **argv){
// int matrixDataSize = sizeof(int) * MATRIX_SIZE * MATRIX_SIZE;
// Matrix h_A, h_C;
// Matrix d_A, d_C;
// initMatrix(h_A, matrixDataSize, onHOST);
// initMatrix(h_C, matrixDataSize, onHOST);
// initMatrix(d_A, matrixDataSize, onDEVICE);
// initMatrix(d_C, matrixDataSize, onDEVICE);
// hipMemcpy(d_A.elements, h_A.elements, matrixDataSize, hipMemcpyHostToDevice);
// // Invoke kernel
// // dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// // dim3 dimGrid(h_B.width / dimBlock.x, h_A.height / dimBlock.y);
// // execute the kernel
// for(int i =0; i < 4; i++){
// reduce(matrixDataSize, h_A.width / BLOCK_SIZE * h_A.height / BLOCK_SIZE,
// BLOCK_SIZE*BLOCK_SIZE, i, d_A.elements, d_C.elements);
// }
// hipMemcpy(h_C.elements, d_C.elements, matrixDataSize, hipMemcpyDeviceToHost);
// free(h_A.elements);
// free(h_C.elements);
// hipFree(d_A.elements);
// hipFree(d_C.elements);
// return 0;
// }
| 4d3615e2cd75563ec9f53b1c21d981ae5a13d0ef.cu | /*-----------
*
* matrixMulGlobal.cu
*
* This is the source file for matrix multiplication with global memory only.
*
* This kernel is from NVIDIA CUDA samples. reduction_kernel.cu.
*
* streamsOptBenchmark/reduction_kernel.cu
*
* By Hao Li
*
*------------
*/
/*
Parallel reduction kernels
*/
#include <stdio.h>
// #include "structs.h"
// #include "functions.h"
// Utility class used to avoid linker errors with extern
// unsized shared memory arrays with templated type
struct SharedMemory
{
__device__ inline operator float *()
{
extern __shared__ float __smem[];
return (float *)__smem;
}
__device__ inline operator const float *() const
{
extern __shared__ float __smem[];
return (float *)__smem;
}
};
// // specialize for double to avoid unaligned memory
// // access compile errors
// template<>
// struct SharedMemory<double>
// {
// __device__ inline operator double *()
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
// __device__ inline operator const double *() const
// {
// extern __shared__ double __smem_d[];
// return (double *)__smem_d;
// }
// };
/*
Parallel sum reduction using shared memory
- takes log(n) steps for n input elements
- uses n threads
- only works for power-of-2 arrays
*/
/* This reduction interleaves which threads are active by using the modulo
operator. This operator is very expensive on GPUs, and the interleaved
inactivity means that no whole warps are active, which is also very
inefficient */
__global__ void reduce0(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
// modulo arithmetic is slow!
if ((tid % (2*s)) == 0)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
/* This version uses contiguous threads, but its interleaved
addressing results in many shared memory bank conflicts.
*/
__global__ void reduce1(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=1; s < blockDim.x; s *= 2)
{
int index = 2 * s * tid;
if (index < blockDim.x)
{
sdata[index] += sdata[index + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
/*
This version uses sequential addressing -- no divergence or bank conflicts.
*/
__global__ void reduce2(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// load shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
}
/*
This version uses n/2 threads --
it performs the first level of reduction when reading from global memory.
*/
__global__ void reduce3(float *g_idata, float *g_odata, unsigned int n)
{
for(int l = 0; l < 100000; l++)
{
float *sdata = SharedMemory();
// perform first level of reduction,
// reading from global memory, writing to shared memory
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
int mySum = (i < n) ? g_idata[i] : 0;
if (i + blockDim.x < n)
mySum += g_idata[i+blockDim.x];
sdata[tid] = mySum;
__syncthreads();
// do reduction in shared mem
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
sdata[tid] = mySum = mySum + sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = mySum;
}
}
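/* Possible further refinement (an illustrative sketch, not part of the
   original benchmark): once a block has been reduced to at most 32 partial
   sums, the remaining steps can use warp shuffles instead of shared memory.
   This assumes CUDA 9+ for __shfl_down_sync and only illustrates the idea:

__device__ float warpReduceSum(float val)
{
    // each iteration halves the number of lanes that still hold a partial sum
    for (int offset = warpSize / 2; offset > 0; offset /= 2)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;   // lane 0 ends up with the warp-wide sum
}
*/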
////////////////////////////////////////////////////////////////////////////////
// Wrapper function for kernel launch
////////////////////////////////////////////////////////////////////////////////
// void reduce(int size, int threads, int blocks,
// int whichKernel, int *d_idata, int *d_odata)
// {
// dim3 dimBlock(threads, 1, 1);
// dim3 dimGrid(blocks, 1, 1);
// // when there is only one warp per block, we need to allocate two warps
// // worth of shared memory so that we don't index shared memory out of bounds
// int smemSize = (threads <= 32) ? 2 * threads * sizeof(int) : threads * sizeof(int);
// // choose which of the optimized versions of reduction to launch
// switch (whichKernel)
// {
// case 0:
// reduce0<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 1:
// reduce1<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 2:
// reduce2<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// case 3:
// reduce3<<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
// break;
// }
// }
// int main(int argc, char **argv){
// int matrixDataSize = sizeof(int) * MATRIX_SIZE * MATRIX_SIZE;
// Matrix h_A, h_C;
// Matrix d_A, d_C;
// initMatrix(h_A, matrixDataSize, onHOST);
// initMatrix(h_C, matrixDataSize, onHOST);
// initMatrix(d_A, matrixDataSize, onDEVICE);
// initMatrix(d_C, matrixDataSize, onDEVICE);
// cudaMemcpy(d_A.elements, h_A.elements, matrixDataSize, cudaMemcpyHostToDevice);
// // Invoke kernel
// // dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
// // dim3 dimGrid(h_B.width / dimBlock.x, h_A.height / dimBlock.y);
// // execute the kernel
// for(int i =0; i < 4; i++){
// reduce(matrixDataSize, h_A.width / BLOCK_SIZE * h_A.height / BLOCK_SIZE,
// BLOCK_SIZE*BLOCK_SIZE, i, d_A.elements, d_C.elements);
// }
// cudaMemcpy(h_C.elements, d_C.elements, matrixDataSize, cudaMemcpyDeviceToHost);
// free(h_A.elements);
// free(h_C.elements);
// cudaFree(d_A.elements);
// cudaFree(d_C.elements);
// return 0;
// }
|
cd10d3050e3dc38073ab8171c1af4451bb0d4669.hip | // !!! This is a file automatically generated by hipify!!!
#include <errno.h>
#include <error.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <hip/hip_runtime.h>
#define DEVICE_NUMBER (0)
#define NOF_STAMPS (4096) //32kBytes for uint64_t
#define NOF_BLOCKS (4)
#define SPIN() // spin(10000)
typedef struct {
uint64_t *targetTimes;
uint64_t *hostTimes;
unsigned int *targetSmid;
unsigned int hostSmid[NOF_BLOCKS];
FILE *fd;
} param_t;
// Prints a message and returns zero if the given value is not hipSuccess
#define CheckCUDAError(val) (InternalCheckCUDAError((val), #val, __FILE__, __LINE__))
// Called internally by CheckCUDAError
static int InternalCheckCUDAError(hipError_t result, const char *fn,
const char *file, int line) {
if (result == hipSuccess) return 0;
printf("CUDA error %d in %s, line %d (%s): %s\n", (int) result, file, line,
fn, hipGetErrorString(result));
return -1;
}
static __device__ __inline__ unsigned int get_smid(void)
{
unsigned int ret;
asm("mov.u32 %0, %smid;":"=r"(ret) );
return ret;
}
static __device__ inline void spin(unsigned int spin_duration) {
unsigned int start_time = clock();
while ((clock() - start_time) < spin_duration) {
continue;
}
}
static __global__ void getGlobalTimerJitter(param_t params) {
__shared__ uint64_t times[NOF_STAMPS];
uint64_t tmp;
for(int i = -32; i<NOF_STAMPS; i++){
SPIN();
asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(tmp));
if(i>=0){
times[i] = tmp;
}
}
// Store times to global memory
for(int i = 0; i<NOF_STAMPS; i++){
params.targetTimes[i+blockIdx.x*NOF_STAMPS] = times[i];
}
params.targetSmid[blockIdx.x] = get_smid();
}
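/* Note (illustrative sketch, not part of the original test): %globaltimer is
   the PTX 64-bit global timer and counts in nanoseconds, so consecutive
   entries of times[] differ by the wall-clock interval between the two reads
   (plus the SPIN() padding when enabled). A minimal host-side way to inspect
   the jitter once hostTimes has been copied back might be:

   for (int i = 1; i < NOF_STAMPS; i++) {
       uint64_t delta = params.hostTimes[i] - params.hostTimes[i - 1];
       printf("%" PRIu64 "\n", delta);   // nanoseconds between samples
   }
*/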
static int initializeTest(param_t *params){
//allocate buffer
params->hostTimes = NULL;
params->hostTimes = (uint64_t *) malloc(NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t));
if (!params->hostTimes) {
perror("Failed allocating host buffer: ");
return -1;
}
memset(params->hostTimes,0, NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t));
//allocate device random buffer
if (CheckCUDAError(hipMalloc(¶ms->targetTimes, \
NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t)))) return -1;
if (CheckCUDAError(hipMalloc(¶ms->targetSmid, \
NOF_BLOCKS*sizeof(unsigned int)))) return -1;
return 0;
}
static int runTest(param_t *params){
hipLaunchKernelGGL(( getGlobalTimerJitter), dim3(NOF_BLOCKS),dim3(1), 0, 0, *params);
// Synchronize with device
if (CheckCUDAError(hipDeviceSynchronize())) return -1;
// Copyback times
if (CheckCUDAError(hipMemcpy(params->hostTimes, \
params->targetTimes, \
NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t), \
hipMemcpyDeviceToHost))) return -1;
if (CheckCUDAError(hipMemcpy(params->hostSmid, \
params->targetSmid, \
NOF_BLOCKS*sizeof(unsigned int), \
hipMemcpyDeviceToHost))) return -1;
return 0;
}
static int writeResults(param_t *params){
if (fprintf(params->fd,"{\n") < 0 ) return -1;
// Print blocks
if (fprintf(params->fd,"\"blocks\":[\n") < 0 ) return -1;
for(int j = 0; j<NOF_BLOCKS-1;j++){
if (fprintf(params->fd,"\"%d\",\n", j) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%d\"],\n", NOF_BLOCKS-1) < 0 ) return -1;
// Print SMID
for(int j = 0; j<NOF_BLOCKS;j++){
if (fprintf(params->fd,"\"smid_%d\": \"%d\",\n", j,params->hostSmid[j]) < 0 ) return -1;
}
// Write times
for(int j = 0; j<NOF_BLOCKS-1;j++){
if (fprintf(params->fd,"\"times_%d\":[\n", j) < 0 ) return -1;
for (int i = 0; i < NOF_STAMPS-1; i++){
if (fprintf(params->fd,"\"%" PRIu64 "\",\n", params->hostTimes[j*NOF_STAMPS+i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%" PRIu64 "\"],\n", params->hostTimes[j*NOF_STAMPS+NOF_STAMPS-1]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"times_%d\":[\n",NOF_BLOCKS-1) < 0 ) return -1;
for (int i = 0; i < NOF_STAMPS-1; i++){
if (fprintf(params->fd,"\"%" PRIu64 "\",\n", params->hostTimes[(NOF_BLOCKS-1)*NOF_STAMPS+i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%" PRIu64 "\"]\n}", params->hostTimes[(NOF_BLOCKS-1)*NOF_STAMPS+NOF_STAMPS-1]) < 0 ) return -1;
if (fclose(params->fd) < 0) return -1;
return 0;
}
static int cleanUp(param_t *params){
// Free target buffers
hipFree(params->targetTimes);
hipFree(params->targetSmid);
// Free host buffers
free(params->hostTimes);
return 0;
}
static void PrintUsage(const char *name) {
printf("Usage: %s <output JSON file name>\n", name);
}
int main(int argc, char **argv) {
if (argc != 2) {
PrintUsage(argv[0]);
return 1;
}
param_t params;
params.fd = NULL;
params.fd = fopen(argv[1],"w");
if (params.fd == NULL) {
perror("Error opening output file:");
return EXIT_FAILURE;
}
// Set CUDA device
if (CheckCUDAError(hipSetDevice(DEVICE_NUMBER))) {
return EXIT_FAILURE;
}
// Initialize parameters
if (initializeTest(¶ms) < 0) return EXIT_FAILURE;
// Run test
if (runTest(¶ms) < 0) return EXIT_FAILURE;
// Write results
if (writeResults(¶ms) < 0){
perror("Error while writing outpufile: ");
return EXIT_FAILURE;
}
// Clean up
if (cleanUp(¶ms) < 0) return EXIT_FAILURE;
printf("Finished testrun\n");
hipDeviceReset();
return 0;
}
| cd10d3050e3dc38073ab8171c1af4451bb0d4669.cu | #include <errno.h>
#include <error.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <cuda_runtime.h>
#define DEVICE_NUMBER (0)
#define NOF_STAMPS (4096) //32kBytes for uint64_t
#define NOF_BLOCKS (4)
#define SPIN() // spin(10000)
typedef struct {
uint64_t *targetTimes;
uint64_t *hostTimes;
unsigned int *targetSmid;
unsigned int hostSmid[NOF_BLOCKS];
FILE *fd;
} param_t;
// Prints a message and returns zero if the given value is not cudaSuccess
#define CheckCUDAError(val) (InternalCheckCUDAError((val), #val, __FILE__, __LINE__))
// Called internally by CheckCUDAError
static int InternalCheckCUDAError(cudaError_t result, const char *fn,
const char *file, int line) {
if (result == cudaSuccess) return 0;
printf("CUDA error %d in %s, line %d (%s): %s\n", (int) result, file, line,
fn, cudaGetErrorString(result));
return -1;
}
static __device__ __inline__ unsigned int get_smid(void)
{
unsigned int ret;
asm("mov.u32 %0, %smid;":"=r"(ret) );
return ret;
}
static __device__ inline void spin(unsigned int spin_duration) {
unsigned int start_time = clock();
while ((clock() - start_time) < spin_duration) {
continue;
}
}
static __global__ void getGlobalTimerJitter(param_t params) {
__shared__ uint64_t times[NOF_STAMPS];
uint64_t tmp;
for(int i = -32; i<NOF_STAMPS; i++){
SPIN();
asm volatile("mov.u64 %0, %%globaltimer;" : "=l"(tmp));
if(i>=0){
times[i] = tmp;
}
}
// Store times to global memory
for(int i = 0; i<NOF_STAMPS; i++){
params.targetTimes[i+blockIdx.x*NOF_STAMPS] = times[i];
}
params.targetSmid[blockIdx.x] = get_smid();
}
static int initializeTest(param_t *params){
//allocate buffer
params->hostTimes = NULL;
params->hostTimes = (uint64_t *) malloc(NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t));
if (!params->hostTimes) {
perror("Failed allocating host buffer: ");
return -1;
}
memset(params->hostTimes,0, NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t));
//allocate device random buffer
if (CheckCUDAError(cudaMalloc(¶ms->targetTimes, \
NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t)))) return -1;
if (CheckCUDAError(cudaMalloc(¶ms->targetSmid, \
NOF_BLOCKS*sizeof(unsigned int)))) return -1;
return 0;
}
static int runTest(param_t *params){
getGlobalTimerJitter<<<NOF_BLOCKS,1>>>(*params);
// Synchronize with device
if (CheckCUDAError(cudaDeviceSynchronize())) return -1;
// Copyback times
if (CheckCUDAError(cudaMemcpy(params->hostTimes, \
params->targetTimes, \
NOF_BLOCKS*NOF_STAMPS*sizeof(uint64_t), \
cudaMemcpyDeviceToHost))) return -1;
if (CheckCUDAError(cudaMemcpy(params->hostSmid, \
params->targetSmid, \
NOF_BLOCKS*sizeof(unsigned int), \
cudaMemcpyDeviceToHost))) return -1;
return 0;
}
static int writeResults(param_t *params){
if (fprintf(params->fd,"{\n") < 0 ) return -1;
// Print blocks
if (fprintf(params->fd,"\"blocks\":[\n") < 0 ) return -1;
for(int j = 0; j<NOF_BLOCKS-1;j++){
if (fprintf(params->fd,"\"%d\",\n", j) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%d\"],\n", NOF_BLOCKS-1) < 0 ) return -1;
// Print SMID
for(int j = 0; j<NOF_BLOCKS;j++){
if (fprintf(params->fd,"\"smid_%d\": \"%d\",\n", j,params->hostSmid[j]) < 0 ) return -1;
}
// Write times
for(int j = 0; j<NOF_BLOCKS-1;j++){
if (fprintf(params->fd,"\"times_%d\":[\n", j) < 0 ) return -1;
for (int i = 0; i < NOF_STAMPS-1; i++){
if (fprintf(params->fd,"\"%" PRIu64 "\",\n", params->hostTimes[j*NOF_STAMPS+i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%" PRIu64 "\"],\n", params->hostTimes[j*NOF_STAMPS+NOF_STAMPS-1]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"times_%d\":[\n",NOF_BLOCKS-1) < 0 ) return -1;
for (int i = 0; i < NOF_STAMPS-1; i++){
if (fprintf(params->fd,"\"%" PRIu64 "\",\n", params->hostTimes[(NOF_BLOCKS-1)*NOF_STAMPS+i]) < 0 ) return -1;
}
if (fprintf(params->fd,"\"%" PRIu64 "\"]\n}", params->hostTimes[(NOF_BLOCKS-1)*NOF_STAMPS+NOF_STAMPS-1]) < 0 ) return -1;
if (fclose(params->fd) < 0) return -1;
return 0;
}
static int cleanUp(param_t *params){
// Free target buffers
cudaFree(params->targetTimes);
cudaFree(params->targetSmid);
// Free host buffers
free(params->hostTimes);
return 0;
}
static void PrintUsage(const char *name) {
printf("Usage: %s <output JSON file name>\n", name);
}
int main(int argc, char **argv) {
if (argc != 2) {
PrintUsage(argv[0]);
return 1;
}
param_t params;
params.fd = NULL;
params.fd = fopen(argv[1],"w");
if (params.fd == NULL) {
perror("Error opening output file:");
return EXIT_FAILURE;
}
// Set CUDA device
if (CheckCUDAError(cudaSetDevice(DEVICE_NUMBER))) {
return EXIT_FAILURE;
}
// Initialize parameters
if (initializeTest(¶ms) < 0) return EXIT_FAILURE;
// Run test
if (runTest(¶ms) < 0) return EXIT_FAILURE;
// Write results
if (writeResults(¶ms) < 0){
perror("Error while writing outpufile: ");
return EXIT_FAILURE;
}
// Clean up
if (cleanUp(¶ms) < 0) return EXIT_FAILURE;
printf("Finished testrun\n");
cudaDeviceReset();
return 0;
}
|
f8c2fbfa36ba4d23f259637158aba35648d5f774.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "read_G_matrix_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int S = 1;
int vertex_index = 1;
int *i_index = NULL;
hipMalloc(&i_index, XSIZE*YSIZE);
int *j_index = NULL;
hipMalloc(&j_index, XSIZE*YSIZE);
bool *is_Bennett = NULL;
hipMalloc(&is_Bennett, XSIZE*YSIZE);
double *exp_Vj = NULL;
hipMalloc(&exp_Vj, XSIZE*YSIZE);
double *N_ptr = NULL;
hipMalloc(&N_ptr, XSIZE*YSIZE);
int LD_N = 1;
double *G_ptr = NULL;
hipMalloc(&G_ptr, XSIZE*YSIZE);
int LD_G = 1;
double *result_ptr = NULL;
hipMalloc(&result_ptr, XSIZE*YSIZE);
int incr = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
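// Note: the two while loops above just round XSIZE and YSIZE up to multiples
// of BLOCKX and BLOCKY, so the grid below is the usual ceiling division,
// e.g. gridDim.x = (XSIZE + BLOCKX - 1) / BLOCKX.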
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((read_G_matrix_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, S, vertex_index, i_index, j_index, is_Bennett, exp_Vj, N_ptr, LD_N, G_ptr, LD_G, result_ptr, incr);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((read_G_matrix_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, S, vertex_index, i_index, j_index, is_Bennett, exp_Vj, N_ptr, LD_N, G_ptr, LD_G, result_ptr, incr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((read_G_matrix_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, S, vertex_index, i_index, j_index, is_Bennett, exp_Vj, N_ptr, LD_N, G_ptr, LD_G, result_ptr, incr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | f8c2fbfa36ba4d23f259637158aba35648d5f774.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "read_G_matrix_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int S = 1;
int vertex_index = 1;
int *i_index = NULL;
cudaMalloc(&i_index, XSIZE*YSIZE);
int *j_index = NULL;
cudaMalloc(&j_index, XSIZE*YSIZE);
bool *is_Bennett = NULL;
cudaMalloc(&is_Bennett, XSIZE*YSIZE);
double *exp_Vj = NULL;
cudaMalloc(&exp_Vj, XSIZE*YSIZE);
double *N_ptr = NULL;
cudaMalloc(&N_ptr, XSIZE*YSIZE);
int LD_N = 1;
double *G_ptr = NULL;
cudaMalloc(&G_ptr, XSIZE*YSIZE);
int LD_G = 1;
double *result_ptr = NULL;
cudaMalloc(&result_ptr, XSIZE*YSIZE);
int incr = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
read_G_matrix_kernel<<<gridBlock,threadBlock>>>(S,vertex_index,i_index,j_index,is_Bennett,exp_Vj,N_ptr,LD_N,G_ptr,LD_G,result_ptr,incr);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
read_G_matrix_kernel<<<gridBlock,threadBlock>>>(S,vertex_index,i_index,j_index,is_Bennett,exp_Vj,N_ptr,LD_N,G_ptr,LD_G,result_ptr,incr);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
read_G_matrix_kernel<<<gridBlock,threadBlock>>>(S,vertex_index,i_index,j_index,is_Bennett,exp_Vj,N_ptr,LD_N,G_ptr,LD_G,result_ptr,incr);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
52a40cd9e056fd2336a1a3abfde75791f6582b1d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <cmath>
#include <hip/hip_runtime_api.h>
#define SQ(x) ((x) * (x))
static const float A = -4.0, B = 4.0; // integration limits
static const int N = 1 << 22; // number of intervals = 2^22
static const float H = (B - A) / N; // width of each integration interval
static const float PI(M_PI); // pi in single precision
__device__ float h(float x) {
return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}
float host_h(float x) {
return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}
__device__ float f(float x) {
int i;
float sum = 0.0f, x0;
for (i = 0; i < 10; ++i){
x0 = -3.3f + i * 0.7f;
sum += h(x - x0);
}
return sum/10.0f;
}
float host_f(float x) {
int i;
float sum = 0.0f, x0;
for (i = 0; i < 10; ++i){
x0 = -3.3f + i * 0.7f;
sum += host_h(x - x0);
}
return sum/10.0f;
}
__device__ float g(float x) {
float c = cosf(2.0f * PI * f(x) * x);
return expf(-x/16.0f) * SQ(c);
}
float host_g(float x) {
float c = cosf(2.0f * PI * host_f(x) * x);
return expf(-x/16.0f) * SQ(c);
}
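// Note: the kernels below implement composite Simpson quadrature. The intended
// formula (inferred from the H/6 factor and from the g(B) - g(A) correction
// applied in final_reduction) is
// integral ~ (H/6) * sum_{k=0..N-1} [ g(a_k) + 4*g(a_k + H/2) + g(a_k + H) ]
// which, over the 2N evaluation points x_i = A + i*H/2 (i = 0..2N-1), equals
// (H/6) * [ 2*sum_{even i} g(x_i) + 4*sum_{odd i} g(x_i) + g(B) - g(A) ].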
__global__ void
clean_blocks(float subtotals[]){
subtotals[blockIdx.x] = 0;
}
__global__ void
integrate_blocks(float subtotals[]) {
// Initialize the __shared__ buffer
__shared__ float partialValues[512];
//extern __shared__ float partialValues[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int tx = threadIdx.x;
// Each thread, based on its id, evaluates g(x) at its grid point
float x = 0;
x = A + i * (H / 2);
// Simpson weights: 4 at the midpoints (odd i), 2 at the interval endpoints (even i)
partialValues[tx] = (i % 2 == 0 ? 2 : 4) * g(x);
// Synchronize the threads once the evaluation is done
__syncthreads();
// Reduction sum over the partial values (the block result goes into subtotals[])
for (int offset = blockDim.x/2; offset > 0; offset >>=1){
if (tx < offset)
{
//Add in the partial sum at this offset
partialValues[tx] += partialValues[tx+offset];
}
__syncthreads();
}
// Write out the result held in the first element of the array
if(tx == 0)
{
// Results are stored per block, not per thread
subtotals[blockIdx.x] = partialValues[0];
}
}
__global__ void
reduction(float subtotals[]) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int tx = threadIdx.x;
for (int offset = blockDim.x/2; offset > 0; offset >>=1){
if (tx < offset)
{
subtotals[i] += subtotals[i+offset];
}
__syncthreads();
}
if(tx == 0)
{
subtotals[blockIdx.x] = subtotals[0];
}
}
__global__ void
final_reduction(float subtotals[]) {
float suma = 0.0f; // the accumulator must start at zero
int n = 2 * N / blockDim.x;
if (threadIdx.x == 0){
for (int i = 0; i < n ; i+= blockDim.x){
suma += subtotals[i];
}
suma += g(B) - g(A);
subtotals[0] = suma;
}
}
int main(int argc, char *argv[]) {
// The program takes the number of threads per block as a parameter.
// Remember that this number must be a multiple of 32 (the warp size)
// and can be at most 512 (hardware limitation).
if (argc < 2) {
std::cerr << "Usage: " << argv[0] << " threads_per_block" << std::endl;
std::exit(1);
}
int block_size = std::atoi(argv[1]);
// With N subintervals, the function must be evaluated at 2N + 1 points.
// To parallelize, it is simpler to evaluate 2N points and add the last one at the end.
// Therefore we need 2N threads.
int nr_blocks = 2 * N / block_size;
size_t sharedMem = 512;
// Allocate arrays in RAM and on the GPU to store the results.
float *subtotals_h, *subtotals_d;
subtotals_h = new float[nr_blocks];
hipMalloc((void **) &subtotals_d, sizeof(float) * nr_blocks);
// kernel that clears the subtotal array
hipLaunchKernelGGL(( clean_blocks), dim3(nr_blocks), dim3(1), 0, 0, subtotals_d);
// kernel that computes the weighted function values of the integral
hipLaunchKernelGGL(( integrate_blocks), dim3(nr_blocks), dim3(block_size),sharedMem, 0, subtotals_d);
// kernel that performs the first reduction down to a single block's worth of values
hipLaunchKernelGGL(( reduction), dim3(nr_blocks/block_size), dim3(block_size), 0, 0, subtotals_d);
// kernel that performs the final reduction in one block and produces the total sum
hipLaunchKernelGGL(( final_reduction), dim3(1),dim3(block_size), 0, 0, subtotals_d);
hipMemcpy(subtotals_h, subtotals_d, sizeof(float) * nr_blocks, hipMemcpyDeviceToHost);
float sum = subtotals_h[0];
float integral = sum * H / 6.0f;
std::cout << "Integral: " << std::setprecision(5) << integral << std::endl;
sum = 0.0;
hipFree(subtotals_d);
delete[] subtotals_h; // allocated with new[], so delete[] rather than free
}
| 52a40cd9e056fd2336a1a3abfde75791f6582b1d.cu | #include <iostream>
#include <iomanip>
#include <cstdlib>
#include <cmath>
#include <cuda_runtime_api.h>
#define SQ(x) ((x) * (x))
static const float A = -4.0, B = 4.0; // integration limits
static const int N = 1 << 22; // number of intervals = 2^22
static const float H = (B - A) / N; // width of each integration interval
static const float PI(M_PI); // pi in single precision
__device__ float h(float x) {
return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}
float host_h(float x) {
return .5f + 1.5f / (1.0f + 50.0f * SQ(x));
}
__device__ float f(float x) {
int i;
float sum = 0.0f, x0;
for (i = 0; i < 10; ++i){
x0 = -3.3f + i * 0.7f;
sum += h(x - x0);
}
return sum/10.0f;
}
float host_f(float x) {
int i;
float sum = 0.0f, x0;
for (i = 0; i < 10; ++i){
x0 = -3.3f + i * 0.7f;
sum += host_h(x - x0);
}
return sum/10.0f;
}
__device__ float g(float x) {
float c = cosf(2.0f * PI * f(x) * x);
return expf(-x/16.0f) * SQ(c);
}
float host_g(float x) {
float c = cosf(2.0f * PI * host_f(x) * x);
return expf(-x/16.0f) * SQ(c);
}
__global__ void
clean_blocks(float subtotals[]){
subtotals[blockIdx.x] = 0;
}
__global__ void
integrate_blocks(float subtotals[]) {
// Initialize the __shared__ buffer
__shared__ float partialValues[512];
//extern __shared__ float partialValues[];
int i = threadIdx.x + blockIdx.x * blockDim.x;
int tx = threadIdx.x;
// Each thread, based on its id, evaluates g(x) at its grid point
float x = 0;
x = A + i * (H / 2);
// Simpson weights: 4 at the midpoints (odd i), 2 at the interval endpoints (even i)
partialValues[tx] = (i % 2 == 0 ? 2 : 4) * g(x);
// Synchronize the threads once the evaluation is done
__syncthreads();
// Reduction sum over the partial values (the block result goes into subtotals[])
for (int offset = blockDim.x/2; offset > 0; offset >>=1){
if (tx < offset)
{
//Add in the partial sum at this offset
partialValues[tx] += partialValues[tx+offset];
}
__syncthreads();
}
// Write out the result held in the first element of the array
if(tx == 0)
{
// Results are stored per block, not per thread
subtotals[blockIdx.x] = partialValues[0];
}
}
__global__ void
reduction(float subtotals[]) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int tx = threadIdx.x;
for (int offset = blockDim.x/2; offset > 0; offset >>=1){
if (tx < offset)
{
subtotals[i] += subtotals[i+offset];
}
__syncthreads();
}
if(tx == 0)
{
subtotals[blockIdx.x] = subtotals[0];
}
}
__global__ void
final_reduction(float subtotals[]) {
float suma = 0.0f; // the accumulator must start at zero
int n = 2 * N / blockDim.x;
if (threadIdx.x == 0){
for (int i = 0; i < n ; i+= blockDim.x){
suma += subtotals[i];
}
suma += g(B) - g(A);
subtotals[0] = suma;
}
}
int main(int argc, char *argv[]) {
// The program takes the number of threads per block as a parameter.
// Remember that this number must be a multiple of 32 (the warp size)
// and can be at most 512 (hardware limitation).
if (argc < 2) {
std::cerr << "Usage: " << argv[0] << " threads_per_block" << std::endl;
std::exit(1);
}
int block_size = std::atoi(argv[1]);
// With N subintervals, the function must be evaluated at 2N + 1 points.
// To parallelize, it is simpler to evaluate 2N points and add the last one at the end.
// Therefore we need 2N threads.
int nr_blocks = 2 * N / block_size;
size_t sharedMem = 512;
// Allocate arrays in RAM and on the GPU to store the results.
float *subtotals_h, *subtotals_d;
subtotals_h = new float[nr_blocks];
cudaMalloc((void **) &subtotals_d, sizeof(float) * nr_blocks);
// kernel that clears the subtotal array
clean_blocks<<<nr_blocks, 1>>>(subtotals_d);
// kernel that computes the weighted function values of the integral
integrate_blocks<<<nr_blocks, block_size,sharedMem>>>(subtotals_d);
// kernel that performs the first reduction down to a single block's worth of values
reduction<<<nr_blocks/block_size, block_size>>>(subtotals_d);
// kernel that performs the final reduction in one block and produces the total sum
final_reduction<<<1,block_size>>>(subtotals_d);
cudaMemcpy(subtotals_h, subtotals_d, sizeof(float) * nr_blocks, cudaMemcpyDeviceToHost);
float sum = subtotals_h[0];
float integral = sum * H / 6.0f;
std::cout << "Integral: " << std::setprecision(5) << integral << std::endl;
sum = 0.0;
cudaFree(subtotals_d);
delete[] subtotals_h; // allocated with new[], so delete[] rather than free
}
|
d4921bf26d2983f140ef5b2a61f88cf1cb1319c9.hip | // !!! This is a file automatically generated by hipify!!!
#include <darknet/darknet.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <rocblas.h>
#include <darknet/activations.h>
#include <darknet/dark_cuda.h>
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01f * (x + 4);
if(x > 4) return .01f * (x - 4) + 1;
return .125f*x + .5f;
}
__device__ float stair_activate_kernel(float x)
{
int n = floorf(x);
if (n%2 == 0) return floorf(x/2.f);
else return (x - n) + floorf(x/2.f);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.F)/2.F;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float selu_gradient_kernel(float x) { return (x >= 0)*1.0507f + (x < 0)*(x + 1.0507f*1.6732f); }
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01f : .125f;}
__device__ float stair_gradient_kernel(float x)
{
if (floor(x) == x) return 0;
return 1;
}
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) {
float de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s / 2 + i] = x1*de;
}
}
extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_gradient_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, dx, n / 2, size, a, y);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) y[id] = x1*x2;
}
extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_activate_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, n / 2, size, a, y);
CHECK_CUDA(hipPeekAtLastError());
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void activate_array_swish_kernel(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
float sigmoid = logistic_activate_kernel(x_val);
output_sigmoid_gpu[i] = sigmoid;
output_gpu[i] = x_val * sigmoid;
}
}
__global__ void activate_array_leaky_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = leaky_activate_kernel(x[index]);
}
}
__global__ void activate_array_selu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = selu_activate_kernel(x[index]);
}
}
__global__ void activate_array_logistic_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = logistic_activate_kernel(x[index]);
}
}
__global__ void activate_array_tanh_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = tanh_activate_kernel(x[index]);
}
}
__global__ void activate_array_hardtan_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = hardtan_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu_activate_kernel(x[index]);
}
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cu#L28-L30
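// Derivation: for swish y = x*sigmoid(x), the derivative is
// dy/dx = sigmoid(x) + x*sigmoid(x)*(1 - sigmoid(x)) = y + sigmoid(x)*(1 - y),
// which is exactly the "swish + sigmoid_gpu[i]*(1 - swish)" expression below,
// since x[i] here already holds the swish output of the forward pass.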
__global__ void gradient_array_swish_kernel(float *x, int n, float *sigmoid_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float swish = x[i];
delta[i] *= swish + sigmoid_gpu[i] * (1 - swish); // gradient_kernel(x[i], a);
}
}
__global__ void gradient_array_leaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_selu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= selu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_logistic_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= logistic_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_tanh_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= tanh_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_hardtan_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= hardtan_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu_gradient_kernel(x[index]);
}
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if(a == LEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == TANH) activate_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == HARDTAN) activate_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU) activate_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else
hipLaunchKernelGGL(( activate_array_kernel), dim3(cuda_gridsize(n)), dim3(BLOCK), 0, get_cuda_stream(), x, n, a);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void activate_array_swish_ongpu(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, output_sigmoid_gpu, output_gpu);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) gradient_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == LOGISTIC) gradient_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == TANH) gradient_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == HARDTAN) gradient_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == SELU) gradient_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else
gradient_array_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, a, delta);
CHECK_CUDA(hipPeekAtLastError());
}
extern "C" void gradient_array_swish_ongpu(float *x, int n, float *sigmoid_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, sigmoid_gpu, delta);
CHECK_CUDA(hipPeekAtLastError());
} | d4921bf26d2983f140ef5b2a61f88cf1cb1319c9.cu | #include <darknet/darknet.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <cublas_v2.h>
#include <darknet/activations.h>
#include <darknet/dark_cuda.h>
__device__ float lhtan_activate_kernel(float x)
{
if(x < 0) return .001*x;
if(x > 1) return .001*(x-1) + 1;
return x;
}
__device__ float lhtan_gradient_kernel(float x)
{
if(x > 0 && x < 1) return 1;
return .001;
}
__device__ float hardtan_activate_kernel(float x)
{
if (x < -1) return -1;
if (x > 1) return 1;
return x;
}
__device__ float linear_activate_kernel(float x){return x;}
__device__ float logistic_activate_kernel(float x){return 1.f/(1.f + expf(-x));}
__device__ float loggy_activate_kernel(float x){return 2.f/(1.f + expf(-x)) - 1;}
__device__ float relu_activate_kernel(float x){return x*(x>0);}
__device__ float elu_activate_kernel(float x){return (x >= 0)*x + (x < 0)*(expf(x)-1);}
__device__ float selu_activate_kernel(float x) { return (x >= 0)*1.0507f*x + (x < 0)*1.0507f*1.6732f*(expf(x) - 1); }
__device__ float relie_activate_kernel(float x){return (x>0) ? x : .01f*x;}
__device__ float ramp_activate_kernel(float x){return x*(x>0)+.1f*x;}
__device__ float leaky_activate_kernel(float x){return (x>0) ? x : .1f*x;}
__device__ float tanh_activate_kernel(float x){return (2/(1 + expf(-2*x)) - 1);}
__device__ float plse_activate_kernel(float x)
{
if(x < -4) return .01f * (x + 4);
if(x > 4) return .01f * (x - 4) + 1;
return .125f*x + .5f;
}
__device__ float stair_activate_kernel(float x)
{
int n = floorf(x);
if (n%2 == 0) return floorf(x/2.f);
else return (x - n) + floorf(x/2.f);
}
__device__ float hardtan_gradient_kernel(float x)
{
if (x > -1 && x < 1) return 1;
return 0;
}
__device__ float linear_gradient_kernel(float x){return 1;}
__device__ float logistic_gradient_kernel(float x){return (1-x)*x;}
__device__ float loggy_gradient_kernel(float x)
{
float y = (x+1.F)/2.F;
return 2*(1-y)*y;
}
__device__ float relu_gradient_kernel(float x){return (x>0);}
__device__ float elu_gradient_kernel(float x){return (x >= 0) + (x < 0)*(x + 1);}
__device__ float selu_gradient_kernel(float x) { return (x >= 0)*1.0507f + (x < 0)*(x + 1.0507f*1.6732f); }
__device__ float relie_gradient_kernel(float x){return (x>0) ? 1 : .01f;}
__device__ float ramp_gradient_kernel(float x){return (x>0)+.1f;}
__device__ float leaky_gradient_kernel(float x){return (x>0) ? 1 : .1f;}
__device__ float tanh_gradient_kernel(float x){return 1-x*x;}
__device__ float plse_gradient_kernel(float x){return (x < 0 || x > 1) ? .01f : .125f;}
__device__ float stair_gradient_kernel(float x)
{
if (floor(x) == x) return 0;
return 1;
}
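// Generic per-element dispatchers: activate_kernel/gradient_kernel switch on the
// activation type for every element. The host wrappers further down prefer the
// specialized per-activation kernels and only fall back to these generic ones.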
__device__ float activate_kernel(float x, ACTIVATION a)
{
switch(a){
case LINEAR:
return linear_activate_kernel(x);
case LOGISTIC:
return logistic_activate_kernel(x);
case LOGGY:
return loggy_activate_kernel(x);
case RELU:
return relu_activate_kernel(x);
case ELU:
return elu_activate_kernel(x);
case SELU:
return selu_activate_kernel(x);
case RELIE:
return relie_activate_kernel(x);
case RAMP:
return ramp_activate_kernel(x);
case LEAKY:
return leaky_activate_kernel(x);
case TANH:
return tanh_activate_kernel(x);
case PLSE:
return plse_activate_kernel(x);
case STAIR:
return stair_activate_kernel(x);
case HARDTAN:
return hardtan_activate_kernel(x);
case LHTAN:
return lhtan_activate_kernel(x);
}
return 0;
}
__device__ float gradient_kernel(float x, ACTIVATION a)
{
switch (a) {
case LINEAR:
return linear_gradient_kernel(x);
case LOGISTIC:
return logistic_gradient_kernel(x);
case LOGGY:
return loggy_gradient_kernel(x);
case RELU:
return relu_gradient_kernel(x);
case ELU:
return elu_gradient_kernel(x);
case SELU:
return selu_gradient_kernel(x);
case RELIE:
return relie_gradient_kernel(x);
case RAMP:
return ramp_gradient_kernel(x);
case LEAKY:
return leaky_gradient_kernel(x);
case TANH:
return tanh_gradient_kernel(x);
case PLSE:
return plse_gradient_kernel(x);
case STAIR:
return stair_gradient_kernel(x);
case HARDTAN:
return hardtan_gradient_kernel(x);
case LHTAN:
return lhtan_gradient_kernel(x);
}
return 0;
}
__global__ void binary_gradient_array_kernel(float *x, float *dy, int n, int s, BINARY_ACTIVATION a, float *dx)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) {
float de = dy[id];
dx[b*s + i] = x2*de;
dx[b*s + s / 2 + i] = x1*de;
}
}
extern "C" void binary_gradient_array_gpu(float *x, float *dx, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_gradient_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, dx, n / 2, size, a, y);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void binary_activate_array_kernel(float *x, int n, int s, BINARY_ACTIVATION a, float *y)
{
int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
int i = id % s;
int b = id / s;
float x1 = x[b*s + i];
float x2 = x[b*s + s / 2 + i];
if (id < n) y[id] = x1*x2;
}
extern "C" void binary_activate_array_gpu(float *x, int n, int size, BINARY_ACTIVATION a, float *y)
{
binary_activate_array_kernel << <cuda_gridsize(n / 2), BLOCK, 0, get_cuda_stream() >> >(x, n / 2, size, a, y);
CHECK_CUDA(cudaPeekAtLastError());
}
__global__ void activate_array_kernel(float *x, int n, ACTIVATION a)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) x[i] = activate_kernel(x[i], a);
}
__global__ void activate_array_swish_kernel(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float x_val = x[i];
float sigmoid = logistic_activate_kernel(x_val);
output_sigmoid_gpu[i] = sigmoid;
output_gpu[i] = x_val * sigmoid;
}
}
__global__ void activate_array_leaky_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = leaky_activate_kernel(x[index]);
}
}
__global__ void activate_array_selu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = selu_activate_kernel(x[index]);
}
}
__global__ void activate_array_logistic_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = logistic_activate_kernel(x[index]);
}
}
__global__ void activate_array_tanh_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = tanh_activate_kernel(x[index]);
}
}
__global__ void activate_array_hardtan_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = hardtan_activate_kernel(x[index]);
}
}
__global__ void activate_array_relu_kernel(float *x, int n)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
x[index] = relu_activate_kernel(x[index]);
}
}
__global__ void gradient_array_kernel(float *x, int n, ACTIVATION a, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if(i < n) delta[i] *= gradient_kernel(x[i], a);
}
// https://github.com/BVLC/caffe/blob/04ab089db018a292ae48d51732dd6c66766b36b6/src/caffe/layers/swish_layer.cu#L28-L30
__global__ void gradient_array_swish_kernel(float *x, int n, float *sigmoid_gpu, float *delta)
{
int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if (i < n) {
float swish = x[i];
delta[i] *= swish + sigmoid_gpu[i] * (1 - swish); // gradient_kernel(x[i], a);
}
}
__global__ void gradient_array_leaky_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= leaky_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_selu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= selu_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_logistic_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= logistic_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_tanh_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= tanh_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_hardtan_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= hardtan_gradient_kernel(x[index]);
}
}
__global__ void gradient_array_relu_kernel(float *x, int n, float *delta)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index < n) {
delta[index] *= relu_gradient_kernel(x[index]);
}
}
extern "C" void activate_array_ongpu(float *x, int n, ACTIVATION a)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if(a == LEAKY) activate_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == LOGISTIC) activate_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == TANH) activate_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == HARDTAN) activate_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == RELU) activate_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else if (a == SELU) activate_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n);
else
activate_array_kernel<<<cuda_gridsize(n), BLOCK, 0, get_cuda_stream()>>>(x, n, a);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void activate_array_swish_ongpu(float *x, int n, float *output_sigmoid_gpu, float *output_gpu)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
activate_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> >(x, n, output_sigmoid_gpu, output_gpu);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_ongpu(float *x, int n, ACTIVATION a, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
if (a == LINEAR) return;
else if (a == LEAKY) gradient_array_leaky_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == LOGISTIC) gradient_array_logistic_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == TANH) gradient_array_tanh_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == HARDTAN) gradient_array_hardtan_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == RELU) gradient_array_relu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else if (a == SELU) gradient_array_selu_kernel << <num_blocks, BLOCK, 0, get_cuda_stream() >> >(x, n, delta);
else
gradient_array_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, a, delta);
CHECK_CUDA(cudaPeekAtLastError());
}
extern "C" void gradient_array_swish_ongpu(float *x, int n, float *sigmoid_gpu, float *delta)
{
const int num_blocks = get_number_of_blocks(n, BLOCK);
gradient_array_swish_kernel << <cuda_gridsize(n), BLOCK, 0, get_cuda_stream() >> > (x, n, sigmoid_gpu, delta);
CHECK_CUDA(cudaPeekAtLastError());
} |
8747ea3d500940141203c94d82f5f2047514587b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.2) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date May 2015
@generated from magma_zmcsrcompressor_gpu.cu normal z -> s, Sun May 3 11:22:58 2015
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
// copy nonzeros into new structure
__global__ void
magma_smcsrgpu_kernel1( int num_rows,
float *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
float *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
float zero = MAGMA_S_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
        // this is not a correct row pointer yet! it is the nonzero count (nnz) of this row!
B_rowptr[ row ] = new_location-start;
}
}
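// Two-pass scheme: kernel1 above only stores each row's nonzero count in B_rowptr;
// kernel2 below turns those counts into a proper CSR row pointer by a serial scan,
// e.g. per-row counts {2, 0, 3} become the row pointer {0, 2, 2, 5}.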
// generate a valid rowpointer
__global__ void
magma_smcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_smcsrgpu_kernel3( int num_rows,
float *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
float *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
float zero = MAGMA_S_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param
A magma_s_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
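/*
    Illustrative usage (a minimal sketch, assuming A was built elsewhere): the
    routine compresses a CSR matrix that already lives on the GPU in place, so a
    typical call is simply

        // A is a magma_s_matrix with memory_location == Magma_DEV and
        // storage_type == Magma_CSR; queue is an existing magma_queue_t
        magma_smcsrcompressor_gpu( &A, queue );

    afterwards A.nnz, A.dval and A.dcol describe the matrix without its explicit
    zeros. Matrices held elsewhere or in another storage format are converted and
    transferred internally, as the fallback branch below shows.
*/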
extern "C" magma_int_t
magma_smcsrcompressor_gpu(
magma_s_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_s_matrix B={Magma_CSR}, B2={Magma_CSR};
magma_s_matrix dA={Magma_CSR}, CSRA={Magma_CSR};
magma_index_t *cputmp = NULL;
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 ));
CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 ));
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 );
dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) );
        // first pass: count the nonzeros of each row and write the count into B.drow
hipLaunchKernelGGL(( magma_smcsrgpu_kernel1), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue ,
A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
hipLaunchKernelGGL(( magma_smcsrgpu_kernel2), dim3(grid2), dim3(BLOCK_SIZE2), 0, queue ,
A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
CHECK( magma_index_malloc_cpu( &cputmp, 1 ));
magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 );
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
CHECK( magma_smalloc( &B.dval, A->nnz ));
CHECK( magma_index_malloc( &B.dcol, A->nnz ));
// copy correct values back
hipLaunchKernelGGL(( magma_smcsrgpu_kernel3), dim3(grid1), dim3(BLOCK_SIZE1), 0, queue ,
A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
}
else {
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
CHECK( magma_smconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue ));
CHECK( magma_smtransfer( *A, &dA, A->memory_location, Magma_DEV, queue ));
CHECK( magma_smcsrcompressor_gpu( &dA, queue ));
magma_smfree( &dA, queue );
magma_smfree( A, queue );
CHECK( magma_smtransfer( dA, &CSRA, Magma_DEV, A_location, queue ));
CHECK( magma_smconvert( CSRA, A, Magma_CSR, A_storage, queue ));
magma_smfree( &dA, queue );
magma_smfree( &CSRA, queue );
}
cleanup:
magma_smfree( &dA, queue );
magma_smfree( &CSRA, queue );
magma_free( B2.drow );
magma_free( B.drow );
return info;
}
| 8747ea3d500940141203c94d82f5f2047514587b.cu | /*
-- MAGMA (version 1.6.2) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date May 2015
@generated from magma_zmcsrcompressor_gpu.cu normal z -> s, Sun May 3 11:22:58 2015
@author Hartwig Anzt
*/
#include "common_magmasparse.h"
#define BLOCK_SIZE1 256
#define BLOCK_SIZE2 1
// copy nonzeros into new structure
__global__ void
magma_smcsrgpu_kernel1( int num_rows,
float *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind,
float *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind ){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if(row<num_rows){
float zero = MAGMA_S_ZERO;
int start = A_rowptr[ row ];
int new_location = start;
int end = A_rowptr[ row+1 ];
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
// B_val[new_location] = A_val[j];
// B_colind[new_location] = A_colind[j];
new_location++;
}
}
        // this is not a correct row pointer yet! it is the nonzero count (nnz) of this row!
B_rowptr[ row ] = new_location-start;
}
}
// generate a valid rowpointer
__global__ void
magma_smcsrgpu_kernel2( int num_rows,
magma_index_t *B_rowptr,
magma_index_t *A_rowptr ){
int idx = blockIdx.x*blockDim.x+threadIdx.x;
int j, nnz = 0;
if( idx == 0 ){
A_rowptr[ 0 ] = nnz;
for( j=0; j<num_rows; j++ ){
nnz+=B_rowptr[ j ];
A_rowptr[ j+1 ] = nnz;
}
}
}
// copy new structure into original matrix
__global__ void
magma_smcsrgpu_kernel3( int num_rows,
float *B_val,
magma_index_t *B_rowptr,
magma_index_t *B_colind,
magma_index_t *B2_rowptr,
float *A_val,
magma_index_t *A_rowptr,
magma_index_t *A_colind
){
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j, new_location;
if(row<num_rows){
new_location = A_rowptr[ row ];
int start = B2_rowptr[ row ];
int end = B2_rowptr[ row+1 ];
float zero = MAGMA_S_ZERO;
for( j=start; j<end; j++ ){
if( A_val[j] != zero ){
B_val[new_location] = A_val[j];
B_colind[new_location] = A_colind[j];
new_location++;
}
// A_val[ j ] = B_val[ j ];
// A_colind[ j ] = B_colind[ j ];
}
}
}
/**
Purpose
-------
Removes zeros in a CSR matrix. This is a GPU implementation of the
CSR compressor.
Arguments
---------
@param
A magma_s_matrix*
input/output matrix
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_saux
********************************************************************/
extern "C" magma_int_t
magma_smcsrcompressor_gpu(
magma_s_matrix *A,
magma_queue_t queue )
{
magma_int_t info = 0;
magma_s_matrix B={Magma_CSR}, B2={Magma_CSR};
magma_s_matrix dA={Magma_CSR}, CSRA={Magma_CSR};
magma_index_t *cputmp = NULL;
if ( A->memory_location == Magma_DEV && A->storage_type == Magma_CSR ) {
CHECK( magma_index_malloc( &B.drow, A->num_rows + 1 ));
CHECK( magma_index_malloc( &B2.drow, A->num_rows + 1 ));
magma_index_copyvector( (A->num_rows+1), A->drow, 1, B2.drow, 1 );
dim3 grid1( magma_ceildiv( A->num_rows, BLOCK_SIZE1 ) );
        // first pass: count the nonzeros of each row and write the count into B.drow
magma_smcsrgpu_kernel1<<< grid1, BLOCK_SIZE1, 0, queue >>>
( A->num_rows, A->dval, A->drow, A->dcol, B.dval, B.drow, B.dcol );
// correct the row pointer
dim3 grid2( 1, 1, 1);
magma_smcsrgpu_kernel2<<< grid2, BLOCK_SIZE2, 0, queue >>>
( A->num_rows, B.drow, A->drow );
// access the true number of nonzeros
CHECK( magma_index_malloc_cpu( &cputmp, 1 ));
magma_index_getvector( 1, A->row+(A->num_rows), 1, cputmp, 1 );
A->nnz = (magma_int_t) cputmp[0];
// reallocate with right size
CHECK( magma_smalloc( &B.dval, A->nnz ));
CHECK( magma_index_malloc( &B.dcol, A->nnz ));
// copy correct values back
magma_smcsrgpu_kernel3<<< grid1, BLOCK_SIZE1, 0, queue >>>
( A->num_rows, B.dval, B.drow, B.dcol, B2.drow, A->dval, A->drow, A->dcol );
magma_free( A->dcol );
magma_free( A->dval );
A->dcol = B.dcol;
A->dval = B.dval;
}
else {
magma_storage_t A_storage = A->storage_type;
magma_location_t A_location = A->memory_location;
CHECK( magma_smconvert( *A, &CSRA, A->storage_type, Magma_CSR, queue ));
CHECK( magma_smtransfer( *A, &dA, A->memory_location, Magma_DEV, queue ));
CHECK( magma_smcsrcompressor_gpu( &dA, queue ));
magma_smfree( &dA, queue );
magma_smfree( A, queue );
CHECK( magma_smtransfer( dA, &CSRA, Magma_DEV, A_location, queue ));
CHECK( magma_smconvert( CSRA, A, Magma_CSR, A_storage, queue ));
magma_smfree( &dA, queue );
magma_smfree( &CSRA, queue );
}
cleanup:
magma_smfree( &dA, queue );
magma_smfree( &CSRA, queue );
magma_free( B2.drow );
magma_free( B.drow );
return info;
}
|
18597fd3003556a3184611f70a97bc9c05aef882.hip | // !!! This is a file automatically generated by hipify!!!
/*Author: Rodrigo Gonçalves de Branco
Date: 24/03/2015
*/
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cmath>
#include <climits>
#include <stdio.h>
#include "cuda_util.h"
using namespace std;
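// Overall approach: the N x N x N cube is prefix-summed along j and then along k,
// so the sum of any (j in [g,h], k in [r,t]) slab of a fixed row i can be read in
// O(1) by inclusion-exclusion. computeCghrt then enumerates the (r,t) pairs across
// blocks and the (g,h) pairs across threads and runs Kadane's algorithm over i on
// those slab sums, yielding the maximum-sum contiguous sub-box of the cube.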
__global__
void prefixsumJAxis(int* v, int N)
{
int sqrN = N*N;
for(int k = blockIdx.x; k < N; k += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int j = 1; j < N; j++) {
v[sqrN*k + N*i + j] += v[sqrN*k + N*i + j-1];
}
}
}
}
__global__
void prefixsumKAxis(int* v, int N)
{
int sqrN = N*N;
for(int j = blockIdx.x; j < N; j += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int k = 1; k < N; k++) {
v[sqrN*k + N*i + j] += v[sqrN*(k-1) + N*i + j];
}
}
}
}
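// row_index/column_index invert the triangular enumeration of the upper half of an
// M x M grid: a flat index i in [0, M*(M+1)/2) is mapped back to (row, column) with
// row <= column, e.g. for M = 3 the indices 0..5 give (0,0) (0,1) (0,2) (1,1) (1,2) (2,2).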
__device__
int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__
int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__device__
int maxSubArraySum(int* v, int N, int g, int h, int r, int t) {
int max_so_far = 0, max_ending_here = 0;
int sqrN = N*N;
for(int i = 0; i < N; i++)
{
int tmp1 = v[sqrN*t + N*i + h];
int tmp2 = r > 0 ? v[sqrN*(r-1) + N*i + h] : 0;
int tmp3 = g > 0 ? v[sqrN*t + N*i + (g-1)] : 0;
        //The corner block (tmp4) may have been subtracted twice. If that is true, we need to add it back!
int tmp4 = r > 0 && g > 0 ? v[sqrN*(r-1) + N*i + (g-1)] : 0 ;
int temp = tmp1 - tmp2 - tmp3 + tmp4;
//printf("g:%d h:%d r:%d t:%d => %d - %d - %d + %d = %d\n",g,h,r,t,tmp1,tmp2,tmp3,tmp4,temp);
max_ending_here = max_ending_here + temp;
if(max_ending_here < 0)
max_ending_here = 0;
if(max_so_far < max_ending_here)
max_so_far = max_ending_here;
}
return max_so_far;
}
__global__
void computeCghrt(int* v, int N, int * result)
{
int computationSize = (N*(N+1))>>1;
int maxsofar = INT_MIN;
//to cover all R e T index
//printf("blk:%d thd:%d gridDim:%d blockDim:%d\n",blockIdx.x,threadIdx.x,gridDim.x,blockDim.x);
for(int blkstep = 0; blkstep < computationSize; blkstep += gridDim.x) {
int r = row_index(blockIdx.x + blkstep,N);
int t = column_index(blockIdx.x + blkstep,N);
if(r >= 0 && t >= 0 && r < N && t < N && r <= t) {
//to cover all G e H index
for(int thdstep = 0; thdstep < computationSize; thdstep += blockDim.x) {
int g = row_index(threadIdx.x + thdstep,N);
int h = column_index(threadIdx.x + thdstep,N);
if(g >= 0 && h >= 0 && g < N && h < N && g <= h) {
int newmax = maxSubArraySum(v,N,g,h,r,t);
maxsofar = newmax > maxsofar ? newmax : maxsofar;
}
}
}
}
atomicMax(result,maxsofar);
}
/*void print(int* v, int N) {
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cout<<v[N*N*k + N*i + j]<<" ";
}
cout<<endl;
}
cout<<endl;
}
}*/
int main() {
//size of cube
int N;
cin>>N;
//cube representation: O(n^3) of space
int* cube = (int*)malloc(N*N*N*sizeof(int**));
//Reading the values
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cin>>cube[N*N*k + N*i + j];
}
}
}
//cout<<"original:"<<endl;
//print(cube,N);
int* dcube, *dresult;
HANDLE_ERROR( hipMalloc( (void**)&dcube, N*N*N*sizeof(int)));
HANDLE_ERROR( hipMemcpy( dcube, cube, N*N*N*sizeof(int),hipMemcpyHostToDevice ) );
int minValue = INT_MIN;
HANDLE_ERROR( hipMalloc( (void**)&dresult, sizeof(int)));
HANDLE_ERROR( hipMemcpy( dresult, &minValue, sizeof(int),hipMemcpyHostToDevice ) );
int numSMs;
hipDeviceGetAttribute(&numSMs, hipDeviceAttributeMultiprocessorCount, 0);
dim3 dimThreads(256);
dim3 dimBlocks(32*numSMs);
hipEvent_t start,stop;
HANDLE_ERROR( hipEventCreate(&start) );
HANDLE_ERROR( hipEventCreate(&stop) );
HANDLE_ERROR( hipEventRecord(start, 0) );
hipLaunchKernelGGL(( prefixsumJAxis), dim3(dimBlocks),dim3(dimThreads), 0, 0, dcube,N);
HANDLE_ERROR( hipDeviceSynchronize() );
//HANDLE_ERROR( hipMemcpy( cube, dcube,N*N*N*sizeof(int),hipMemcpyDeviceToHost));
//cout<<"first ps:"<<endl;
//print(cube,N);
hipLaunchKernelGGL(( prefixsumKAxis), dim3(dimBlocks),dim3(dimThreads), 0, 0, dcube,N);
HANDLE_ERROR( hipDeviceSynchronize() );
//cout<<endl<<"second ps:"<<endl;
//HANDLE_ERROR( hipMemcpy( cube, dcube,N*N*N*sizeof(int),hipMemcpyDeviceToHost));
//print(cube,N);
//cout<<"computation size: "<<N*(N+1)/2<<endl;
hipLaunchKernelGGL(( computeCghrt), dim3(dimBlocks),dim3(dimThreads), 0, 0, dcube,N,dresult);
HANDLE_ERROR( hipDeviceSynchronize() );
HANDLE_ERROR( hipEventRecord(stop, 0) );
HANDLE_ERROR( hipEventSynchronize(start) );
HANDLE_ERROR( hipEventSynchronize(stop) );
float time;
HANDLE_ERROR( hipEventElapsedTime(&time, start, stop) );
int result;
HANDLE_ERROR( hipMemcpy( &result, dresult, sizeof(int),hipMemcpyDeviceToHost));
free(cube);
hipFree(dcube);
hipFree(dresult);
//cout<<result<<endl;
//printf("%i %.9f\n",result,time);
printf("%.9f\n",time);
return 0;
}
| 18597fd3003556a3184611f70a97bc9c05aef882.cu | /*Author: Rodrigo Gonçalves de Branco
Date: 24/03/2015
*/
#include <iostream>
#include <vector>
#include <cuda.h>
#include <cstdio>
#include <cmath>
#include <climits>
#include <stdio.h>
#include "cuda_util.h"
using namespace std;
__global__
void prefixsumJAxis(int* v, int N)
{
int sqrN = N*N;
for(int k = blockIdx.x; k < N; k += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int j = 1; j < N; j++) {
v[sqrN*k + N*i + j] += v[sqrN*k + N*i + j-1];
}
}
}
}
__global__
void prefixsumKAxis(int* v, int N)
{
int sqrN = N*N;
for(int j = blockIdx.x; j < N; j += gridDim.x) {
for(int i = threadIdx.x; i < N; i += blockDim.x) {
for(int k = 1; k < N; k++) {
v[sqrN*k + N*i + j] += v[sqrN*(k-1) + N*i + j];
}
}
}
}
__device__
int row_index( unsigned int i, unsigned int M ){
double m = M;
double row = (-2*m - 1 + sqrt( (4*m*(m+1) - 8*(double)i - 7) )) / -2;
if( row == (double)(int) row ) row -= 1;
return (unsigned int) row;
}
__device__
int column_index( unsigned int i, unsigned int M ){
unsigned int row = row_index( i, M);
return i - M * row + row*(row+1) / 2;
}
__device__
int maxSubArraySum(int* v, int N, int g, int h, int r, int t) {
int max_so_far = 0, max_ending_here = 0;
int sqrN = N*N;
for(int i = 0; i < N; i++)
{
int tmp1 = v[sqrN*t + N*i + h];
int tmp2 = r > 0 ? v[sqrN*(r-1) + N*i + h] : 0;
int tmp3 = g > 0 ? v[sqrN*t + N*i + (g-1)] : 0;
        //The corner block (tmp4) may have been subtracted twice. If that is true, we need to add it back!
int tmp4 = r > 0 && g > 0 ? v[sqrN*(r-1) + N*i + (g-1)] : 0 ;
int temp = tmp1 - tmp2 - tmp3 + tmp4;
//printf("g:%d h:%d r:%d t:%d => %d - %d - %d + %d = %d\n",g,h,r,t,tmp1,tmp2,tmp3,tmp4,temp);
max_ending_here = max_ending_here + temp;
if(max_ending_here < 0)
max_ending_here = 0;
if(max_so_far < max_ending_here)
max_so_far = max_ending_here;
}
return max_so_far;
}
__global__
void computeCghrt(int* v, int N, int * result)
{
int computationSize = (N*(N+1))>>1;
int maxsofar = INT_MIN;
//to cover all R e T index
//printf("blk:%d thd:%d gridDim:%d blockDim:%d\n",blockIdx.x,threadIdx.x,gridDim.x,blockDim.x);
for(int blkstep = 0; blkstep < computationSize; blkstep += gridDim.x) {
int r = row_index(blockIdx.x + blkstep,N);
int t = column_index(blockIdx.x + blkstep,N);
if(r >= 0 && t >= 0 && r < N && t < N && r <= t) {
//to cover all G e H index
for(int thdstep = 0; thdstep < computationSize; thdstep += blockDim.x) {
int g = row_index(threadIdx.x + thdstep,N);
int h = column_index(threadIdx.x + thdstep,N);
if(g >= 0 && h >= 0 && g < N && h < N && g <= h) {
int newmax = maxSubArraySum(v,N,g,h,r,t);
maxsofar = newmax > maxsofar ? newmax : maxsofar;
}
}
}
}
atomicMax(result,maxsofar);
}
/*void print(int* v, int N) {
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cout<<v[N*N*k + N*i + j]<<" ";
}
cout<<endl;
}
cout<<endl;
}
}*/
int main() {
//size of cube
int N;
cin>>N;
//cube representation: O(n^3) of space
int* cube = (int*)malloc(N*N*N*sizeof(int**));
//Reading the values
for(int k = 0; k < N; k++) {
for(int i = 0; i < N; i++) {
for(int j = 0; j < N; j++) {
cin>>cube[N*N*k + N*i + j];
}
}
}
//cout<<"original:"<<endl;
//print(cube,N);
int* dcube, *dresult;
HANDLE_ERROR( cudaMalloc( (void**)&dcube, N*N*N*sizeof(int)));
HANDLE_ERROR( cudaMemcpy( dcube, cube, N*N*N*sizeof(int),cudaMemcpyHostToDevice ) );
int minValue = INT_MIN;
HANDLE_ERROR( cudaMalloc( (void**)&dresult, sizeof(int)));
HANDLE_ERROR( cudaMemcpy( dresult, &minValue, sizeof(int),cudaMemcpyHostToDevice ) );
int numSMs;
cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, 0);
dim3 dimThreads(256);
dim3 dimBlocks(32*numSMs);
cudaEvent_t start,stop;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&stop) );
HANDLE_ERROR( cudaEventRecord(start, 0) );
prefixsumJAxis<<<dimBlocks,dimThreads>>>(dcube,N);
HANDLE_ERROR( cudaThreadSynchronize() );
//HANDLE_ERROR( cudaMemcpy( cube, dcube,N*N*N*sizeof(int),cudaMemcpyDeviceToHost));
//cout<<"first ps:"<<endl;
//print(cube,N);
prefixsumKAxis<<<dimBlocks,dimThreads>>>(dcube,N);
HANDLE_ERROR( cudaThreadSynchronize() );
//cout<<endl<<"second ps:"<<endl;
//HANDLE_ERROR( cudaMemcpy( cube, dcube,N*N*N*sizeof(int),cudaMemcpyDeviceToHost));
//print(cube,N);
//cout<<"computation size: "<<N*(N+1)/2<<endl;
computeCghrt<<<dimBlocks,dimThreads>>>(dcube,N,dresult);
HANDLE_ERROR( cudaThreadSynchronize() );
HANDLE_ERROR( cudaEventRecord(stop, 0) );
HANDLE_ERROR( cudaEventSynchronize(start) );
HANDLE_ERROR( cudaEventSynchronize(stop) );
float time;
HANDLE_ERROR( cudaEventElapsedTime(&time, start, stop) );
int result;
HANDLE_ERROR( cudaMemcpy( &result, dresult, sizeof(int),cudaMemcpyDeviceToHost));
free(cube);
cudaFree(dcube);
cudaFree(dresult);
//cout<<result<<endl;
//printf("%i %.9f\n",result,time);
printf("%.9f\n",time);
return 0;
}
|
7c2b35b32be6d53c1cf2f5ce236b0978eaa5bb54.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <stdio.h>
#include "cputime.h"
// float *accnew_gpu;
// float *velnew_gpu;
// float *parforce_gpu;
// float *parpot_gpu;
// float *parvel_gpu;
// float *acc_gpu;
// float *force_gpu;
// float *pos_gpu;
// float *vel_gpu;
thrust::device_vector<float> *parforce_gpuX;
thrust::device_vector<float> *parforce_gpuY;
thrust::device_vector<float> *parforce_gpuZ;
thrust::device_vector<float> *parpot_gpu;
thrust::device_vector<float> *acc_gpuX;
thrust::device_vector<float> *acc_gpuY;
thrust::device_vector<float> *acc_gpuZ;
thrust::device_vector<float> *force_gpuX;
thrust::device_vector<float> *force_gpuY;
thrust::device_vector<float> *force_gpuZ;
thrust::device_vector<float> *pos_gpuX;
thrust::device_vector<float> *pos_gpuY;
thrust::device_vector<float> *pos_gpuZ;
thrust::device_vector<float> *vel_gpuX;
thrust::device_vector<float> *vel_gpuY;
thrust::device_vector<float> *vel_gpuZ;
/*
extern "C"
double cputime()
{
struct timeval tp;
int rtn;
rtn=gettimeofday(&tp, NULL);
return ((double)tp.tv_sec+(1.e-6)*tp.tv_usec);
}
*/
extern "C"
void allocMemOnGPU(int nd, int np)
{
parforce_gpuX = (new thrust::device_vector<float>(np));
parforce_gpuY = (new thrust::device_vector<float>(np));
parforce_gpuZ = (new thrust::device_vector<float>(np));
parpot_gpu = (new thrust::device_vector<float>(np) );
acc_gpuX = (new thrust::device_vector<float>(np));
acc_gpuY = (new thrust::device_vector<float>(np));
acc_gpuZ = (new thrust::device_vector<float>(np));
force_gpuX = (new thrust::device_vector<float>(np));
force_gpuY = (new thrust::device_vector<float>(np));
force_gpuZ = (new thrust::device_vector<float>(np));
pos_gpuX = (new thrust::device_vector<float>(np));
pos_gpuY = (new thrust::device_vector<float>(np));
pos_gpuZ = (new thrust::device_vector<float>(np));
vel_gpuX = (new thrust::device_vector<float>(np));
vel_gpuY = (new thrust::device_vector<float>(np));
vel_gpuZ = (new thrust::device_vector<float>(np));
}
extern "C"
void copyDataToGPU(float *h_acc, float *h_force, float *h_vel, float *h_pos, int nd, int np)
{
acc_gpuX->assign( h_acc ,h_acc + np);
acc_gpuY->assign( h_acc +np ,h_acc + 2*np);
acc_gpuZ->assign( h_acc +2*np ,h_acc + nd*np);
force_gpuX->assign( h_force ,h_force + np);
force_gpuY->assign( h_force +np ,h_force + 2*np);
force_gpuZ->assign( h_force +2*np ,h_force + nd*np);
vel_gpuX->assign( h_vel ,h_vel + np);
vel_gpuY->assign( h_vel +np ,h_vel + 2*np);
vel_gpuZ->assign( h_vel +2*np ,h_vel + nd*np);
pos_gpuX->assign( h_pos ,h_pos + np);
pos_gpuY->assign( h_pos +np ,h_pos + 2*np);
pos_gpuZ->assign( h_pos +2*np ,h_pos + nd*np);
}
// __global__ void dummyCopy(float *g_idata, float *g_odata)
// {
// int idx = blockIdx.x * blockDim.x + threadIdx.x ;
//
// g_odata[idx] = g_idata[idx];
// __syncthreads();
// }
// START K1 - Compute Force on Particle
class compute_forceonparticle_functor
{
float PI2;
float currentposx,currentposy,currentposz;
int currentMoleculeIndex;
public:
compute_forceonparticle_functor(float PI2, float currentposx, float currentposy, float currentposz,int currentMoleculeIndex)
{
this->PI2 = PI2;
this->currentposx = currentposx;
this->currentposy = currentposy;
this->currentposz = currentposz;
this->currentMoleculeIndex = currentMoleculeIndex;
}
template <typename TupleInput, typename TupleOutput>
__device__ float operator() (TupleInput input, TupleOutput output)
{
float px = this->currentposx - thrust::get<0>(input);
float py = this->currentposy - thrust::get<1>(input);
float pz = this->currentposz - thrust::get<2>(input);
float dist = px*px + py*py + pz*pz;
dist = sqrt(dist);
float dist2 = (dist < PI2) ? dist : PI2;
if(thrust::get<3>(input)==this->currentMoleculeIndex)
{
thrust::get<0>(output)= 0;
thrust::get<1>(output)= 0;
thrust::get<2>(output)= 0;
return 0;
}
thrust::get<0>(output)=- (px * sin(2.0 * dist2) / dist);
thrust::get<1>(output)=- (py * sin(2.0 * dist2) / dist);
thrust::get<2>(output)=- (pz * sin(2.0 * dist2) / dist);
return 0.5 * sin(dist2) * sin(dist2);
}
};
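// The transform below zips the x/y/z position vectors with a counting iterator so the
// functor can recognise (and skip) the particle's own index. The "second input" range
// is really used as an output: the functor writes the pairwise force into the
// parforce_* tuple it is handed and returns the pairwise potential, which thrust
// stores into parpot_gpu.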
extern "C"
void GPU_compute_forceonparticle(int nd, int np, int currentMoleculeIndex, const float PI2, int step, double *time_elapsed)
{
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(pos_gpuX->begin(),pos_gpuY->begin(),pos_gpuZ->begin(),thrust::counting_iterator<int>(0))),
thrust::make_zip_iterator(thrust::make_tuple(pos_gpuX->end(),pos_gpuY->end(),pos_gpuZ->end(),thrust::counting_iterator<int>(pos_gpuX->end()-pos_gpuX->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(parforce_gpuX->begin(),parforce_gpuY->begin(),parforce_gpuZ->begin())),
parpot_gpu->begin(),compute_forceonparticle_functor(PI2,(*pos_gpuX)[currentMoleculeIndex],(*pos_gpuY)[currentMoleculeIndex],(*pos_gpuZ)[currentMoleculeIndex],currentMoleculeIndex));
// if(currentMoleculeIndex<100)
// printf("%f \n",(float) parforce_gpuX[0]);
}
float GPU_accumulate_parpot_wShrdMem(int nd, int np, int step, double *time_elapsed)
{
return thrust::reduce(parpot_gpu->begin(),parpot_gpu->end());
}
//END K2 - Accumulate PE with/without shared memory
//START K3 - Accumulate Force with/without shared memory
void GPU_accumulate_parforce_wShrdMem(int nd, int np, int currentMoleculeIndex, int step, double *time_elapsed)
{
(*force_gpuX)[currentMoleculeIndex] = thrust::reduce(parforce_gpuX->begin(),parforce_gpuX->end());
// printf("%f\n",(float) force_gpuX[currentMoleculeIndex]);
(*force_gpuY)[currentMoleculeIndex] = thrust::reduce(parforce_gpuY->begin(),parforce_gpuY->end());
(*force_gpuZ)[currentMoleculeIndex] = thrust::reduce(parforce_gpuZ->begin(),parforce_gpuZ->end());
}
//END K3 - Accumulate Force with/without shared memory
//Accumulates PE and Force using K2 and K3
extern "C"
float GPU_seq_wShrdMem_accumulate_parpot_and_parforce(int nd, int np, int currentMoleculeIndex, int step, double *time_elap1, double *time_elap2)
{
GPU_accumulate_parforce_wShrdMem(nd,np,currentMoleculeIndex, step, time_elap1);
return GPU_accumulate_parpot_wShrdMem(nd, np, step, time_elap2);
}
//START K4 - Compute and accumulate KE without shared memory
//Compute KE with shared memory
struct squareOp
{
__device__ float operator() ( const float input) const
{
return input*input;
}
};
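// Kinetic energy is 0.5 * mass * sum(v^2); the sum of squares is accumulated with one
// transform_reduce (square, then add) per velocity component.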
extern "C"
float GPU_accumulate_KE_wShrdMem(int nd, int np, float mass, int step, double *time_elapsed)
{
// for(int i = 0; i<100;i++)
// {
// printf("%f \n",(float) vel_gpuX[i]);
// }
float sum = thrust::transform_reduce(vel_gpuX->begin(),vel_gpuX->end(),squareOp(),0.0f,thrust::plus<float>())
+ thrust::transform_reduce(vel_gpuY->begin(),vel_gpuY->end(),squareOp(),0.0f,thrust::plus<float>())
+ thrust::transform_reduce(vel_gpuZ->begin(),vel_gpuZ->end(),squareOp(),0.0f,thrust::plus<float>());
return 0.5 * mass * sum;
}
//END K4 - Compute and accumulate KE with shared memory
//START K5 - Update position
__global__ void GPU_updatePos(int numberOfThreads, float dt, float * pos_gpu, const float * vel_gpu, const float * acc_gpu)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x ;
if (idx >= numberOfThreads)
return ;
pos_gpu[idx] += vel_gpu[idx] * dt + 0.5 * acc_gpu[idx] * dt * dt;
}
class updateOp
{
float dt;
public:
updateOp(float dt)
{
this->dt = dt;
}
template <typename Tuple>
__device__ float operator() (Tuple input, float current_value)
{
return thrust::get<0>(input) * (this->dt) + (this->dt) * 0.5 * (this->dt) * thrust::get<1>(input) + current_value;
}
};
extern "C"
void GPU_updatePos(int nd, int np, float dt, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(vel_gpuX->begin(),acc_gpuX->begin())),
thrust::make_zip_iterator(thrust::make_tuple(vel_gpuX->end(),acc_gpuX->end())),
pos_gpuX->begin(),pos_gpuX->begin(),updateOp(dt));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(vel_gpuY->begin(),acc_gpuY->begin())),
thrust::make_zip_iterator(thrust::make_tuple(vel_gpuY->end(),acc_gpuY->end())),
pos_gpuY->begin(),pos_gpuY->begin(),updateOp(dt));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(vel_gpuZ->begin(),acc_gpuZ->begin())),
thrust::make_zip_iterator(thrust::make_tuple(vel_gpuZ->end(),acc_gpuZ->end())),
pos_gpuZ->begin(),pos_gpuZ->begin(),updateOp(dt));
}
//END K5 - Update position
//START K6 - Update velocity
__global__ void GPU_updateVel(int numberOfThreads, float dt, float rmass, float * vel_gpu, const float * force_gpu, const float * acc_gpu)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x ;
if (idx >= numberOfThreads)
return ;
vel_gpu[idx] += 0.5 * dt * (force_gpu[idx] * rmass + acc_gpu[idx]);
}
class updateVelOp
{
float dt, rmass;
public:
updateVelOp(float dt, float rmass)
{
this->dt = dt;
this->rmass =rmass;
}
template <typename Tuple>
__device__ float operator() (Tuple input, float current_value)
{
return current_value + ( 0.5*(this->dt)*(thrust::get<0>(input)*(this->rmass) + thrust::get<1>(input)) );
}
};
extern "C"
void GPU_updateVel(int nd, int np, float dt, float rmass, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(force_gpuX->begin(),acc_gpuX->begin())),
thrust::make_zip_iterator(thrust::make_tuple(force_gpuX->end(),acc_gpuX->end())),
vel_gpuX->begin(),vel_gpuX->begin(),updateVelOp(dt,rmass));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(force_gpuY->begin(),acc_gpuY->begin())),
thrust::make_zip_iterator(thrust::make_tuple(force_gpuY->end(),acc_gpuY->end())),
vel_gpuY->begin(),vel_gpuY->begin(),updateVelOp(dt,rmass));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(force_gpuZ->begin(),acc_gpuZ->begin())),
thrust::make_zip_iterator(thrust::make_tuple(force_gpuZ->end(),acc_gpuZ->end())),
vel_gpuZ->begin(),vel_gpuZ->begin(),updateVelOp(dt,rmass));
}
//END K6 - Update velocity
//START K7 - Update acceleration
class updateAccOp
{
float rmass;
public:
updateAccOp(float rmass)
{
this->rmass = rmass;
}
__device__ float operator() (float input)
{
return input*(this->rmass);
}
};
extern "C"
void GPU_updateAcc(int nd, int np, float rmass, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
thrust::transform(force_gpuX->begin(),force_gpuX->end(),acc_gpuX->begin(),updateAccOp(rmass));
thrust::transform(force_gpuY->begin(),force_gpuY->end(),acc_gpuY->begin(),updateAccOp(rmass));
thrust::transform(force_gpuZ->begin(),force_gpuZ->end(),acc_gpuZ->begin(),updateAccOp(rmass));
}
//END K7 - Update acceleration
| 7c2b35b32be6d53c1cf2f5ce236b0978eaa5bb54.cu | #include <cuda_runtime_api.h>
#include <thrust/device_vector.h>
#include <thrust/transform_reduce.h>
#include <stdio.h>
#include "cputime.h"
// float *accnew_gpu;
// float *velnew_gpu;
// float *parforce_gpu;
// float *parpot_gpu;
// float *parvel_gpu;
// float *acc_gpu;
// float *force_gpu;
// float *pos_gpu;
// float *vel_gpu;
thrust::device_vector<float> *parforce_gpuX;
thrust::device_vector<float> *parforce_gpuY;
thrust::device_vector<float> *parforce_gpuZ;
thrust::device_vector<float> *parpot_gpu;
thrust::device_vector<float> *acc_gpuX;
thrust::device_vector<float> *acc_gpuY;
thrust::device_vector<float> *acc_gpuZ;
thrust::device_vector<float> *force_gpuX;
thrust::device_vector<float> *force_gpuY;
thrust::device_vector<float> *force_gpuZ;
thrust::device_vector<float> *pos_gpuX;
thrust::device_vector<float> *pos_gpuY;
thrust::device_vector<float> *pos_gpuZ;
thrust::device_vector<float> *vel_gpuX;
thrust::device_vector<float> *vel_gpuY;
thrust::device_vector<float> *vel_gpuZ;
/*
extern "C"
double cputime()
{
struct timeval tp;
int rtn;
rtn=gettimeofday(&tp, NULL);
return ((double)tp.tv_sec+(1.e-6)*tp.tv_usec);
}
*/
extern "C"
void allocMemOnGPU(int nd, int np)
{
parforce_gpuX = (new thrust::device_vector<float>(np));
parforce_gpuY = (new thrust::device_vector<float>(np));
parforce_gpuZ = (new thrust::device_vector<float>(np));
parpot_gpu = (new thrust::device_vector<float>(np) );
acc_gpuX = (new thrust::device_vector<float>(np));
acc_gpuY = (new thrust::device_vector<float>(np));
acc_gpuZ = (new thrust::device_vector<float>(np));
force_gpuX = (new thrust::device_vector<float>(np));
force_gpuY = (new thrust::device_vector<float>(np));
force_gpuZ = (new thrust::device_vector<float>(np));
pos_gpuX = (new thrust::device_vector<float>(np));
pos_gpuY = (new thrust::device_vector<float>(np));
pos_gpuZ = (new thrust::device_vector<float>(np));
vel_gpuX = (new thrust::device_vector<float>(np));
vel_gpuY = (new thrust::device_vector<float>(np));
vel_gpuZ = (new thrust::device_vector<float>(np));
}
extern "C"
void copyDataToGPU(float *h_acc, float *h_force, float *h_vel, float *h_pos, int nd, int np)
{
acc_gpuX->assign( h_acc ,h_acc + np);
acc_gpuY->assign( h_acc +np ,h_acc + 2*np);
acc_gpuZ->assign( h_acc +2*np ,h_acc + nd*np);
force_gpuX->assign( h_force ,h_force + np);
force_gpuY->assign( h_force +np ,h_force + 2*np);
force_gpuZ->assign( h_force +2*np ,h_force + nd*np);
vel_gpuX->assign( h_vel ,h_vel + np);
vel_gpuY->assign( h_vel +np ,h_vel + 2*np);
vel_gpuZ->assign( h_vel +2*np ,h_vel + nd*np);
pos_gpuX->assign( h_pos ,h_pos + np);
pos_gpuY->assign( h_pos +np ,h_pos + 2*np);
pos_gpuZ->assign( h_pos +2*np ,h_pos + nd*np);
}
// __global__ void dummyCopy(float *g_idata, float *g_odata)
// {
// int idx = blockIdx.x * blockDim.x + threadIdx.x ;
//
// g_odata[idx] = g_idata[idx];
// __syncthreads();
// }
// START K1 - Compute Force on Particle
class compute_forceonparticle_functor
{
float PI2;
float currentposx,currentposy,currentposz;
int currentMoleculeIndex;
public:
compute_forceonparticle_functor(float PI2, float currentposx, float currentposy, float currentposz,int currentMoleculeIndex)
{
this->PI2 = PI2;
this->currentposx = currentposx;
this->currentposy = currentposy;
this->currentposz = currentposz;
this->currentMoleculeIndex = currentMoleculeIndex;
}
template <typename TupleInput, typename TupleOutput>
__device__ float operator() (TupleInput input, TupleOutput output)
{
float px = this->currentposx - thrust::get<0>(input);
float py = this->currentposy - thrust::get<1>(input);
float pz = this->currentposz - thrust::get<2>(input);
float dist = px*px + py*py + pz*pz;
dist = sqrt(dist);
float dist2 = (dist < PI2) ? dist : PI2;
if(thrust::get<3>(input)==this->currentMoleculeIndex)
{
thrust::get<0>(output)= 0;
thrust::get<1>(output)= 0;
thrust::get<2>(output)= 0;
return 0;
}
thrust::get<0>(output)=- (px * sin(2.0 * dist2) / dist);
thrust::get<1>(output)=- (py * sin(2.0 * dist2) / dist);
thrust::get<2>(output)=- (pz * sin(2.0 * dist2) / dist);
return 0.5 * sin(dist2) * sin(dist2);
}
};
extern "C"
void GPU_compute_forceonparticle(int nd, int np, int currentMoleculeIndex, const float PI2, int step, double *time_elapsed)
{
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(pos_gpuX->begin(),pos_gpuY->begin(),pos_gpuZ->begin(),thrust::counting_iterator<int>(0))),
thrust::make_zip_iterator(thrust::make_tuple(pos_gpuX->end(),pos_gpuY->end(),pos_gpuZ->end(),thrust::counting_iterator<int>(pos_gpuX->end()-pos_gpuX->begin()))),
thrust::make_zip_iterator(thrust::make_tuple(parforce_gpuX->begin(),parforce_gpuY->begin(),parforce_gpuZ->begin())),
parpot_gpu->begin(),compute_forceonparticle_functor(PI2,(*pos_gpuX)[currentMoleculeIndex],(*pos_gpuY)[currentMoleculeIndex],(*pos_gpuZ)[currentMoleculeIndex],currentMoleculeIndex));
// if(currentMoleculeIndex<100)
// printf("%f \n",(float) parforce_gpuX[0]);
}
float GPU_accumulate_parpot_wShrdMem(int nd, int np, int step, double *time_elapsed)
{
return thrust::reduce(parpot_gpu->begin(),parpot_gpu->end());
}
//END K2 - Accumulate PE with/without shared memory
//START K3 - Accumulate Force with/without shared memory
void GPU_accumulate_parforce_wShrdMem(int nd, int np, int currentMoleculeIndex, int step, double *time_elapsed)
{
(*force_gpuX)[currentMoleculeIndex] = thrust::reduce(parforce_gpuX->begin(),parforce_gpuX->end());
// printf("%f\n",(float) force_gpuX[currentMoleculeIndex]);
(*force_gpuY)[currentMoleculeIndex] = thrust::reduce(parforce_gpuY->begin(),parforce_gpuY->end());
(*force_gpuZ)[currentMoleculeIndex] = thrust::reduce(parforce_gpuZ->begin(),parforce_gpuZ->end());
}
//END K3 - Accumulate Force with/without shared memory
//Accumulates PE and Force using K2 and K3
extern "C"
float GPU_seq_wShrdMem_accumulate_parpot_and_parforce(int nd, int np, int currentMoleculeIndex, int step, double *time_elap1, double *time_elap2)
{
GPU_accumulate_parforce_wShrdMem(nd,np,currentMoleculeIndex, step, time_elap1);
return GPU_accumulate_parpot_wShrdMem(nd, np, step, time_elap2);
}
//START K4 - Compute and accumulate KE without shared memory
//Compute KE with shared memory
struct squareOp
{
__device__ float operator() ( const float input) const
{
return input*input;
}
};
extern "C"
float GPU_accumulate_KE_wShrdMem(int nd, int np, float mass, int step, double *time_elapsed)
{
// for(int i = 0; i<100;i++)
// {
// printf("%f \n",(float) vel_gpuX[i]);
// }
float sum = thrust::transform_reduce(vel_gpuX->begin(),vel_gpuX->end(),squareOp(),0.0f,thrust::plus<float>())
+ thrust::transform_reduce(vel_gpuY->begin(),vel_gpuY->end(),squareOp(),0.0f,thrust::plus<float>())
+ thrust::transform_reduce(vel_gpuZ->begin(),vel_gpuZ->end(),squareOp(),0.0f,thrust::plus<float>());
return 0.5 * mass * sum;
}
//END K4 - Compute and accumulate KE with shared memory
//START K5 - Update position
__global__ void GPU_updatePos(int numberOfThreads, float dt, float * pos_gpu, const float * vel_gpu, const float * acc_gpu)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x ;
if (idx >= numberOfThreads)
return ;
pos_gpu[idx] += vel_gpu[idx] * dt + 0.5 * acc_gpu[idx] * dt * dt;
}
class updateOp
{
float dt;
public:
updateOp(float dt)
{
this->dt = dt;
}
template <typename Tuple>
__device__ float operator() (Tuple input, float current_value)
{
return thrust::get<0>(input) * (this->dt) + (this->dt) * 0.5 * (this->dt) * thrust::get<1>(input) + current_value;
}
};
extern "C"
void GPU_updatePos(int nd, int np, float dt, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(vel_gpuX->begin(),acc_gpuX->begin())),
thrust::make_zip_iterator(thrust::make_tuple(vel_gpuX->end(),acc_gpuX->end())),
pos_gpuX->begin(),pos_gpuX->begin(),updateOp(dt));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(vel_gpuY->begin(),acc_gpuY->begin())),
thrust::make_zip_iterator(thrust::make_tuple(vel_gpuY->end(),acc_gpuY->end())),
pos_gpuY->begin(),pos_gpuY->begin(),updateOp(dt));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(vel_gpuZ->begin(),acc_gpuZ->begin())),
thrust::make_zip_iterator(thrust::make_tuple(vel_gpuZ->end(),acc_gpuZ->end())),
pos_gpuZ->begin(),pos_gpuZ->begin(),updateOp(dt));
}
//END K5 - Update position
//START K6 - Update velocity
__global__ void GPU_updateVel(int numberOfThreads, float dt, float rmass, float * vel_gpu, const float * force_gpu, const float * acc_gpu)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x ;
if (idx >= numberOfThreads)
return ;
vel_gpu[idx] += 0.5 * dt * (force_gpu[idx] * rmass + acc_gpu[idx]);
}
class updateVelOp
{
float dt, rmass;
public:
updateVelOp(float dt, float rmass)
{
this->dt = dt;
this->rmass =rmass;
}
template <typename Tuple>
__device__ float operator() (Tuple input, float current_value)
{
return current_value + ( 0.5*(this->dt)*(thrust::get<0>(input)*(this->rmass) + thrust::get<1>(input)) );
}
};
extern "C"
void GPU_updateVel(int nd, int np, float dt, float rmass, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(force_gpuX->begin(),acc_gpuX->begin())),
thrust::make_zip_iterator(thrust::make_tuple(force_gpuX->end(),acc_gpuX->end())),
vel_gpuX->begin(),vel_gpuX->begin(),updateVelOp(dt,rmass));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(force_gpuY->begin(),acc_gpuY->begin())),
thrust::make_zip_iterator(thrust::make_tuple(force_gpuY->end(),acc_gpuY->end())),
vel_gpuY->begin(),vel_gpuY->begin(),updateVelOp(dt,rmass));
thrust::transform(thrust::make_zip_iterator(thrust::make_tuple(force_gpuZ->begin(),acc_gpuZ->begin())),
thrust::make_zip_iterator(thrust::make_tuple(force_gpuZ->end(),acc_gpuZ->end())),
vel_gpuZ->begin(),vel_gpuZ->begin(),updateVelOp(dt,rmass));
}
//END K6 - Update velocity
//START K7 - Update acceleration
class updateAccOp
{
float rmass;
public:
updateAccOp(float rmass)
{
this->rmass = rmass;
}
__device__ float operator() (float input)
{
return input*(this->rmass);
}
};
extern "C"
void GPU_updateAcc(int nd, int np, float rmass, int step, double *time_elapsedCPU, float *time_elapsedGPU)
{
thrust::transform(force_gpuX->begin(),force_gpuX->end(),acc_gpuX->begin(),updateAccOp(rmass));
thrust::transform(force_gpuY->begin(),force_gpuY->end(),acc_gpuY->begin(),updateAccOp(rmass));
thrust::transform(force_gpuZ->begin(),force_gpuZ->end(),acc_gpuZ->begin(),updateAccOp(rmass));
}
//END K7 - Update acceleration
|
8eda559f15c5621bcf554a586781f64b49c50f35.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MatrixTransposeSolveBankConflicts.h"
#include "config.h"
__global__ void matrixTransposeSolveBankConflicts(const int *d_a, int *d_b, const int rows, const int cols) {
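    // Tile the transpose through shared memory; the extra "+ 1" column of padding makes
    // the transposed (column-wise) reads below land in different shared-memory banks,
    // avoiding bank conflicts.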
__shared__ int mat[BLOCK_SIZE][BLOCK_SIZE + 1];
int bx = blockIdx.x * BLOCK_SIZE;
int by = blockIdx.y * BLOCK_SIZE;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i<rows && j<cols)
mat[threadIdx.y][threadIdx.x] = d_a[i*cols+j];
__syncthreads();
if (tj < cols && ti<rows)
d_b[ti*rows+tj]=mat[threadIdx.x][threadIdx.y];
}
| 8eda559f15c5621bcf554a586781f64b49c50f35.cu | #include "MatrixTransposeSolveBankConflicts.h"
#include "config.h"
__global__ void matrixTransposeSolveBankConflicts(const int *d_a, int *d_b, const int rows, const int cols) {
__shared__ int mat[BLOCK_SIZE][BLOCK_SIZE + 1];
int bx = blockIdx.x * BLOCK_SIZE;
int by = blockIdx.y * BLOCK_SIZE;
int i = by + threadIdx.y; int j = bx + threadIdx.x;
int ti = bx + threadIdx.y; int tj = by + threadIdx.x;
if (i<rows && j<cols)
mat[threadIdx.y][threadIdx.x] = d_a[i*cols+j];
__syncthreads();
if (tj < cols && ti<rows)
d_b[ti*rows+tj]=mat[threadIdx.x][threadIdx.y];
}
|
925f854f32fed6feda28c7eb130364189a75ea61.hip | // !!! This is a file automatically generated by hipify!!!
// Jin Pyo Jeon
// Lab 07
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#define T 1024 // shared-memory array size needs to be known at compile time
#define N (1024 * 1024)
// Times for Reduced and non-reduced dot product
// N      Reduced   Non-reduced   Thread Count
// 2^27   8.95      8.91          1024
// 2^26   4.49      4.46          1024
// 2^20   0.072     0.072         1024
#define cudaCheckError() { \
hipError_t e = hipGetLastError(); \
if (e != hipSuccess) { \
printf("Cuda failed: %d: %s\n", __LINE__, hipGetErrorString(e)); \
} \
}
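// calculateDot: each block loads 2*T element-wise products into shared memory, reduces
// them with a stride-halving tree (synchronising between steps), and thread 0 adds the
// block's partial sum into the global result with atomicAdd.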
__global__ void calculateDot(int* a, int* b, unsigned long long int*c){
__shared__ unsigned long long int partialSum[2 * T];
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// printf("%d %d\n", start+t, start + blockDim.x + t);
if (start + t <= N) {
partialSum[t] = a[start + t] * b[start + t];
partialSum[blockDim.x+t] = a[start + blockDim.x+t] *
b[start + blockDim.x+t];
for (int stride = blockDim.x; stride > 0; stride /= 2) {
__syncthreads();
if (t < stride) {
partialSum[t] += partialSum[t + stride];
}
}
if (threadIdx.x == 0) atomicAdd(c, partialSum[0]);
}
}
void random_ints(int * arr, size_t size){
int i = 0;
for (i = 0; i < size; i++) {
arr[i] = rand() % 100;
}
}
int main(int argc, char**argv) {
srand(time(NULL));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
int *a, *b;
unsigned long long int *c, *d_c;
int * d_a, *d_b;
unsigned long long int size = N * sizeof(int);
hipMalloc((void**)&d_a, size);
hipMalloc((void**)&d_b, size);
hipMalloc((void**)&d_c, sizeof(unsigned long long int));
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (unsigned long long int *)malloc(sizeof(unsigned long long int));
random_ints(a, N);
random_ints(b, N);
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
dim3 threadDims(T, 1, 1);
dim3 blockDims(ceil(N / 2.0 / (float) T), 1, 1);
hipLaunchKernelGGL(( calculateDot), dim3(blockDims), dim3(threadDims), 0, 0, d_a, d_b, d_c);
cudaCheckError()
hipMemcpy(c, d_c, sizeof(unsigned long long int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
hipEventDestroy(start);
hipEventDestroy(stop);
printf("The dot product is %llu with elapsed time of %f s\n", *c, elapsedTime / 1000.0);
free(a); free(b); free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return 0;
}
| 925f854f32fed6feda28c7eb130364189a75ea61.cu | // Jin Pyo Jeon
// Lab 07
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#define T 1024 // shared-memory array size needs to be known at compile time
#define N (1024 * 1024)
// Times for Reduced and non-reduced dot product
// N      Reduced   Non-reduced   Thread Count
// 2^27   8.95      8.91          1024
// 2^26   4.49      4.46          1024
// 2^20   0.072     0.072         1024
#define cudaCheckError() { \
cudaError_t e = cudaGetLastError(); \
if (e != cudaSuccess) { \
printf("Cuda failed: %d: %s\n", __LINE__, cudaGetErrorString(e)); \
} \
}
__global__ void calculateDot(int* a, int* b, unsigned long long int*c){
__shared__ unsigned long long int partialSum[2 * T];
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// printf("%d %d\n", start+t, start + blockDim.x + t);
if (start + t <= N) {
partialSum[t] = a[start + t] * b[start + t];
partialSum[blockDim.x+t] = a[start + blockDim.x+t] *
b[start + blockDim.x+t];
for (int stride = blockDim.x; stride > 0; stride /= 2) {
__syncthreads();
if (t < stride) {
partialSum[t] += partialSum[t + stride];
}
}
if (threadIdx.x == 0) atomicAdd(c, partialSum[0]);
}
}
void random_ints(int * arr, size_t size){
int i = 0;
for (i = 0; i < size; i++) {
arr[i] = rand() % 100;
}
}
int main(int argc, char**argv) {
srand(time(NULL));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
int *a, *b;
unsigned long long int *c, *d_c;
int * d_a, *d_b;
unsigned long long int size = N * sizeof(int);
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, sizeof(unsigned long long int));
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (unsigned long long int *)malloc(sizeof(unsigned long long int));
random_ints(a, N);
random_ints(b, N);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
dim3 threadDims(T, 1, 1);
dim3 blockDims(ceil(N / 2.0 / (float) T), 1, 1);
calculateDot<<<blockDims, threadDims>>>(d_a, d_b, d_c);
cudaCheckError()
cudaMemcpy(c, d_c, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("The dot product is %llu with elapsed time of %f s\n", *c, elapsedTime / 1000.0);
free(a); free(b); free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
|
c48cd8c4ff5ae52a6919cb5d7031e2de136bfda2.hip | // !!! This is a file automatically generated by hipify!!!
#include "helpers.hpp"
#include "caffe/blob.hpp"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include <opencv2/cudev.hpp>
#include <opencv2/core/cuda/transform.hpp>
#include <opencv2/core/cuda_stream_accessor.hpp>
float aq::Caffe::iou(const cv::Rect& r1, const cv::Rect& r2)
{
float intersection = (r1 & r2).area();
float rect_union = (r1 | r2).area();
return intersection / rect_union;
}
template<typename T> struct Matrix3D
{
int channels, height, width;
T* data;
__host__ __device__ T& operator()(int c, int h, int w)
{
return data[c*height*width + h*width + w];
}
};
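// Per-pixel argmax over the channel dimension of the blob: each thread scans every
// channel at its (x, y) and writes the winning score to `confidence` and the winning
// channel index to `label`.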
void __global__ argmaxKernel(Matrix3D<const float> data, cv::cuda::PtrStepSz<float> confidence, cv::cuda::PtrStepSz<uchar> label)
{
const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    // the launch rounds the grid up to whole 16x16 tiles, so guard against threads
    // that fall outside the image
    if (x >= confidence.cols || y >= confidence.rows)
        return;
float maxValue = 0;
uchar maxLabel = 0;
for(int i = 0; i < data.channels; ++i)
{
if(data(i, y, x) > maxValue)
{
maxValue = data(i, y, x);
maxLabel = i;
}
}
confidence(y,x) = maxValue;
label(y,x) = maxLabel;
}
void aq::Caffe::argMax(const caffe::Blob<float>* blob_, cv::Mat& h_label, cv::Mat& h_confidence)
{
auto shape = blob_->shape();
CV_Assert(shape.size() == 4);
Matrix3D<const float> cpu_blob;
cpu_blob.data = blob_->cpu_data();
cpu_blob.channels = shape[1];
cpu_blob.height = shape[2];
cpu_blob.width = shape[3];
CV_Assert(cpu_blob.channels < 255);
h_label.create(shape[2], shape[3], CV_8U);
h_confidence.create(shape[2], shape[3], CV_32F);
for(int i = 0; i < cpu_blob.height; ++i)
{
for(int j = 0; j < cpu_blob.width; ++j)
{
float val = 0;
int idx = 0;
for(int c = 0; c < cpu_blob.channels; ++c)
{
if(cpu_blob(c,i, j) > val)
{
val = cpu_blob(c, i, j);
idx = c;
}
}
h_label.at<uchar>(i,j) = idx;
h_confidence.at<float>(i,j) = val;
}
}
}
void aq::Caffe::argMax(const caffe::Blob<float>* blob_, cv::cuda::GpuMat& label, cv::cuda::GpuMat& confidence, cv::cuda::Stream& stream_)
{
const float* data = blob_->gpu_data();
auto shape = blob_->shape();
CV_Assert(shape.size() == 4);
Matrix3D<const float> blob;
blob.data = data;
blob.channels = shape[1];
blob.height = shape[2];
blob.width = shape[3];
label.create(shape[2], shape[3], CV_8U);
confidence.create(shape[2], shape[3], CV_32F);
dim3 threads(16, 16, 1);
dim3 blocks(cv::cudev::divUp(shape[3], 16),
cv::cudev::divUp(shape[2], 16),
1);
auto stream = cv::cuda::StreamAccessor::getStream(stream_);
hipLaunchKernelGGL(( argmaxKernel), dim3(blocks), dim3(threads), 0, stream, blob, confidence, label);
}
| c48cd8c4ff5ae52a6919cb5d7031e2de136bfda2.cu | #include "helpers.hpp"
#include "caffe/blob.hpp"
#include "cuda.h"
#include "cuda_runtime.h"
#include <opencv2/cudev.hpp>
#include <opencv2/core/cuda/transform.hpp>
#include <opencv2/core/cuda_stream_accessor.hpp>
float aq::Caffe::iou(const cv::Rect& r1, const cv::Rect& r2)
{
float intersection = (r1 & r2).area();
float rect_union = (r1 | r2).area();
return intersection / rect_union;
}
template<typename T> struct Matrix3D
{
int channels, height, width;
T* data;
__host__ __device__ T& operator()(int c, int h, int w)
{
return data[c*height*width + h*width + w];
}
};
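// Per-pixel argmax over the channel dimension of the blob: the winning channel index is
// written to `label`, its activation value to `confidence`.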
void __global__ argmaxKernel(Matrix3D<const float> data, cv::cuda::PtrStepSz<float> confidence, cv::cuda::PtrStepSz<uchar> label)
{
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    // Guard threads that fall outside the image when width/height are not multiples of the block size.
    if (x >= confidence.cols || y >= confidence.rows)
        return;
    float maxValue = 0;
uchar maxLabel = 0;
for(int i = 0; i < data.channels; ++i)
{
if(data(i, y, x) > maxValue)
{
maxValue = data(i, y, x);
maxLabel = i;
}
}
confidence(y,x) = maxValue;
label(y,x) = maxLabel;
}
void aq::Caffe::argMax(const caffe::Blob<float>* blob_, cv::Mat& h_label, cv::Mat& h_confidence)
{
auto shape = blob_->shape();
CV_Assert(shape.size() == 4);
Matrix3D<const float> cpu_blob;
cpu_blob.data = blob_->cpu_data();
cpu_blob.channels = shape[1];
cpu_blob.height = shape[2];
cpu_blob.width = shape[3];
CV_Assert(cpu_blob.channels < 255);
h_label.create(shape[2], shape[3], CV_8U);
h_confidence.create(shape[2], shape[3], CV_32F);
for(int i = 0; i < cpu_blob.height; ++i)
{
for(int j = 0; j < cpu_blob.width; ++j)
{
float val = 0;
int idx = 0;
for(int c = 0; c < cpu_blob.channels; ++c)
{
if(cpu_blob(c,i, j) > val)
{
val = cpu_blob(c, i, j);
idx = c;
}
}
h_label.at<uchar>(i,j) = idx;
h_confidence.at<float>(i,j) = val;
}
}
}
void aq::Caffe::argMax(const caffe::Blob<float>* blob_, cv::cuda::GpuMat& label, cv::cuda::GpuMat& confidence, cv::cuda::Stream& stream_)
{
const float* data = blob_->gpu_data();
auto shape = blob_->shape();
CV_Assert(shape.size() == 4);
Matrix3D<const float> blob;
blob.data = data;
blob.channels = shape[1];
blob.height = shape[2];
blob.width = shape[3];
label.create(shape[2], shape[3], CV_8U);
confidence.create(shape[2], shape[3], CV_32F);
dim3 threads(16, 16, 1);
dim3 blocks(cv::cudev::divUp(shape[3], 16),
cv::cudev::divUp(shape[2], 16),
1);
auto stream = cv::cuda::StreamAccessor::getStream(stream_);
argmaxKernel<<<blocks, threads, 0, stream>>>(blob, confidence, label);
}
|
c93ac7e300307334f871f40951e2ec1e990cec34.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2018 by Contributors
* \author Rory Mitchell
*/
#include <thrust/execution_policy.h>
#include <thrust/inner_product.h>
#include <xgboost/data.h>
#include <xgboost/linear_updater.h>
#include "../common/common.h"
#include "../common/span.h"
#include "../common/device_helpers.cuh"
#include "../common/timer.h"
#include "./param.h"
#include "coordinate_common.h"
namespace xgboost {
namespace linear {
DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate);
void RescaleIndices(size_t ridx_begin, dh::DVec<xgboost::Entry> *data) {
auto d_data = data->GetSpan();
dh::LaunchN(data->DeviceIdx(), data->Size(),
[=] __device__(size_t idx) { d_data[idx].index -= ridx_begin; });
}
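// One DeviceShard owns the slice of rows [ridx_begin_, ridx_end_) assigned to a single GPU,
// stored as per-column segments (row_ptr_ / data_) so coordinate updates can stream one
// feature column at a time.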
class DeviceShard {
int device_id_;
dh::BulkAllocator<dh::MemoryType::kDevice> ba_;
std::vector<size_t> row_ptr_;
dh::DVec<xgboost::Entry> data_;
dh::DVec<GradientPair> gpair_;
dh::CubMemory temp_;
size_t ridx_begin_;
size_t ridx_end_;
public:
DeviceShard(int device_id,
const SparsePage &batch, // column batch
bst_uint row_begin, bst_uint row_end,
              const LinearTrainParam &param,
const gbm::GBLinearModelParam &model_param)
: device_id_(device_id),
ridx_begin_(row_begin),
ridx_end_(row_end) {
if ( IsEmpty() ) { return; }
dh::safe_cuda(hipSetDevice(device_id_));
// The begin and end indices for the section of each column associated with
// this shard
std::vector<std::pair<bst_uint, bst_uint>> column_segments;
row_ptr_ = {0};
// iterate through columns
for (auto fidx = 0; fidx < batch.Size(); fidx++) {
common::Span<Entry const> col = batch[fidx];
auto cmp = [](Entry e1, Entry e2) {
return e1.index < e2.index;
};
auto column_begin =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(row_begin, 0.0f), cmp);
auto column_end =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(row_end, 0.0f), cmp);
column_segments.push_back(
std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin()));
row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin));
}
ba_.Allocate(device_id_, &data_, row_ptr_.back(), &gpair_,
(row_end - row_begin) * model_param.num_output_group);
for (int fidx = 0; fidx < batch.Size(); fidx++) {
auto col = batch[fidx];
auto seg = column_segments[fidx];
dh::safe_cuda(hipMemcpy(
data_.GetSpan().subspan(row_ptr_[fidx]).data(),
col.data() + seg.first,
sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice));
}
// Rescale indices with respect to current shard
RescaleIndices(ridx_begin_, &data_);
}
bool IsEmpty() {
return (ridx_end_ - ridx_begin_) == 0;
}
void UpdateGpair(const std::vector<GradientPair> &host_gpair,
const gbm::GBLinearModelParam &model_param) {
gpair_.copy(host_gpair.begin() + ridx_begin_ * model_param.num_output_group,
host_gpair.begin() + ridx_end_ * model_param.num_output_group);
}
GradientPair GetBiasGradient(int group_idx, int num_group) {
dh::safe_cuda(hipSetDevice(device_id_));
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
return idx * num_group + group_idx;
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip(
counting, f);
auto perm = thrust::make_permutation_iterator(gpair_.tbegin(), skip);
return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_);
}
void UpdateBiasResidual(float dbias, int group_idx, int num_groups) {
if (dbias == 0.0f) return;
auto d_gpair = gpair_.GetSpan();
dh::LaunchN(device_id_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) {
auto &g = d_gpair[idx * num_groups + group_idx];
g += GradientPair(g.GetHess() * dbias, 0);
});
}
GradientPair GetGradient(int group_idx, int num_group, int fidx) {
dh::safe_cuda(hipSetDevice(device_id_));
common::Span<xgboost::Entry> d_col = data_.GetSpan().subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
common::Span<GradientPair> d_gpair = gpair_.GetSpan();
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto g = d_gpair[entry.index * num_group + group_idx];
return GradientPair(g.GetGrad() * entry.fvalue,
g.GetHess() * entry.fvalue * entry.fvalue);
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), GradientPair>
multiply_iterator(counting, f);
return dh::SumReduction(temp_, multiply_iterator, col_size);
}
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) {
common::Span<GradientPair> d_gpair = gpair_.GetSpan();
common::Span<Entry> d_col = data_.GetSpan().subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
dh::LaunchN(device_id_, col_size, [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto &g = d_gpair[entry.index * num_groups + group_idx];
g += GradientPair(g.GetHess() * dw * entry.fvalue, 0);
});
}
};
/**
* \class GPUCoordinateUpdater
*
* \brief Coordinate descent algorithm that updates one feature per iteration
*/
class GPUCoordinateUpdater : public LinearUpdater {
public:
// set training parameter
void Init(
const std::vector<std::pair<std::string, std::string>> &args) override {
tparam_.InitAllowUnknown(args);
selector.reset(FeatureSelector::Create(tparam_.feature_selector));
monitor.Init("GPUCoordinateUpdater");
}
void LazyInitShards(DMatrix *p_fmat,
const gbm::GBLinearModelParam &model_param) {
if (!shards.empty()) return;
dist_ = GPUDistribution::Block(GPUSet::All(tparam_.gpu_id, tparam_.n_gpus,
p_fmat->Info().num_row_));
auto devices = dist_.Devices();
size_t n_devices = static_cast<size_t>(devices.Size());
size_t row_begin = 0;
size_t num_row = static_cast<size_t>(p_fmat->Info().num_row_);
// Partition input matrix into row segments
std::vector<size_t> row_segments;
row_segments.push_back(0);
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
size_t shard_size = dist_.ShardSize(num_row, d_idx);
size_t row_end = row_begin + shard_size;
row_segments.push_back(row_end);
row_begin = row_end;
}
CHECK(p_fmat->SingleColBlock());
SparsePage const& batch = *(p_fmat->GetColumnBatches().begin());
shards.resize(n_devices);
// Create device shards
dh::ExecuteIndexShards(&shards,
[&](int i, std::unique_ptr<DeviceShard>& shard) {
shard = std::unique_ptr<DeviceShard>(
new DeviceShard(devices.DeviceId(i), batch, row_segments[i],
row_segments[i + 1], tparam_, model_param));
});
}
void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat,
gbm::GBLinearModel *model, double sum_instance_weight) override {
tparam_.DenormalizePenalties(sum_instance_weight);
monitor.Start("LazyInitShards");
this->LazyInitShards(p_fmat, model->param);
monitor.Stop("LazyInitShards");
monitor.Start("UpdateGpair");
// Update gpair
dh::ExecuteIndexShards(&shards, [&](int idx, std::unique_ptr<DeviceShard>& shard) {
if (!shard->IsEmpty()) {
shard->UpdateGpair(in_gpair->ConstHostVector(), model->param);
}
});
monitor.Stop("UpdateGpair");
monitor.Start("UpdateBias");
this->UpdateBias(p_fmat, model);
monitor.Stop("UpdateBias");
// prepare for updating the weights
selector->Setup(*model, in_gpair->ConstHostVector(), p_fmat,
tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm,
coord_param_.top_k);
monitor.Start("UpdateFeature");
for (auto group_idx = 0; group_idx < model->param.num_output_group;
++group_idx) {
for (auto i = 0U; i < model->param.num_feature; i++) {
auto fidx = selector->NextFeature(
i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat,
tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm);
if (fidx < 0) break;
this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model);
}
}
monitor.Stop("UpdateFeature");
}
void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) {
for (int group_idx = 0; group_idx < model->param.num_output_group;
++group_idx) {
// Get gradient
auto grad = dh::ReduceShards<GradientPair>(
&shards, [&](std::unique_ptr<DeviceShard> &shard) {
if (!shard->IsEmpty()) {
GradientPair result =
shard->GetBiasGradient(group_idx,
model->param.num_output_group);
return result;
}
return GradientPair(0, 0);
});
auto dbias = static_cast<float>(
tparam_.learning_rate *
CoordinateDeltaBias(grad.GetGrad(), grad.GetHess()));
model->bias()[group_idx] += dbias;
// Update residual
dh::ExecuteIndexShards(&shards, [&](int idx, std::unique_ptr<DeviceShard>& shard) {
if (!shard->IsEmpty()) {
shard->UpdateBiasResidual(dbias, group_idx,
model->param.num_output_group);
}
});
}
}
void UpdateFeature(int fidx, int group_idx,
std::vector<GradientPair> *in_gpair,
gbm::GBLinearModel *model) {
bst_float &w = (*model)[fidx][group_idx];
// Get gradient
auto grad = dh::ReduceShards<GradientPair>(
&shards, [&](std::unique_ptr<DeviceShard> &shard) {
if (!shard->IsEmpty()) {
return shard->GetGradient(group_idx, model->param.num_output_group,
fidx);
}
return GradientPair(0, 0);
});
auto dw = static_cast<float>(tparam_.learning_rate *
CoordinateDelta(grad.GetGrad(), grad.GetHess(),
w, tparam_.reg_alpha_denorm,
tparam_.reg_lambda_denorm));
w += dw;
dh::ExecuteIndexShards(&shards, [&](int idx,
std::unique_ptr<DeviceShard> &shard) {
if (!shard->IsEmpty()) {
shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx);
}
});
}
// training parameter
LinearTrainParam tparam_;
CoordinateParam coord_param_;
GPUDistribution dist_;
std::unique_ptr<FeatureSelector> selector;
common::Monitor monitor;
std::vector<std::unique_ptr<DeviceShard>> shards;
};
XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent")
.describe(
"Update linear model according to coordinate descent algorithm. GPU "
"accelerated.")
.set_body([]() { return new GPUCoordinateUpdater(); });
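// Hedged usage sketch (not part of this file): with the registration above, the updater is
// selected through the gblinear training parameters, e.g. booster=gblinear together with
// updater=gpu_coord_descent.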
} // namespace linear
} // namespace xgboost
| c93ac7e300307334f871f40951e2ec1e990cec34.cu | /*!
* Copyright 2018 by Contributors
* \author Rory Mitchell
*/
#include <thrust/execution_policy.h>
#include <thrust/inner_product.h>
#include <xgboost/data.h>
#include <xgboost/linear_updater.h>
#include "../common/common.h"
#include "../common/span.h"
#include "../common/device_helpers.cuh"
#include "../common/timer.h"
#include "./param.h"
#include "coordinate_common.h"
namespace xgboost {
namespace linear {
DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate);
void RescaleIndices(size_t ridx_begin, dh::DVec<xgboost::Entry> *data) {
auto d_data = data->GetSpan();
dh::LaunchN(data->DeviceIdx(), data->Size(),
[=] __device__(size_t idx) { d_data[idx].index -= ridx_begin; });
}
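// One DeviceShard owns the slice of rows [ridx_begin_, ridx_end_) assigned to a single GPU,
// stored as per-column segments (row_ptr_ / data_) so coordinate updates can stream one
// feature column at a time.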
class DeviceShard {
int device_id_;
dh::BulkAllocator<dh::MemoryType::kDevice> ba_;
std::vector<size_t> row_ptr_;
dh::DVec<xgboost::Entry> data_;
dh::DVec<GradientPair> gpair_;
dh::CubMemory temp_;
size_t ridx_begin_;
size_t ridx_end_;
public:
DeviceShard(int device_id,
const SparsePage &batch, // column batch
bst_uint row_begin, bst_uint row_end,
              const LinearTrainParam &param,
const gbm::GBLinearModelParam &model_param)
: device_id_(device_id),
ridx_begin_(row_begin),
ridx_end_(row_end) {
if ( IsEmpty() ) { return; }
dh::safe_cuda(cudaSetDevice(device_id_));
// The begin and end indices for the section of each column associated with
// this shard
std::vector<std::pair<bst_uint, bst_uint>> column_segments;
row_ptr_ = {0};
// iterate through columns
for (auto fidx = 0; fidx < batch.Size(); fidx++) {
common::Span<Entry const> col = batch[fidx];
auto cmp = [](Entry e1, Entry e2) {
return e1.index < e2.index;
};
auto column_begin =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(row_begin, 0.0f), cmp);
auto column_end =
std::lower_bound(col.cbegin(), col.cend(),
xgboost::Entry(row_end, 0.0f), cmp);
column_segments.push_back(
std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin()));
row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin));
}
ba_.Allocate(device_id_, &data_, row_ptr_.back(), &gpair_,
(row_end - row_begin) * model_param.num_output_group);
for (int fidx = 0; fidx < batch.Size(); fidx++) {
auto col = batch[fidx];
auto seg = column_segments[fidx];
dh::safe_cuda(cudaMemcpy(
data_.GetSpan().subspan(row_ptr_[fidx]).data(),
col.data() + seg.first,
sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice));
}
// Rescale indices with respect to current shard
RescaleIndices(ridx_begin_, &data_);
}
bool IsEmpty() {
return (ridx_end_ - ridx_begin_) == 0;
}
void UpdateGpair(const std::vector<GradientPair> &host_gpair,
const gbm::GBLinearModelParam &model_param) {
gpair_.copy(host_gpair.begin() + ridx_begin_ * model_param.num_output_group,
host_gpair.begin() + ridx_end_ * model_param.num_output_group);
}
GradientPair GetBiasGradient(int group_idx, int num_group) {
dh::safe_cuda(cudaSetDevice(device_id_));
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
return idx * num_group + group_idx;
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip(
counting, f);
auto perm = thrust::make_permutation_iterator(gpair_.tbegin(), skip);
return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_);
}
void UpdateBiasResidual(float dbias, int group_idx, int num_groups) {
if (dbias == 0.0f) return;
auto d_gpair = gpair_.GetSpan();
dh::LaunchN(device_id_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) {
auto &g = d_gpair[idx * num_groups + group_idx];
g += GradientPair(g.GetHess() * dbias, 0);
});
}
GradientPair GetGradient(int group_idx, int num_group, int fidx) {
dh::safe_cuda(cudaSetDevice(device_id_));
common::Span<xgboost::Entry> d_col = data_.GetSpan().subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
common::Span<GradientPair> d_gpair = gpair_.GetSpan();
auto counting = thrust::make_counting_iterator(0ull);
auto f = [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto g = d_gpair[entry.index * num_group + group_idx];
return GradientPair(g.GetGrad() * entry.fvalue,
g.GetHess() * entry.fvalue * entry.fvalue);
}; // NOLINT
thrust::transform_iterator<decltype(f), decltype(counting), GradientPair>
multiply_iterator(counting, f);
return dh::SumReduction(temp_, multiply_iterator, col_size);
}
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) {
common::Span<GradientPair> d_gpair = gpair_.GetSpan();
common::Span<Entry> d_col = data_.GetSpan().subspan(row_ptr_[fidx]);
size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx];
dh::LaunchN(device_id_, col_size, [=] __device__(size_t idx) {
auto entry = d_col[idx];
auto &g = d_gpair[entry.index * num_groups + group_idx];
g += GradientPair(g.GetHess() * dw * entry.fvalue, 0);
});
}
};
/**
* \class GPUCoordinateUpdater
*
* \brief Coordinate descent algorithm that updates one feature per iteration
*/
class GPUCoordinateUpdater : public LinearUpdater {
public:
// set training parameter
void Init(
const std::vector<std::pair<std::string, std::string>> &args) override {
tparam_.InitAllowUnknown(args);
selector.reset(FeatureSelector::Create(tparam_.feature_selector));
monitor.Init("GPUCoordinateUpdater");
}
void LazyInitShards(DMatrix *p_fmat,
const gbm::GBLinearModelParam &model_param) {
if (!shards.empty()) return;
dist_ = GPUDistribution::Block(GPUSet::All(tparam_.gpu_id, tparam_.n_gpus,
p_fmat->Info().num_row_));
auto devices = dist_.Devices();
size_t n_devices = static_cast<size_t>(devices.Size());
size_t row_begin = 0;
size_t num_row = static_cast<size_t>(p_fmat->Info().num_row_);
// Partition input matrix into row segments
std::vector<size_t> row_segments;
row_segments.push_back(0);
for (int d_idx = 0; d_idx < n_devices; ++d_idx) {
size_t shard_size = dist_.ShardSize(num_row, d_idx);
size_t row_end = row_begin + shard_size;
row_segments.push_back(row_end);
row_begin = row_end;
}
CHECK(p_fmat->SingleColBlock());
SparsePage const& batch = *(p_fmat->GetColumnBatches().begin());
shards.resize(n_devices);
// Create device shards
dh::ExecuteIndexShards(&shards,
[&](int i, std::unique_ptr<DeviceShard>& shard) {
shard = std::unique_ptr<DeviceShard>(
new DeviceShard(devices.DeviceId(i), batch, row_segments[i],
row_segments[i + 1], tparam_, model_param));
});
}
void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat,
gbm::GBLinearModel *model, double sum_instance_weight) override {
tparam_.DenormalizePenalties(sum_instance_weight);
monitor.Start("LazyInitShards");
this->LazyInitShards(p_fmat, model->param);
monitor.Stop("LazyInitShards");
monitor.Start("UpdateGpair");
// Update gpair
dh::ExecuteIndexShards(&shards, [&](int idx, std::unique_ptr<DeviceShard>& shard) {
if (!shard->IsEmpty()) {
shard->UpdateGpair(in_gpair->ConstHostVector(), model->param);
}
});
monitor.Stop("UpdateGpair");
monitor.Start("UpdateBias");
this->UpdateBias(p_fmat, model);
monitor.Stop("UpdateBias");
// prepare for updating the weights
selector->Setup(*model, in_gpair->ConstHostVector(), p_fmat,
tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm,
coord_param_.top_k);
monitor.Start("UpdateFeature");
for (auto group_idx = 0; group_idx < model->param.num_output_group;
++group_idx) {
for (auto i = 0U; i < model->param.num_feature; i++) {
auto fidx = selector->NextFeature(
i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat,
tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm);
if (fidx < 0) break;
this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model);
}
}
monitor.Stop("UpdateFeature");
}
void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) {
for (int group_idx = 0; group_idx < model->param.num_output_group;
++group_idx) {
// Get gradient
auto grad = dh::ReduceShards<GradientPair>(
&shards, [&](std::unique_ptr<DeviceShard> &shard) {
if (!shard->IsEmpty()) {
GradientPair result =
shard->GetBiasGradient(group_idx,
model->param.num_output_group);
return result;
}
return GradientPair(0, 0);
});
auto dbias = static_cast<float>(
tparam_.learning_rate *
CoordinateDeltaBias(grad.GetGrad(), grad.GetHess()));
model->bias()[group_idx] += dbias;
// Update residual
dh::ExecuteIndexShards(&shards, [&](int idx, std::unique_ptr<DeviceShard>& shard) {
if (!shard->IsEmpty()) {
shard->UpdateBiasResidual(dbias, group_idx,
model->param.num_output_group);
}
});
}
}
void UpdateFeature(int fidx, int group_idx,
std::vector<GradientPair> *in_gpair,
gbm::GBLinearModel *model) {
bst_float &w = (*model)[fidx][group_idx];
// Get gradient
auto grad = dh::ReduceShards<GradientPair>(
&shards, [&](std::unique_ptr<DeviceShard> &shard) {
if (!shard->IsEmpty()) {
return shard->GetGradient(group_idx, model->param.num_output_group,
fidx);
}
return GradientPair(0, 0);
});
auto dw = static_cast<float>(tparam_.learning_rate *
CoordinateDelta(grad.GetGrad(), grad.GetHess(),
w, tparam_.reg_alpha_denorm,
tparam_.reg_lambda_denorm));
w += dw;
dh::ExecuteIndexShards(&shards, [&](int idx,
std::unique_ptr<DeviceShard> &shard) {
if (!shard->IsEmpty()) {
shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx);
}
});
}
// training parameter
LinearTrainParam tparam_;
CoordinateParam coord_param_;
GPUDistribution dist_;
std::unique_ptr<FeatureSelector> selector;
common::Monitor monitor;
std::vector<std::unique_ptr<DeviceShard>> shards;
};
XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent")
.describe(
"Update linear model according to coordinate descent algorithm. GPU "
"accelerated.")
.set_body([]() { return new GPUCoordinateUpdater(); });
} // namespace linear
} // namespace xgboost
|
63c06d817b470c1a97c50210d8c1e03ba1edfc29.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "upper_left_opt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
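// Auto-generated benchmark harness: for every matrix size and block shape above, warm the
// kernel up and then time 1000 back-to-back launches of upper_left_opt, printing
// [total_microseconds, (block dims), (matrix dims)] per configuration.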
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *dst = NULL;
hipMalloc(&dst, XSIZE*YSIZE*sizeof(int));
int *input_itemsets = NULL;
hipMalloc(&input_itemsets, XSIZE*YSIZE*sizeof(int));
int *reference = NULL;
hipMalloc(&reference, XSIZE*YSIZE*sizeof(int));
int max_rows = 1;
int max_cols = 1;
int i = 1;
int penalty = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((upper_left_opt), dim3(gridBlock), dim3(threadBlock), 0, 0, dst, input_itemsets, reference, max_rows, max_cols, i, penalty);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
    hipLaunchKernelGGL((upper_left_opt), dim3(gridBlock), dim3(threadBlock), 0, 0, dst, input_itemsets, reference, max_rows, max_cols, i, penalty);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    hipLaunchKernelGGL((upper_left_opt), dim3(gridBlock), dim3(threadBlock), 0, 0, dst, input_itemsets, reference, max_rows, max_cols, i, penalty);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 63c06d817b470c1a97c50210d8c1e03ba1edfc29.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "upper_left_opt.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
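// Auto-generated benchmark harness: for every matrix size and block shape above, warm the
// kernel up and then time 1000 back-to-back launches of upper_left_opt, printing
// [total_microseconds, (block dims), (matrix dims)] per configuration.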
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *dst = NULL;
cudaMalloc(&dst, XSIZE*YSIZE*sizeof(int));
int *input_itemsets = NULL;
cudaMalloc(&input_itemsets, XSIZE*YSIZE*sizeof(int));
int *reference = NULL;
cudaMalloc(&reference, XSIZE*YSIZE*sizeof(int));
int max_rows = 1;
int max_cols = 1;
int i = 1;
int penalty = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
upper_left_opt<<<gridBlock,threadBlock>>>(dst,input_itemsets,reference,max_rows,max_cols,i,penalty);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
upper_left_opt<<<gridBlock,threadBlock>>>(dst,input_itemsets,reference,max_rows,max_cols,i,penalty);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
upper_left_opt<<<gridBlock,threadBlock>>>(dst,input_itemsets,reference,max_rows,max_cols,i,penalty);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6337a2d672a8dc02ba25eaa4c5f82a0a1fd49c61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/nd_index_slice_kernels.h"
#include "oneflow/core/cuda/atomic.cuh"
namespace oneflow {
namespace {
template<typename T, typename I>
__global__ void CudaGatherNd(NdIndexSliceArgs<T, I> args, const I* indices, const T* dense,
T* slices) {
DoGatherNd(args.num_slices * args.slice_size, args.slice_size, args.index_ndims, args.dense_shape,
indices, dense, slices);
}
template<typename T, typename I>
__global__ void CudaScatterNdAdd(NdIndexSliceArgs<T, I> args, const I* indices, const T* slices,
T* dense) {
DoScatterNdAdd<DeviceType::kGPU>(args.num_slices * args.slice_size, args.slice_size,
args.index_ndims, args.dense_shape, indices, slices, dense);
}
template<typename T, typename I>
__global__ void CudaFillByNdIndex(NdIndexSliceArgs<T, I> args, const I* indices, T* dense,
T value) {
DoFillByNdIndex(args.num_slices * args.slice_size, args.slice_size, args.index_ndims,
args.dense_shape, indices, dense, value);
}
} // namespace
template<typename T, typename I>
struct GatherNdFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* dense, T* slices) const {
RUN_CUDA_KERNEL((CudaGatherNd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
dense, slices);
}
};
template<typename T, typename I>
struct ScatterNdAddFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* slices, T* dense) const {
RUN_CUDA_KERNEL((CudaScatterNdAdd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
slices, dense);
}
};
template<typename T, typename I>
struct FillByNdIndexFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices, T* dense,
T value) const {
RUN_CUDA_KERNEL((CudaFillByNdIndex<T, I>), ctx, args.num_slices * args.slice_size, args,
indices, dense, value);
}
};
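// ScatterNdAdd may update the same dense element from several slice rows, so the per-element
// accumulation must be an atomic add on the GPU.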
template<typename T>
struct DeviceAdd<DeviceType::kGPU, T> {
__device__ __forceinline__ static void Invoke(const T* x, T* y) { cuda::atomic::Add(y, *x); }
};
#define GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ \
FLOATING_DATA_TYPE_SEQ \
OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_GATHER_ND_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_SCATTER_ND_ADD_FUNCTOR, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_FILL_BY_ND_INDEX_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_GATHER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_LIKE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_UPDATE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_ADD_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && TORCH_HIP_VERSION >= 10000
template<>
struct DeviceAdd<DeviceType::kGPU, float16> {
__device__ __forceinline__ static void Invoke(const float16* x, float16* y) {
cuda::atomic::Add(reinterpret_cast<half*>(y), *(reinterpret_cast<const half*>(x)));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ND_INDEX_SLICE_FUNCTORS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ND_INDEX_SLICE_KERNELS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#endif
} // namespace oneflow
| 6337a2d672a8dc02ba25eaa4c5f82a0a1fd49c61.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/user/kernels/nd_index_slice_kernels.h"
#include "oneflow/core/cuda/atomic.cuh"
namespace oneflow {
namespace {
template<typename T, typename I>
__global__ void CudaGatherNd(NdIndexSliceArgs<T, I> args, const I* indices, const T* dense,
T* slices) {
DoGatherNd(args.num_slices * args.slice_size, args.slice_size, args.index_ndims, args.dense_shape,
indices, dense, slices);
}
template<typename T, typename I>
__global__ void CudaScatterNdAdd(NdIndexSliceArgs<T, I> args, const I* indices, const T* slices,
T* dense) {
DoScatterNdAdd<DeviceType::kGPU>(args.num_slices * args.slice_size, args.slice_size,
args.index_ndims, args.dense_shape, indices, slices, dense);
}
template<typename T, typename I>
__global__ void CudaFillByNdIndex(NdIndexSliceArgs<T, I> args, const I* indices, T* dense,
T value) {
DoFillByNdIndex(args.num_slices * args.slice_size, args.slice_size, args.index_ndims,
args.dense_shape, indices, dense, value);
}
} // namespace
template<typename T, typename I>
struct GatherNdFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* dense, T* slices) const {
RUN_CUDA_KERNEL((CudaGatherNd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
dense, slices);
}
};
template<typename T, typename I>
struct ScatterNdAddFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices,
const T* slices, T* dense) const {
RUN_CUDA_KERNEL((CudaScatterNdAdd<T, I>), ctx, args.num_slices * args.slice_size, args, indices,
slices, dense);
}
};
template<typename T, typename I>
struct FillByNdIndexFunctor<DeviceType::kGPU, T, I> final {
void operator()(DeviceCtx* ctx, const NdIndexSliceArgs<T, I>& args, const I* indices, T* dense,
T value) const {
RUN_CUDA_KERNEL((CudaFillByNdIndex<T, I>), ctx, args.num_slices * args.slice_size, args,
indices, dense, value);
}
};
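// ScatterNdAdd may update the same dense element from several slice rows, so the per-element
// accumulation must be an atomic add on the GPU.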
template<typename T>
struct DeviceAdd<DeviceType::kGPU, T> {
__device__ __forceinline__ static void Invoke(const T* x, T* y) { cuda::atomic::Add(y, *x); }
};
#define GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ \
FLOATING_DATA_TYPE_SEQ \
OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_GATHER_ND_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_SCATTER_ND_ADD_FUNCTOR, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_FILL_BY_ND_INDEX_FUNCTOR, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_GATHER_ND_KERNELS, (DeviceType::kGPU),
ARITHMETIC_DATA_TYPE_SEQ UNSIGNED_INT_DATA_TYPE_SEQ,
INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_SCATTER_ND_LIKE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_UPDATE_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_TENSOR_GATHER_ND_ADD_KERNELS, (DeviceType::kGPU),
GPU_ATOMIC_ADD_SUPPORTED_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 && CUDA_VERSION >= 10000
template<>
struct DeviceAdd<DeviceType::kGPU, float16> {
__device__ __forceinline__ static void Invoke(const float16* x, float16* y) {
cuda::atomic::Add(reinterpret_cast<half*>(y), *(reinterpret_cast<const half*>(x)));
}
};
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_ND_INDEX_SLICE_FUNCTORS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(REGISTER_ND_INDEX_SLICE_KERNELS, (DeviceType::kGPU),
FLOAT16_DATA_TYPE_SEQ, INDEX_DATA_TYPE_SEQ)
#endif
} // namespace oneflow
|
103a1e90a620b5d99b36e27b0bf6011e873788a9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_zreduce_kernel_spmv1(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
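    // Remaining 64 -> 1 reduction: the complex precisions keep explicit __syncthreads() between
    // steps, while the real precisions below use a volatile shared-memory pointer for the
    // warp-synchronous tail.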
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
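// Fused kernel: each thread handles one CSR row of v = A*p, then joins a block-level partial
// dot product of v with r; the per-block results land in vtmp for the host-side reduction cascade.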
__global__ void
magma_zbicgmerge_spmv1_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * p,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgstab_alphakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_z_matrix
system matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dp magmaDoubleComplex_ptr
input vector p
@param[in]
dr magmaDoubleComplex_ptr
input vector r
@param[in]
dv magmaDoubleComplex_ptr
output vector v
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv1(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dp,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dv,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_zbicgmerge_spmv1_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
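    // Cascade the per-block partial sums down to a single value, ping-ponging between d1 and d2.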
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_alphakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_zreduce_kernel_spmv2(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_zbicgmerge_spmv2_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_omegakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
ds magmaDoubleComplex_ptr
input vector s
@param[in]
dt magmaDoubleComplex_ptr
output vector t
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv2(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr ds,
magmaDoubleComplex_ptr dt,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
hipLaunchKernelGGL(( magma_zbicgmerge_spmv2_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_omegakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
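// Fused vector update for BiCGSTAB: x += alpha*p + omega*s and r = s - omega*t, combined with
// block-partial dot products <rr,r> and <r,r> that are copied into skp[4] and skp[5] afterwards.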
__global__ void
magma_zbicgmerge_xrbeta_kernel(
int n,
magmaDoubleComplex * rr,
magmaDoubleComplex * r,
magmaDoubleComplex * p,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * skp,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if( i<n ){
magmaDoubleComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_betakernel(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
rr magmaDoubleComplex_ptr
input vector rr
@param[in]
r magmaDoubleComplex_ptr
input/output vector r
@param[in]
p magmaDoubleComplex_ptr
input vector p
@param[in]
s magmaDoubleComplex_ptr
input vector s
@param[in]
t magmaDoubleComplex_ptr
input vector t
@param[out]
x magmaDoubleComplex_ptr
output vector x
@param[in]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_xrbeta(
magma_int_t n,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr rr,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_zbicgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream(),
n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_zreduce_kernel_spmv2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream(),
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_zbicgstab_betakernel), dim3(Gs2), dim3(Bs2), 0, queue->cuda_stream(), skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
| 103a1e90a620b5d99b36e27b0bf6011e873788a9.cu | /*
-- MAGMA (version 2.5.4) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date October 2020
@precisions normal z -> c d s
@author Hartwig Anzt
*/
#include "magmasparse_internal.h"
#define BLOCK_SIZE 256
#define PRECISION_z
// These routines merge multiple kernels from zmergebicgstab into one
// This is the code used for the ASHES2014 paper
// "Accelerating Krylov Subspace Solvers on Graphics Processing Units".
// notice that only CSR format is supported so far.
// accelerated reduction for one vector
__global__ void
magma_zreduce_kernel_spmv1(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_Z_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
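    // Remaining 64 -> 1 reduction: the complex precisions keep explicit __syncthreads() between
    // steps, while the real precisions below use a volatile shared-memory pointer for the
    // warp-synchronous tail.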
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgmerge_spmv1_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * p,
magmaDoubleComplex * r,
magmaDoubleComplex * v,
magmaDoubleComplex * vtmp)
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * p[ dcolind[j] ];
v[ i ] = dot;
}
__syncthreads();
temp[ Idx ] = ( i < n ) ? v[ i ] * r[ i ] : MAGMA_Z_MAKE( 0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads();
temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
__global__ void
magma_zbicgstab_alphakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp = skp[0];
skp[0] = skp[4]/tmp;
}
}
/**
Purpose
-------
Merges the first SpmV using CSR with the dot product
and the computation of alpha
Arguments
---------
@param[in]
A magma_z_matrix
system matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
dp magmaDoubleComplex_ptr
input vector p
@param[in]
dr magmaDoubleComplex_ptr
input vector r
@param[in]
dv magmaDoubleComplex_ptr
output vector v
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters ( skp[0]=alpha )
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv1(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr dp,
magmaDoubleComplex_ptr dr,
magmaDoubleComplex_ptr dv,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_zbicgmerge_spmv1_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, dp, dr, dv, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_alphakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
// accelerated block reduction for multiple vectors
__global__ void
magma_zreduce_kernel_spmv2(
int Gs,
int n,
magmaDoubleComplex * vtmp,
magmaDoubleComplex * vtmp2 )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
int j;
for( j=0; j<2; j++){
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
temp[Idx+j*(blockSize)] = MAGMA_Z_MAKE( 0.0, 0.0);
while (i < Gs ) {
temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ];
temp[ Idx+j*(blockSize) ] +=
( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ]
: MAGMA_Z_MAKE( 0.0, 0.0);
i += gridSize;
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ];
temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ];
}
}
}
__global__ void
magma_zbicgmerge_spmv2_kernel(
int n,
magmaDoubleComplex * dval,
magma_index_t * drowptr,
magma_index_t * dcolind,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
if( i<n ){
magmaDoubleComplex dot = MAGMA_Z_ZERO;
int start = drowptr[ i ];
int end = drowptr[ i+1 ];
for( j=start; j<end; j++)
dot += dval[ j ] * s[ dcolind[j] ];
t[ i ] = dot;
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = t[i];
temp[Idx] = s[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_omegakernel(
magmaDoubleComplex * skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
skp[2] = skp[6]/skp[7];
skp[3] = skp[4];
}
}
/**
Purpose
-------
Merges the second SpmV using CSR with the dot product
and the computation of omega
Arguments
---------
@param[in]
A magma_z_matrix
input matrix
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
ds magmaDoubleComplex_ptr
input vector s
@param[in]
dt magmaDoubleComplex_ptr
output vector t
@param[in,out]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_spmv2(
magma_z_matrix A,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr ds,
magmaDoubleComplex_ptr dt,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int n = A.num_rows;
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
if ( A.storage_type == Magma_CSR)
magma_zbicgmerge_spmv2_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, A.dval, A.drow, A.dcol, ds, dt, d1 );
else
printf("error: only CSR format supported.\n");
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+6, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+7, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_omegakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
__global__ void
magma_zbicgmerge_xrbeta_kernel(
int n,
magmaDoubleComplex * rr,
magmaDoubleComplex * r,
magmaDoubleComplex * p,
magmaDoubleComplex * s,
magmaDoubleComplex * t,
magmaDoubleComplex * x,
magmaDoubleComplex * skp,
magmaDoubleComplex * vtmp )
{
extern __shared__ magmaDoubleComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
magmaDoubleComplex alpha=skp[0];
magmaDoubleComplex omega=skp[2];
if( i<n ){
magmaDoubleComplex sl;
sl = s[i];
x[i] = x[i] + alpha * p[i] + omega * sl;
r[i] = sl - omega * t[i];
}
__syncthreads();
// 2 vectors
if (i<n){
magmaDoubleComplex tmp2 = r[i];
temp[Idx] = rr[i] * tmp2;
temp[Idx+blockDim.x] = tmp2 * tmp2;
}
else {
for( j=0; j<2; j++)
temp[Idx+j*blockDim.x] = MAGMA_Z_MAKE( 0.0, 0.0);
}
__syncthreads();
if ( Idx < 128 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ];
}
}
__syncthreads();
if ( Idx < 64 ){
for( j=0; j<2; j++){
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ];
}
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ];
__syncthreads();
for( j=0; j<2; j++)
temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ];
__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile double *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
for( j=0; j<2; j++){
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ];
temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ];
}
}
#endif
if ( Idx == 0 ){
for( j=0; j<2; j++){
vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ];
}
}
}
__global__ void
magma_zbicgstab_betakernel(
magmaDoubleComplex * skp )
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaDoubleComplex tmp1 = skp[4]/skp[3];
magmaDoubleComplex tmp2 = skp[0] / skp[2];
skp[1] = tmp1*tmp2;
}
}
/**
Purpose
-------
    Merges the update of the vectors x and r with the dot products
    and the computation of beta
Arguments
---------
@param[in]
n int
dimension n
@param[in]
d1 magmaDoubleComplex_ptr
temporary vector
@param[in]
d2 magmaDoubleComplex_ptr
temporary vector
@param[in]
rr magmaDoubleComplex_ptr
input vector rr
@param[in]
r magmaDoubleComplex_ptr
input/output vector r
@param[in]
p magmaDoubleComplex_ptr
input vector p
@param[in]
s magmaDoubleComplex_ptr
input vector s
@param[in]
t magmaDoubleComplex_ptr
input vector t
@param[out]
x magmaDoubleComplex_ptr
output vector x
@param[in]
skp magmaDoubleComplex_ptr
array for parameters
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zgegpuk
********************************************************************/
extern "C" magma_int_t
magma_zbicgmerge_xrbeta(
magma_int_t n,
magmaDoubleComplex_ptr d1,
magmaDoubleComplex_ptr d2,
magmaDoubleComplex_ptr rr,
magmaDoubleComplex_ptr r,
magmaDoubleComplex_ptr p,
magmaDoubleComplex_ptr s,
magmaDoubleComplex_ptr t,
magmaDoubleComplex_ptr x,
magmaDoubleComplex_ptr skp,
magma_queue_t queue )
{
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( magma_ceildiv( n, local_block_size ) );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaDoubleComplex );
magmaDoubleComplex_ptr aux1 = d1, aux2 = d2;
int b = 1;
magma_zbicgmerge_xrbeta_kernel<<< Gs, Bs, Ms, queue->cuda_stream()>>>
( n, rr, r, p, s, t, x, skp, d1);
while( Gs.x > 1 ) {
Gs_next.x = magma_ceildiv( Gs.x, Bs.x );
if ( Gs_next.x == 1 ) Gs_next.x = 2;
magma_zreduce_kernel_spmv2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream()>>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if ( b ) { aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_zcopyvector( 1, aux1, 1, skp+4, 1, queue );
magma_zcopyvector( 1, aux1+n, 1, skp+5, 1, queue );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_zbicgstab_betakernel<<< Gs2, Bs2, 0, queue->cuda_stream()>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
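/*
   Hypothetical usage sketch: how the three merged routines in this file might be chained
   inside one BiCGSTAB iteration (the updates of p and s are assumed to happen elsewhere).
   The vector names and the assumption that d1/d2 are large enough to hold the partial
   block sums are placeholders; only the function signatures and the skp slots written by
   the small alpha/omega/beta kernels are taken from the code above.
*/
static magma_int_t example_bicgstab_step(
    magma_z_matrix A, magma_int_t n,
    magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2,
    magmaDoubleComplex_ptr drr, magmaDoubleComplex_ptr dr, magmaDoubleComplex_ptr dp,
    magmaDoubleComplex_ptr ds, magmaDoubleComplex_ptr dt, magmaDoubleComplex_ptr dv,
    magmaDoubleComplex_ptr dx, magmaDoubleComplex_ptr skp, magma_queue_t queue )
{
    // v = A*p and the dot product feeding alpha (alphakernel writes skp[0])
    magma_zbicgmerge_spmv1( A, d1, d2, dp, dr, dv, skp, queue );
    // t = A*s plus <s,t> and <t,t>, feeding omega (omegakernel writes skp[2])
    magma_zbicgmerge_spmv2( A, d1, d2, ds, dt, skp, queue );
    // x += alpha*p + omega*s, r = s - omega*t, plus <rr,r> and <r,r>, feeding beta (skp[1])
    magma_zbicgmerge_xrbeta( n, d1, d2, drr, dr, dp, ds, dt, dx, skp, queue );
    return MAGMA_SUCCESS;
}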
|
de00c87e69461cbc8f034fb66c218b68938167fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Something confuses me: why can't I get the same correct result every time?
*/
/*
These two kernels can be used on large arrays, but they are slow.
Best advice: call __syncthreads() before reading from an index written by a different thread.
*/
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = d_out[idx];
__syncthreads();
d_out[idx + step] += in1;
}
}
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
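/*
   Worked example of what these inclusive-scan kernels (and thrust::inclusive_scan in main)
   compute, assuming a small input:
       input : 1  2  3  4  5
       output: 1  3  6 10 15   (each element is the sum of all inputs up to and including it)
*/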
/*
These two kernels can only be used on small arrays (a single block's shared memory), but they are fast.
*/
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
extern __shared__ float sdata[];
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
  } // the code below performs the iterative scan in shared memory
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = sdata[idx];
__syncthreads();
sdata[idx + step] += in1;
}
d_out[idx] = sdata[idx];
}
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
extern __shared__ float sdata[];
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
}
sdata[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = sdata[idx - step];
__syncthreads();
sdata[idx] += in1;
}
d_out[idx] = sdata[idx];
}
/*
This kernel only produces a correct result when the array size is a power of 2.
*/
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];// allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[thid] = g_idata[thid];
for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1; //multiply by 2 implemented as bitwise operation
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[thid] = temp[thid];
}
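/*
   Hypothetical helper (a sketch, not required by the code below): one way to satisfy the
   power-of-2 restriction noted above is to pad the input with zeros up to the next power
   of two before launching blelloch_exclusive_scan. For reference, an exclusive scan of
   [1, 2, 3, 4] yields [0, 1, 3, 6].
*/
static int nextPowerOfTwo(int n) {
    int p = 1;
    while (p < n) p <<= 1;  // smallest power of two >= n
    return p;
}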
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 1025;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int maxThreadPerBlock = 512;
const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1;
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
hipMalloc((void**) &d_in, ARRAY_BYTES);
hipMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
//hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
//hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
//shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out);
//copy back the result array to the CPU
//hipMemcpy(h_out, d_out, ARRAY_BYTES, hipMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
hipFree(d_in);
hipFree(d_out);
return 0;
}
| de00c87e69461cbc8f034fb66c218b68938167fc.cu | #include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include<thrust/scan.h>
/*
Something confuses me: why can't I get the same correct result every time?
*/
/*
These two kernels can be used on large arrays, but they are slow.
Best advice: call __syncthreads() before reading from an index written by a different thread.
*/
__global__ void hillis_steele_scan_forward(float * d_out, float * d_in, const int array_size){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = d_out[idx];
__syncthreads();
d_out[idx + step] += in1;
}
}
__global__ void hillis_steele_scan_backward(float * d_out, float * d_in){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
d_out[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = d_out[idx - step];
__syncthreads();
d_out[idx] += in1;
}
}
/*
These two kernels can only be used on small arrays (a single block's shared memory), but they are fast.
*/
__global__ void shared_hillis_steele_scan_forward(float *d_out, float *d_in, const int array_size) {
extern __shared__ float sdata[];
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
  } // the code below performs the iterative scan in shared memory
for(int step = 1; step < array_size; step *= 2){
if(idx + step >= array_size) return;
__syncthreads();
float in1 = sdata[idx];
__syncthreads();
sdata[idx + step] += in1;
}
d_out[idx] = sdata[idx];
}
__global__ void shared_hillis_steele_scan_backward(float * d_out, float * d_in, const int array_size){
extern __shared__ float sdata[];
int idx = threadIdx.x;
if(idx < array_size) {
sdata[idx] = d_in[idx];
} else {
return;
}
sdata[idx] = d_in[idx];
for(int step = 1; step <= idx; step *= 2){
if(idx - step < 0) return;
__syncthreads();
float in1 = sdata[idx - step];
__syncthreads();
sdata[idx] += in1;
}
d_out[idx] = sdata[idx];
}
/*
This kernel only produces a correct result when the array size is a power of 2.
*/
__global__ void blelloch_exclusive_scan(float *g_odata, float *g_idata, int n)
{
extern __shared__ float temp[];// allocated on invocation
int thid = threadIdx.x;
int offset = 1;
temp[thid] = g_idata[thid];
for (int d = n / 2; d > 0; d /= 2) // build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset <<= 1; //multiply by 2 implemented as bitwise operation
}
if (thid == 0) { temp[n - 1] = 0; } // clear the last element
for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
g_odata[thid] = temp[thid];
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 1025;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int maxThreadPerBlock = 512;
const int numBlock = ARRAY_SIZE / maxThreadPerBlock + 1;
// generate the input array on the host
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_in;
float * d_out;
// allocate GPU memory
cudaMalloc((void**) &d_in, ARRAY_BYTES);
cudaMalloc((void**) &d_out, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
//hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in, ARRAY_SIZE);
//hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock>>>(d_out, d_in);
//shared_hillis_steele_scan_forward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//shared_hillis_steele_scan_backward<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
//blelloch_exclusive_scan<<<numBlock, maxThreadPerBlock, maxThreadPerBlock * sizeof(float)>>>(d_out, d_in, ARRAY_SIZE);
thrust::inclusive_scan(h_in, h_in + ARRAY_SIZE, h_out);
//copy back the result array to the CPU
//cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
3674b867b4244229a9761840e3b7079ef1f40950.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/adjustedRandIndex.cuh>
#include <metrics/contingencyMatrix.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
struct AdjustedRandIndexParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
// if this is true, then it is assumed that `sameArrays` is also true
// further it also assumes `lowerLabelRange` and `upperLabelRange` are 0
bool testZeroArray;
};
template <typename T, typename MathT = int>
class AdjustedRandIndexTest
: public ::testing::TestWithParam<AdjustedRandIndexParam> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AdjustedRandIndexParam>::GetParam();
nElements = params.nElements;
allocate(firstClusterArray, nElements, true);
allocate(secondClusterArray, nElements, true);
CUDA_CHECK(hipStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(new defaultDeviceAllocator);
if (!params.testZeroArray) {
SetUpDifferentArrays();
} else {
SetupZeroArray();
}
    //allocating and initializing memory on the GPU
computedAdjustedRandIndex = computeAdjustedRandIndex<T, MathT>(
firstClusterArray, secondClusterArray, nElements, allocator, stream);
}
void TearDown() override {
CUDA_CHECK(hipFree(firstClusterArray));
CUDA_CHECK(hipFree(secondClusterArray));
CUDA_CHECK(hipStreamDestroy(stream));
}
void SetUpDifferentArrays() {
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
// calculating golden output
int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
int *hGoldenOutput = (int *)malloc(sizeOfMat);
memset(hGoldenOutput, 0, sizeOfMat);
for (int i = 0; i < nElements; i++) {
int row = arr1[i] - lowerLabelRange;
int column = arr2[i] - lowerLabelRange;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
int sumOfNijCTwo = 0;
int *a = (int *)malloc(numUniqueClasses * sizeof(int));
int *b = (int *)malloc(numUniqueClasses * sizeof(int));
memset(a, 0, numUniqueClasses * sizeof(int));
memset(b, 0, numUniqueClasses * sizeof(int));
int sumOfAiCTwo = 0;
int sumOfBiCTwo = 0;
    //calculating the number of pairs of points in each contingency-matrix cell
    //and also reducing the contingency matrix along rows and columns
for (int i = 0; i < numUniqueClasses; ++i) {
for (int j = 0; j < numUniqueClasses; ++j) {
int Nij = hGoldenOutput[i * numUniqueClasses + j];
sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
a[i] += hGoldenOutput[i * numUniqueClasses + j];
b[i] += hGoldenOutput[j * numUniqueClasses + i];
}
}
    //calculating the number of pairs of points in every row sum (a)
    //and in every column sum (b)
for (int i = 0; i < numUniqueClasses; ++i) {
sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
}
//calculating the ARI
double nCTwo = double(nElements) * double(nElements - 1) / 2.0;
double expectedIndex =
(double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
double index = (double)sumOfNijCTwo;
if (maxIndex - expectedIndex)
truthAdjustedRandIndex =
(index - expectedIndex) / (maxIndex - expectedIndex);
else
truthAdjustedRandIndex = 0;
updateDevice(firstClusterArray, &arr1[0], nElements, stream);
updateDevice(secondClusterArray, &arr2[0], nElements, stream);
}
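  /*
     For reference, the loops above implement the standard adjusted Rand index. With
     contingency counts n_ij, row sums a_i, column sums b_j and n samples:
         Index         = sum_ij C(n_ij, 2)
         ExpectedIndex = [sum_i C(a_i, 2) * sum_j C(b_j, 2)] / C(n, 2)
         MaxIndex      = [sum_i C(a_i, 2) + sum_j C(b_j, 2)] / 2
         ARI           = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex)
     where C(m, 2) = m*(m-1)/2, and the ARI is taken to be 0 when MaxIndex == ExpectedIndex.
  */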
void SetupZeroArray() {
lowerLabelRange = 0;
upperLabelRange = 0;
truthAdjustedRandIndex = 1.0;
}
AdjustedRandIndexParam params;
T lowerLabelRange, upperLabelRange;
T *firstClusterArray = nullptr;
T *secondClusterArray = nullptr;
int nElements = 0;
double truthAdjustedRandIndex = 0;
double computedAdjustedRandIndex = 0;
hipStream_t stream;
};
const std::vector<AdjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001, false}, {200, 15, 100, false, 0.000001, false},
{100, 1, 20, false, 0.000001, false}, {10, 1, 10, false, 0.000001, false},
{198, 1, 100, false, 0.000001, false}, {300, 3, 99, false, 0.000001, false},
{199, 1, 10, true, 0.000001, false}, {200, 15, 100, true, 0.000001, false},
{100, 1, 20, true, 0.000001, false}, {10, 1, 10, true, 0.000001, false},
{198, 1, 100, true, 0.000001, false}, {300, 3, 99, true, 0.000001, false},
{199, 0, 0, false, 0.000001, true}, {200, 0, 0, false, 0.000001, true},
{100, 0, 0, false, 0.000001, true}, {10, 0, 0, false, 0.000001, true},
{198, 0, 0, false, 0.000001, true}, {300, 0, 0, false, 0.000001, true},
{199, 0, 0, true, 0.000001, true}, {200, 0, 0, true, 0.000001, true},
{100, 0, 0, true, 0.000001, true}, {10, 0, 0, true, 0.000001, true},
{198, 0, 0, true, 0.000001, true}, {300, 0, 0, true, 0.000001, true},
};
const std::vector<AdjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001, false},
{2000000, 1, 1000, true, 0.000001, false},
{2000000, 0, 0, false, 0.000001, true},
{2000000, 0, 0, true, 0.000001, true},
};
typedef AdjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_ii, ::testing::ValuesIn(inputs));
typedef AdjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AdjustedRandIndexLarge, ARI_il,
::testing::ValuesIn(large_inputs));
} //end namespace Metrics
} //end namespace MLCommon
| 3674b867b4244229a9761840e3b7079ef1f40950.cu | /*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include <algorithm>
#include <cuml/common/cuml_allocator.hpp>
#include <iostream>
#include <metrics/adjustedRandIndex.cuh>
#include <metrics/contingencyMatrix.cuh>
#include <random>
#include "test_utils.h"
namespace MLCommon {
namespace Metrics {
struct AdjustedRandIndexParam {
int nElements;
int lowerLabelRange;
int upperLabelRange;
bool sameArrays;
double tolerance;
// if this is true, then it is assumed that `sameArrays` is also true
// further it also assumes `lowerLabelRange` and `upperLabelRange` are 0
bool testZeroArray;
};
template <typename T, typename MathT = int>
class AdjustedRandIndexTest
: public ::testing::TestWithParam<AdjustedRandIndexParam> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AdjustedRandIndexParam>::GetParam();
nElements = params.nElements;
allocate(firstClusterArray, nElements, true);
allocate(secondClusterArray, nElements, true);
CUDA_CHECK(cudaStreamCreate(&stream));
std::shared_ptr<deviceAllocator> allocator(new defaultDeviceAllocator);
if (!params.testZeroArray) {
SetUpDifferentArrays();
} else {
SetupZeroArray();
}
    //allocating and initializing memory on the GPU
computedAdjustedRandIndex = computeAdjustedRandIndex<T, MathT>(
firstClusterArray, secondClusterArray, nElements, allocator, stream);
}
void TearDown() override {
CUDA_CHECK(cudaFree(firstClusterArray));
CUDA_CHECK(cudaFree(secondClusterArray));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void SetUpDifferentArrays() {
lowerLabelRange = params.lowerLabelRange;
upperLabelRange = params.upperLabelRange;
std::vector<int> arr1(nElements, 0);
std::vector<int> arr2(nElements, 0);
std::random_device rd;
std::default_random_engine dre(rd());
std::uniform_int_distribution<int> intGenerator(lowerLabelRange,
upperLabelRange);
std::generate(arr1.begin(), arr1.end(),
[&]() { return intGenerator(dre); });
if (params.sameArrays) {
arr2 = arr1;
} else {
std::generate(arr2.begin(), arr2.end(),
[&]() { return intGenerator(dre); });
}
// calculating golden output
int numUniqueClasses = upperLabelRange - lowerLabelRange + 1;
size_t sizeOfMat = numUniqueClasses * numUniqueClasses * sizeof(int);
int *hGoldenOutput = (int *)malloc(sizeOfMat);
memset(hGoldenOutput, 0, sizeOfMat);
for (int i = 0; i < nElements; i++) {
int row = arr1[i] - lowerLabelRange;
int column = arr2[i] - lowerLabelRange;
hGoldenOutput[row * numUniqueClasses + column] += 1;
}
int sumOfNijCTwo = 0;
int *a = (int *)malloc(numUniqueClasses * sizeof(int));
int *b = (int *)malloc(numUniqueClasses * sizeof(int));
memset(a, 0, numUniqueClasses * sizeof(int));
memset(b, 0, numUniqueClasses * sizeof(int));
int sumOfAiCTwo = 0;
int sumOfBiCTwo = 0;
    //calculating the number of pairs of points in each contingency-matrix cell
    //and also reducing the contingency matrix along rows and columns
for (int i = 0; i < numUniqueClasses; ++i) {
for (int j = 0; j < numUniqueClasses; ++j) {
int Nij = hGoldenOutput[i * numUniqueClasses + j];
sumOfNijCTwo += ((Nij) * (Nij - 1)) / 2;
a[i] += hGoldenOutput[i * numUniqueClasses + j];
b[i] += hGoldenOutput[j * numUniqueClasses + i];
}
}
    //calculating the number of pairs of points in every row sum (a)
    //and in every column sum (b)
for (int i = 0; i < numUniqueClasses; ++i) {
sumOfAiCTwo += ((a[i]) * (a[i] - 1)) / 2;
sumOfBiCTwo += ((b[i]) * (b[i] - 1)) / 2;
}
//calculating the ARI
double nCTwo = double(nElements) * double(nElements - 1) / 2.0;
double expectedIndex =
(double(sumOfBiCTwo) * double(sumOfAiCTwo)) / double(nCTwo);
double maxIndex = (double(sumOfAiCTwo) + double(sumOfBiCTwo)) / 2.0;
double index = (double)sumOfNijCTwo;
if (maxIndex - expectedIndex)
truthAdjustedRandIndex =
(index - expectedIndex) / (maxIndex - expectedIndex);
else
truthAdjustedRandIndex = 0;
updateDevice(firstClusterArray, &arr1[0], nElements, stream);
updateDevice(secondClusterArray, &arr2[0], nElements, stream);
}
void SetupZeroArray() {
lowerLabelRange = 0;
upperLabelRange = 0;
truthAdjustedRandIndex = 1.0;
}
AdjustedRandIndexParam params;
T lowerLabelRange, upperLabelRange;
T *firstClusterArray = nullptr;
T *secondClusterArray = nullptr;
int nElements = 0;
double truthAdjustedRandIndex = 0;
double computedAdjustedRandIndex = 0;
cudaStream_t stream;
};
const std::vector<AdjustedRandIndexParam> inputs = {
{199, 1, 10, false, 0.000001, false}, {200, 15, 100, false, 0.000001, false},
{100, 1, 20, false, 0.000001, false}, {10, 1, 10, false, 0.000001, false},
{198, 1, 100, false, 0.000001, false}, {300, 3, 99, false, 0.000001, false},
{199, 1, 10, true, 0.000001, false}, {200, 15, 100, true, 0.000001, false},
{100, 1, 20, true, 0.000001, false}, {10, 1, 10, true, 0.000001, false},
{198, 1, 100, true, 0.000001, false}, {300, 3, 99, true, 0.000001, false},
{199, 0, 0, false, 0.000001, true}, {200, 0, 0, false, 0.000001, true},
{100, 0, 0, false, 0.000001, true}, {10, 0, 0, false, 0.000001, true},
{198, 0, 0, false, 0.000001, true}, {300, 0, 0, false, 0.000001, true},
{199, 0, 0, true, 0.000001, true}, {200, 0, 0, true, 0.000001, true},
{100, 0, 0, true, 0.000001, true}, {10, 0, 0, true, 0.000001, true},
{198, 0, 0, true, 0.000001, true}, {300, 0, 0, true, 0.000001, true},
};
const std::vector<AdjustedRandIndexParam> large_inputs = {
{2000000, 1, 1000, false, 0.000001, false},
{2000000, 1, 1000, true, 0.000001, false},
{2000000, 0, 0, false, 0.000001, true},
{2000000, 0, 0, true, 0.000001, true},
};
typedef AdjustedRandIndexTest<int, int> ARI_ii;
TEST_P(ARI_ii, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_ii, ::testing::ValuesIn(inputs));
typedef AdjustedRandIndexTest<int, unsigned long long> ARI_il;
TEST_P(ARI_il, Result) {
ASSERT_NEAR(computedAdjustedRandIndex, truthAdjustedRandIndex,
params.tolerance);
}
INSTANTIATE_TEST_CASE_P(AdjustedRandIndex, ARI_il, ::testing::ValuesIn(inputs));
INSTANTIATE_TEST_CASE_P(AdjustedRandIndexLarge, ARI_il,
::testing::ValuesIn(large_inputs));
} //end namespace Metrics
} //end namespace MLCommon
|
d17ac0d6e078794ead84a767b24316ade707d879.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This file contains C wrappers around some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#include <cutil_inline.h> // includes cuda.h and hip/hip_runtime_api.h
#include <shrQATest.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_gl_interop.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include "particles_kernel.cu"
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("No CUDA Capable devices found, exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
}
} else {
devID = cutGetMaxGflopsDeviceId();
hipSetDevice( devID );
}
}
void cudaGLInit(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
cutilDeviceInit(argc, argv);
} else {
hipGLSetGLDevice( cutGetMaxGflopsDeviceId() );
}
}
void allocateArray(void **devPtr, size_t size)
{
cutilSafeCall(hipMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
cutilSafeCall(hipFree(devPtr));
}
void threadSync()
{
cutilSafeCall(cutilDeviceSynchronize());
}
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
cutilSafeCall(hipMemcpy((char *) device + offset, host, size, hipMemcpyHostToDevice));
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
cutilSafeCall(hipGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
hipGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(hipGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
cutilSafeCall(hipGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
cutilSafeCall(hipGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(hipGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void* host, const void* device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
device = mapGLBufferObject(cuda_vbo_resource);
cutilSafeCall(hipMemcpy(host, device, size, hipMemcpyDeviceToHost));
if (cuda_vbo_resource)
unmapGLBufferObject(*cuda_vbo_resource);
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
cutilSafeCall( hipMemcpyToSymbol(params, hostParams, sizeof(SimParams)) );
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
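// Worked example: with numParticles = 1000 and blockSize = 256, computeGridSize gives
// numThreads = 256 and numBlocks = iDivUp(1000, 256) = 4, i.e. 1024 launched threads
// covering the 1000 particles.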
void integrateSystem(float *pos,
float *vel,
float deltaTime,
uint numParticles)
{
thrust::device_ptr<float4> d_pos4((float4 *)pos);
thrust::device_ptr<float4> d_vel4((float4 *)vel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_vel4)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos4+numParticles, d_vel4+numParticles)),
integrate_functor(deltaTime));
}
void calcHash(uint* gridParticleHash,
uint* gridParticleIndex,
float* pos,
int numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( calcHashD), dim3(numBlocks), dim3(numThreads) , 0, 0, gridParticleHash,
gridParticleIndex,
(float4 *) pos,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
}
void reorderDataAndFindCellStart(uint* cellStart,
uint* cellEnd,
float* sortedPos,
float* sortedVel,
uint* gridParticleHash,
uint* gridParticleIndex,
float* oldPos,
float* oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
cutilSafeCall(hipMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
#if USE_TEX
cutilSafeCall(hipBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4)));
#endif
uint smemSize = sizeof(uint)*(numThreads+1);
hipLaunchKernelGGL(( reorderDataAndFindCellStartD), dim3(numBlocks), dim3(numThreads), smemSize, 0,
cellStart,
cellEnd,
(float4 *) sortedPos,
(float4 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float4 *) oldPos,
(float4 *) oldVel,
numParticles);
cutilCheckMsg("Kernel execution failed: reorderDataAndFindCellStartD");
#if USE_TEX
cutilSafeCall(hipUnbindTexture(oldPosTex));
cutilSafeCall(hipUnbindTexture(oldVelTex));
#endif
}
void collide(float* newVel,
float* sortedPos,
float* sortedVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles,
uint numCells)
{
#if USE_TEX
cutilSafeCall(hipBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint)));
cutilSafeCall(hipBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint)));
#endif
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
hipLaunchKernelGGL(( collideD), dim3(numBlocks), dim3(numThreads) , 0, 0, (float4*)newVel,
(float4*)sortedPos,
(float4*)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
#if USE_TEX
cutilSafeCall(hipUnbindTexture(oldPosTex));
cutilSafeCall(hipUnbindTexture(oldVelTex));
cutilSafeCall(hipUnbindTexture(cellStartTex));
cutilSafeCall(hipUnbindTexture(cellEndTex));
#endif
}
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
thrust::device_ptr<uint>(dGridParticleHash + numParticles),
thrust::device_ptr<uint>(dGridParticleIndex));
}
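/*
   Presumed per-step call order (based on the standard CUDA particles sample; not stated
   explicitly in this file):
       calcHash -> sortParticles -> reorderDataAndFindCellStart -> collide -> integrateSystem
   i.e. hash each particle to its grid cell, sort particles by hash, build the
   cellStart/cellEnd ranges, resolve collisions using those ranges, then advance
   positions and velocities.
*/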
void eject(float* newVel,
float* sortedPos,
float* sortedVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles,
uint numCells)
{
int N = 10;
//dim3 tpb(N,1,1);
hiprandState_t* devStates;
hipMalloc ( &devStates, N*sizeof( hiprandState_t ) );
#if USE_TEX
cutilSafeCall(hipBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4)));
cutilSafeCall(hipBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint)));
cutilSafeCall(hipBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint)));
#endif
// thread per particle
uint numThreads, numBlocks;
float *devRand, *hostResults ;
int i;
computeGridSize(numParticles, 64, numBlocks, numThreads);
//setup_kernel <<< numThreads, numBlocks >>> ( devStates);
//hostResults = (float*) calloc(64 * 64 , sizeof(float)) ;
//hipMalloc((void **) & devRand , 64 * 64 * sizeof(float)) ;
//hipMemset(devRand , 0, 64*64*sizeof(int));
hipLaunchKernelGGL(( random), dim3(numBlocks), dim3(numThreads) , 0, 0, (float4*)newVel,
(float4*)sortedPos,
(float4*)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles
);
// check if kernel invocation generated an error
/*cutilCheckMsg("Kernel execution failed");
hipMemcpy(hostResults, devRand, 64*64*sizeof(int), hipMemcpyDeviceToHost);
for ( i = 0; i < 64 * 64; i ++) {
printf("Fraction with low bit set was %10.13f \n" ,(float) hostResults[i]);}*/
#if USE_TEX
cutilSafeCall(hipUnbindTexture(oldPosTex));
cutilSafeCall(hipUnbindTexture(oldVelTex));
cutilSafeCall(hipUnbindTexture(cellStartTex));
cutilSafeCall(hipUnbindTexture(cellEndTex));
#endif
}
} // extern "C"
| d17ac0d6e078794ead84a767b24316ade707d879.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This file contains C wrappers around some of the CUDA API and the
// kernel functions so that they can be called from "particleSystem.cpp"
#include <cutil_inline.h> // includes cuda.h and cuda_runtime_api.h
#include <shrQATest.h>
#include <cstdlib>
#include <cstdio>
#include <string.h>
#if defined(__APPLE__) || defined(MACOSX)
#include <GLUT/glut.h>
#else
#include <GL/freeglut.h>
#endif
#include <cuda_gl_interop.h>
#include "thrust/device_ptr.h"
#include "thrust/for_each.h"
#include "thrust/iterator/zip_iterator.h"
#include "thrust/sort.h"
#include "particles_kernel.cu"
extern "C"
{
void cudaInit(int argc, char **argv)
{
int devID;
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
devID = cutilDeviceInit(argc, argv);
if (devID < 0) {
printf("No CUDA Capable devices found, exiting...\n");
shrQAFinishExit(argc, (const char **)argv, QA_WAIVED);
}
} else {
devID = cutGetMaxGflopsDeviceId();
cudaSetDevice( devID );
}
}
void cudaGLInit(int argc, char **argv)
{
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
if( cutCheckCmdLineFlag(argc, (const char**)argv, "device") ) {
cutilDeviceInit(argc, argv);
} else {
cudaGLSetGLDevice( cutGetMaxGflopsDeviceId() );
}
}
void allocateArray(void **devPtr, size_t size)
{
cutilSafeCall(cudaMalloc(devPtr, size));
}
void freeArray(void *devPtr)
{
cutilSafeCall(cudaFree(devPtr));
}
void threadSync()
{
cutilSafeCall(cutilDeviceSynchronize());
}
void copyArrayToDevice(void* device, const void* host, int offset, int size)
{
cutilSafeCall(cudaMemcpy((char *) device + offset, host, size, cudaMemcpyHostToDevice));
}
void registerGLBufferObject(uint vbo, struct cudaGraphicsResource **cuda_vbo_resource)
{
cutilSafeCall(cudaGraphicsGLRegisterBuffer(cuda_vbo_resource, vbo,
cudaGraphicsMapFlagsNone));
}
void unregisterGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(cudaGraphicsUnregisterResource(cuda_vbo_resource));
}
void *mapGLBufferObject(struct cudaGraphicsResource **cuda_vbo_resource)
{
void *ptr;
cutilSafeCall(cudaGraphicsMapResources(1, cuda_vbo_resource, 0));
size_t num_bytes;
cutilSafeCall(cudaGraphicsResourceGetMappedPointer((void **)&ptr, &num_bytes,
*cuda_vbo_resource));
return ptr;
}
void unmapGLBufferObject(struct cudaGraphicsResource *cuda_vbo_resource)
{
cutilSafeCall(cudaGraphicsUnmapResources(1, &cuda_vbo_resource, 0));
}
void copyArrayFromDevice(void* host, const void* device,
struct cudaGraphicsResource **cuda_vbo_resource, int size)
{
if (cuda_vbo_resource)
device = mapGLBufferObject(cuda_vbo_resource);
cutilSafeCall(cudaMemcpy(host, device, size, cudaMemcpyDeviceToHost));
if (cuda_vbo_resource)
unmapGLBufferObject(*cuda_vbo_resource);
}
void setParameters(SimParams *hostParams)
{
// copy parameters to constant memory
cutilSafeCall( cudaMemcpyToSymbol(params, hostParams, sizeof(SimParams)) );
}
//Round a / b to nearest higher integer value
uint iDivUp(uint a, uint b){
return (a % b != 0) ? (a / b + 1) : (a / b);
}
// compute grid and thread block size for a given number of elements
void computeGridSize(uint n, uint blockSize, uint &numBlocks, uint &numThreads)
{
numThreads = min(blockSize, n);
numBlocks = iDivUp(n, numThreads);
}
void integrateSystem(float *pos,
float *vel,
float deltaTime,
uint numParticles)
{
thrust::device_ptr<float4> d_pos4((float4 *)pos);
thrust::device_ptr<float4> d_vel4((float4 *)vel);
thrust::for_each(
thrust::make_zip_iterator(thrust::make_tuple(d_pos4, d_vel4)),
thrust::make_zip_iterator(thrust::make_tuple(d_pos4+numParticles, d_vel4+numParticles)),
integrate_functor(deltaTime));
}
void calcHash(uint* gridParticleHash,
uint* gridParticleIndex,
float* pos,
int numParticles)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// execute the kernel
calcHashD<<< numBlocks, numThreads >>>(gridParticleHash,
gridParticleIndex,
(float4 *) pos,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
}
void reorderDataAndFindCellStart(uint* cellStart,
uint* cellEnd,
float* sortedPos,
float* sortedVel,
uint* gridParticleHash,
uint* gridParticleIndex,
float* oldPos,
float* oldVel,
uint numParticles,
uint numCells)
{
uint numThreads, numBlocks;
computeGridSize(numParticles, 256, numBlocks, numThreads);
// set all cells to empty
cutilSafeCall(cudaMemset(cellStart, 0xffffffff, numCells*sizeof(uint)));
#if USE_TEX
cutilSafeCall(cudaBindTexture(0, oldPosTex, oldPos, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, oldVelTex, oldVel, numParticles*sizeof(float4)));
#endif
uint smemSize = sizeof(uint)*(numThreads+1);
reorderDataAndFindCellStartD<<< numBlocks, numThreads, smemSize>>>(
cellStart,
cellEnd,
(float4 *) sortedPos,
(float4 *) sortedVel,
gridParticleHash,
gridParticleIndex,
(float4 *) oldPos,
(float4 *) oldVel,
numParticles);
cutilCheckMsg("Kernel execution failed: reorderDataAndFindCellStartD");
#if USE_TEX
cutilSafeCall(cudaUnbindTexture(oldPosTex));
cutilSafeCall(cudaUnbindTexture(oldVelTex));
#endif
}
void collide(float* newVel,
float* sortedPos,
float* sortedVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles,
uint numCells)
{
#if USE_TEX
cutilSafeCall(cudaBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint)));
cutilSafeCall(cudaBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint)));
#endif
// thread per particle
uint numThreads, numBlocks;
computeGridSize(numParticles, 64, numBlocks, numThreads);
// execute the kernel
collideD<<< numBlocks, numThreads >>>((float4*)newVel,
(float4*)sortedPos,
(float4*)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles);
// check if kernel invocation generated an error
cutilCheckMsg("Kernel execution failed");
#if USE_TEX
cutilSafeCall(cudaUnbindTexture(oldPosTex));
cutilSafeCall(cudaUnbindTexture(oldVelTex));
cutilSafeCall(cudaUnbindTexture(cellStartTex));
cutilSafeCall(cudaUnbindTexture(cellEndTex));
#endif
}
void sortParticles(uint *dGridParticleHash, uint *dGridParticleIndex, uint numParticles)
{
thrust::sort_by_key(thrust::device_ptr<uint>(dGridParticleHash),
thrust::device_ptr<uint>(dGridParticleHash + numParticles),
thrust::device_ptr<uint>(dGridParticleIndex));
}
void eject(float* newVel,
float* sortedPos,
float* sortedVel,
uint* gridParticleIndex,
uint* cellStart,
uint* cellEnd,
uint numParticles,
uint numCells)
{
int N = 10;
//dim3 tpb(N,1,1);
curandState* devStates;
cudaMalloc ( &devStates, N*sizeof( curandState ) );
#if USE_TEX
cutilSafeCall(cudaBindTexture(0, oldPosTex, sortedPos, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, oldVelTex, sortedVel, numParticles*sizeof(float4)));
cutilSafeCall(cudaBindTexture(0, cellStartTex, cellStart, numCells*sizeof(uint)));
cutilSafeCall(cudaBindTexture(0, cellEndTex, cellEnd, numCells*sizeof(uint)));
#endif
// thread per particle
uint numThreads, numBlocks;
float *devRand, *hostResults ;
int i;
computeGridSize(numParticles, 64, numBlocks, numThreads);
//setup_kernel <<< numThreads, numBlocks >>> ( devStates);
//hostResults = (float*) calloc(64 * 64 , sizeof(float)) ;
//cudaMalloc((void **) & devRand , 64 * 64 * sizeof(float)) ;
//cudaMemset(devRand , 0, 64*64*sizeof(int));
random<<< numBlocks, numThreads >>>((float4*)newVel,
(float4*)sortedPos,
(float4*)sortedVel,
gridParticleIndex,
cellStart,
cellEnd,
numParticles
);
// check if kernel invocation generated an error
/*cutilCheckMsg("Kernel execution failed");
cudaMemcpy(hostResults, devRand, 64*64*sizeof(int), cudaMemcpyDeviceToHost);
for ( i = 0; i < 64 * 64; i ++) {
printf("Fraction with low bit set was %10.13f \n" ,(float) hostResults[i]);}*/
#if USE_TEX
cutilSafeCall(cudaUnbindTexture(oldPosTex));
cutilSafeCall(cudaUnbindTexture(oldVelTex));
cutilSafeCall(cudaUnbindTexture(cellStartTex));
cutilSafeCall(cudaUnbindTexture(cellEndTex));
#endif
}
} // extern "C"
|
87a8137fc136502f278df704c203652d3980c03a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "IntegratorVerlet.h"
#include <chrono>
#undef _XOPEN_SOURCE
#undef _POSIX_C_SOURCE
#include <boost/python.hpp>
#include <boost/shared_ptr.hpp>
#include "Logging.h"
#include "State.h"
#include "Fix.h"
#include "cutils_func.h"
using namespace MD_ENGINE;
namespace py = boost::python;
__global__ void nve_v_cu(int nAtoms, float4 *vs, float4 *fs, float dtf) {
int idx = GETIDX();
if (idx < nAtoms) {
// Update velocity by a half timestep
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
fs[idx] = make_float4(0.0f, 0.0f, 0.0f, force.w);
}
}
__global__ void nve_x_cu(int nAtoms, float4 *xs, float4 *vs, float dt) {
int idx = GETIDX();
if (idx < nAtoms) {
// Update position by a full timestep
float4 vel = vs[idx];
float4 pos = xs[idx];
//printf("pos %f %f %f\n", pos.x, pos.y, pos.z);
//printf("vel %f %f %f\n", vel.x, vel.y, vel.z);
float3 dx = dt*make_float3(vel);
pos += dx;
xs[idx] = pos;
}
}
__global__ void nve_xPIMD_cu(int nAtoms, int nPerRingPoly, float omegaP, float4 *xs, float4 *vs, float dt) {
// Declare relevant variables for NM transformation
int idx = GETIDX();
extern __shared__ float3 xsvs[];
float3 *xsNM = xsvs; // normal-mode transform of position
float3 *vsNM = xsvs + PERBLOCK; // normal-mode transform of velocity
float3 *tbr = xsvs + 2*PERBLOCK; // working array to place variables "to be reduced"
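// This layout assumes the kernel is launched with 3*PERBLOCK*sizeof(float3) bytes of
// dynamic shared memory (see the nve_xPIMD_cu launch in IntegratorVerlet::nve_x).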
bool useThread = idx < nAtoms;
float3 xn = make_float3(0, 0, 0);
float3 vn = make_float3(0, 0, 0);
float xW;
float vW;
// helpful reference indices/identifiers
bool needSync= nPerRingPoly>warpSize;
bool amRoot = (threadIdx.x % nPerRingPoly) == 0;
int rootIdx = (threadIdx.x / nPerRingPoly) * nPerRingPoly;
int beadIdx = idx % nPerRingPoly;
int n = beadIdx + 1;
// 1. Transform to normal mode positions and velocities
// xNM_k = \sum_{n=1}^P x_n* Cnk
// Cnk = \sqrt(1/P) k = 0
// Cnk = \sqrt(2/P) cosf(2*pi*k*n/P) 1<= k <= P/2 -1
// Cnk = \sqrt(1/P)(-1)^n k = P/2
// Cnk = \sqrt(2/P) sinf(2*pi*k*n/P) P/2+1<= k <= P -1
// 2. advance positions/velocities by full timestep according
// to free ring-polymer evolution
// 3. back transform to regular coordinates
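// For illustration (P = 4 assumed), the orthogonal transform matrix Cnk built from the
// rules above is approximately, for rows n = 1..4 and columns k = 0..3,
//   [ 0.50   0.000  -0.50  -0.707 ]
//   [ 0.50  -0.707   0.50   0.000 ]
//   [ 0.50   0.000  -0.50   0.707 ]
//   [ 0.50   0.707   0.50   0.000 ]
// so each xNM_k is a matrix-vector product over the beads of one ring polymer.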
float invP = 1.0f / (float) nPerRingPoly;
float twoPiInvP = 2.0f * M_PI * invP;
float invSqrtP = sqrtf(invP);
float sqrt2 = sqrtf(2.0f);
int halfP = nPerRingPoly / 2; // P must be even for the following transformation!!!
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 1. COORDINATE TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// first we will compute the normal mode coordinates for the positions, and then the velocities
// using an identical structure. Each thread (bead) will contribute to each mode index, and
// the strategy will be to store the results of each bead for a given mode in a working array
// reduce the result by summation and store that in the final array for generating the positions
// %%%%%%%%%%% POSITIONS %%%%%%%%%%%
if (useThread) {
float4 xWhole = xs[idx];
float4 vWhole = vs[idx];
xn = make_float3(xWhole);
vn = make_float3(vWhole);
xW = xWhole.w;
vW = vWhole.w;
}
// k = 0, n = 1,...,P
tbr[threadIdx.x] = xn;
if (needSync) __syncthreads();
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = xn * -1;
} else {
tbr[threadIdx.x] = xn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cos(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// %%%%%%%%%%% VELOCITIES %%%%%%%%%%%
// k = 0, n = 1,...,P
tbr[threadIdx.x] = vn;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = vn * -1;
} else {
tbr[threadIdx.x] = vn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cos(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
if (useThread ) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 2. NORMAL-MODE RP COORDINATE EVOLUTION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// here each bead will handle the evolution of a particular normal-mode coordinate
// xk(t+dt) = xk(t)*cos(om_k*dt) + vk(t)*sinf(om_k*dt)/om_k
// vk(t+dt) = vk(t)*cosf(om_k*dt) - xk(t)*sinf(om_k*dt)*om_k
// k = 0
if (amRoot) {
xsNM[threadIdx.x] += vsNM[threadIdx.x] * dt;
} else {
float omegaK = 2.0f * omegaP * sinf( beadIdx * twoPiInvP * 0.5f);
float cosdt = cosf(omegaK * dt);
float sindt = sinf(omegaK * dt);
float3 xsNMk = xsNM[threadIdx.x];
float3 vsNMk = vsNM[threadIdx.x];
xsNM[threadIdx.x] *= cosdt;
vsNM[threadIdx.x] *= cosdt;
xsNM[threadIdx.x] += vsNMk * sindt / omegaK;
vsNM[threadIdx.x] -= xsNMk * sindt * omegaK;
}
}
if (needSync) {__syncthreads();}
if (useThread) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 3. COORDINATE BACK TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// k = 0
xn = xsNM[rootIdx];
vn = vsNM[rootIdx];
// k = halfP
// xn += xsNM[halfP]*(-1)**n
if (threadIdx.x % 2) {
xn += xsNM[rootIdx+halfP];
vn += vsNM[rootIdx+halfP];
} else {
xn -= xsNM[rootIdx+halfP];
vn -= vsNM[rootIdx+halfP];
}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * cosval;
vn += vsNM[rootIdx+k] * sqrt2 * cosval;
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * sinval;
vn += vsNM[rootIdx+k] * sqrt2 * sinval;
}
// replace evolved back-transformation
xn *= invSqrtP;
vn *= invSqrtP;
xs[idx] = make_float4(xn.x,xn.y,xn.z,xW);
vs[idx] = make_float4(vn.x,vn.y,vn.z,vW);
}
}
// preForce_cu is split into two steps (nve_v, nve_x) if any of the fixes (a barostat, for example) needs to insert a step in between, as determined by the requiresPostNVE_V flag
__global__ void preForce_cu(int nAtoms, float4 *xs, float4 *vs, float4 *fs,
float dt, float dtf)
{
int idx = GETIDX();
if (idx < nAtoms) {
// Update velocity by a half timestep
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
// Update position by a full timestep
float4 pos = xs[idx];
//printf("vel %f %f %f\n", vel.x, vel.y, vel.z);
float3 dx = dt*make_float3(vel);
pos += dx;
xs[idx] = pos;
// Set forces to zero before force calculation
fs[idx] = make_float4(0.0f, 0.0f, 0.0f, force.w);
}
}
// alternative version of preForce_cu which allows for normal-mode propagation of RP dynamics
// need to pass nPerRingPoly and omega_P
__global__ void preForcePIMD_cu(int nAtoms, int nPerRingPoly, float omegaP, float4 *xs, float4 *vs, float4 *fs,
float dt, float dtf)
{
// Declare relevant variables for NM transformation
int idx = GETIDX();
extern __shared__ float3 xsvs[];
float3 *xsNM = xsvs; // normal-mode transform of position
float3 *vsNM = xsvs + PERBLOCK; // normal-mode transform of velocity
float3 *tbr = xsvs + 2*PERBLOCK; // working array to place variables "to be reduced"
bool useThread = idx < nAtoms;
float3 xn = make_float3(0, 0, 0);
float3 vn = make_float3(0, 0, 0);
float xW;
float vW;
// helpful reference indices/identifiers
bool needSync= nPerRingPoly>warpSize;
bool amRoot = (threadIdx.x % nPerRingPoly) == 0;
int rootIdx = (threadIdx.x / nPerRingPoly) * nPerRingPoly;
int beadIdx = idx % nPerRingPoly;
int n = beadIdx + 1;
// Update velocity by a half timestep for all beads in the ring polymer
if (useThread) {
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
fs[idx] = make_float4(0.0f,0.0f,0.0f,force.w); // reset forces to zero before force calculation
}
//NOT SYNCED
// 1. Transform to normal mode positions and velocities
// xNM_k = \sum_{n=1}^P x_n* Cnk
// Cnk = \sqrt(1/P) k = 0
// Cnk = \sqrt(2/P) cosf(2*pi*k*n/P) 1<= k <= P/2 -1
// Cnk = \sqrt(1/P)(-1)^n k = P/2
// Cnk = \sqrt(2/P) sinf(2*pi*k*n/P) P/2+1<= k <= P -1
// 2. advance positions/velocities by full timestep according
// to free ring-polymer evolution
// 3. back transform to regular coordinates
float invP = 1.0f / (float) nPerRingPoly;
float twoPiInvP = 2.0f * M_PI * invP;
float invSqrtP = sqrtf(invP);
float sqrt2 = sqrtf(2.0f);
int halfP = nPerRingPoly / 2; // P must be even for the following transformation!!!
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 1. COORDINATE TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// first we will compute the normal mode coordinates for the positions, and then the velocities
// using an identical structure. Each thread (bead) will contribute to each mode index, and
// the strategy will be to store the results of each bead for a given mode in a working array
// reduce the result by summation and store that in the final array for generating the positions
// %%%%%%%%%%% POSITIONS %%%%%%%%%%%
if (useThread) {
float4 xWhole = xs[idx];
float4 vWhole = vs[idx];
xn = make_float3(xWhole);
vn = make_float3(vWhole);
xW = xWhole.w;
vW = vWhole.w;
}
//STILL NOT SYNCED
// k = 0, n = 1,...,P
tbr[threadIdx.x] = xn;
if (needSync) __syncthreads();
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
//SYNCED
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = xn * -1;
} else {
tbr[threadIdx.x] = xn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// %%%%%%%%%%% VELOCITIES %%%%%%%%%%%
// k = 0, n = 1,...,P
tbr[threadIdx.x] = vn;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = vn * -1;
} else {
tbr[threadIdx.x] = vn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
if (useThread ) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 2. NORMAL-MODE RP COORDINATE EVOLUTION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// here each bead will handle the evolution of a particular normal-mode coordinate
// xk(t+dt) = xk(t)*cosf(om_k*dt) + vk(t)*sinf(om_k*dt)/om_k
// vk(t+dt) = vk(t)*cosf(om_k*dt) - xk(t)*sinf(om_k*dt)*om_k
// k = 0
if (amRoot) {
xsNM[threadIdx.x] += vsNM[threadIdx.x] * dt;
} else {
float omegaK = 2.0f * omegaP * sinf( beadIdx * twoPiInvP * 0.5);
float cosdt = cosf(omegaK * dt);
float sindt = sinf(omegaK * dt);
float3 xsNMk = xsNM[threadIdx.x];
float3 vsNMk = vsNM[threadIdx.x];
xsNM[threadIdx.x] *= cosdt;
vsNM[threadIdx.x] *= cosdt;
xsNM[threadIdx.x] += vsNMk * sindt / omegaK;
vsNM[threadIdx.x] -= xsNMk * sindt * omegaK;
}
}
if (needSync) {__syncthreads();}
if (useThread) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 3. COORDINATE BACK TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// k = 0
xn = xsNM[rootIdx];
vn = vsNM[rootIdx];
// k = halfP
// xn += xsNM[halfP]*(-1)**n
if (threadIdx.x % 2) {
//POTENTIAL PROBLEM
xn += xsNM[rootIdx+halfP];
vn += vsNM[rootIdx+halfP];
} else {//THIS TOO
xn -= xsNM[rootIdx+halfP];
vn -= vsNM[rootIdx+halfP];
}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * cosval;
vn += vsNM[rootIdx+k] * sqrt2 * cosval;
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * sinval;
vn += vsNM[rootIdx+k] * sqrt2 * sinval;
}
// replace evolved back-transformation
xn *= invSqrtP;
vn *= invSqrtP;
xs[idx] = make_float4(xn.x,xn.y,xn.z,xW);
vs[idx] = make_float4(vn.x,vn.y,vn.z,vW);
}
}
//if (useThread && amRoot ) {
// printf("--xx = %f\n",xs[idx].x);
// printf("--vx = %f\n",vs[idx].x);
// printf("--fx = %f\n",fs[idx].x);
// printf("R = np.array([");
// for (int i = 0; i <nPerRingPoly; i++) {
// printf("%f, ",xs[threadIdx.x+i].x);
// }
// printf("])\n");
// printf("V = np.array([");
// for (int i = 0; i <nPerRingPoly; i++) {
// printf("%f, ",vs[threadIdx.x+i].x);
// }
// printf("])\n");
//}
__global__ void postForce_cu(int nAtoms, float4 *vs, float4 *fs, float dtf)
{
int idx = GETIDX();
if (idx < nAtoms) {
// Update velocities by a half timestep
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
}
}
IntegratorVerlet::IntegratorVerlet(State *state_)
: Integrator(state_)
{
}
void IntegratorVerlet::run(int numTurns)
{
basicPreRunChecks();
//basicPrepare(numTurns); //nlist built here
//force(false);
std::vector<bool> prepared = basicPrepare(numTurns);
force(true);
for (int i = 0; i<prepared.size(); i++) {
if (!prepared[i]) {
for (Fix *f : state->fixes) {
bool isPrepared = f->prepareForRun();
if (!isPrepared) {
mdError("A fix is unable to be instantiated correctly.");
}
}
}
}
int periodicInterval = state->periodicInterval;
auto start = std::chrono::high_resolution_clock::now();
DataManager &dataManager = state->dataManager;
dtf = 0.5f * state->dt * state->units.ftm_to_v;
for (int i=0; i<numTurns; ++i) {
if (state->turn % periodicInterval == 0) {
state->gridGPU.periodicBoundaryConditions();
}
int virialMode = dataManager.getVirialModeForTurn(state->turn);
stepInit(virialMode==1 or virialMode==2);
// Perform first half of velocity-Verlet step
if (state->requiresPostNVE_V) {
nve_v();
postNVE_V();
nve_x();
} else {
preForce();
}
postNVE_X();
//printf("preForce IS COMMENTED OUT\n");
handleBoundsChange();
// Recalculate forces
force(virialMode);
//quits if ctrl+c has been pressed
checkQuit();
// Perform second half of velocity-Verlet step
postForce();
stepFinal();
asyncOperations();
//HEY - MAKE DATA APPENDING HAPPEN WHILE SOMETHING IS GOING ON THE GPU.
doDataComputation();
doDataAppending();
dataManager.clearVirialTurn(state->turn);
//! \todo The following parts could also be moved into stepFinal
state->turn++;
if (state->verbose && (i+1 == numTurns || state->turn % state->shoutEvery == 0)) {
mdMessage("Turn %d %.2f percent done.\n", (int)state->turn, 100.0*(i+1)/numTurns);
}
}
//! \todo These parts could be moved to basicFinish()
hipDeviceSynchronize();
CUT_CHECK_ERROR("after run\n");
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> duration = end - start;
mdMessage("runtime %f\n%e particle timesteps per second\n",
duration.count(), state->atoms.size()*numTurns / duration.count());
basicFinish();
}
void IntegratorVerlet::nve_v() {
uint activeIdx = state->gpd.activeIdx();
hipLaunchKernelGGL(( nve_v_cu), dim3(NBLOCK(state->atoms.size())), dim3(PERBLOCK), 0, 0,
state->atoms.size(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
dtf);
}
void IntegratorVerlet::nve_x() {
uint activeIdx = state->gpd.activeIdx();
if (state->nPerRingPoly == 1) {
hipLaunchKernelGGL(( nve_x_cu), dim3(NBLOCK(state->atoms.size())), dim3(PERBLOCK), 0, 0,
state->atoms.size(),
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->dt); }
else {
// get target temperature from thermostat fix
double temp;
for (Fix *f: state->fixes) {
if ( f->isThermostat && f->groupHandle == "all" ) {
std::string t = "temp";
temp = f->getInterpolator(t)->getCurrentVal();
}
}
int nPerRingPoly = state->nPerRingPoly;
int nRingPoly = state->atoms.size() / nPerRingPoly;
float omegaP = (float) state->units.boltz * temp / state->units.hbar ;
hipLaunchKernelGGL(( nve_xPIMD_cu), dim3(NBLOCK(state->atoms.size())), dim3(PERBLOCK), sizeof(float3) * 3 *PERBLOCK, 0,
state->atoms.size(),
nPerRingPoly,
omegaP,
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->dt);
}
}
void IntegratorVerlet::preForce()
{
uint activeIdx = state->gpd.activeIdx();
if (state->nPerRingPoly == 1) {
hipLaunchKernelGGL(( preForce_cu), dim3(NBLOCK(state->atoms.size())), dim3(PERBLOCK), 0, 0,
state->atoms.size(),
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
state->dt,
dtf); }
else {
// get target temperature from thermostat fix
// XXX: need to think about how to handle if no thermostat
double temp;
for (Fix *f: state->fixes) {
if ( f->isThermostat && f->groupHandle == "all" ) {
std::string t = "temp";
temp = f->getInterpolator(t)->getCurrentVal();
}
}
int nPerRingPoly = state->nPerRingPoly;
int nRingPoly = state->atoms.size() / nPerRingPoly;
float omegaP = (float) state->units.boltz * temp / state->units.hbar ;
// called on a per bead basis
hipLaunchKernelGGL(( preForcePIMD_cu), dim3(NBLOCK(state->atoms.size())), dim3(PERBLOCK), sizeof(float3) * 3 *PERBLOCK , 0,
state->atoms.size(),
nPerRingPoly,
omegaP,
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
state->dt,
dtf );
}
}
void IntegratorVerlet::postForce()
{
uint activeIdx = state->gpd.activeIdx();
hipLaunchKernelGGL(( postForce_cu), dim3(NBLOCK(state->atoms.size())), dim3(PERBLOCK), 0, 0,
state->atoms.size(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
dtf);
}
void export_IntegratorVerlet()
{
py::class_<IntegratorVerlet,
boost::shared_ptr<IntegratorVerlet>,
py::bases<Integrator>,
boost::noncopyable>
(
"IntegratorVerlet",
py::init<State *>()
)
.def("run", &IntegratorVerlet::run,(py::arg("numTurns")))
;
}
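// The boost::python export above makes the integrator scriptable; a minimal Python-side
// sketch (object names assumed) would be:
//   integrator = IntegratorVerlet(state)
//   integrator.run(1000)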
| 87a8137fc136502f278df704c203652d3980c03a.cu | #include "IntegratorVerlet.h"
#include <chrono>
#undef _XOPEN_SOURCE
#undef _POSIX_C_SOURCE
#include <boost/python.hpp>
#include <boost/shared_ptr.hpp>
#include "Logging.h"
#include "State.h"
#include "Fix.h"
#include "cutils_func.h"
using namespace MD_ENGINE;
namespace py = boost::python;
__global__ void nve_v_cu(int nAtoms, float4 *vs, float4 *fs, float dtf) {
int idx = GETIDX();
if (idx < nAtoms) {
// Update velocity by a half timestep
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
fs[idx] = make_float4(0.0f, 0.0f, 0.0f, force.w);
}
}
__global__ void nve_x_cu(int nAtoms, float4 *xs, float4 *vs, float dt) {
int idx = GETIDX();
if (idx < nAtoms) {
// Update position by a full timestep
float4 vel = vs[idx];
float4 pos = xs[idx];
//printf("pos %f %f %f\n", pos.x, pos.y, pos.z);
//printf("vel %f %f %f\n", vel.x, vel.y, vel.z);
float3 dx = dt*make_float3(vel);
pos += dx;
xs[idx] = pos;
}
}
__global__ void nve_xPIMD_cu(int nAtoms, int nPerRingPoly, float omegaP, float4 *xs, float4 *vs, float dt) {
// Declare relevant variables for NM transformation
int idx = GETIDX();
extern __shared__ float3 xsvs[];
float3 *xsNM = xsvs; // normal-mode transform of position
float3 *vsNM = xsvs + PERBLOCK; // normal-mode transform of velocity
float3 *tbr = xsvs + 2*PERBLOCK; // working array to place variables "to be reduced"
bool useThread = idx < nAtoms;
float3 xn = make_float3(0, 0, 0);
float3 vn = make_float3(0, 0, 0);
float xW;
float vW;
// helpful reference indices/identifiers
bool needSync= nPerRingPoly>warpSize;
bool amRoot = (threadIdx.x % nPerRingPoly) == 0;
int rootIdx = (threadIdx.x / nPerRingPoly) * nPerRingPoly;
int beadIdx = idx % nPerRingPoly;
int n = beadIdx + 1;
// 1. Transform to normal mode positions and velocities
// xNM_k = \sum_{n=1}^P x_n* Cnk
// Cnk = \sqrt(1/P) k = 0
// Cnk = \sqrt(2/P) cosf(2*pi*k*n/P) 1<= k <= P/2 -1
// Cnk = \sqrt(1/P)(-1)^n k = P/2
// Cnk = \sqrt(2/P) sinf(2*pi*k*n/P) P/2+1<= k <= P -1
// 2. advance positions/velocities by full timestep according
// to free ring-polymer evolution
// 3. back transform to regular coordinates
float invP = 1.0f / (float) nPerRingPoly;
float twoPiInvP = 2.0f * M_PI * invP;
float invSqrtP = sqrtf(invP);
float sqrt2 = sqrtf(2.0f);
int halfP = nPerRingPoly / 2; // P must be even for the following transformation!!!
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 1. COORDINATE TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// first we will compute the normal mode coordinates for the positions, and then the velocities
// using an identical structure. Each thread (bead) will contribute to each mode index, and
// the strategy will be to store the results of each bead for a given mode in a working array
// reduce the result by summation and store that in the final array for generating the positions
// %%%%%%%%%%% POSITIONS %%%%%%%%%%%
if (useThread) {
float4 xWhole = xs[idx];
float4 vWhole = vs[idx];
xn = make_float3(xWhole);
vn = make_float3(vWhole);
xW = xWhole.w;
vW = vWhole.w;
}
// k = 0, n = 1,...,P
tbr[threadIdx.x] = xn;
if (needSync) __syncthreads();
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = xn * -1;
} else {
tbr[threadIdx.x] = xn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cos(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// %%%%%%%%%%% VELOCITIES %%%%%%%%%%%
// k = 0, n = 1,...,P
tbr[threadIdx.x] = vn;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = vn * -1;
} else {
tbr[threadIdx.x] = vn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cos(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
if (useThread ) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 2. NORMAL-MODE RP COORDINATE EVOLUTION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// here each bead will handle the evolution of a particular normal-mode coordinate
// xk(t+dt) = xk(t)*cos(om_k*dt) + vk(t)*sinf(om_k*dt)/om_k
// vk(t+dt) = vk(t)*cosf(om_k*dt) - xk(t)*sinf(om_k*dt)*om_k
// k = 0
if (amRoot) {
xsNM[threadIdx.x] += vsNM[threadIdx.x] * dt;
} else {
float omegaK = 2.0f * omegaP * sinf( beadIdx * twoPiInvP * 0.5f);
float cosdt = cosf(omegaK * dt);
float sindt = sinf(omegaK * dt);
float3 xsNMk = xsNM[threadIdx.x];
float3 vsNMk = vsNM[threadIdx.x];
xsNM[threadIdx.x] *= cosdt;
vsNM[threadIdx.x] *= cosdt;
xsNM[threadIdx.x] += vsNMk * sindt / omegaK;
vsNM[threadIdx.x] -= xsNMk * sindt * omegaK;
}
}
if (needSync) {__syncthreads();}
if (useThread) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 3. COORDINATE BACK TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// k = 0
xn = xsNM[rootIdx];
vn = vsNM[rootIdx];
// k = halfP
// xn += xsNM[halfP]*(-1)**n
if (threadIdx.x % 2) {
xn += xsNM[rootIdx+halfP];
vn += vsNM[rootIdx+halfP];
} else {
xn -= xsNM[rootIdx+halfP];
vn -= vsNM[rootIdx+halfP];
}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * cosval;
vn += vsNM[rootIdx+k] * sqrt2 * cosval;
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * sinval;
vn += vsNM[rootIdx+k] * sqrt2 * sinval;
}
// replace evolved back-transformation
xn *= invSqrtP;
vn *= invSqrtP;
xs[idx] = make_float4(xn.x,xn.y,xn.z,xW);
vs[idx] = make_float4(vn.x,vn.y,vn.z,vW);
}
}
// preForce_cu is split into two steps (nve_v, nve_x) if any of the fixes (a barostat, for example) needs to insert a step in between, as determined by the requiresPostNVE_V flag
__global__ void preForce_cu(int nAtoms, float4 *xs, float4 *vs, float4 *fs,
float dt, float dtf)
{
int idx = GETIDX();
if (idx < nAtoms) {
// Update velocity by a half timestep
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
// Update position by a full timestep
float4 pos = xs[idx];
//printf("vel %f %f %f\n", vel.x, vel.y, vel.z);
float3 dx = dt*make_float3(vel);
pos += dx;
xs[idx] = pos;
// Set forces to zero before force calculation
fs[idx] = make_float4(0.0f, 0.0f, 0.0f, force.w);
}
}
// alternative version of preForce_cu which allows for normal-mode propagation of RP dynamics
// need to pass nPerRingPoly and omega_P
__global__ void preForcePIMD_cu(int nAtoms, int nPerRingPoly, float omegaP, float4 *xs, float4 *vs, float4 *fs,
float dt, float dtf)
{
// Declare relevant variables for NM transformation
int idx = GETIDX();
extern __shared__ float3 xsvs[];
float3 *xsNM = xsvs; // normal-mode transform of position
float3 *vsNM = xsvs + PERBLOCK; // normal-mode transform of velocity
float3 *tbr = xsvs + 2*PERBLOCK; // working array to place variables "to be reduced"
bool useThread = idx < nAtoms;
float3 xn = make_float3(0, 0, 0);
float3 vn = make_float3(0, 0, 0);
float xW;
float vW;
// helpful reference indices/identifiers
bool needSync= nPerRingPoly>warpSize;
bool amRoot = (threadIdx.x % nPerRingPoly) == 0;
int rootIdx = (threadIdx.x / nPerRingPoly) * nPerRingPoly;
int beadIdx = idx % nPerRingPoly;
int n = beadIdx + 1;
// Update velocity by a half timestep for all beads in the ring polymer
if (useThread) {
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
fs[idx] = make_float4(0.0f,0.0f,0.0f,force.w); // reset forces to zero before force calculation
}
//NOT SYNCED
// 1. Transform to normal mode positions and velocities
// xNM_k = \sum_{n=1}^P x_n* Cnk
// Cnk = \sqrt(1/P) k = 0
// Cnk = \sqrt(2/P) cosf(2*pi*k*n/P) 1<= k <= P/2 -1
// Cnk = \sqrt(1/P)(-1)^n k = P/2
// Cnk = \sqrt(2/P) sinf(2*pi*k*n/P) P/2+1<= k <= P -1
// 2. advance positions/velocities by full timestep according
// to free ring-polymer evolution
// 3. back transform to regular coordinates
float invP = 1.0f / (float) nPerRingPoly;
float twoPiInvP = 2.0f * M_PI * invP;
float invSqrtP = sqrtf(invP);
float sqrt2 = sqrtf(2.0f);
int halfP = nPerRingPoly / 2; // P must be even for the following transformation!!!
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 1. COORDINATE TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// first we will compute the normal mode coordinates for the positions, and then the velocities
// using an identical structure. Each thread (bead) will contribute to each mode index, and
// the strategy will be to store the results of each bead for a given mode in a working array
// reduce the result by summation and store that in the final array for generating the positions
// %%%%%%%%%%% POSITIONS %%%%%%%%%%%
if (useThread) {
float4 xWhole = xs[idx];
float4 vWhole = vs[idx];
xn = make_float3(xWhole);
vn = make_float3(vWhole);
xW = xWhole.w;
vW = vWhole.w;
}
//STILL NOT SYNCED
// k = 0, n = 1,...,P
tbr[threadIdx.x] = xn;
if (needSync) __syncthreads();
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
//SYNCED
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = xn * -1;
} else {
tbr[threadIdx.x] = xn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = xn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {xsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// %%%%%%%%%%% VELOCITIES %%%%%%%%%%%
// k = 0, n = 1,...,P
tbr[threadIdx.x] = vn;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x] = tbr[threadIdx.x]*invSqrtP;}
// k = P/2, n = 1,...,P
if (threadIdx.x % 2 == 0) {
tbr[threadIdx.x] = vn * -1;
} else {
tbr[threadIdx.x] = vn ;
}
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+halfP] = tbr[threadIdx.x]*invSqrtP;}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*cosval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
tbr[threadIdx.x] = vn*sqrt2*sinval;
if (needSync) { __syncthreads();}
reduceByN<float3>(tbr, nPerRingPoly, warpSize);
if (useThread && amRoot) {vsNM[threadIdx.x+k] = tbr[threadIdx.x]*invSqrtP;}
}
if (useThread ) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 2. NORMAL-MODE RP COORDINATE EVOLUTION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// here each bead will handle the evolution of a particular normal-mode coordinate
// xk(t+dt) = xk(t)*cosf(om_k*dt) + vk(t)*sinf(om_k*dt)/om_k
// vk(t+dt) = vk(t)*cosf(om_k*dt) - xk(t)*sinf(om_k*dt)*om_k
// k = 0
if (amRoot) {
xsNM[threadIdx.x] += vsNM[threadIdx.x] * dt;
} else {
float omegaK = 2.0f * omegaP * sinf( beadIdx * twoPiInvP * 0.5);
float cosdt = cosf(omegaK * dt);
float sindt = sinf(omegaK * dt);
float3 xsNMk = xsNM[threadIdx.x];
float3 vsNMk = vsNM[threadIdx.x];
xsNM[threadIdx.x] *= cosdt;
vsNM[threadIdx.x] *= cosdt;
xsNM[threadIdx.x] += vsNMk * sindt / omegaK;
vsNM[threadIdx.x] -= xsNMk * sindt * omegaK;
}
}
if (needSync) {__syncthreads();}
if (useThread) {
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// 3. COORDINATE BACK TRANSFORMATION
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// k = 0
xn = xsNM[rootIdx];
vn = vsNM[rootIdx];
// k = halfP
// xn += xsNM[halfP]*(-1)**n
if (threadIdx.x % 2) {
//POTENTIAL PROBLEM
xn += xsNM[rootIdx+halfP];
vn += vsNM[rootIdx+halfP];
} else {//THIS TOO
xn -= xsNM[rootIdx+halfP];
vn -= vsNM[rootIdx+halfP];
}
// k = 1,...,P/2-1; n = 1,...,P
for (int k = 1; k < halfP; k++) {
float cosval = cosf(twoPiInvP * k * n); // cosf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * cosval;
vn += vsNM[rootIdx+k] * sqrt2 * cosval;
}
// k = P/2+1,...,P-1; n = 1,...,P
for (int k = halfP+1; k < nPerRingPoly; k++) {
float sinval = sinf(twoPiInvP * k * n); // sinf(2*pi*k*n/P)
xn += xsNM[rootIdx+k] * sqrt2 * sinval;
vn += vsNM[rootIdx+k] * sqrt2 * sinval;
}
// replace evolved back-transformation
xn *= invSqrtP;
vn *= invSqrtP;
xs[idx] = make_float4(xn.x,xn.y,xn.z,xW);
vs[idx] = make_float4(vn.x,vn.y,vn.z,vW);
}
}
//if (useThread && amRoot ) {
// printf("--xx = %f\n",xs[idx].x);
// printf("--vx = %f\n",vs[idx].x);
// printf("--fx = %f\n",fs[idx].x);
// printf("R = np.array([");
// for (int i = 0; i <nPerRingPoly; i++) {
// printf("%f, ",xs[threadIdx.x+i].x);
// }
// printf("])\n");
// printf("V = np.array([");
// for (int i = 0; i <nPerRingPoly; i++) {
// printf("%f, ",vs[threadIdx.x+i].x);
// }
// printf("])\n");
//}
__global__ void postForce_cu(int nAtoms, float4 *vs, float4 *fs, float dtf)
{
int idx = GETIDX();
if (idx < nAtoms) {
// Update velocities by a half timestep
float4 vel = vs[idx];
float invmass = vel.w;
float4 force = fs[idx];
float3 dv = dtf * invmass * make_float3(force);
vel += dv;
vs[idx] = vel;
}
}
IntegratorVerlet::IntegratorVerlet(State *state_)
: Integrator(state_)
{
}
void IntegratorVerlet::run(int numTurns)
{
basicPreRunChecks();
//basicPrepare(numTurns); //nlist built here
//force(false);
std::vector<bool> prepared = basicPrepare(numTurns);
force(true);
for (int i = 0; i<prepared.size(); i++) {
if (!prepared[i]) {
for (Fix *f : state->fixes) {
bool isPrepared = f->prepareForRun();
if (!isPrepared) {
mdError("A fix is unable to be instantiated correctly.");
}
}
}
}
int periodicInterval = state->periodicInterval;
auto start = std::chrono::high_resolution_clock::now();
DataManager &dataManager = state->dataManager;
dtf = 0.5f * state->dt * state->units.ftm_to_v;
for (int i=0; i<numTurns; ++i) {
if (state->turn % periodicInterval == 0) {
state->gridGPU.periodicBoundaryConditions();
}
int virialMode = dataManager.getVirialModeForTurn(state->turn);
stepInit(virialMode==1 or virialMode==2);
// Perform first half of velocity-Verlet step
if (state->requiresPostNVE_V) {
nve_v();
postNVE_V();
nve_x();
} else {
preForce();
}
postNVE_X();
//printf("preForce IS COMMENTED OUT\n");
handleBoundsChange();
// Recalculate forces
force(virialMode);
//quits if ctrl+c has been pressed
checkQuit();
// Perform second half of velocity-Verlet step
postForce();
stepFinal();
asyncOperations();
//HEY - MAKE DATA APPENDING HAPPEN WHILE SOMETHING IS GOING ON THE GPU.
doDataComputation();
doDataAppending();
dataManager.clearVirialTurn(state->turn);
//! \todo The following parts could also be moved into stepFinal
state->turn++;
if (state->verbose && (i+1 == numTurns || state->turn % state->shoutEvery == 0)) {
mdMessage("Turn %d %.2f percent done.\n", (int)state->turn, 100.0*(i+1)/numTurns);
}
}
//! \todo These parts could be moved to basicFinish()
cudaDeviceSynchronize();
CUT_CHECK_ERROR("after run\n");
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> duration = end - start;
mdMessage("runtime %f\n%e particle timesteps per second\n",
duration.count(), state->atoms.size()*numTurns / duration.count());
basicFinish();
}
void IntegratorVerlet::nve_v() {
uint activeIdx = state->gpd.activeIdx();
nve_v_cu<<<NBLOCK(state->atoms.size()), PERBLOCK>>>(
state->atoms.size(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
dtf);
}
void IntegratorVerlet::nve_x() {
uint activeIdx = state->gpd.activeIdx();
if (state->nPerRingPoly == 1) {
nve_x_cu<<<NBLOCK(state->atoms.size()), PERBLOCK>>>(
state->atoms.size(),
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->dt); }
else {
// get target temperature from thermostat fix
double temp;
for (Fix *f: state->fixes) {
if ( f->isThermostat && f->groupHandle == "all" ) {
std::string t = "temp";
temp = f->getInterpolator(t)->getCurrentVal();
}
}
int nPerRingPoly = state->nPerRingPoly;
int nRingPoly = state->atoms.size() / nPerRingPoly;
float omegaP = (float) state->units.boltz * temp / state->units.hbar ;
nve_xPIMD_cu<<<NBLOCK(state->atoms.size()), PERBLOCK, sizeof(float3) * 3 *PERBLOCK>>>(
state->atoms.size(),
nPerRingPoly,
omegaP,
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->dt);
}
}
void IntegratorVerlet::preForce()
{
uint activeIdx = state->gpd.activeIdx();
if (state->nPerRingPoly == 1) {
preForce_cu<<<NBLOCK(state->atoms.size()), PERBLOCK>>>(
state->atoms.size(),
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
state->dt,
dtf); }
else {
// get target temperature from thermostat fix
// XXX: need to think about how to handle if no thermostat
double temp;
for (Fix *f: state->fixes) {
if ( f->isThermostat && f->groupHandle == "all" ) {
std::string t = "temp";
temp = f->getInterpolator(t)->getCurrentVal();
}
}
int nPerRingPoly = state->nPerRingPoly;
int nRingPoly = state->atoms.size() / nPerRingPoly;
float omegaP = (float) state->units.boltz * temp / state->units.hbar ;
// called on a per bead basis
preForcePIMD_cu<<<NBLOCK(state->atoms.size()), PERBLOCK, sizeof(float3) * 3 *PERBLOCK >>>(
state->atoms.size(),
nPerRingPoly,
omegaP,
state->gpd.xs.getDevData(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
state->dt,
dtf );
}
}
void IntegratorVerlet::postForce()
{
uint activeIdx = state->gpd.activeIdx();
postForce_cu<<<NBLOCK(state->atoms.size()), PERBLOCK>>>(
state->atoms.size(),
state->gpd.vs.getDevData(),
state->gpd.fs.getDevData(),
dtf);
}
void export_IntegratorVerlet()
{
py::class_<IntegratorVerlet,
boost::shared_ptr<IntegratorVerlet>,
py::bases<Integrator>,
boost::noncopyable>
(
"IntegratorVerlet",
py::init<State *>()
)
.def("run", &IntegratorVerlet::run,(py::arg("numTurns")))
;
}
|
804f2f74ce990836dd935948032e19ab281820fa.hip | // !!! This is a file automatically generated by hipify!!!
#include "THHUNN.h"
struct absupdateOutput_functor
{
__device__ void operator()(float* output, const float* input) const
{
*output = abs(*input);
}
};
void THNN_CudaAbs_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, absupdateOutput_functor());
}
struct absupdateGradInput_functor
{
__device__ void operator()(float* gradInput, const float* input, const float* gradOutput) const
{
*gradInput = *input < 0 ? - *gradOutput : *gradOutput;
}
};
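// Backward pass of y = |x|: dy/dx = sign(x), so gradInput = sign(input) * gradOutput;
// the ternary above treats input == 0 as having sign +1.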
void THNN_CudaAbs_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput)
{
THAssert(THCudaTensor_checkGPU(state, 3, input, gradOutput, gradInput));
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, absupdateGradInput_functor());
}
| 804f2f74ce990836dd935948032e19ab281820fa.cu | #include "THCUNN.h"
struct absupdateOutput_functor
{
__device__ void operator()(float* output, const float* input) const
{
*output = abs(*input);
}
};
void THNN_CudaAbs_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output)
{
THAssert(THCudaTensor_checkGPU(state, 2, input, output));
THCudaTensor_resizeAs(state, output, input);
THCudaTensor_pointwiseApply2(state, output, input, absupdateOutput_functor());
}
struct absupdateGradInput_functor
{
__device__ void operator()(float* gradInput, const float* input, const float* gradOutput) const
{
*gradInput = *input < 0 ? - *gradOutput : *gradOutput;
}
};
void THNN_CudaAbs_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput, THCudaTensor *gradInput)
{
THAssert(THCudaTensor_checkGPU(state, 3, input, gradOutput, gradInput));
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_pointwiseApply3(state, gradInput, input, gradOutput, absupdateGradInput_functor());
}
|
d641389dc02dbdc92311317fa60359134eec96ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Find BLANK and replace your own code.
* And submit report why do you replace the blank that way.
*/
/* 2015004693_YangSangheon */
#include<stdlib.h>
#include<iostream>
#include<fstream>
#include<vector>
#include<string>
#define TILE_WIDTH 32 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) {
// input : input_matrix address
// output : output buffer address
// input_size : width, height of input matrix
// filter_size : filter size of the maxpooling window
// all input, output matrices are vectorized
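// Small worked example (sizes assumed): input_size = 4, filter_size = 2 gives a 2x2
// output where output[0] is the max of the top-left 2x2 block of the input, output[1]
// the max of the top-right 2x2 block, and so on.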
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
// out of bound: threads beyond the pooled output must not read the input matrix
if (col >= input_size/filter_size || row >= input_size/filter_size) return;
float tmp = 0.0;
float Max = -999999.9;
for(int i = 0; i < filter_size; i++){
for(int j = 0; j < filter_size; j++){
tmp = input[(input_size*filter_size*row)+(filter_size*col)+(input_size*j)+i];
if(Max<tmp)
Max = tmp;
}
}
if(col < (input_size/filter_size) && row < (input_size/filter_size))
output[((input_size/filter_size)*row)+col] = Max;
//printf("thread_made\n");
}
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size){
// a, b, c : input matrix address
// alpha, beta : input constant
// output : output buffer address
// input_size : width, height of input matrix
// all input, output matrices are vectorized
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
//if(row>=input_size ||col>=input_size) { return; }
if(row >= (input_size/TILE_WIDTH+1)*TILE_WIDTH ||col >= (input_size/TILE_WIDTH+1)*TILE_WIDTH) {return;}
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
float result = 0;
// make sure you handle the case when the matrix sizes are not
// multiple of TILE_WIDTH!
// loop over the tiles of the input in phases
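// e.g. (sizes assumed) input_size = 100 with TILE_WIDTH = 32 runs
// input_size/TILE_WIDTH + 1 = 4 phases; entries of the final partial tile that fall
// outside the matrix end up contributing nothing to the dot product.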
int a_index;
int b_index;
for(int p = 0; p < input_size/TILE_WIDTH+1 ;p++){
// CHANGE
// You need to use __syncthreads() a few times
// to synchronize the threads in a thread block.
a_index = row*input_size + p*TILE_WIDTH +tx;
b_index = (ty + p*TILE_WIDTH)*input_size + col;
if(a_index < input_size * input_size )
s_a[ty][tx] = a[a_index];
else
s_a[ty][tx] = 0.0;
if(b_index < input_size*input_size )
s_b[ty][tx] = b[b_index];
else
s_b[ty][tx] = 0.0;
// s_a[ty][tx] = a[row*input_size + p*TILE_WIDTH+tx];
// s_b[ty][tx] = b[(ty+p*TILE_WIDTH)*input_size + col];
__syncthreads();
for(int i = 0; i<TILE_WIDTH; i++)
result += s_a[ty][i] * s_b[i][tx];
__syncthreads();
}
//__syncthreads();
// write out the result to output[row*input_size + col]
// CHANGE
if(row < input_size && col < input_size)
output[row*input_size + col] = (alpha * result) + (beta * c[row*input_size + col]);
//__syncthreads();
}
int main(int argc, char **argv) {
if(argc < 5) {
cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n";
return 1;
}
const int input_size = stoi(argv[1]);
const int filter_size = stoi(argv[2]); // used for maxpooling
const float alpha = stof(argv[3]);
const float beta = stof(argv[4]);
const int maxpool_output_size = input_size/filter_size;
// check input_size is a power of 2
if(input_size == 0 || (input_size & (input_size-1))){
cout << "input_size must be power of 2\n";
return 1;
}
if(filter_size == 0){
cout << "filter_size cannot be 0\n";
return 1;
}
float maxpool_input[input_size*input_size];
float a[input_size*input_size];
float b[input_size*input_size];
float c[input_size*input_size];
// read input matrices
ifstream input_in(MAXPOOL_INPUT_FILENAME);
ifstream a_in(A_FILENAME);
ifstream b_in(B_FILENAME);
ifstream c_in(C_FILENAME);
for (int i = 0; i < input_size*input_size; ++i) {
input_in >> maxpool_input[i];
a_in >> a[i];
b_in >> b[i];
c_in >> c[i];
}
// prints inputs for debugging.
cout<<"filter size : "<<filter_size;
cout<<"\n========== MAXPOOL_INPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<maxpool_input[i]<<" ";
}
cout<<"\nalpha : "<<alpha<<'\n';
cout<<"========== A ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<a[i]<<" ";
}
cout<<"\n========== B ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<b[i]<<" ";
}
cout<<"\nbeta : "<<beta<<'\n';
cout<<"========== C ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<c[i]<<" ";
}
cout<<'\n';
// set thread, block dimensions
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
const dim3 num_of_maxpool_blocks(maxpool_output_size/block_size.x+1, maxpool_output_size/block_size.y+1);
const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1);
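// The +1 makes the grid cover matrices whose size is not a multiple of TILE_WIDTH;
// e.g. (assumed) input_size = 100, TILE_WIDTH = 32 -> a 4x4 grid of 32x32-thread blocks.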
// memory allocation for the device
float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output;
hipMalloc(&dev_mem_a, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_b, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_c, sizeof(float) * input_size * input_size);
hipMalloc(&gemm_output, sizeof(float) * input_size * input_size);
hipMalloc(&dev_mem_input, sizeof(float) * input_size * input_size);
hipMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size);
// copy variable to device memory
hipMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
hipMemcpy(dev_mem_input, maxpool_input, sizeof(float) * input_size * input_size, hipMemcpyHostToDevice);
// launch CUDA kernels
// First launch gemm kernel
hipLaunchKernelGGL(( gemm), dim3(num_of_blocks), dim3(block_size), 0, 0, dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size);
hipDeviceSynchronize();
hipError_t error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr, "ERROR %s\n", hipGetErrorString(error));
return 1;
}
// Then run maxpooling
hipLaunchKernelGGL(( maxpool), dim3(num_of_maxpool_blocks), dim3(block_size), 0, 0, dev_mem_input, maxpool_output, input_size, filter_size);
hipDeviceSynchronize();
error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr, "ERROR %s\n", hipGetErrorString(error));
return 1;
}
// allocate output buf in main memory
float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size);
float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size);
// copy results from device to host
hipMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, hipMemcpyDeviceToHost);
hipMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, hipMemcpyDeviceToHost);
// prints the results
cout<<"\n========== GEMM OUTPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<gemm_output_buf[i]<<" ";
}
cout<<"\n========== MAXPOOL OUTPUT ==========\n";
for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) {
if(i%maxpool_output_size==0) cout<<"\n";
cout<<maxpool_output_buf[i]<<" ";
}
cout<<'\n';
hipFree(dev_mem_a);
hipFree(dev_mem_b);
hipFree(dev_mem_c);
hipFree(gemm_output);
hipFree(dev_mem_input);
hipFree(maxpool_output);
free(gemm_output_buf);
free(maxpool_output_buf);
return 0;
}
| d641389dc02dbdc92311317fa60359134eec96ef.cu | /*
* Find BLANK and replace your own code.
* And submit report why do you replace the blank that way.
*/
/* 2015004693_YangSangheon */
#include<stdlib.h>
#include<iostream>
#include<fstream>
#include<vector>
#include<string>
#define TILE_WIDTH 32 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) {
// input : input_matrix address
// output : output buffer address
// input_size : width, height of input matrix
// filter_size : filter size of the maxpooling window
// all input, output matrices are vectorized
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
// out of bound: threads beyond the pooled output must not read the input matrix
if (col >= input_size/filter_size || row >= input_size/filter_size) return;
float tmp = 0.0;
float Max = -999999.9;
for(int i = 0; i < filter_size; i++){
for(int j = 0; j < filter_size; j++){
tmp = input[(input_size*filter_size*row)+(filter_size*col)+(input_size*j)+i];
if(Max<tmp)
Max = tmp;
}
}
if(col < (input_size/filter_size) && row < (input_size/filter_size))
output[((input_size/filter_size)*row)+col] = Max;
//printf("thread_made\n");
}
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size){
// a, b, c : input matrix address
// alpha, beta : input constant
// output : output buffer address
// input_size : width, height of input matrix
// all input, output matrices are vectorized
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
//if(row>=input_size ||col>=input_size) { return; }
if(row >= (input_size/TILE_WIDTH+1)*TILE_WIDTH ||col >= (input_size/TILE_WIDTH+1)*TILE_WIDTH) {return;}
// allocate 2D tiles in __shared__ memory
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
float result = 0;
// make sure you handle the case when the matrix sizes are not
// multiple of TILE_WIDTH!
// loop over the tiles of the input in phases
int a_index;
int b_index;
for(int p = 0; p < input_size/TILE_WIDTH+1 ;p++){
// CHANGE
// You need to use __syncthreads() a few times
// to synchronize the threads in a thread block.
a_index = row*input_size + p*TILE_WIDTH +tx;
b_index = (ty + p*TILE_WIDTH)*input_size + col;
if(a_index < input_size * input_size )
s_a[ty][tx] = a[a_index];
else
s_a[ty][tx] = 0.0;
if(b_index < input_size*input_size )
s_b[ty][tx] = b[b_index];
else
s_b[ty][tx] = 0.0;
// s_a[ty][tx] = a[row*input_size + p*TILE_WIDTH+tx];
// s_b[ty][tx] = b[(ty+p*TILE_WIDTH)*input_size + col];
__syncthreads();
for(int i = 0; i<TILE_WIDTH; i++)
result += s_a[ty][i] * s_b[i][tx];
__syncthreads();
}
//__syncthreads();
// write out the result to output[row*input_size + col]
// CHANGE
if(row < input_size && col < input_size)
output[row*input_size + col] = (alpha * result) + (beta * c[row*input_size + col]);
//__syncthreads();
}
int main(int argc, char **argv) {
if(argc < 5) {
cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n" << "example : " << argv[0] << " 100 2 0.5 0.8\n";
return 1;
}
const int input_size = stoi(argv[1]);
const int filter_size = stoi(argv[2]); // used for maxpooling
const float alpha = stof(argv[3]);
const float beta = stof(argv[4]);
const int maxpool_output_size = input_size/filter_size;
// check input_size is a power of 2
if(input_size == 0 || (input_size & (input_size-1))){
cout << "input_size must be power of 2\n";
return 1;
}
if(filter_size == 0){
cout << "filter_size cannot be 0\n";
return 1;
}
float maxpool_input[input_size*input_size];
float a[input_size*input_size];
float b[input_size*input_size];
float c[input_size*input_size];
// read input matrices
ifstream input_in(MAXPOOL_INPUT_FILENAME);
ifstream a_in(A_FILENAME);
ifstream b_in(B_FILENAME);
ifstream c_in(C_FILENAME);
for (int i = 0; i < input_size*input_size; ++i) {
input_in >> maxpool_input[i];
a_in >> a[i];
b_in >> b[i];
c_in >> c[i];
}
// prints inputs for debugging.
cout<<"filter size : "<<filter_size;
cout<<"\n========== MAXPOOL_INPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<maxpool_input[i]<<" ";
}
cout<<"\nalpha : "<<alpha<<'\n';
cout<<"========== A ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<a[i]<<" ";
}
cout<<"\n========== B ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<b[i]<<" ";
}
cout<<"\nbeta : "<<beta<<'\n';
cout<<"========== C ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<c[i]<<" ";
}
cout<<'\n';
// set thread, block dimensions
const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
const dim3 num_of_maxpool_blocks(maxpool_output_size/block_size.x+1, maxpool_output_size/block_size.y+1);
const dim3 num_of_blocks(input_size/block_size.x+1, input_size/block_size.y+1);
// memory allocation for the device
float *dev_mem_a, *dev_mem_b, *dev_mem_c, *dev_mem_input, *gemm_output, *maxpool_output;
cudaMalloc(&dev_mem_a, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_b, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_c, sizeof(float) * input_size * input_size);
cudaMalloc(&gemm_output, sizeof(float) * input_size * input_size);
cudaMalloc(&dev_mem_input, sizeof(float) * input_size * input_size);
cudaMalloc(&maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size);
// copy variable to device memory
cudaMemcpy(dev_mem_a, a, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_b, b, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_c, c, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
cudaMemcpy(dev_mem_input, maxpool_input, sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
// launch CUDA kernels
// First launch gemm kernel
gemm<<<num_of_blocks, block_size>>>(dev_mem_a, dev_mem_b, dev_mem_c, alpha, beta, gemm_output, input_size);
cudaDeviceSynchronize();
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error));
return 1;
}
// Then run maxpooling
maxpool<<<num_of_maxpool_blocks, block_size>>>(dev_mem_input, maxpool_output, input_size, filter_size);
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error));
return 1;
}
// allocate output buf in main memory
float *gemm_output_buf = (float*) malloc (sizeof(float)*input_size*input_size);
float *maxpool_output_buf = (float*) malloc (sizeof(float)*maxpool_output_size*maxpool_output_size);
// copy results from device to host
cudaMemcpy(gemm_output_buf, gemm_output, sizeof(float)*input_size*input_size, cudaMemcpyDeviceToHost);
cudaMemcpy(maxpool_output_buf, maxpool_output, sizeof(float)*maxpool_output_size*maxpool_output_size, cudaMemcpyDeviceToHost);
// prints the results
cout<<"\n========== GEMM OUTPUT ==========\n";
for (int i = 0; i < input_size * input_size; ++i) {
if(i%input_size==0) cout<<"\n";
cout<<gemm_output_buf[i]<<" ";
}
cout<<"\n========== MAXPOOL OUTPUT ==========\n";
for (int i = 0; i < maxpool_output_size * maxpool_output_size; ++i) {
if(i%maxpool_output_size==0) cout<<"\n";
cout<<maxpool_output_buf[i]<<" ";
}
cout<<'\n';
cudaFree(dev_mem_a);
cudaFree(dev_mem_b);
cudaFree(dev_mem_c);
cudaFree(gemm_output);
cudaFree(dev_mem_input);
cudaFree(maxpool_output);
free(gemm_output_buf);
free(maxpool_output_buf);
return 0;
}
|
066b9460c89e1aa4967ba6439883c9ebd171f299.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <rocblas.h>
#include <time.h>
#define n 1024
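// Note: in the kernel below b is indexed by rows (b[my_y*n + i]), so each thread
// accumulates the dot product of a row of a with a row of b, i.e. c = a * transpose(b)
// rather than a * b. With the constant matrices used in main (a all 1s, b all 2s) both
// products coincide: every element of c equals 2*n.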
__global__ void matmul(int *a, int *b, int *c){
int my_x = blockIdx.y * blockDim.y + threadIdx.y;
int my_y = blockIdx.x * blockDim.x + threadIdx.x;
int local_c = 0;
for (int i = 0; i < n; i++) {
local_c += a[my_x*n + i] * b[my_y*n + i];
}
c[my_x*n + my_y] = local_c;
}
int main(){
//Allocate memory in CPU for a and b matrices
int *a = (int*)malloc(sizeof(int) * n * n);
int *b = (int*)malloc(sizeof(int) * n * n);
int *c = (int*)malloc(sizeof(int) * n * n);
//Fill in the elements of a and b as specified
for(int i = 0; i < (n*n); i++){
a[i]=1;
b[i]=2;
}
	int *gpu_a, *gpu_b, *gpu_c; //allocate gpu space to a, b and c
hipMalloc((void**)&gpu_a, sizeof(int) * n * n);
hipMalloc((void**)&gpu_b, sizeof(int) * n * n);
hipMalloc((void**)&gpu_c, sizeof(int) * n * n);
struct timespec start, stop;
double time;
hipMemcpy(gpu_a, a, sizeof(int)*n*n, hipMemcpyHostToDevice);
hipMemcpy(gpu_b, b, sizeof(int)*n*n, hipMemcpyHostToDevice);
dim3 dimGrid(64, 64); //Grid configuration is 64x64
dim3 dimBlock(16, 16); //Thread block configuration is 16x16
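	// 64 blocks x 16 threads = 1024 = n in each dimension, so the launch covers the
	// matrix exactly and the kernel needs no bounds check.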
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" );}
hipLaunchKernelGGL(( matmul), dim3(dimGrid), dim3(dimBlock), 0, 0, gpu_a, gpu_b, gpu_c);
hipMemcpy(c, gpu_c, sizeof(int)*n*n, hipMemcpyDeviceToHost);
if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" );}
time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9;
printf("Execution time is %f ns\n", time*1e9);
printf("c[451][451]= %d\n", c[451*n + 451]);
free(a);
free(b);
free(c);
hipFree(gpu_a);
hipFree(gpu_b);
hipFree(gpu_c);
return 0;
} | 066b9460c89e1aa4967ba6439883c9ebd171f299.cu | #include <stdlib.h>
#include <stdio.h>
#include <cublas.h>
#include <time.h>
#define n 1024
__global__ void matmul(int *a, int *b, int *c){
int my_x = blockIdx.y * blockDim.y + threadIdx.y;
int my_y = blockIdx.x * blockDim.x + threadIdx.x;
int local_c = 0;
for (int i = 0; i < n; i++) {
local_c += a[my_x*n + i] * b[my_y*n + i];
}
c[my_x*n + my_y] = local_c;
}
int main(){
//Allocate memory in CPU for a and b matrices
int *a = (int*)malloc(sizeof(int) * n * n);
int *b = (int*)malloc(sizeof(int) * n * n);
int *c = (int*)malloc(sizeof(int) * n * n);
//Fill in the elements of a and b as specified
for(int i = 0; i < (n*n); i++){
a[i]=1;
b[i]=2;
}
	int *gpu_a, *gpu_b, *gpu_c; //allocate gpu space to a, b and c
cudaMalloc((void**)&gpu_a, sizeof(int) * n * n);
cudaMalloc((void**)&gpu_b, sizeof(int) * n * n);
cudaMalloc((void**)&gpu_c, sizeof(int) * n * n);
struct timespec start, stop;
double time;
cudaMemcpy(gpu_a, a, sizeof(int)*n*n, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_b, b, sizeof(int)*n*n, cudaMemcpyHostToDevice);
dim3 dimGrid(64, 64); //Grid configuration is 64x64
dim3 dimBlock(16, 16); //Thread block configuration is 16x16
if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" );}
matmul<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c);
cudaMemcpy(c, gpu_c, sizeof(int)*n*n, cudaMemcpyDeviceToHost);
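	// The blocking device-to-host copy above also waits for the kernel to finish, so the
	// stop timestamp below measures kernel execution plus the result transfer.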
if( clock_gettime( CLOCK_REALTIME, &stop) == -1 ) { perror( "clock gettime" );}
time = (stop.tv_sec - start.tv_sec)+ (double)(stop.tv_nsec - start.tv_nsec)/1e9;
printf("Execution time is %f ns\n", time*1e9);
printf("c[451][451]= %d\n", c[451*n + 451]);
free(a);
free(b);
free(c);
cudaFree(gpu_a);
cudaFree(gpu_b);
cudaFree(gpu_c);
return 0;
} |
0c18fa1161b43c11cddd4f30b65596c9fba87ba8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
//#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>
namespace caffe {
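// bicubicCoeff below implements the Keys cubic convolution kernel with a = -0.5
// (Catmull-Rom): one polynomial piece for |x| <= 1, another for 1 < |x| < 2, zero outside.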
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f;
else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
else return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
if (-0.5 <= x && x<0.5) return 1.0;
return 0;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
if (-1<=x && x<0) return x+1;
if (0<=x && x<=1) return 1-x;
return 0;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
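    // Each thread produces one output element: index decodes to (channel c, output pixel
    // (x_out, y_out)), which is mapped back to a source position (x_in, y_in) via the
    // scale factors fx, fy. The loop below gathers a weighted neighbourhood; when
    // antialiasing a downsample, the weights are scaled by ax/ay and the radius rx/ry
    // grows with the downsampling factor so the whole source footprint is integrated.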
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
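    // fx, fy are the input/output size ratios: values > 1 mean the layer is downsampling,
    // values < 1 mean it is upsampling.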
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
hipLaunchKernelGGL(( NearestNeighborKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
hipLaunchKernelGGL(( InterpolationKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(topsize)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported downsampling type";
}
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for(int i=0; i<propagate_down.size(); i++)
if(propagate_down[i]) LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
| 0c18fa1161b43c11cddd4f30b65596c9fba87ba8.cu | // Copyright 2014 BVLC and contributors.
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/resample_layer.hpp"
#include "caffe/util/math_functions.hpp"
//#include <opencv2/opencv.hpp>
//#include <opencv2/gpu/gpu.hpp>
namespace caffe {
static __device__ __forceinline__ float bicubicCoeff(float x_)
{
float x = fabsf(x_);
if (x <= 1.0f) return x * x * (1.5f * x - 2.5f) + 1.0f;
else if (x < 2.0f) return x * (x * (-0.5f * x + 2.5f) - 4.0f) + 2.0f;
else return 0.0f;
}
static __device__ __forceinline__ float boxCoeff(float x)
{
if (-0.5 <= x && x<0.5) return 1.0;
return 0;
}
static __device__ __forceinline__ float triangleCoeff(float x)
{
if (-1<=x && x<0) return x+1;
if (0<=x && x<=1) return 1-x;
return 0;
}
#define FILTER_BICUBIC 0
#define FILTER_BOX 1
#define FILTER_TRIANGLE 2
template <typename Dtype>
__global__ void InterpolationKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height,
int filter_type,
int kernel_width,
const bool antialias)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
Dtype sum=0;
Dtype wsum=0;
float ax = 1.0f / (antialias ? fx : 1.0f);
float ay = 1.0f / (antialias ? fy : 1.0f);
int rx = (fx < 1.0f) ? 2 : ceil(float(kernel_width)/ax);
int ry = (fy < 1.0f) ? 2 : ceil(float(kernel_width)/ay);
for(int y=y_in_round-ry; y<=y_in_round+ry; y++)
for(int x=x_in_round-rx; x<=x_in_round+rx; x++)
{
if(y<0 || x<0) continue;
if(y>=in_height || x>=in_width) continue;
float dx = x_in - x;
float dy = y_in - y;
float w;
if(filter_type == FILTER_BICUBIC) w = ax*bicubicCoeff(ax*dx) * ay*bicubicCoeff(ay*dy);
else if(filter_type == FILTER_BOX) w = ax*boxCoeff(ax*dx) * ay*boxCoeff(ay*dy);
else w = ax*triangleCoeff(ax*dx) * ay*triangleCoeff(ay*dy);
sum += w * in_ptr[c*in_channelsize + y*in_width+x];
wsum += w;
}
out_ptr[index] = (!wsum) ? 0 : (sum / wsum);
}
}
template <typename Dtype>
__global__ void NearestNeighborKernel(
const int nthreads,
const int in_channelsize,
const int out_channelsize,
const Dtype* in_ptr,
const int in_width,
const int in_height,
const float fx,
const float fy,
Dtype* out_ptr,
const int out_width,
const int out_height)
{
CUDA_KERNEL_LOOP(index, nthreads)
{
int c = index / out_channelsize;
int x_out = (index % out_channelsize) % out_width;
int y_out = (index % out_channelsize) / out_width;
float x_in = x_out * fx + fy / 2.0f - 0.5f;
float y_in = y_out * fy + fx / 2.0f - 0.5f;
int x_in_round = round(x_in);
int y_in_round = round(y_in);
out_ptr[index] = in_ptr[c*in_channelsize + y_in_round*in_width+x_in_round];
}
}
template <typename Dtype>
void ResampleLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data(); // dest
int topwidth = top[0]->width();
int topheight = top[0]->height();
int topchannels = top[0]->channels();
int topcount = top[0]->count();
Dtype* bottom_data = bottom[0]->mutable_gpu_data(); // source
int bottomnum = (bottom)[0]->num();
int bottomchannels = (bottom)[0]->channels();
int bottomwidth = (bottom)[0]->width();
int bottomheight = (bottom)[0]->height();
int bottomcount = (bottom)[0]->count();
CHECK_EQ(topchannels, bottomchannels) << "ResampleLayer top channel count must match bottom channel count";
float fx = float(bottomwidth)/float(topwidth);
float fy = float(bottomheight)/float(topheight);
//int botsize = bottomwidth*bottomheight*bottomchannels*bottomnum;
int topsize = topwidth*topheight*topchannels*bottomnum;
int topchannelsize = topwidth*topheight;
int botchannelsize = bottomwidth*bottomheight;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_NEAREST)
{
NearestNeighborKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight
);
CUDA_POST_KERNEL_CHECK;
}
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC || this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
{
int filter_type;
if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_CUBIC)
filter_type = FILTER_BICUBIC;
else if(this->layer_param().resample_param().type() == ResampleParameter_ResampleType_LINEAR)
filter_type = FILTER_TRIANGLE;
bool isDownsample = (fx > 1) || (fy > 1);
bool antialias = isDownsample && this->layer_param_.resample_param().antialias();
int kernel_width;
if(filter_type == FILTER_BICUBIC) kernel_width = 4;
else if(filter_type == FILTER_BOX) kernel_width = 1;
else kernel_width = 2;
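    // kernel_width is the nominal filter support in source pixels: 4 taps for bicubic,
    // 2 for the triangle (linear) filter, 1 for the box filter.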
InterpolationKernel<Dtype><<<CAFFE_GET_BLOCKS(topsize), CAFFE_CUDA_NUM_THREADS>>>(
topsize,
botchannelsize,
topchannelsize,
(Dtype*)bottom_data,
bottomwidth,
bottomheight,
fx,
fy,
(Dtype*)top_data,
topwidth,
topheight,
filter_type,
kernel_width,
antialias);
CUDA_POST_KERNEL_CHECK;
}
else
LOG(FATAL) << "unsupported downsampling type";
}
template <typename Dtype>
void ResampleLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
for(int i=0; i<propagate_down.size(); i++)
if(propagate_down[i]) LOG(FATAL) << "ResampleLayer cannot do backward.";
}
INSTANTIATE_LAYER_GPU_FUNCS(ResampleLayer);
} // namespace caffe
// cv::gpu::GpuMat input(bottomheight, bottomwidth, CV_32FC3);
// float* input_ptr=(float*)input.data;
// int input_stride=input.step/4;
// BlobToOpenCV<Dtype><<<CAFFE_GET_BLOCKS(bottomwidth*bottomheight), CAFFE_CUDA_NUM_THREADS>>>(
// bottomwidth*bottomheight,
// (Dtype*)bottom_data,
// bottomwidth,
// bottomheight,
// input_stride,
// (Dtype*)input_ptr);
// cv::gpu::GpuMat output;
// cv::Size output_size;
// output_size.width = topwidth;
// output_size.height = topheight;
// cv::gpu::resize(input,output,output_size,0,0,interpolation,cv::gpu::Stream::Null(),false);
// float* output_ptr=(float*)output.data;
// int output_stride=output.step/4;
// OpenCVToBlob<Dtype><<<CAFFE_GET_BLOCKS(topwidth*topheight), CAFFE_CUDA_NUM_THREADS>>>(
// topwidth*topheight,
// (Dtype*)output_ptr,
// topwidth,
// topheight,
// output_stride,
// (Dtype*)top_data);
// top_data += topsize;
// bottom_data += botsize;
//template <typename Dtype>
//__global__ void BlobToOpenCV(
// const int nthreads,
// const Dtype* blob_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* mat_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// mat_ptr[y*stride+x*3+c]=blob_ptr[((c*height)+y)*width+x];
// }
//}
//template <typename Dtype>
//__global__ void OpenCVToBlob(
// const int nthreads,
// const Dtype* mat_ptr,
// const int width,
// const int height,
// const int stride,
// Dtype* blob_ptr)
//{
// CUDA_KERNEL_LOOP(index, nthreads)
// {
// int x=index % width;
// int y=index / width;
// for(int c=0; c<3; c++)
// blob_ptr[((c*height)+y)*width+x]=mat_ptr[y*stride+x*3+c];
// }
//}
|
a2b75c761745598fa22f0b8c5fe5f3bb86729d8a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This sample is an implementation of a simple line-of-sight algorithm:
// Given a height map and a ray originating at some observation point,
// it computes all the points along the ray that are visible from the
// observation point.
// It is based on the description made in "Guy E. Blelloch. Vector models
// for data-parallel computing. MIT Press, 1990" and uses open source CUDA
// Thrust Library
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_math.h>
// includes, library
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, types
// Boolean
typedef unsigned char Bool;
enum
{
False = 0,
True = 1
};
// 2D height field
struct HeightField
{
int width;
float *height;
};
// Ray
struct Ray
{
float3 origin;
float2 dir;
int length;
float oneOverLength;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
__global__ void computeAngles_kernel(const Ray, float *, hipTextureObject_t);
__global__ void computeVisibilities_kernel(const float *, const float *, int, Bool *);
void lineOfSight_gold(const HeightField, const Ray, Bool *);
__device__ __host__ float2 getLocation(const Ray, int);
__device__ __host__ float getAngle(const Ray, float2, float);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
int res = runTest(argc, argv);
if (res != 1)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a line-of-sight test for CUDA
////////////////////////////////////////////////////////////////////////////////
int runTest(int argc, char **argv)
{
////////////////////////////////////////////////////////////////////////////
// Device initialization
printf("[%s] - Starting...\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
////////////////////////////////////////////////////////////////////////////
// Timer
// Create
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Number of iterations to get accurate timing
uint numIterations = 100;
////////////////////////////////////////////////////////////////////////////
// Height field
HeightField heightField;
// Allocate in host memory
int2 dim = make_int2(10000, 100);
heightField.width = dim.x;
thrust::host_vector<float> height(dim.x * dim.y);
heightField.height = (float *)&height[0];
//
// Fill in with an arbitrary sine surface
for (int x = 0; x < dim.x; ++x)
for (int y = 0; y < dim.y; ++y)
{
float amp = 0.1f * (x + y);
float period = 2.0f + amp;
*(heightField.height + dim.x * y + x) =
amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f);
}
// Allocate CUDA array in device memory
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipArray *heightFieldArray;
checkCudaErrors(hipMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y));
// Initialize device memory
checkCudaErrors(hipMemcpy2DToArray(heightFieldArray, 0, 0, heightField.height,
dim.x * sizeof(float), dim.x * sizeof(float),
dim.y, hipMemcpyHostToDevice));
hipTextureObject_t heightFieldTex;
hipResourceDesc texRes;
memset(&texRes,0,sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = heightFieldArray;
hipTextureDesc texDescr;
memset(&texDescr,0,sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModePoint;
texDescr.addressMode[0] = hipAddressModeClamp;
texDescr.addressMode[1] = hipAddressModeClamp;
texDescr.readMode = hipReadModeElementType;
checkCudaErrors(hipCreateTextureObject(&heightFieldTex, &texRes, &texDescr, NULL));
////////////////////////////////////////////////////////////////////////////
// Ray (starts at origin and traverses the height field diagonally)
Ray ray;
ray.origin = make_float3(0, 0, 2.0f);
int2 dir = make_int2(dim.x - 1, dim.y - 1);
ray.dir = make_float2((float)dir.x, (float)dir.y);
ray.length = max(abs(dir.x), abs(dir.y));
ray.oneOverLength = 1.0f / ray.length;
////////////////////////////////////////////////////////////////////////////
// View angles
// Allocate view angles for each point along the ray
thrust::device_vector<float> d_angles(ray.length);
// Allocate result of max-scan operation on the array of view angles
thrust::device_vector<float> d_scannedAngles(ray.length);
////////////////////////////////////////////////////////////////////////////
// Visibility results
// Allocate visibility results for each point along the ray
thrust::device_vector<Bool> d_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilitiesRef(ray.length);
////////////////////////////////////////////////////////////////////////////
// Reference solution
lineOfSight_gold(heightField, ray, (Bool *)&h_visibilitiesRef[0]);
////////////////////////////////////////////////////////////////////////////
// Device solution
// Execution configuration
dim3 block(256);
dim3 grid((uint)ceil(ray.length / (double)block.x));
// Compute device solution
printf("Line of sight\n");
sdkStartTimer(&timer);
for (uint i = 0; i < numIterations; ++i)
{
// Compute view angle for each point along the ray
hipLaunchKernelGGL(( computeAngles_kernel), dim3(grid), dim3(block), 0, 0, ray, thrust::raw_pointer_cast(&d_angles[0]), heightFieldTex);
getLastCudaError("Kernel execution failed");
// Perform a max-scan operation on the array of view angles
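        // A sample is visible when no sample closer to the observer subtends a larger
        // view angle; the inclusive max-scan computes the running maximum of the angles
        // so each point can test this independently in parallel.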
thrust::inclusive_scan(d_angles.begin(), d_angles.end(), d_scannedAngles.begin(), thrust::maximum<float>());
getLastCudaError("Kernel execution failed");
// Compute visibility results based on the array of view angles
// and its scanned version
hipLaunchKernelGGL(( computeVisibilities_kernel), dim3(grid), dim3(block), 0, 0, thrust::raw_pointer_cast(&d_angles[0]),
thrust::raw_pointer_cast(&d_scannedAngles[0]),
ray.length,
thrust::raw_pointer_cast(&d_visibilities[0]));
getLastCudaError("Kernel execution failed");
}
hipDeviceSynchronize();
sdkStopTimer(&timer);
getLastCudaError("Kernel execution failed");
// Copy visibility results back to the host
thrust::copy(d_visibilities.begin(), d_visibilities.end(), h_visibilities.begin());
// Compare device visibility results against reference results
bool res = compareData(thrust::raw_pointer_cast(&h_visibilitiesRef[0]),
thrust::raw_pointer_cast(&h_visibilities[0]), ray.length, 0.0f, 0.0f);
printf("Average time: %f ms\n\n", sdkGetTimerValue(&timer) / numIterations);
sdkResetTimer(&timer);
// Cleanup memory
checkCudaErrors(hipFreeArray(heightFieldArray));
return res;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute view angles for each point along the ray
//! @param ray ray
//! @param angles view angles
////////////////////////////////////////////////////////////////////////////////
__global__ void computeAngles_kernel(const Ray ray, float *angles, hipTextureObject_t HeightFieldTex)
{
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < ray.length)
{
float2 location = getLocation(ray, i + 1);
float height = tex2D<float>(HeightFieldTex, location.x, location.y);
float angle = getAngle(ray, location, height);
angles[i] = angle;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute visibility for each point along the ray
//! @param angles view angles
//! @param scannedAngles max-scanned view angles
//! @param numAngles number of view angles
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
__global__ void computeVisibilities_kernel(const float *angles,
const float *scannedAngles,
int numAngles,
Bool *visibilities)
{
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numAngles)
{
visibilities[i] = scannedAngles[i] <= angles[i];
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set
//! @param heightField height field
//! @param ray ray
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
void lineOfSight_gold(const HeightField heightField, const Ray ray,
Bool *visibilities)
{
float angleMax = asinf(-1.0f);
for (int i = 0; i < ray.length; ++i)
{
float2 location = getLocation(ray, i + 1);
float height = *(heightField.height
+ heightField.width * (int)floorf(location.y)
+ (int)floorf(location.x));
float angle = getAngle(ray, location, height);
if (angle > angleMax)
{
angleMax = angle;
visibilities[i] = True;
}
else
{
visibilities[i] = False;
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the 2D coordinates of the point located at i steps from the origin
//! of the ray
//! @param ray ray
//! @param i integer offset along the ray
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float2 getLocation(const Ray ray, int i)
{
float step = i * ray.oneOverLength;
return make_float2(ray.origin.x, ray.origin.y) + ray.dir * step;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the angle of view between a 3D point and the origin of the ray
//! @param ray ray
//! @param location 2D coordinates of the input point
//! @param height height of the input point
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float getAngle(const Ray ray, float2 location, float height)
{
float2 dir = location - make_float2(ray.origin.x, ray.origin.y);
return atanf((height - ray.origin.z) / length(dir));
}
| a2b75c761745598fa22f0b8c5fe5f3bb86729d8a.cu | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// This sample is an implementation of a simple line-of-sight algorithm:
// Given a height map and a ray originating at some observation point,
// it computes all the points along the ray that are visible from the
// observation point.
// It is based on the description made in "Guy E. Blelloch. Vector models
// for data-parallel computing. MIT Press, 1990" and uses open source CUDA
// Thrust Library
#ifdef _WIN32
# define NOMINMAX
#endif
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
#include <helper_functions.h>
#include <helper_cuda.h>
#include <helper_math.h>
// includes, library
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
////////////////////////////////////////////////////////////////////////////////
// declaration, types
// Boolean
typedef unsigned char Bool;
enum
{
False = 0,
True = 1
};
// 2D height field
struct HeightField
{
int width;
float *height;
};
// Ray
struct Ray
{
float3 origin;
float2 dir;
int length;
float oneOverLength;
};
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
int runTest(int argc, char **argv);
__global__ void computeAngles_kernel(const Ray, float *, cudaTextureObject_t);
__global__ void computeVisibilities_kernel(const float *, const float *, int, Bool *);
void lineOfSight_gold(const HeightField, const Ray, Bool *);
__device__ __host__ float2 getLocation(const Ray, int);
__device__ __host__ float getAngle(const Ray, float2, float);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main(int argc, char **argv)
{
int res = runTest(argc, argv);
if (res != 1)
{
printf("Test failed!\n");
exit(EXIT_FAILURE);
}
printf("Test passed\n");
exit(EXIT_SUCCESS);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a line-of-sight test for CUDA
////////////////////////////////////////////////////////////////////////////////
int runTest(int argc, char **argv)
{
////////////////////////////////////////////////////////////////////////////
// Device initialization
printf("[%s] - Starting...\n", argv[0]);
// use command-line specified CUDA device, otherwise use device with highest Gflops/s
findCudaDevice(argc, (const char **)argv);
////////////////////////////////////////////////////////////////////////////
// Timer
// Create
StopWatchInterface *timer;
sdkCreateTimer(&timer);
// Number of iterations to get accurate timing
uint numIterations = 100;
////////////////////////////////////////////////////////////////////////////
// Height field
HeightField heightField;
// Allocate in host memory
int2 dim = make_int2(10000, 100);
heightField.width = dim.x;
thrust::host_vector<float> height(dim.x * dim.y);
heightField.height = (float *)&height[0];
//
// Fill in with an arbitrary sine surface
for (int x = 0; x < dim.x; ++x)
for (int y = 0; y < dim.y; ++y)
{
float amp = 0.1f * (x + y);
float period = 2.0f + amp;
*(heightField.height + dim.x * y + x) =
amp * (sinf(sqrtf((float)(x * x + y * y)) * 2.0f * 3.1416f / period) + 1.0f);
}
// Allocate CUDA array in device memory
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaArray *heightFieldArray;
checkCudaErrors(cudaMallocArray(&heightFieldArray, &channelDesc, dim.x, dim.y));
// Initialize device memory
checkCudaErrors(cudaMemcpy2DToArray(heightFieldArray, 0, 0, heightField.height,
dim.x * sizeof(float), dim.x * sizeof(float),
dim.y, cudaMemcpyHostToDevice));
cudaTextureObject_t heightFieldTex;
cudaResourceDesc texRes;
memset(&texRes,0,sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = heightFieldArray;
cudaTextureDesc texDescr;
memset(&texDescr,0,sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModePoint;
texDescr.addressMode[0] = cudaAddressModeClamp;
texDescr.addressMode[1] = cudaAddressModeClamp;
texDescr.readMode = cudaReadModeElementType;
checkCudaErrors(cudaCreateTextureObject(&heightFieldTex, &texRes, &texDescr, NULL));
////////////////////////////////////////////////////////////////////////////
// Ray (starts at origin and traverses the height field diagonally)
Ray ray;
ray.origin = make_float3(0, 0, 2.0f);
int2 dir = make_int2(dim.x - 1, dim.y - 1);
ray.dir = make_float2((float)dir.x, (float)dir.y);
ray.length = max(abs(dir.x), abs(dir.y));
ray.oneOverLength = 1.0f / ray.length;
////////////////////////////////////////////////////////////////////////////
// View angles
// Allocate view angles for each point along the ray
thrust::device_vector<float> d_angles(ray.length);
// Allocate result of max-scan operation on the array of view angles
thrust::device_vector<float> d_scannedAngles(ray.length);
////////////////////////////////////////////////////////////////////////////
// Visibility results
// Allocate visibility results for each point along the ray
thrust::device_vector<Bool> d_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilities(ray.length);
thrust::host_vector<Bool> h_visibilitiesRef(ray.length);
////////////////////////////////////////////////////////////////////////////
// Reference solution
lineOfSight_gold(heightField, ray, (Bool *)&h_visibilitiesRef[0]);
////////////////////////////////////////////////////////////////////////////
// Device solution
// Execution configuration
dim3 block(256);
dim3 grid((uint)ceil(ray.length / (double)block.x));
// Compute device solution
printf("Line of sight\n");
sdkStartTimer(&timer);
for (uint i = 0; i < numIterations; ++i)
{
// Compute view angle for each point along the ray
computeAngles_kernel<<<grid, block>>>(ray, thrust::raw_pointer_cast(&d_angles[0]), heightFieldTex);
getLastCudaError("Kernel execution failed");
// Perform a max-scan operation on the array of view angles
thrust::inclusive_scan(d_angles.begin(), d_angles.end(), d_scannedAngles.begin(), thrust::maximum<float>());
getLastCudaError("Kernel execution failed");
// Compute visibility results based on the array of view angles
// and its scanned version
computeVisibilities_kernel<<<grid, block>>>(thrust::raw_pointer_cast(&d_angles[0]),
thrust::raw_pointer_cast(&d_scannedAngles[0]),
ray.length,
thrust::raw_pointer_cast(&d_visibilities[0]));
getLastCudaError("Kernel execution failed");
}
cudaDeviceSynchronize();
sdkStopTimer(&timer);
getLastCudaError("Kernel execution failed");
// Copy visibility results back to the host
thrust::copy(d_visibilities.begin(), d_visibilities.end(), h_visibilities.begin());
// Compare device visibility results against reference results
bool res = compareData(thrust::raw_pointer_cast(&h_visibilitiesRef[0]),
thrust::raw_pointer_cast(&h_visibilities[0]), ray.length, 0.0f, 0.0f);
printf("Average time: %f ms\n\n", sdkGetTimerValue(&timer) / numIterations);
sdkResetTimer(&timer);
// Cleanup memory
checkCudaErrors(cudaFreeArray(heightFieldArray));
return res;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute view angles for each point along the ray
//! @param ray ray
//! @param angles view angles
////////////////////////////////////////////////////////////////////////////////
__global__ void computeAngles_kernel(const Ray ray, float *angles, cudaTextureObject_t HeightFieldTex)
{
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < ray.length)
{
float2 location = getLocation(ray, i + 1);
float height = tex2D<float>(HeightFieldTex, location.x, location.y);
float angle = getAngle(ray, location, height);
angles[i] = angle;
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute visibility for each point along the ray
//! @param angles view angles
//! @param scannedAngles max-scanned view angles
//! @param numAngles number of view angles
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
__global__ void computeVisibilities_kernel(const float *angles,
const float *scannedAngles,
int numAngles,
Bool *visibilities)
{
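    // scannedAngles holds the inclusive running maximum of angles, so scannedAngles[i] >=
    // angles[i] always; the <= test is therefore an equality test that succeeds exactly
    // when this sample's angle is the largest seen so far along the ray, i.e. nothing
    // closer to the observer occludes it.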
uint i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < numAngles)
{
visibilities[i] = scannedAngles[i] <= angles[i];
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set
//! @param heightField height field
//! @param ray ray
//! @param visibilities boolean array indicating the visibility of each point
//! along the ray
////////////////////////////////////////////////////////////////////////////////
void lineOfSight_gold(const HeightField heightField, const Ray ray,
Bool *visibilities)
{
float angleMax = asinf(-1.0f);
for (int i = 0; i < ray.length; ++i)
{
float2 location = getLocation(ray, i + 1);
float height = *(heightField.height
+ heightField.width * (int)floorf(location.y)
+ (int)floorf(location.x));
float angle = getAngle(ray, location, height);
if (angle > angleMax)
{
angleMax = angle;
visibilities[i] = True;
}
else
{
visibilities[i] = False;
}
}
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the 2D coordinates of the point located at i steps from the origin
//! of the ray
//! @param ray ray
//! @param i integer offset along the ray
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float2 getLocation(const Ray ray, int i)
{
float step = i * ray.oneOverLength;
return make_float2(ray.origin.x, ray.origin.y) + ray.dir * step;
}
////////////////////////////////////////////////////////////////////////////////
//! Compute the angle of view between a 3D point and the origin of the ray
//! @param ray ray
//! @param location 2D coordinates of the input point
//! @param height height of the input point
////////////////////////////////////////////////////////////////////////////////
__device__ __host__ float getAngle(const Ray ray, float2 location, float height)
{
float2 dir = location - make_float2(ray.origin.x, ray.origin.y);
return atanf((height - ray.origin.z) / length(dir));
}
|
7fd3815b838f312a423fcedc0a4da447f7b3c7cd.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <cassert>
//! Get the block id
__device__ int block_idx(int grid_dim) {
int block_id = blockIdx.x + (grid_dim == 2 ? 1 : 0) * blockIdx.y * gridDim.x +
(grid_dim == 3 ? 1 : 0) * blockIdx.z * gridDim.x * gridDim.y;
return block_id;
}
//! Get the global thread id
__device__ int thread_idx(int grid_dim, int block_dim) {
// thread id inside a block
unsigned long int threadIdInBlock =
threadIdx.x + (block_dim == 2 ? 1 : 0) * threadIdx.y * blockDim.x +
(block_dim == 3 ? 1 : 0) * threadIdx.z * blockDim.x * blockDim.z;
// block id
unsigned long int block_id = block_idx(grid_dim);
// block size
unsigned long int threadsPerblock = blockDim.x *
(block_dim == 2 ? blockDim.y : 1) *
(block_dim == 3 ? blockDim.z : 1);
unsigned long int thread_id = block_id * threadsPerblock + threadIdInBlock;
return thread_id;
}
//! Get the transposed matrix
__global__ void gpu_array_swap(int* ptr_gpu, int nbrows, int nbcols,
int grid_dim, int block_dim) {
int thread_id = thread_idx(grid_dim, block_dim);
// check if the array is correct.
printf("%d %d\n", ptr_gpu[thread_id], thread_id);
// pass the matrix to shared memory
extern __shared__ int sdata[];
sdata[thread_id] = ptr_gpu[thread_id];
__syncthreads();
int row = floorf((thread_id + 1) / nbcols);
int col = thread_id + 1 - row * nbcols;
int swap_thread_id = col * nbcols + row - 1;
ptr_gpu[thread_id] = sdata[swap_thread_id];
printf("%d %d\n", ptr_gpu[thread_id], thread_id);
}
void print_array(int** array, int nbrows, int nbcols) {
for (int i = 0; i < nbrows; ++i) {
for (int j = 0; j < nbcols; ++j) {
std::cout << array[i][j] << " ";
if (j == (nbcols - 1)) std::cout << std::endl;
}
}
}
int main() {
// declare a vector on the host
int **ptr_cpu = NULL, *ptr_gpu = NULL;
const int nbcols = 4, nbrows = 5;
int N = nbrows * nbcols;
int nbytes = N * sizeof(int);
ptr_cpu = new int*[nbrows];
// for (int i = 0; i < nbrows; i++) ptr_cpu[i] = new int[nbcols];
// !The memory should be contiguous on the host
ptr_cpu[0] = (int*)malloc(nbytes);
// ptr_cpu[0]=new int[N];
for (int i = 1; i < nbrows; ++i) ptr_cpu[i] = ptr_cpu[i - 1] + nbcols;
int k = 0;
for (int i = 0; i < nbrows; ++i) {
for (int j = 0; j < nbcols; ++j) ptr_cpu[i][j] = k++;
};
print_array(ptr_cpu, nbrows, nbcols);
  // allocate gpu memory and check that the allocation succeeded before using it
  if (hipMalloc(&ptr_gpu, nbytes) != hipSuccess) {
    printf("Couldn't allocate GPU memory\n");
  }
  // copy data to gpu
  hipMemcpy(ptr_gpu, ptr_cpu[0], nbytes, hipMemcpyHostToDevice);
// 2d block: threads in x and y directions
dim3 bs = dim3(nbrows, nbcols);
// 1d grid: block in x direction
int grid = 1; // Only 1 block
// grid and block dimensions
int grid_dim = 1, block_dim = 2;
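  // bs gives nbrows x nbcols threads in a single block, one thread per matrix element;
  // the nbytes argument of the launch below sizes the dynamic shared-memory array
  // declared as "extern __shared__ int sdata[]" inside the kernel.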
hipLaunchKernelGGL(( gpu_array_swap), dim3(grid), dim3(bs), nbytes, 0, ptr_gpu, nbrows, nbcols, grid_dim,
block_dim);
//! Copy data from device to host
hipDeviceSynchronize();
  memset(ptr_cpu[0], 0, nbytes);  // clear the data block, not the array of row pointers
  if (hipMemcpy(ptr_cpu[0], ptr_gpu, nbytes, hipMemcpyDeviceToHost) !=
      hipSuccess)
    std::cout << "Fail to copy back to cpu!" << std::endl;
print_array(ptr_cpu, nbrows, nbcols);
hipFree(ptr_gpu);
  free(ptr_cpu[0]);   // data block was allocated with malloc
  delete[] ptr_cpu;   // row-pointer array was allocated with new[]
}
| 7fd3815b838f312a423fcedc0a4da447f7b3c7cd.cu | #include <iostream>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <cuda.h>
#include <stdio.h>
#include <cassert>
//! Get the block id
__device__ int block_idx(int grid_dim) {
int block_id = blockIdx.x + (grid_dim == 2 ? 1 : 0) * blockIdx.y * gridDim.x +
(grid_dim == 3 ? 1 : 0) * blockIdx.z * gridDim.x * gridDim.y;
return block_id;
}
//! Get the global thread id
__device__ int thread_idx(int grid_dim, int block_dim) {
// thread id inside a block
unsigned long int threadIdInBlock =
threadIdx.x + (block_dim == 2 ? 1 : 0) * threadIdx.y * blockDim.x +
(block_dim == 3 ? 1 : 0) * threadIdx.z * blockDim.x * blockDim.z;
// block id
unsigned long int block_id = block_idx(grid_dim);
// block size
unsigned long int threadsPerblock = blockDim.x *
(block_dim == 2 ? blockDim.y : 1) *
(block_dim == 3 ? blockDim.z : 1);
unsigned long int thread_id = block_id * threadsPerblock + threadIdInBlock;
return thread_id;
}
//! Get the transposed matrix
__global__ void gpu_array_swap(int* ptr_gpu, int nbrows, int nbcols,
int grid_dim, int block_dim) {
int thread_id = thread_idx(grid_dim, block_dim);
// check if the array is correct.
printf("%d %d\n", ptr_gpu[thread_id], thread_id);
// pass the matrix to shared memory
extern __shared__ int sdata[];
sdata[thread_id] = ptr_gpu[thread_id];
__syncthreads();
int row = floorf((thread_id + 1) / nbcols);
int col = thread_id + 1 - row * nbcols;
int swap_thread_id = col * nbcols + row - 1;
ptr_gpu[thread_id] = sdata[swap_thread_id];
printf("%d %d\n", ptr_gpu[thread_id], thread_id);
}
void print_array(int** array, int nbrows, int nbcols) {
for (int i = 0; i < nbrows; ++i) {
for (int j = 0; j < nbcols; ++j) {
std::cout << array[i][j] << " ";
if (j == (nbcols - 1)) std::cout << std::endl;
}
}
}
int main() {
// declare a vector on the host
int **ptr_cpu = NULL, *ptr_gpu = NULL;
const int nbcols = 4, nbrows = 5;
int N = nbrows * nbcols;
int nbytes = N * sizeof(int);
ptr_cpu = new int*[nbrows];
// for (int i = 0; i < nbrows; i++) ptr_cpu[i] = new int[nbcols];
// !The memory should be contiguous on the host
ptr_cpu[0] = (int*)malloc(nbytes);
// ptr_cpu[0]=new int[N];
for (int i = 1; i < nbrows; ++i) ptr_cpu[i] = ptr_cpu[i - 1] + nbcols;
int k = 0;
for (int i = 0; i < nbrows; ++i) {
for (int j = 0; j < nbcols; ++j) ptr_cpu[i][j] = k++;
};
print_array(ptr_cpu, nbrows, nbcols);
  // allocate gpu memory and check that the allocation succeeded before using it
  if (cudaMalloc(&ptr_gpu, nbytes) != cudaSuccess) {
    printf("Couldn't allocate GPU memory\n");
  }
  // copy data to gpu
  cudaMemcpy(ptr_gpu, ptr_cpu[0], nbytes, cudaMemcpyHostToDevice);
// 2d block: threads in x and y directions
dim3 bs = dim3(nbrows, nbcols);
// 1d grid: block in x direction
int grid = 1; // Only 1 block
// grid and block dimensions
int grid_dim = 1, block_dim = 2;
gpu_array_swap<<<grid, bs, nbytes>>>(ptr_gpu, nbrows, nbcols, grid_dim,
block_dim);
//! Copy data from device to host
cudaDeviceSynchronize();
  memset(ptr_cpu[0], 0, nbytes);  // clear the data block, not the array of row pointers
  if (cudaMemcpy(ptr_cpu[0], ptr_gpu, nbytes, cudaMemcpyDeviceToHost) !=
      cudaSuccess)
    std::cout << "Fail to copy back to cpu!" << std::endl;
print_array(ptr_cpu, nbrows, nbcols);
cudaFree(ptr_gpu);
  free(ptr_cpu[0]);   // data block was allocated with malloc
  delete[] ptr_cpu;   // row-pointer array was allocated with new[]
}
|
36c9334a3e9cdbe6ca82ade5732d54aa651e88a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "iter.cuh"
#include "mesh.h"
#include <mpi.h>
#include <getopt.h>
#include <cstring>
#include <fstream>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <cmath>
double phi(PointD p){
double x = p.first;
double y = p.second;
return sqrt(4 + x*y);
}
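// splitFunction halves the longer of the two mesh dimensions p times; the returned pair
// (p0, p - p0) gives the number of splits per axis, i.e. a 2^p0 x 2^(p-p0) process grid.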
PointUi splitFunction(int N0, int N1, int p) {
double n0, n1;
int p0, i;
n0 = (double) N0; n1 = (double) N1;
p0 = 0;
for(i = 0; i < p; i++) {
if(n0 > n1) {
n0 = n0 / 2.0;
++p0;
} else {
n1 = n1 / 2.0;
}
}
return PointUi(p0, p-p0);
}
int getPowOfTwo(int val){
int pwr = 0;
while(val >>= 1) ++pwr;
return pwr;
}
struct Params {
long a;
long b;
long rows;
long cols;
std::string fname;
};
Params getParams(int argc, char** argv) {
int c;
char *opt_val = NULL;
Params result;
while ((c = getopt (argc, argv, "a:b:r:c:f:")) != -1) {
switch(c) {
case 'a':
opt_val = optarg;
result.a = strtol(opt_val, NULL, 10);
break;
case 'b':
opt_val = optarg;
result.b = strtol(opt_val, NULL, 10);
break;
case 'r':
opt_val = optarg;
result.rows = strtol(opt_val, NULL, 10);
break;
case 'c':
opt_val = optarg;
result.cols = strtol(opt_val, NULL, 10);
break;
case 'f':
opt_val = optarg;
result.fname = std::string(opt_val);
break;
}
}
return result;
}
const double EPSILON = 0.0001;
int main(int argc, char **argv) {
int size, rank;
Params pars = getParams(argc, argv);
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD,&size);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
hipSetDevice(0);
long A = pars.a, B = pars.b;
int totalRows = pars.cols, totalCols = pars.rows;
long rowsShift, colsShift;
long rows, cols;
MPI_Status status;
double start;
int sizePower = getPowOfTwo(size);
PointUi ps = splitFunction(totalRows, totalCols, sizePower);
int procRows = 1 << ps.first, procCols = 1<< ps.second;
std::map<int, Mesh> splited;
if (rank == 0) {
start = MPI_Wtime();
MeshConfig globalConf = {PointUi(0,0), PointUi(A,B), 0, 0, totalRows, totalCols};
Mesh result(totalRows, totalCols, globalConf);
initMeshBoundaries(result, phi);
splited = splitMesh(result, sizePower);
for(std::map<int, Mesh>::iterator itr = splited.begin(); itr != splited.end(); ++itr) {
long r = itr->second.getRows();
long c = itr->second.getColumns();
long rShift = itr->second.getRowsShift();
long cShift = itr->second.getColumnsShift();
if(itr->first != rank) {
MPI_Send(&r, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
MPI_Send(&c, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
MPI_Send(&rShift, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
MPI_Send(&cShift, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
} else {
rows = r;
cols = c;
rowsShift = rShift;
colsShift = cShift;
}
}
} else {
MPI_Recv(&rows, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&cols, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&rowsShift, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&colsShift, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
}
Mesh curMesh;
if (rank == 0) {
for(std::map<int, Mesh>::iterator itr = splited.begin(); itr != splited.end(); ++itr) {
if(itr->first != rank) {
MPI_Send(itr->second.getData(), itr->second.getRows()*itr->second.getColumns(),
MPI_DOUBLE, itr->first, 0, MPI_COMM_WORLD);
}
}
curMesh = splited[0];
} else {
double *recdata = new double[rows*cols];
MPI_Recv(recdata, rows*cols, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MeshConfig conf = { PointUi(0,0), PointUi(A,B),rowsShift, colsShift, totalRows, totalCols };
curMesh = Mesh(rows, cols, recdata, conf);
}
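    // Ranks are laid out row-major on the procRows x procCols grid; left/right/up/down
    // are the neighbouring ranks, with -1 marking a side that borders the global mesh
    // and therefore has no neighbouring process.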
int procCol = rank%procCols;
int procRow = (rank - procCol) / procCols;
long left = procCol - 1 >=0 ? procRow*procCols + procCol - 1 : -1;
long right = procCol + 1 < procCols ? procRow*procCols + procCol + 1 : -1;
long up = procRow - 1 >=0? (procRow - 1)*procCols + procCol : -1;
long down = procRow + 1 < procRows ? (procRow + 1)*procCols + procCol : -1;
MPI_Barrier(MPI_COMM_WORLD);
dim3 gridDim;
gridDim.x = (int)((curMesh.getRows() + 2) / BLOCK_SIZE_X + 1);
gridDim.y = (int)((curMesh.getColumns() + 2) / BLOCK_SIZE_Y + 1);
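    // The "+2" presumably makes room for one halo (ghost) row/column on each side of the
    // local submesh; BLOCK_SIZE_X/BLOCK_SIZE_Y and CudaIterator come from iter.cuh, which
    // is not part of this file.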
CudaIterator iter(gridDim, curMesh, rank, left, right, up, down, size);
double err = iter.iterate();
int iterCount = 1;
while(err > EPSILON) {
err = iter.iterate();
if (rank == 0) {
std::cout <<"Iteration: " << iterCount++ <<" Error: " << err <<"\n";
}
}
iter.getPMesh(curMesh);
if (rank != 0) {
MPI_Send(&rows, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(&cols, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(&rowsShift, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(&colsShift, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(curMesh.getData(), rows*cols, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
} else {
std::map<int, Mesh> submeshs;
submeshs[0] = curMesh;
std::vector<MPI_Request> requests;
for (int i = 1; i < size; ++i ) {
MPI_Recv(&rows, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&cols, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&rowsShift, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&colsShift, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
double *recIdata = new double[rows*cols];
MPI_Recv(recIdata, rows*cols, MPI_DOUBLE, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MeshConfig conf = { PointUi(0,0), PointUi(A,B), rowsShift, colsShift, totalRows, totalCols };
Mesh curMesh(rows, cols, recIdata, conf);
submeshs[i] = curMesh;
}
Mesh result = collectMesh(submeshs);
double elapsed = MPI_Wtime() - start;
std::ofstream ofs(pars.fname.c_str());
dropToStream(ofs, result);
ofs <<"stats:" <<iterCount <<'\t' <<totalRows << '\t' << totalCols << '\t' << elapsed;
}
MPI_Finalize();
return 0;
}
| 36c9334a3e9cdbe6ca82ade5732d54aa651e88a2.cu | #include "iter.cuh"
#include "mesh.h"
#include <mpi.h>
#include <getopt.h>
#include <cstring>
#include <fstream>
#include <cstring>
#include <cstdlib>
#include <iostream>
#include <cmath>
double phi(PointD p){
double x = p.first;
double y = p.second;
return sqrt(4 + x*y);
}
PointUi splitFunction(int N0, int N1, int p) {
double n0, n1;
int p0, i;
n0 = (double) N0; n1 = (double) N1;
p0 = 0;
for(i = 0; i < p; i++) {
if(n0 > n1) {
n0 = n0 / 2.0;
++p0;
} else {
n1 = n1 / 2.0;
}
}
return PointUi(p0, p-p0);
}
int getPowOfTwo(int val){
int pwr = 0;
while(val >>= 1) ++pwr;
return pwr;
}
struct Params {
long a;
long b;
long rows;
long cols;
std::string fname;
};
Params getParams(int argc, char** argv) {
int c;
char *opt_val = NULL;
Params result;
while ((c = getopt (argc, argv, "a:b:r:c:f:")) != -1) {
switch(c) {
case 'a':
opt_val = optarg;
result.a = strtol(opt_val, NULL, 10);
break;
case 'b':
opt_val = optarg;
result.b = strtol(opt_val, NULL, 10);
break;
case 'r':
opt_val = optarg;
result.rows = strtol(opt_val, NULL, 10);
break;
case 'c':
opt_val = optarg;
result.cols = strtol(opt_val, NULL, 10);
break;
case 'f':
opt_val = optarg;
result.fname = std::string(opt_val);
break;
}
}
return result;
}
const double EPSILON = 0.0001;
int main(int argc, char **argv) {
int size, rank;
Params pars = getParams(argc, argv);
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD,&size);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
cudaSetDevice(0);
long A = pars.a, B = pars.b;
int totalRows = pars.cols, totalCols = pars.rows;
long rowsShift, colsShift;
long rows, cols;
MPI_Status status;
double start;
int sizePower = getPowOfTwo(size);
PointUi ps = splitFunction(totalRows, totalCols, sizePower);
int procRows = 1 << ps.first, procCols = 1<< ps.second;
std::map<int, Mesh> splited;
if (rank == 0) {
start = MPI_Wtime();
MeshConfig globalConf = {PointUi(0,0), PointUi(A,B), 0, 0, totalRows, totalCols};
Mesh result(totalRows, totalCols, globalConf);
initMeshBoundaries(result, phi);
splited = splitMesh(result, sizePower);
for(std::map<int, Mesh>::iterator itr = splited.begin(); itr != splited.end(); ++itr) {
long r = itr->second.getRows();
long c = itr->second.getColumns();
long rShift = itr->second.getRowsShift();
long cShift = itr->second.getColumnsShift();
if(itr->first != rank) {
MPI_Send(&r, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
MPI_Send(&c, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
MPI_Send(&rShift, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
MPI_Send(&cShift, 1, MPI_LONG, itr->first, 0, MPI_COMM_WORLD);
} else {
rows = r;
cols = c;
rowsShift = rShift;
colsShift = cShift;
}
}
} else {
MPI_Recv(&rows, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&cols, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&rowsShift, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&colsShift, 1, MPI_LONG, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
}
Mesh curMesh;
if (rank == 0) {
for(std::map<int, Mesh>::iterator itr = splited.begin(); itr != splited.end(); ++itr) {
if(itr->first != rank) {
MPI_Send(itr->second.getData(), itr->second.getRows()*itr->second.getColumns(),
MPI_DOUBLE, itr->first, 0, MPI_COMM_WORLD);
}
}
curMesh = splited[0];
} else {
double *recdata = new double[rows*cols];
MPI_Recv(recdata, rows*cols, MPI_DOUBLE, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MeshConfig conf = { PointUi(0,0), PointUi(A,B),rowsShift, colsShift, totalRows, totalCols };
curMesh = Mesh(rows, cols, recdata, conf);
}
int procCol = rank%procCols;
int procRow = (rank - procCol) / procCols;
long left = procCol - 1 >=0 ? procRow*procCols + procCol - 1 : -1;
long right = procCol + 1 < procCols ? procRow*procCols + procCol + 1 : -1;
long up = procRow - 1 >=0? (procRow - 1)*procCols + procCol : -1;
long down = procRow + 1 < procRows ? (procRow + 1)*procCols + procCol : -1;
MPI_Barrier(MPI_COMM_WORLD);
dim3 gridDim;
gridDim.x = (int)((curMesh.getRows() + 2) / BLOCK_SIZE_X + 1);
gridDim.y = (int)((curMesh.getColumns() + 2) / BLOCK_SIZE_Y + 1);
CudaIterator iter(gridDim, curMesh, rank, left, right, up, down, size);
double err = iter.iterate();
int iterCount = 1;
while(err > EPSILON) {
err = iter.iterate();
if (rank == 0) {
std::cout <<"Iteration: " << iterCount++ <<" Error: " << err <<"\n";
}
}
iter.getPMesh(curMesh);
if (rank != 0) {
MPI_Send(&rows, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(&cols, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(&rowsShift, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(&colsShift, 1, MPI_LONG, 0, 0, MPI_COMM_WORLD);
MPI_Send(curMesh.getData(), rows*cols, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
} else {
std::map<int, Mesh> submeshs;
submeshs[0] = curMesh;
std::vector<MPI_Request> requests;
for (int i = 1; i < size; ++i ) {
MPI_Recv(&rows, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&cols, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&rowsShift, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MPI_Recv(&colsShift, 1, MPI_LONG, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
double *recIdata = new double[rows*cols];
MPI_Recv(recIdata, rows*cols, MPI_DOUBLE, i, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
MeshConfig conf = { PointUi(0,0), PointUi(A,B), rowsShift, colsShift, totalRows, totalCols };
Mesh curMesh(rows, cols, recIdata, conf);
submeshs[i] = curMesh;
}
Mesh result = collectMesh(submeshs);
double elapsed = MPI_Wtime() - start;
std::ofstream ofs(pars.fname.c_str());
dropToStream(ofs, result);
ofs <<"stats:" <<iterCount <<'\t' <<totalRows << '\t' << totalCols << '\t' << elapsed;
}
MPI_Finalize();
return 0;
}
|
94eb3f310aca102acd5afa792767e59ce72b09f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio>
#include <cmath>
#include <chrono>
#include "common.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
// Image blur kernel
__global__ void blur_kernel(unsigned char* input, unsigned char* output, int width, int height, int step) {
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
// Matrix
// Values in X and Y
if((xIndex < width) && (yIndex < height)) {
//Location of pixel in input and output
const int tid = yIndex * step + (3 * xIndex);
int pixelBlue = 0;
int pixelGreen = 0;
int pixelRed = 0;
int tm = 0;
// Neighboring pixels
for(int filX=-2; filX<3; filX++) {
for(int filY=-2; filY<3; filY++) {
int tid = (yIndex+filY) * step + (3 * (xIndex+filX));
// Edge handling
if((xIndex+filX)%width>1 && (yIndex+filY)%height>1) {
pixelBlue += input[tid];
pixelGreen += input[tid+1];
pixelRed += input[tid+2];
tm++;
}
}
}
// Average
output[tid] = static_cast<unsigned char>(pixelBlue/tm);
output[tid+1] = static_cast<unsigned char>(pixelGreen/tm);
output[tid+2] = static_cast<unsigned char>(pixelRed/tm);
}
}
void blur(const cv::Mat& input, cv::Mat& output)
{
cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;
size_t colorBytes = input.step * input.rows;
size_t grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
// Allocate device memory
SAFE_CALL(hipMalloc(&d_input, colorBytes), "CUDA Malloc Failed");
SAFE_CALL(hipMalloc(&d_output, grayBytes), "CUDA Malloc Failed");
// Copy data from OpenCV input image to device memory
SAFE_CALL(hipMemcpy(d_input, input.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
SAFE_CALL(hipMemcpy(d_output, output.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// Specify a reasonable block size
const dim3 block(16, 16);
// Calculate grid size to cover the whole image
const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y));
printf("blur_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
// Launch the color conversion kernel
hipLaunchKernelGGL(( blur_kernel) , dim3(grid), dim3(block) , 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step));
// Synchronize to check for any kernel launch errors
SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed");
// Copy back data from destination device memory to OpenCV output image
SAFE_CALL(hipMemcpy(output.ptr(), d_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
// Free the device memory
SAFE_CALL(hipFree(d_input), "CUDA Free Failed");
SAFE_CALL(hipFree(d_output), "CUDA Free Failed");
}
int main(int argc, char *argv[]) {
// Read the image
string imagePath;
if (argc < 2)
imagePath = "imagenMedia.jpg";
else
imagePath = argv[1];
//Read the input image
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
//Create an output image
cv::Mat output(input.rows, input.cols, CV_8UC3);
//Call the blur function
auto start_cpu = std::chrono::high_resolution_clock::now();
blur(input, output);
auto end_cpu = std::chrono::high_resolution_clock::now();
// Elapsed time of the blur call
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("blur elapsed %f ms\n", duration_ms.count());
//Allow the windows to resize
namedWindow("Input", cv::WINDOW_NORMAL);
namedWindow("Output", cv::WINDOW_NORMAL);
//Show the input and output
imshow("Input", input);
imshow("Output", output);
//Wait for key press
cv::waitKey();
return 0;
}
| 94eb3f310aca102acd5afa792767e59ce72b09f4.cu | #include <iostream>
#include <cstdio>
#include <cmath>
#include <chrono>
#include "common.h"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace std;
// Image blur kernel
__global__ void blur_kernel(unsigned char* input, unsigned char* output, int width, int height, int step) {
//2D Index of current thread
const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
// Matrix
// Values in X and Y
if((xIndex < width) && (yIndex < height)) {
//Location of pixel in input and output
const int tid = yIndex * step + (3 * xIndex);
int pixelBlue = 0;
int pixelGreen = 0;
int pixelRed = 0;
int tm = 0;
// Neighboring pixels
for(int filX=-2; filX<3; filX++) {
for(int filY=-2; filY<3; filY++) {
int tid = (yIndex+filY) * step + (3 * (xIndex+filX));
// Edge handling
if((xIndex+filX)%width>1 && (yIndex+filY)%height>1) {
pixelBlue += input[tid];
pixelGreen += input[tid+1];
pixelRed += input[tid+2];
tm++;
}
}
}
// Average
output[tid] = static_cast<unsigned char>(pixelBlue/tm);
output[tid+1] = static_cast<unsigned char>(pixelGreen/tm);
output[tid+2] = static_cast<unsigned char>(pixelRed/tm);
}
}
void blur(const cv::Mat& input, cv::Mat& output)
{
cout << "Input image step: " << input.step << " rows: " << input.rows << " cols: " << input.cols << endl;
size_t colorBytes = input.step * input.rows;
size_t grayBytes = output.step * output.rows;
unsigned char *d_input, *d_output;
// Allocate device memory
SAFE_CALL(cudaMalloc(&d_input, colorBytes), "CUDA Malloc Failed");
SAFE_CALL(cudaMalloc(&d_output, grayBytes), "CUDA Malloc Failed");
// Copy data from OpenCV input image to device memory
SAFE_CALL(cudaMemcpy(d_input, input.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
SAFE_CALL(cudaMemcpy(d_output, output.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
// Specify a reasonable block size
const dim3 block(16, 16);
// Calculate grid size to cover the whole image
const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y));
printf("blur_kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y);
// Launch the color conversion kernel
blur_kernel <<<grid, block >>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step));
// Synchronize to check for any kernel launch errors
SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");
// Copy back data from destination device memory to OpenCV output image
SAFE_CALL(cudaMemcpy(output.ptr(), d_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
// Free the device memory
SAFE_CALL(cudaFree(d_input), "CUDA Free Failed");
SAFE_CALL(cudaFree(d_output), "CUDA Free Failed");
}
int main(int argc, char *argv[]) {
// Read the image
string imagePath;
if (argc < 2)
imagePath = "imagenMedia.jpg";
else
imagePath = argv[1];
//Read the input image
cv::Mat input = cv::imread(imagePath, CV_LOAD_IMAGE_COLOR);
//Create an output image
cv::Mat output(input.rows, input.cols, CV_8UC3);
//Call the blur function
auto start_cpu = std::chrono::high_resolution_clock::now();
blur(input, output);
auto end_cpu = std::chrono::high_resolution_clock::now();
// Elapsed time of the blur call
std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
printf("blur elapsed %f ms\n", duration_ms.count());
//Allow the windows to resize
namedWindow("Input", cv::WINDOW_NORMAL);
namedWindow("Output", cv::WINDOW_NORMAL);
//Show the input and output
imshow("Input", input);
imshow("Output", output);
//Wait for key press
cv::waitKey();
return 0;
}
|
37ea4e13e6a35c76ee982fa8d7a019645d96ad51.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <vector>
#include <utility>
#include <cmath>
using namespace std;
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define CUDA_CALL(func)\
{\
hipError_t e = (func);\
if(e != hipSuccess) \
cout << "CUDA: " << hipGetErrorString(e) << endl; \
}
#define CURAND_CALL(func) \
{ \
hiprandStatus_t e = (func); \
if(e != HIPRAND_STATUS_SUCCESS) \
cout << "CURAND: " << e << endl; \
}
#define NUM_THREADS 1024
hipStream_t stream;
float * copyFloatToGPU(const vector<float> & x) {
float * ret;
CUDA_CALL(hipMalloc((void**)&ret, sizeof(float) * x.size()));
CUDA_CALL(hipMemcpyAsync(ret, x.data(), sizeof(float) * x.size(), hipMemcpyHostToDevice, stream));
return ret;
}
int * copyIntToGPU(const vector<int> & x) {
int * ret;
CUDA_CALL(hipMalloc((void**)&ret, sizeof(int) * x.size()));
CUDA_CALL(hipMemcpyAsync(ret, x.data(), sizeof(int) * x.size(), hipMemcpyHostToDevice, stream));
return ret;
}
int total_count(const vector< vector< pair<int, float> > > & data) {
int ret = 0;
for(int i = 0; i < data.size(); i++) ret += data[i].size();
return ret;
}
struct CooMatrix {
float *val, *err, *label, *act;
int *row_ind, *col_ind;
int nnz, nrow;
int max_length;
};
__global__ void dot(float * val, int *row_ind, int *col_ind, int nnz, float * ret, float * w) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
atomicAdd(&ret[r], v * w[c]);
}
}
__global__ void vec_sigmoid(float * d, int num) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < num) {
if(d[tid] > 10.0) d[tid] = 1.0;
else if(d[tid] < -10.0) d[tid] = 0.0;
else d[tid] = 1.0 / (1.0 + exp(-1.0 * d[tid]));
}
}
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err,
int nnz, float *act, float *label,
float *w, float learning_rate) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
mat_err[tid] = abs(label[r] - act[r]);
float err = v * (label[r] - act[r]);
atomicAdd(&w[c], learning_rate * err);
}
}
CooMatrix zeroCooMatrix(int batch_size, int max_length) {
CooMatrix mat;
mat.max_length = max_length;
CUDA_CALL(hipMalloc((void**)&mat.val, max_length * sizeof(float)));
CUDA_CALL(hipMalloc((void**)&mat.act, batch_size * sizeof(float)));
CUDA_CALL(hipMalloc((void**)&mat.label, batch_size * sizeof(float)));
CUDA_CALL(hipMalloc((void**)&mat.err, max_length * sizeof(float)));
CUDA_CALL(hipMalloc((void**)&mat.row_ind, max_length * sizeof(int)));
CUDA_CALL(hipMalloc((void**)&mat.col_ind, max_length * sizeof(int)));
return mat;
}
void freeCooMatrix(CooMatrix * mat) {
CUDA_CALL(hipFree(mat->val));
CUDA_CALL(hipFree(mat->act));
CUDA_CALL(hipFree(mat->label));
CUDA_CALL(hipFree(mat->err));
CUDA_CALL(hipFree(mat->row_ind));
CUDA_CALL(hipFree(mat->col_ind));
}
struct CooMatrixHost {
float * val;
int *row_ind;
int *col_ind;
int max_length;
int nnz;
};
CooMatrixHost zeroCooMatrixHost(int batch_size, int max_length) {
CooMatrixHost mat;
mat.max_length = max_length;
CUDA_CALL(hipHostMalloc((void**)&mat.val, sizeof(float)*max_length));
CUDA_CALL(hipHostMalloc((void**)&mat.row_ind, sizeof(int)*max_length));
CUDA_CALL(hipHostMalloc((void**)&mat.col_ind, sizeof(int)*max_length));
return mat;
}
void freeCooMatrixHost(CooMatrixHost * mat){
CUDA_CALL(hipHostFree(mat->val));
CUDA_CALL(hipHostFree(mat->row_ind));
CUDA_CALL(hipHostFree(mat->col_ind));
}
void vec2coo(const vector< vector< pair<int, float> > > & data, CooMatrixHost * mat_host, CooMatrix * mat) {
int nnz = total_count(data);
if(nnz > mat->max_length) cout << nnz << "\t" << mat->max_length << endl;
mat->nnz = nnz;
mat->nrow = data.size();
CUDA_CALL(hipMemset(mat->err, 0, mat->max_length * sizeof(float)));
int n = 0;
for(int i = 0; i < data.size(); i++){
for(vector< pair<int, float> >::const_iterator j = data[i].begin();
j != data[i].end(); j++) {
mat_host->val[n] = j->second;
mat_host->row_ind[n] = i;
mat_host->col_ind[n] = j->first;
++n;
}
}
CUDA_CALL(hipMemcpyAsync(mat->val, mat_host->val, nnz*sizeof(float),
hipMemcpyHostToDevice, stream));
CUDA_CALL(hipMemcpyAsync(mat->row_ind, mat_host->row_ind, nnz*sizeof(int),
hipMemcpyHostToDevice, stream));
CUDA_CALL(hipMemcpyAsync(mat->col_ind, mat_host->col_ind, nnz*sizeof(int),
hipMemcpyHostToDevice, stream));
}
void lr(const vector< vector< pair<int, float> > > & data,
const vector<float> & label,
CooMatrixHost * coo_mat_host,
CooMatrix * coo_mat,
float * w, int ncol, int batch) {
vec2coo(data, coo_mat_host, coo_mat);
CUDA_CALL(hipMemcpyAsync(coo_mat->label, label.data(), sizeof(float) * label.size(), hipMemcpyHostToDevice, stream));
CUDA_CALL(hipMemset(coo_mat->act, 0, sizeof(float) * data.size()));
int shared_memory_usage = 1;
int num_blocks = ((coo_mat->nnz + (NUM_THREADS - 1)) / NUM_THREADS);
hipLaunchKernelGGL(( dot), dim3(num_blocks), dim3(NUM_THREADS), shared_memory_usage, stream, coo_mat->val,
coo_mat->row_ind,
coo_mat->col_ind,
coo_mat->nnz,
coo_mat->act, w);
num_blocks = ((data.size() + (NUM_THREADS - 1)) / NUM_THREADS);
hipLaunchKernelGGL(( vec_sigmoid), dim3(num_blocks), dim3(NUM_THREADS), shared_memory_usage, stream, coo_mat->act, data.size());
num_blocks = ((coo_mat->nnz + (NUM_THREADS - 1)) / NUM_THREADS);
hipLaunchKernelGGL(( grad), dim3(num_blocks), dim3(NUM_THREADS), shared_memory_usage, stream, coo_mat->val,
coo_mat->row_ind,
coo_mat->col_ind,
coo_mat->err,
coo_mat->nnz,
coo_mat->act,
coo_mat->label,
w, 0.01);
if (batch % 10000 == 0){
float * err = (float*) malloc(sizeof(float) * coo_mat->nnz);
CUDA_CALL(hipMemcpyAsync(err, coo_mat->err, sizeof(float) * coo_mat->nnz, hipMemcpyDeviceToHost, stream));
CUDA_CALL(hipStreamSynchronize(stream)); // wait for the async copy to finish before reading err on the host
float total = 0.;
for(int i = 0; i < coo_mat->nnz; i++) total += err[i];
cout << total / (float) coo_mat->nnz << endl;
free(err); // release the temporary host buffer
}
}
void mock_sample(const int max_feature_id, vector< pair<int, float> > & out, int * label) {
int count = rand() % 100 + 10;
int ret = 0;
for(int i = 0; i < count; i++) {
int fid = rand() % max_feature_id;
if(fid % 2 == 0) ret += 1;
else ret -= 1;
if(abs(ret) > 10) break;
out.push_back(make_pair<int, float>(fid, 1.0));
}
*label = (ret > 0) ? 1 : 0;
}
#define MODEL_SIZE 1000000
__global__ void fill(float * w, float val, int size) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < size) w[tid] = val;
}
int main(int argc, char ** argv) {
srand(time(NULL));
CUDA_CALL(hipSetDevice(1));
CUDA_CALL(hipStreamCreateWithFlags(&stream,hipStreamNonBlocking));
float * w;
CUDA_CALL(hipMalloc((void**)&w, sizeof(float) * MODEL_SIZE));
CUDA_CALL(hipMemset(w, 0, sizeof(float) * MODEL_SIZE));
const int shared_memory_usage = 0;
const int num_blocks = ((MODEL_SIZE + (NUM_THREADS - 1)) / NUM_THREADS);
hipLaunchKernelGGL(( fill), dim3(num_blocks),
dim3(NUM_THREADS),
shared_memory_usage,
stream, w, 1, MODEL_SIZE);
hiprandGenerator_t rand_gen;
const hiprandRngType_t gen_type = HIPRAND_RNG_PSEUDO_DEFAULT;
CURAND_CALL(hiprandCreateGenerator(&rand_gen, gen_type));
CURAND_CALL(hiprandSetStream(rand_gen, stream));
CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(rand_gen, time(NULL)));
CURAND_CALL(hiprandGenerateNormal(rand_gen, w, MODEL_SIZE, 0, 0.1));
int batch_size = atoi(argv[1]);
int total_batch = 1024 * 1024 / batch_size;
CooMatrix mat = zeroCooMatrix(batch_size, batch_size * 256);
CooMatrixHost mat_host = zeroCooMatrixHost(batch_size, batch_size * 256);
for(int batch = 0; batch < total_batch; batch++){
vector< vector< pair<int, float> > > samples;
vector<float> labels;
for(int i = 0; i < batch_size; i++){
vector< pair<int, float> > sample;
int label;
mock_sample(MODEL_SIZE, sample, &label);
samples.push_back(sample);
labels.push_back((float)label);
}
lr(samples, labels, &mat_host, &mat, w, MODEL_SIZE, batch);
}
CUDA_CALL(hipStreamDestroy(stream));
freeCooMatrix(&mat);
freeCooMatrixHost(&mat_host);
}
| 37ea4e13e6a35c76ee982fa8d7a019645d96ad51.cu | #include <iostream>
#include <cstdlib>
#include <cstdio>
#include <vector>
#include <utility>
#include <cmath>
using namespace std;
#include <cuda_runtime.h>
#include <curand_kernel.h>
#define CUDA_CALL(func)\
{\
cudaError_t e = (func);\
if(e != cudaSuccess) \
cout << "CUDA: " << cudaGetErrorString(e) << endl; \
}
#define CURAND_CALL(func) \
{ \
curandStatus_t e = (func); \
if(e != CURAND_STATUS_SUCCESS) \
cout << "CURAND: " << e << endl; \
}
#define NUM_THREADS 1024
cudaStream_t stream;
float * copyFloatToGPU(const vector<float> & x) {
float * ret;
CUDA_CALL(cudaMalloc((void**)&ret, sizeof(float) * x.size()));
CUDA_CALL(cudaMemcpyAsync(ret, x.data(), sizeof(float) * x.size(), cudaMemcpyHostToDevice, stream));
return ret;
}
int * copyIntToGPU(const vector<int> & x) {
int * ret;
CUDA_CALL(cudaMalloc((void**)&ret, sizeof(int) * x.size()));
CUDA_CALL(cudaMemcpyAsync(ret, x.data(), sizeof(int) * x.size(), cudaMemcpyHostToDevice, stream));
return ret;
}
int total_count(const vector< vector< pair<int, float> > > & data) {
int ret = 0;
for(int i = 0; i < data.size(); i++) ret += data[i].size();
return ret;
}
struct CooMatrix {
float *val, *err, *label, *act;
int *row_ind, *col_ind;
int nnz, nrow;
int max_length;
};
__global__ void dot(float * val, int *row_ind, int *col_ind, int nnz, float * ret, float * w) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
atomicAdd(&ret[r], v * w[c]);
}
}
__global__ void vec_sigmoid(float * d, int num) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < num) {
if(d[tid] > 10.0) d[tid] = 1.0;
else if(d[tid] < -10.0) d[tid] = 0.0;
else d[tid] = 1.0 / (1.0 + exp(-1.0 * d[tid]));
}
}
__global__ void grad(float * val, int * row_ind, int *col_ind, float * mat_err,
int nnz, float *act, float *label,
float *w, float learning_rate) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < nnz) {
int r = row_ind[tid];
int c = col_ind[tid];
float v = val[tid];
mat_err[tid] = abs(label[r] - act[r]);
float err = v * (label[r] - act[r]);
atomicAdd(&w[c], learning_rate * err);
}
}
CooMatrix zeroCooMatrix(int batch_size, int max_length) {
CooMatrix mat;
mat.max_length = max_length;
CUDA_CALL(cudaMalloc((void**)&mat.val, max_length * sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&mat.act, batch_size * sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&mat.label, batch_size * sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&mat.err, max_length * sizeof(float)));
CUDA_CALL(cudaMalloc((void**)&mat.row_ind, max_length * sizeof(int)));
CUDA_CALL(cudaMalloc((void**)&mat.col_ind, max_length * sizeof(int)));
return mat;
}
void freeCooMatrix(CooMatrix * mat) {
CUDA_CALL(cudaFree(mat->val));
CUDA_CALL(cudaFree(mat->act));
CUDA_CALL(cudaFree(mat->label));
CUDA_CALL(cudaFree(mat->err));
CUDA_CALL(cudaFree(mat->row_ind));
CUDA_CALL(cudaFree(mat->col_ind));
}
struct CooMatrixHost {
float * val;
int *row_ind;
int *col_ind;
int max_length;
int nnz;
};
CooMatrixHost zeroCooMatrixHost(int batch_size, int max_length) {
CooMatrixHost mat;
mat.max_length = max_length;
CUDA_CALL(cudaMallocHost((void**)&mat.val, sizeof(float)*max_length));
CUDA_CALL(cudaMallocHost((void**)&mat.row_ind, sizeof(int)*max_length));
CUDA_CALL(cudaMallocHost((void**)&mat.col_ind, sizeof(int)*max_length));
return mat;
}
void freeCooMatrixHost(CooMatrixHost * mat){
CUDA_CALL(cudaFreeHost(mat->val));
CUDA_CALL(cudaFreeHost(mat->row_ind));
CUDA_CALL(cudaFreeHost(mat->col_ind));
}
void vec2coo(const vector< vector< pair<int, float> > > & data, CooMatrixHost * mat_host, CooMatrix * mat) {
int nnz = total_count(data);
if(nnz > mat->max_length) cout << nnz << "\t" << mat->max_length << endl;
mat->nnz = nnz;
mat->nrow = data.size();
CUDA_CALL(cudaMemset(mat->err, 0, mat->max_length * sizeof(float)));
int n = 0;
for(int i = 0; i < data.size(); i++){
for(vector< pair<int, float> >::const_iterator j = data[i].begin();
j != data[i].end(); j++) {
mat_host->val[n] = j->second;
mat_host->row_ind[n] = i;
mat_host->col_ind[n] = j->first;
++n;
}
}
CUDA_CALL(cudaMemcpyAsync(mat->val, mat_host->val, nnz*sizeof(float),
cudaMemcpyHostToDevice, stream));
CUDA_CALL(cudaMemcpyAsync(mat->row_ind, mat_host->row_ind, nnz*sizeof(int),
cudaMemcpyHostToDevice, stream));
CUDA_CALL(cudaMemcpyAsync(mat->col_ind, mat_host->col_ind, nnz*sizeof(int),
cudaMemcpyHostToDevice, stream));
}
void lr(const vector< vector< pair<int, float> > > & data,
const vector<float> & label,
CooMatrixHost * coo_mat_host,
CooMatrix * coo_mat,
float * w, int ncol, int batch) {
vec2coo(data, coo_mat_host, coo_mat);
CUDA_CALL(cudaMemcpyAsync(coo_mat->label, label.data(), sizeof(float) * label.size(), cudaMemcpyHostToDevice, stream));
CUDA_CALL(cudaMemset(coo_mat->act, 0, sizeof(float) * data.size()));
int shared_memory_usage = 1;
int num_blocks = ((coo_mat->nnz + (NUM_THREADS - 1)) / NUM_THREADS);
dot<<<num_blocks, NUM_THREADS, shared_memory_usage, stream>>>(coo_mat->val,
coo_mat->row_ind,
coo_mat->col_ind,
coo_mat->nnz,
coo_mat->act, w);
num_blocks = ((data.size() + (NUM_THREADS - 1)) / NUM_THREADS);
vec_sigmoid<<<num_blocks, NUM_THREADS, shared_memory_usage, stream>>>(coo_mat->act, data.size());
num_blocks = ((coo_mat->nnz + (NUM_THREADS - 1)) / NUM_THREADS);
grad<<<num_blocks, NUM_THREADS, shared_memory_usage, stream>>>(coo_mat->val,
coo_mat->row_ind,
coo_mat->col_ind,
coo_mat->err,
coo_mat->nnz,
coo_mat->act,
coo_mat->label,
w, 0.01);
if (batch % 10000 == 0){
float * err = (float*) malloc(sizeof(float) * coo_mat->nnz);
CUDA_CALL(cudaMemcpyAsync(err, coo_mat->err, sizeof(float) * coo_mat->nnz, cudaMemcpyDeviceToHost, stream));
CUDA_CALL(cudaStreamSynchronize(stream)); // wait for the async copy to finish before reading err on the host
float total = 0.;
for(int i = 0; i < coo_mat->nnz; i++) total += err[i];
cout << total / (float) coo_mat->nnz << endl;
free(err); // release the temporary host buffer
}
}
void mock_sample(const int max_feature_id, vector< pair<int, float> > & out, int * label) {
int count = rand() % 100 + 10;
int ret = 0;
for(int i = 0; i < count; i++) {
int fid = rand() % max_feature_id;
if(fid % 2 == 0) ret += 1;
else ret -= 1;
if(abs(ret) > 10) break;
out.push_back(make_pair<int, float>(fid, 1.0));
}
*label = (ret > 0) ? 1 : 0;
}
#define MODEL_SIZE 1000000
__global__ void fill(float * w, float val, int size) {
const int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (tid < size) w[tid] = val;
}
int main(int argc, char ** argv) {
srand(time(NULL));
CUDA_CALL(cudaSetDevice(1));
CUDA_CALL(cudaStreamCreateWithFlags(&stream,cudaStreamNonBlocking));
float * w;
CUDA_CALL(cudaMalloc((void**)&w, sizeof(float) * MODEL_SIZE));
CUDA_CALL(cudaMemset(w, 0, sizeof(float) * MODEL_SIZE));
const int shared_memory_usage = 0;
const int num_blocks = ((MODEL_SIZE + (NUM_THREADS - 1)) / NUM_THREADS);
fill<<<num_blocks,
NUM_THREADS,
shared_memory_usage,
stream>>>(w, 1, MODEL_SIZE);
curandGenerator_t rand_gen;
const curandRngType_t gen_type = CURAND_RNG_PSEUDO_DEFAULT;
CURAND_CALL(curandCreateGenerator(&rand_gen, gen_type));
CURAND_CALL(curandSetStream(rand_gen, stream));
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(rand_gen, time(NULL)));
CURAND_CALL(curandGenerateNormal(rand_gen, w, MODEL_SIZE, 0, 0.1));
int batch_size = atoi(argv[1]);
int total_batch = 1024 * 1024 / batch_size;
CooMatrix mat = zeroCooMatrix(batch_size, batch_size * 256);
CooMatrixHost mat_host = zeroCooMatrixHost(batch_size, batch_size * 256);
for(int batch = 0; batch < total_batch; batch++){
vector< vector< pair<int, float> > > samples;
vector<float> labels;
for(int i = 0; i < batch_size; i++){
vector< pair<int, float> > sample;
int label;
mock_sample(MODEL_SIZE, sample, &label);
samples.push_back(sample);
labels.push_back((float)label);
}
lr(samples, labels, &mat_host, &mat, w, MODEL_SIZE, batch);
}
CUDA_CALL(cudaStreamDestroy(stream));
freeCooMatrix(&mat);
freeCooMatrixHost(&mat_host);
}
|
c9ed91a087ca3fbc07e39ba81e71486fb3b50e33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ReflectionWithBuildingCalculationOnGPU.cuh"
__device__ void RWBC_UitizeVectorOnGPU(float *a,float *b,float *c) // normalize the vector
{
float length=sqrt((*a)*(*a)+(*b)*(*b)+(*c)*(*c));
*a=*a/length;
*b=*b/length;
*c=*c/length;
}
__device__ Point RWBC_GetReflectedVectorOnGPU(Point d,Point n) // compute the reflected ray direction vector
{
Point reflectedPoint;
float temp=2*(d.x*n.x+d.y*n.y+d.z*n.z);
reflectedPoint.x=d.x-temp*n.x;
reflectedPoint.y=d.y-temp*n.y;
reflectedPoint.z=d.z-temp*n.z;
return reflectedPoint;
}
__device__ Ray RWBC_CalculateReflectedRayOnGPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)
{
Ray reflectedRay;
float t=50000;
for (int i=0;i<faceCount;i++)
{
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i]. B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i]. C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
if (abs(denominator)>0.000001)
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
if(t_numerator/denominator<t&&t_numerator/denominator>1)
{
t=t_numerator/denominator;
reflectedRay.originalPoint.x=incidentRay.originalPoint.x+incidentRay.direction.x*t;
reflectedRay.originalPoint.y=incidentRay.originalPoint.y+incidentRay.direction.y*t;
reflectedRay.originalPoint.z=incidentRay.originalPoint.z+incidentRay.direction.z*t;
Point n;
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
RWBC_UitizeVectorOnGPU(&n.x,&n.y,&n.z);
reflectedRay.direction=RWBC_GetReflectedVectorOnGPU(incidentRay.direction,n);
if ((u>0)&&(u<1)&&((u+v)>0)&&((u+v)<1))
{
*flag=1;
*reflectedFace=i;
}
}
}
}
return reflectedRay;
}
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(int faceNumber1,int faceNumber2)
{
int a=faceNumber1%12;
int b=faceNumber2%12;
if (faceNumber1==faceNumber2)
{
return true;
}
if (a%2==0)
{
if (b==a+1)
{
return true;
}
}
if (a%2==1)
{
if (b==a-1)
{
return true;
}
}
return false;
}
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(int faceNumber1,int faceNumber2)
{
int a=faceNumber1%12;
int b=faceNumber2%12;
if (a%2==0&&a!=0)
{
if (b==a-1)
{
return true;
}
}
if (a%2==1&&a!=7)
{
if (b==a+1)
{
return true;
}
}
if ((a==0&&b==7)||(a==7&&b==0))
{
return true;
}
return false;
}
__device__ void RWBC_GetThreeRemainingNumbersOnGPU(int number,int *remainingNumber1,int *remainingNumber2,int *remainingNumber3)
{
if (number==0)
{
*remainingNumber1=1;
*remainingNumber2=2;
*remainingNumber3=3;
}
if (number==1)
{
*remainingNumber1=0;
*remainingNumber2=2;
*remainingNumber3=3;
}
if (number==2)
{
*remainingNumber1=0;
*remainingNumber2=1;
*remainingNumber3=3;
}
if (number==3)
{
*remainingNumber1=0;
*remainingNumber2=1;
*remainingNumber3=2;
}
}
__device__ void RWBC_GetOneRemainingNumberOnGPU(int number1,int number2,int number3,int *remainingNumber)
{
*remainingNumber=6-number1-number2-number3;
}
__device__ void GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(int flagNumber1,int flagNumber2,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist,int i)
{
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
reflectionExist[i]=1;
reflectedRayTubes[2*i].ray1=reflectedRays[0];
reflectedRayTubes[2*i].ray2=reflectedRays[1];
reflectedRayTubes[2*i].ray3=reflectedRays[2];
reflectedRayTubes[2*i].ray4=reflectedRays[3];
reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
}
if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
reflectionExist[i]=1;
int remainingNumber1,remainingNumber2,remainingNumber3;
RWBC_GetThreeRemainingNumbersOnGPU(flagNumber1,&remainingNumber1,&remainingNumber2,&remainingNumber3);
reflectedRayTubes[2*i].ray1=reflectedRays[flagNumber1];
reflectedRayTubes[2*i].ray2=reflectedRays[remainingNumber1];
reflectedRayTubes[2*i].ray3=reflectedRays[remainingNumber2];
reflectedRayTubes[2*i].ray4=reflectedRays[remainingNumber3];
RWBC_GetThreeRemainingNumbersOnGPU(flagNumber2,&remainingNumber1,&remainingNumber2,&remainingNumber3);
reflectedRayTubes[2*i+1].ray1=reflectedRays[flagNumber2];
reflectedRayTubes[2*i+1].ray2=reflectedRays[remainingNumber1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[remainingNumber2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[remainingNumber3];
}
}
__device__ void GetReflectedRayTubesWhenThreeFlagsEqualOneOnGPU(int flagNumber1,int flagNumber2,int flagNumber3,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,QuadrangleRayTube *incidentRayTubes,Face *face,int *reflectionExist,int i)
{
//int remainingNumber;
//RWBC_GetOneRemainingNumberOnGPU(flagNumber1,flagNumber2,flagNumber3,&remainingNumber);
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(flagNumber1,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber3]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber2]],face[reflectedFace[flagNumber3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(flagNumber2,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber2],reflectedFace[flagNumber3]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber2]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(flagNumber1,flagNumber2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
}
__device__ void RWBC_AddPathNodeToReflectedRayTubeOnGPU(QuadrangleRayTube *incidentRayTubes,QuadrangleRayTube *reflectedRayTubes,int i,Face face1,Face face2)
{
if(incidentRayTubes[i].path.nodeLevel==0)
{
reflectedRayTubes[2*i].path.nodeLevel=1;
reflectedRayTubes[2*i].path.node1.point1=face1.A;
reflectedRayTubes[2*i].path.node1.point2=face1.B;
reflectedRayTubes[2*i].path.node1.point3=face1.C;
reflectedRayTubes[2*i+1].path.nodeLevel=1;
reflectedRayTubes[2*i+1].path.node1.point1=face2.A;
reflectedRayTubes[2*i+1].path.node1.point2=face2.B;
reflectedRayTubes[2*i+1].path.node1.point3=face2.C;
}
if(incidentRayTubes[i].path.nodeLevel==1)
{
reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i].path.nodeLevel=2;
reflectedRayTubes[2*i].path.node2.point1=face1.A;
reflectedRayTubes[2*i].path.node2.point2=face1.B;
reflectedRayTubes[2*i].path.node2.point3=face1.C;
reflectedRayTubes[2*i+1].path.nodeLevel=2;
reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i+1].path.node2.point1=face2.A;
reflectedRayTubes[2*i+1].path.node2.point2=face2.B;
reflectedRayTubes[2*i+1].path.node2.point3=face2.C;
}
if(incidentRayTubes[i].path.nodeLevel==2)
{
reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i].path.node2=incidentRayTubes[i].path.node2;
reflectedRayTubes[2*i].path.nodeLevel=3;
reflectedRayTubes[2*i].path.node3.point1=face1.A;
reflectedRayTubes[2*i].path.node3.point2=face1.B;
reflectedRayTubes[2*i].path.node3.point3=face1.C;
reflectedRayTubes[2*i+1].path.nodeLevel=3;
reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i+1].path.node2=incidentRayTubes[i].path.node2;
reflectedRayTubes[2*i+1].path.node3.point1=face2.A;
reflectedRayTubes[2*i+1].path.node3.point2=face2.B;
reflectedRayTubes[2*i+1].path.node3.point3=face2.C;
}
}
__global__ void CalculateReflectionWithBuildingFaceOnGPU(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist)
{
int i=blockIdx.x * blockDim.x+threadIdx.x;
int reflectedFace[4];
int flag[4]={0,0,0,0};
Ray reflectedRays[4],incidentRays[4];
incidentRays[0]=incidentRayTubes[i].ray1;
incidentRays[1]=incidentRayTubes[i].ray2;
incidentRays[2]=incidentRayTubes[i].ray3;
incidentRays[3]=incidentRayTubes[i].ray4;
reflectedRays[0]=RWBC_CalculateReflectedRayOnGPU(incidentRays[0],face,faceCount,&reflectedFace[0],&flag[0]);
reflectedRays[1]=RWBC_CalculateReflectedRayOnGPU(incidentRays[1],face,faceCount,&reflectedFace[1],&flag[1]);
reflectedRays[2]=RWBC_CalculateReflectedRayOnGPU(incidentRays[2],face,faceCount,&reflectedFace[2],&flag[2]);
reflectedRays[3]=RWBC_CalculateReflectedRayOnGPU(incidentRays[3],face,faceCount,&reflectedFace[3],&flag[3]);
if (flag[0]+flag[1]+flag[2]+flag[3]==0)
{
reflectionExist[i]=0;
}
if (flag[0]+flag[1]+flag[2]+flag[3]==1)
{
reflectionExist[i]=1;
//reflectedRayTubes[2*i].path.nodeLevel=1;
reflectedRayTubes[2*i].ray1=reflectedRays[0];
reflectedRayTubes[2*i].ray2=reflectedRays[1];
reflectedRayTubes[2*i].ray3=reflectedRays[2];
reflectedRayTubes[2*i].ray4=reflectedRays[3];
//reflectedRayTubes[2*i+1].path.nodeLevel=1;
reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
for (int j=0;j<4;j++)
{
if (flag[j]==1)
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[j]],face[reflectedFace[j]]);
}
}
}
if (flag[0]+flag[1]+flag[2]+flag[3]==2)
{
int m,n;
for (int j=0;j<4;j++)
{
if (flag[j]==1)
{
m=j;
for (int t=j+1;t<4;t++)
{
if(flag[t]==1)
{
n=t;
}
}
}
}
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(m,n,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[m]],face[reflectedFace[n]]);
}
if (flag[0]+flag[1]+flag[2]+flag[3]==3)
{
int j=0;
int remainingNumber1,remainingNumber2,remainingNumber3;
for (;j<4;j++)
{
if (flag[j]==0)
{
break;
}
}
RWBC_GetThreeRemainingNumbersOnGPU(j,&remainingNumber1,&remainingNumber2,&remainingNumber3);
GetReflectedRayTubesWhenThreeFlagsEqualOneOnGPU(remainingNumber1,remainingNumber2,remainingNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,incidentRayTubes,face,reflectionExist,i);
}
if (flag[0]+flag[1]+flag[2]+flag[3]==4)
{
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[1]))
{
if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[2]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[2]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(0,2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
else
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[1]],face[reflectedFace[3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(1,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
}
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[3]))
{
if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[1]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[1]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(0,1,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
else
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[2]],face[reflectedFace[3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(2,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
}
}
}
hipError_t GetReflectionWithBuildingFaceOnGPU(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,Face *buildingFace,QuadrangleRayTube *reflectedRayTubes,int *reflectionExist)
{
const int pointPerFace=9;
hipError_t cudaStatus;
QuadrangleRayTube *device_incidentRayTubes=0;
QuadrangleRayTube *device_reflectedRayTubes=0;
Face *device_face=0;
//float *device_distance=0;
//int *device_faceNumber1=0;
//int *device_faceNumber2=0;
int *device_reflectionExist=0;
//float *device_a=0;
cudaStatus=hipSetDevice(0);
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"CUDA capable GPU is not available!");
goto Error;
}
cudaStatus=hipMalloc((void**)&device_incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube));
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"device_incidentRayTubes hipMalloc error!");
goto Error;
}
cudaStatus=hipMalloc((void**)&device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube));
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"device_reflectedRayTubes hipMalloc error!");
goto Error;
}
cudaStatus=hipMalloc((void**)&device_face,faceCount*sizeof(Face));
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"device_face hipMalloc error!");
goto Error;
}
/*cudaStatus=hipMalloc((void**)&device_faceNumber1,rayTubeCount*sizeof(int));
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"device_faceNumber1 hipMalloc error!");
goto Error;
}
cudaStatus=hipMalloc((void**)&device_faceNumber2,rayTubeCount*sizeof(int));
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"device_faceNumber2 hipMalloc error!");
goto Error;
}*/
cudaStatus=hipMalloc((void**)&device_reflectionExist,rayTubeCount*sizeof(int));
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"device_reflectionExis hipMalloc error!");
goto Error;
}
//cudaStatus=hipMalloc((void**)&device_a,rayTubeCount*sizeof(float));
//if (cudaStatus!=hipSuccess)
//{
// fprintf(stderr,"device_reflectionExis hipMalloc error!");
// goto Error;
//}
cudaStatus=hipMemcpy(device_incidentRayTubes,incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube),hipMemcpyHostToDevice);
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"incidentRayTubes Memcpy failed!");
}
cudaStatus=hipMemcpy(device_face,buildingFace,faceCount*sizeof(Face),hipMemcpyHostToDevice);
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"face Memcpy failed!");
}
const int num_blocks=32;
const int num_threads=640;
//CalculateReflectionAndDiffractionWithBuildingFace<<<num_blocks,num_threads>>>(device_incidentRayTubes,faceCount,rayTubeCount,device_reflectedRayTubes,device_face,device_edge,device_faceNumber1,device_faceNumber2,device_reflectionExist,device_diffractionExist);
cudaStatus=hipMemcpy(reflectedRayTubes,device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube),hipMemcpyDeviceToHost);
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"reflectedRayTubes hipMemcpy failed!");
}
//cudaStatus=hipMemcpy(faceNumber1,device_faceNumber1,rayTubeCount*sizeof(int),hipMemcpyDeviceToHost);
//if (cudaStatus!=hipSuccess)
//{
// fprintf(stderr,"faceNumber1 hipMemcpy failed!");
//}
//cudaStatus=hipMemcpy(faceNumber2,device_faceNumber2,rayTubeCount*sizeof(int),hipMemcpyDeviceToHost);
//if (cudaStatus!=hipSuccess)
//{
// fprintf(stderr,"faceNumber2 hipMemcpy failed!");
//}
cudaStatus=hipMemcpy(reflectionExist,device_reflectionExist,rayTubeCount*sizeof(int),hipMemcpyDeviceToHost);
if (cudaStatus!=hipSuccess)
{
fprintf(stderr,"reflectionExist hipMemcpy failed!");
}
return cudaStatus;
Error:
hipFree(device_incidentRayTubes);
hipFree(device_reflectedRayTubes);
hipFree(device_face);
//hipFree(device_faceNumber1);
//hipFree(device_faceNumber2);
hipFree(device_reflectionExist);
} | c9ed91a087ca3fbc07e39ba81e71486fb3b50e33.cu | #include "ReflectionWithBuildingCalculationOnGPU.cuh"
__device__ void RWBC_UitizeVectorOnGPU(float *a,float *b,float *c) // normalize the vector
{
float length=sqrt((*a)*(*a)+(*b)*(*b)+(*c)*(*c));
*a=*a/length;
*b=*b/length;
*c=*c/length;
}
__device__ Point RWBC_GetReflectedVectorOnGPU(Point d,Point n) // compute the reflected ray direction vector
{
Point reflectedPoint;
float temp=2*(d.x*n.x+d.y*n.y+d.z*n.z);
reflectedPoint.x=d.x-temp*n.x;
reflectedPoint.y=d.y-temp*n.y;
reflectedPoint.z=d.z-temp*n.z;
return reflectedPoint;
}
__device__ Ray RWBC_CalculateReflectedRayOnGPU(Ray incidentRay,Face *face,int faceCount,int *reflectedFace,int *flag)
{
Ray reflectedRay;
float t=50000;
for (int i=0;i<faceCount;i++)
{
float a1=-incidentRay.direction.x,a2=-incidentRay.direction.y,a3=-incidentRay.direction.z;
float b1=face[i]. B.x-face[i].A.x,b2=face[i].B.y-face[i].A.y,b3=face[i].B.z-face[i].A.z;
float c1=face[i]. C.x-face[i].A.x,c2=face[i].C.y-face[i].A.y,c3=face[i].C.z-face[i].A.z;
float x1=incidentRay.originalPoint.x-face[i].A.x,x2=incidentRay.originalPoint.y-face[i].A.y,x3=incidentRay.originalPoint.z-face[i].A.z;
float denominator=a1*(b2*c3-b3*c2)-b1*(a2*c3-a3*c2)+c1*(a2*b3-a3*b2);
float t_numerator=x1*(b2*c3-b3*c2)-b1*(x2*c3-x3*c2)+c1*(x2*b3-x3*b2);
float u_numerator=a1*(x2*c3-x3*c2)-x1*(a2*c3-a3*c2)+c1*(a2*x3-a3*x2);
float v_numerator=a1*(b2*x3-b3*x2)-b1*(a2*x3-a3*x2)+x1*(a2*b3-a3*b2);
if (abs(denominator)>0.000001)
{
float u=u_numerator/denominator;
float v=v_numerator/denominator;
if(t_numerator/denominator<t&&t_numerator/denominator>1)
{
t=t_numerator/denominator;
reflectedRay.originalPoint.x=incidentRay.originalPoint.x+incidentRay.direction.x*t;
reflectedRay.originalPoint.y=incidentRay.originalPoint.y+incidentRay.direction.y*t;
reflectedRay.originalPoint.z=incidentRay.originalPoint.z+incidentRay.direction.z*t;
Point n;
n.x=b2*c3-b3*c2;n.y=b3*c1-b1*c3;n.z=b1*c2-c1*b2;
RWBC_UitizeVectorOnGPU(&n.x,&n.y,&n.z);
reflectedRay.direction=RWBC_GetReflectedVectorOnGPU(incidentRay.direction,n);
if ((u>0)&&(u<1)&&((u+v)>0)&&((u+v)<1))
{
*flag=1;
*reflectedFace=i;
}
}
}
}
return reflectedRay;
}
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(int faceNumber1,int faceNumber2)
{
int a=faceNumber1%12;
int b=faceNumber2%12;
if (faceNumber1==faceNumber2)
{
return true;
}
if (a%2==0)
{
if (b==a+1)
{
return true;
}
}
if (a%2==1)
{
if (b==a-1)
{
return true;
}
}
return false;
}
__device__ bool RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(int faceNumber1,int faceNumber2)
{
int a=faceNumber1%12;
int b=faceNumber2%12;
if (a%2==0&&a!=0)
{
if (b==a-1)
{
return true;
}
}
if (a%2==1&&a!=7)
{
if (b==a+1)
{
return true;
}
}
if ((a==0&&b==7)||(a==7&&b==0))
{
return true;
}
return false;
}
__device__ void RWBC_GetThreeRemainingNumbersOnGPU(int number,int *remainingNumber1,int *remainingNumber2,int *remainingNumber3)
{
if (number==0)
{
*remainingNumber1=1;
*remainingNumber2=2;
*remainingNumber3=3;
}
if (number==1)
{
*remainingNumber1=0;
*remainingNumber2=2;
*remainingNumber3=3;
}
if (number==2)
{
*remainingNumber1=0;
*remainingNumber2=1;
*remainingNumber3=3;
}
if (number==3)
{
*remainingNumber1=0;
*remainingNumber2=1;
*remainingNumber3=2;
}
}
__device__ void RWBC_GetOneRemainingNumberOnGPU(int number1,int number2,int number3,int *remainingNumber)
{
*remainingNumber=6-number1-number2-number3;
}
__device__ void GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(int flagNumber1,int flagNumber2,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist,int i)
{
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
reflectionExist[i]=1;
reflectedRayTubes[2*i].ray1=reflectedRays[0];
reflectedRayTubes[2*i].ray2=reflectedRays[1];
reflectedRayTubes[2*i].ray3=reflectedRays[2];
reflectedRayTubes[2*i].ray4=reflectedRays[3];
reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
}
if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
reflectionExist[i]=1;
int remainingNumber1,remainingNumber2,remainingNumber3;
RWBC_GetThreeRemainingNumbersOnGPU(flagNumber1,&remainingNumber1,&remainingNumber2,&remainingNumber3);
reflectedRayTubes[2*i].ray1=reflectedRays[flagNumber1];
reflectedRayTubes[2*i].ray2=reflectedRays[remainingNumber1];
reflectedRayTubes[2*i].ray3=reflectedRays[remainingNumber2];
reflectedRayTubes[2*i].ray4=reflectedRays[remainingNumber3];
RWBC_GetThreeRemainingNumbersOnGPU(flagNumber2,&remainingNumber1,&remainingNumber2,&remainingNumber3);
reflectedRayTubes[2*i+1].ray1=reflectedRays[flagNumber2];
reflectedRayTubes[2*i+1].ray2=reflectedRays[remainingNumber1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[remainingNumber2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[remainingNumber3];
}
}
__device__ void GetReflectedRayTubesWhenThreeFlagsEqualOneOnGPU(int flagNumber1,int flagNumber2,int flagNumber3,int *reflectedFace,Ray *reflectedRays,Ray *incidentRays,QuadrangleRayTube *reflectedRayTubes,QuadrangleRayTube *incidentRayTubes,Face *face,int *reflectionExist,int i)
{
//int remainingNumber;
//RWBC_GetOneRemainingNumberOnGPU(flagNumber1,flagNumber2,flagNumber3,&remainingNumber);
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber2]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(flagNumber1,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber1],reflectedFace[flagNumber3]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber2]],face[reflectedFace[flagNumber3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(flagNumber2,flagNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[flagNumber2],reflectedFace[flagNumber3]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[flagNumber1]],face[reflectedFace[flagNumber2]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(flagNumber1,flagNumber2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
}
__device__ void RWBC_AddPathNodeToReflectedRayTubeOnGPU(QuadrangleRayTube *incidentRayTubes,QuadrangleRayTube *reflectedRayTubes,int i,Face face1,Face face2)
{
if(incidentRayTubes[i].path.nodeLevel==0)
{
reflectedRayTubes[2*i].path.nodeLevel=1;
reflectedRayTubes[2*i].path.node1.point1=face1.A;
reflectedRayTubes[2*i].path.node1.point2=face1.B;
reflectedRayTubes[2*i].path.node1.point3=face1.C;
reflectedRayTubes[2*i+1].path.nodeLevel=1;
reflectedRayTubes[2*i+1].path.node1.point1=face2.A;
reflectedRayTubes[2*i+1].path.node1.point2=face2.B;
reflectedRayTubes[2*i+1].path.node1.point3=face2.C;
}
if(incidentRayTubes[i].path.nodeLevel==1)
{
reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i].path.nodeLevel=2;
reflectedRayTubes[2*i].path.node2.point1=face1.A;
reflectedRayTubes[2*i].path.node2.point2=face1.B;
reflectedRayTubes[2*i].path.node2.point3=face1.C;
reflectedRayTubes[2*i+1].path.nodeLevel=2;
reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i+1].path.node2.point1=face2.A;
reflectedRayTubes[2*i+1].path.node2.point2=face2.B;
reflectedRayTubes[2*i+1].path.node2.point3=face2.C;
}
if(incidentRayTubes[i].path.nodeLevel==2)
{
reflectedRayTubes[2*i].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i].path.node2=incidentRayTubes[i].path.node2;
reflectedRayTubes[2*i].path.nodeLevel=3;
reflectedRayTubes[2*i].path.node3.point1=face1.A;
reflectedRayTubes[2*i].path.node3.point2=face1.B;
reflectedRayTubes[2*i].path.node3.point3=face1.C;
reflectedRayTubes[2*i+1].path.nodeLevel=3;
reflectedRayTubes[2*i+1].path.node1=incidentRayTubes[i].path.node1;
reflectedRayTubes[2*i+1].path.node2=incidentRayTubes[i].path.node2;
reflectedRayTubes[2*i+1].path.node3.point1=face2.A;
reflectedRayTubes[2*i+1].path.node3.point2=face2.B;
reflectedRayTubes[2*i+1].path.node3.point3=face2.C;
}
}
__global__ void CalculateReflectionWithBuildingFaceOnGPU(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,QuadrangleRayTube *reflectedRayTubes,Face *face,int *reflectionExist)
{
int i=blockIdx.x * blockDim.x+threadIdx.x;
int reflectedFace[4];
int flag[4]={0,0,0,0};
Ray reflectedRays[4],incidentRays[4];
incidentRays[0]=incidentRayTubes[i].ray1;
incidentRays[1]=incidentRayTubes[i].ray2;
incidentRays[2]=incidentRayTubes[i].ray3;
incidentRays[3]=incidentRayTubes[i].ray4;
reflectedRays[0]=RWBC_CalculateReflectedRayOnGPU(incidentRays[0],face,faceCount,&reflectedFace[0],&flag[0]);
reflectedRays[1]=RWBC_CalculateReflectedRayOnGPU(incidentRays[1],face,faceCount,&reflectedFace[1],&flag[1]);
reflectedRays[2]=RWBC_CalculateReflectedRayOnGPU(incidentRays[2],face,faceCount,&reflectedFace[2],&flag[2]);
reflectedRays[3]=RWBC_CalculateReflectedRayOnGPU(incidentRays[3],face,faceCount,&reflectedFace[3],&flag[3]);
if (flag[0]+flag[1]+flag[2]+flag[3]==0)
{
reflectionExist[i]=0;
}
if (flag[0]+flag[1]+flag[2]+flag[3]==1)
{
reflectionExist[i]=1;
//reflectedRayTubes[2*i].path.nodeLevel=1;
reflectedRayTubes[2*i].ray1=reflectedRays[0];
reflectedRayTubes[2*i].ray2=reflectedRays[1];
reflectedRayTubes[2*i].ray3=reflectedRays[2];
reflectedRayTubes[2*i].ray4=reflectedRays[3];
//reflectedRayTubes[2*i+1].path.nodeLevel=1;
reflectedRayTubes[2*i+1].ray1=reflectedRays[0];
reflectedRayTubes[2*i+1].ray2=reflectedRays[1];
reflectedRayTubes[2*i+1].ray3=reflectedRays[2];
reflectedRayTubes[2*i+1].ray4=reflectedRays[3];
for (int j=0;j<4;j++)
{
if (flag[j]==1)
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[j]],face[reflectedFace[j]]);
}
}
}
if (flag[0]+flag[1]+flag[2]+flag[3]==2)
{
int m,n;
for (int j=0;j<4;j++)
{
if (flag[j]==1)
{
m=j;
for (int t=j+1;t<4;t++)
{
if(flag[t]==1)
{
n=t;
}
}
}
}
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(m,n,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[m]],face[reflectedFace[n]]);
}
if (flag[0]+flag[1]+flag[2]+flag[3]==3)
{
int j=0;
int remainingNumber1,remainingNumber2,remainingNumber3;
for (;j<4;j++)
{
if (flag[j]==0)
{
break;
}
}
RWBC_GetThreeRemainingNumbersOnGPU(j,&remainingNumber1,&remainingNumber2,&remainingNumber3);
GetReflectedRayTubesWhenThreeFlagsEqualOneOnGPU(remainingNumber1,remainingNumber2,remainingNumber3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,incidentRayTubes,face,reflectionExist,i);
}
if (flag[0]+flag[1]+flag[2]+flag[3]==4)
{
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[1]))
{
if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[2]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[2]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(0,2,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
else
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[1]],face[reflectedFace[3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(1,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
}
if (RWBC_JudgeTwoTriangleFacesOnTheSameQuadrangle(reflectedFace[0],reflectedFace[3]))
{
if (RWBC_JudgeTwoTriangleFacesOnTheAdjacentQuadrangle(reflectedFace[0],reflectedFace[1]))
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[0]],face[reflectedFace[1]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(0,1,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
else
{
RWBC_AddPathNodeToReflectedRayTubeOnGPU(incidentRayTubes,reflectedRayTubes,i,face[reflectedFace[2]],face[reflectedFace[3]]);
GetReflectedRayTubesWhenTwoFlagsEqualOneOnGPU(2,3,reflectedFace,reflectedRays,incidentRays,reflectedRayTubes,face,reflectionExist,i);
}
}
}
}
cudaError_t GetReflectionWithBuildingFaceOnGPU(QuadrangleRayTube *incidentRayTubes,int faceCount,int rayTubeCount,Face *buildingFace,QuadrangleRayTube *reflectedRayTubes,int *reflectionExist)
{
const int pointPerFace=9;
cudaError_t cudaStatus;
QuadrangleRayTube *device_incidentRayTubes=0;
QuadrangleRayTube *device_reflectedRayTubes=0;
Face *device_face=0;
//float *device_distance=0;
//int *device_faceNumber1=0;
//int *device_faceNumber2=0;
int *device_reflectionExist=0;
//float *device_a=0;
cudaStatus=cudaSetDevice(0);
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"CUDA capable GPU is not available!");
goto Error;
}
cudaStatus=cudaMalloc((void**)&device_incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube));
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"device_incidentRayTubes cudaMalloc error!");
goto Error;
}
cudaStatus=cudaMalloc((void**)&device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube));
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"device_reflectedRayTubes cudaMalloc error!");
goto Error;
}
cudaStatus=cudaMalloc((void**)&device_face,faceCount*sizeof(Face));
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"device_face cudaMalloc error!");
goto Error;
}
/*cudaStatus=cudaMalloc((void**)&device_faceNumber1,rayTubeCount*sizeof(int));
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"device_faceNumber1 cudaMalloc error!");
goto Error;
}
cudaStatus=cudaMalloc((void**)&device_faceNumber2,rayTubeCount*sizeof(int));
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"device_faceNumber2 cudaMalloc error!");
goto Error;
}*/
cudaStatus=cudaMalloc((void**)&device_reflectionExist,rayTubeCount*sizeof(int));
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"device_reflectionExis cudaMalloc error!");
goto Error;
}
//cudaStatus=cudaMalloc((void**)&device_a,rayTubeCount*sizeof(float));
//if (cudaStatus!=cudaSuccess)
//{
// fprintf(stderr,"device_reflectionExis cudaMalloc error!");
// goto Error;
//}
cudaStatus=cudaMemcpy(device_incidentRayTubes,incidentRayTubes,rayTubeCount*sizeof(QuadrangleRayTube),cudaMemcpyHostToDevice);
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"incidentRayTubes Memcpy failed!");
}
cudaStatus=cudaMemcpy(device_face,buildingFace,faceCount*sizeof(Face),cudaMemcpyHostToDevice);
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"face Memcpy failed!");
}
const int num_blocks=32;
const int num_threads=640;
//CalculateReflectionAndDiffractionWithBuildingFace<<<num_blocks,num_threads>>>(device_incidentRayTubes,faceCount,rayTubeCount,device_reflectedRayTubes,device_face,device_edge,device_faceNumber1,device_faceNumber2,device_reflectionExist,device_diffractionExist);
cudaStatus=cudaMemcpy(reflectedRayTubes,device_reflectedRayTubes,rayTubeCount*2*sizeof(QuadrangleRayTube),cudaMemcpyDeviceToHost);
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"reflectedRayTubes cudaMemcpy failed!");
}
//cudaStatus=cudaMemcpy(faceNumber1,device_faceNumber1,rayTubeCount*sizeof(int),cudaMemcpyDeviceToHost);
//if (cudaStatus!=cudaSuccess)
//{
// fprintf(stderr,"faceNumber1 cudaMemcpy failed!");
//}
//cudaStatus=cudaMemcpy(faceNumber2,device_faceNumber2,rayTubeCount*sizeof(int),cudaMemcpyDeviceToHost);
//if (cudaStatus!=cudaSuccess)
//{
// fprintf(stderr,"faceNumber2 cudaMemcpy failed!");
//}
cudaStatus=cudaMemcpy(reflectionExist,device_reflectionExist,rayTubeCount*sizeof(int),cudaMemcpyDeviceToHost);
if (cudaStatus!=cudaSuccess)
{
fprintf(stderr,"reflectionExist cudaMemcpy failed!");
}
return cudaStatus;
Error:
cudaFree(device_incidentRayTubes);
cudaFree(device_reflectedRayTubes);
cudaFree(device_face);
//cudaFree(device_faceNumber1);
//cudaFree(device_faceNumber2);
cudaFree(device_reflectionExist);
return cudaStatus;
} |
77c216e0014df404eda444f58bc3aad55ec8b674.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// === GPUQREngine/Include/Kernel/Apply/pipelined_rearrange.cu =================
// =============================================================================
//------------------------------------------------------------------------------
// pipelined_rearrange
//------------------------------------------------------------------------------
/*
PSEUDO #define MACROS (copied from vt_factorize.cu)
N
The # of columns to operate on (should always be TILESIZE).
INSIDE
Substitute in a condition depending on compilation options.
For this code, we always assume we need to check edge cases.
NACHUNKS
A chunking scheme used in the factorization kernel. We use
the same layout and thread dimension for our tile load/stores.
glA
Shorthand for the index computation into the global A.
shA
Shorthand for accessing the shared memory tiles of A in the union.
it
Row indices of a tile owned by a thread.
jt
Col indices of a tile owned by a thread.
ACHUNKSIZE
The amount of A to load in a chunk
*/
#define N (TILESIZE)
#define INSIDE(COND) (COND)
// when all threads work on a tile.
// (N*N / NUMTHREADS) does not have to be an integer. With a tile
// size of N=32, and NUMTHREADS=384, it isn't. So compute the ceiling,
// and handle the clean up by testing i < N below.
#define NACHUNKS CEIL (N*N, NUMTHREADS)
#define glA(i,j) (myTask.F[((i)*fn + (j))])
#define shA shMemory.factorize.A
// ACHUNKSIZE must be an integer
#define it (threadIdx.x / N)
#define jt (threadIdx.x % N)
#define ACHUNKSIZE (NUMTHREADS / N)
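// Worked example of the chunking above, using the values quoted in the comment
// (N = 32, NUMTHREADS = 384): ACHUNKSIZE = 384/32 = 12 tile rows per pass, a tile has
// N*N = 1024 entries, and NACHUNKS = CEIL(1024, 384) = 3 passes. The last pass would
// cover rows 24..35, which is why the last chunk is guarded with the i < N test below.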
/*
NEW #define MACROS
SAFELOAD
Loads a tile from global memory. Checks edge cases.
SH_TRANSFER
Moves a tile within shared memory
SAFESTORE
Stores a tile back to global memory. Checks edge cases.
*/
#define SAFELOAD(SLOT, ROWTILE) \
{ \
int rowTile = (ROWTILE); \
if (INSIDE (rowTile != EMPTY)) \
{ \
/* load the tile of A from global memory */ \
for (int ii = 0 ; ii < NACHUNKS ; ii++) \
{ \
int i = ii * ACHUNKSIZE + it ; \
if (ii < NACHUNKS-1 || i < N) \
{ \
shA [i + (SLOT)*TILESIZE][jt] = \
(INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) ? \
glA (i+rowTile, jt+j1) : 0 ; \
} \
} \
} \
else \
{ \
/* clear the tile of A */ \
for (int ii = 0 ; ii < NACHUNKS ; ii++) \
{ \
int i = ii * ACHUNKSIZE + it ; \
if (ii < NACHUNKS-1 || i < N) \
{ \
shA [i + SLOT*TILESIZE][jt] = 0 ; \
} \
} \
} \
} \
#define SH_TRANSFER(TO, FROM) \
{ \
for (int th=threadIdx.x; th<TILESIZE*TILESIZE; th+=blockDim.x) \
{ \
int ito = (TILESIZE*(TO)) + (th / TILESIZE); \
int ifr = (TILESIZE*(FROM)) + (th / TILESIZE); \
int j = (th % TILESIZE); \
shA[ito][j] = shA[ifr][j]; \
} \
} \
#define SAFESTORE(SLOT, ROWTILE) \
{ \
int rowTile = ROWTILE; \
if (INSIDE (rowTile != EMPTY)) \
{ \
for (int ii = 0 ; ii < NACHUNKS ; ii++) \
{ \
int i = ii * ACHUNKSIZE + it ; \
if (ii < NACHUNKS-1 || i < N) \
{ \
if (INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) \
{ \
glA (i+rowTile, jt+j1) = shA [i + (SLOT)*TILESIZE][jt]; \
} \
} \
} \
} \
} \
/* ALL THREADS PARTICIPATE */
{
int delta = myTask.extra[8];
int secondMin = myTask.extra[9];
int fc = IsApplyFactorize;
int j1 = myTask.extra[4] + TILESIZE;
/*** DO MEMORY SHUFFLES ***/
SAFESTORE(0, myTask.extra[0]);
/* 0 <-- secondMin */
if(delta != EMPTY && secondMin == delta)
{
SAFELOAD(0, myTask.extra[secondMin]);
}
else
{
SH_TRANSFER(0, secondMin);
}
/* secondMin <-- fc */
if(fc != secondMin)
{
if(delta != EMPTY && fc >= delta)
{
SAFELOAD(secondMin, myTask.extra[fc]);
}
else
{
SH_TRANSFER(secondMin, fc);
}
}
/* Hard-load D from global in the 2-3 case where [1] is secondMin. */
if(fc == 3 && delta == 2 && secondMin == 1)
{
SAFELOAD(2, myTask.extra[2]);
}
/* Rearrange tiles so the tile store at the end doesn't explode.
This is non-essential until the very end, so we can easily justify
piggybacking this integer shuffle to the next natural __syncthreads
that we encounter. */
__syncthreads();
if(threadIdx.x == 0)
{
myTask.extra[4] = j1;
myTask.AuxAddress[0] = myTask.AuxAddress[1];
myTask.AuxAddress[1] = NULL;
myTask.extra[0] = myTask.extra[secondMin];
if(fc != secondMin)
{
myTask.extra[secondMin] = myTask.extra[fc];
}
}
__syncthreads();
}
#undef N
#undef INSIDE
#undef NACHUNKS
#undef glA
#undef shA
#undef it
#undef jt
#undef ACHUNKSIZE
#undef SAFELOAD
#undef SH_TRANSFER
#undef SAFESTORE
| 77c216e0014df404eda444f58bc3aad55ec8b674.cu | // =============================================================================
// === GPUQREngine/Include/Kernel/Apply/pipelined_rearrange.cu =================
// =============================================================================
//------------------------------------------------------------------------------
// pipelined_rearrange
//------------------------------------------------------------------------------
/*
PSEUDO #define MACROS (copied from vt_factorize.cu)
N
The # of columns to operate on (should always be TILESIZE).
INSIDE
Substitute in a condition depending on compilation options.
For this code, we always assume we need to check edge cases.
NACHUNKS
A chunking scheme used in the factorization kernel. We use
the same layout and thread dimension for our tile load/stores.
glA
Shorthand for the index computation into the global A.
shA
Shorthand for accessing the shared memory tiles of A in the union.
it
Row indices of a tile owned by a thread.
jt
Col indices of a tile owned by a thread.
ACHUNKSIZE
The amount of A to load in a chunk
*/
#define N (TILESIZE)
#define INSIDE(COND) (COND)
// when all threads work on a tile.
// (N*N / NUMTHREADS) does not have to be an integer. With a tile
// size of N=32, and NUMTHREADS=384, it isn't. So compute the ceiling,
// and handle the clean up by testing i < N below.
#define NACHUNKS CEIL (N*N, NUMTHREADS)
#define glA(i,j) (myTask.F[((i)*fn + (j))])
#define shA shMemory.factorize.A
// ACHUNKSIZE must be an integer
#define it (threadIdx.x / N)
#define jt (threadIdx.x % N)
#define ACHUNKSIZE (NUMTHREADS / N)
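// Worked example of the chunking above, using the values quoted in the comment
// (N = 32, NUMTHREADS = 384): ACHUNKSIZE = 384/32 = 12 tile rows per pass, a tile has
// N*N = 1024 entries, and NACHUNKS = CEIL(1024, 384) = 3 passes. The last pass would
// cover rows 24..35, which is why the last chunk is guarded with the i < N test below.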
/*
NEW #define MACROS
SAFELOAD
Loads a tile from global memory. Checks edge cases.
SH_TRANSFER
Moves a tile within shared memory
SAFESTORE
Stores a tile back to global memory. Checks edge cases.
*/
#define SAFELOAD(SLOT, ROWTILE) \
{ \
int rowTile = (ROWTILE); \
if (INSIDE (rowTile != EMPTY)) \
{ \
/* load the tile of A from global memory */ \
for (int ii = 0 ; ii < NACHUNKS ; ii++) \
{ \
int i = ii * ACHUNKSIZE + it ; \
if (ii < NACHUNKS-1 || i < N) \
{ \
shA [i + (SLOT)*TILESIZE][jt] = \
(INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) ? \
glA (i+rowTile, jt+j1) : 0 ; \
} \
} \
} \
else \
{ \
/* clear the tile of A */ \
for (int ii = 0 ; ii < NACHUNKS ; ii++) \
{ \
int i = ii * ACHUNKSIZE + it ; \
if (ii < NACHUNKS-1 || i < N) \
{ \
shA [i + SLOT*TILESIZE][jt] = 0 ; \
} \
} \
} \
} \
#define SH_TRANSFER(TO, FROM) \
{ \
for (int th=threadIdx.x; th<TILESIZE*TILESIZE; th+=blockDim.x) \
{ \
int ito = (TILESIZE*(TO)) + (th / TILESIZE); \
int ifr = (TILESIZE*(FROM)) + (th / TILESIZE); \
int j = (th % TILESIZE); \
shA[ito][j] = shA[ifr][j]; \
} \
} \
#define SAFESTORE(SLOT, ROWTILE) \
{ \
int rowTile = ROWTILE; \
if (INSIDE (rowTile != EMPTY)) \
{ \
for (int ii = 0 ; ii < NACHUNKS ; ii++) \
{ \
int i = ii * ACHUNKSIZE + it ; \
if (ii < NACHUNKS-1 || i < N) \
{ \
if (INSIDE (i+rowTile < fm) && INSIDE (jt+j1 < fn)) \
{ \
glA (i+rowTile, jt+j1) = shA [i + (SLOT)*TILESIZE][jt]; \
} \
} \
} \
} \
} \
/* ALL THREADS PARTICIPATE */
{
int delta = myTask.extra[8];
int secondMin = myTask.extra[9];
int fc = IsApplyFactorize;
int j1 = myTask.extra[4] + TILESIZE;
/*** DO MEMORY SHUFFLES ***/
SAFESTORE(0, myTask.extra[0]);
/* 0 <-- secondMin */
if(delta != EMPTY && secondMin == delta)
{
SAFELOAD(0, myTask.extra[secondMin]);
}
else
{
SH_TRANSFER(0, secondMin);
}
/* secondMin <-- fc */
if(fc != secondMin)
{
if(delta != EMPTY && fc >= delta)
{
SAFELOAD(secondMin, myTask.extra[fc]);
}
else
{
SH_TRANSFER(secondMin, fc);
}
}
/* Hard-load D from global in the 2-3 case where [1] is secondMin. */
if(fc == 3 && delta == 2 && secondMin == 1)
{
SAFELOAD(2, myTask.extra[2]);
}
/* Rearrange tiles so the tile store at the end doesn't explode.
This is non-essential until the very end, so we can easily justify
piggybacking this integer shuffle to the next natural __syncthreads
that we encounter. */
__syncthreads();
if(threadIdx.x == 0)
{
myTask.extra[4] = j1;
myTask.AuxAddress[0] = myTask.AuxAddress[1];
myTask.AuxAddress[1] = NULL;
myTask.extra[0] = myTask.extra[secondMin];
if(fc != secondMin)
{
myTask.extra[secondMin] = myTask.extra[fc];
}
}
__syncthreads();
}
#undef N
#undef INSIDE
#undef NACHUNKS
#undef glA
#undef shA
#undef it
#undef jt
#undef ACHUNKSIZE
#undef SAFELOAD
#undef SH_TRANSFER
#undef SAFESTORE
|
2099c901891b0be6d51b5cb3a7611fbad418a505.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define NUM_BITS 4
#define NUM_BASES 5
#define SIZE_HW_WORD 32
#define MAX_VALUE 0xFFFFFFFF
#define HIGH_MASK_32 0x80000000
#define LOW_MASK_32 0x00000001
#define BASES_PER_ENTRY 8
#define HANDLE_ERROR(error) (HandleError(error, __FILE__, __LINE__ ))
#ifndef MIN
#define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif
typedef struct {
uint32_t bitmap[NUM_BASES];
} qryEntry_t;
typedef struct {
uint32_t column;
uint32_t score;
} resEntry_t;
typedef struct {
uint32_t query;
uint32_t position;
} candInfo_t;
typedef struct {
uint32_t size;
uint32_t numEntries;
uint32_t *h_reference;
uint32_t *d_reference;
} ref_t;
typedef struct {
uint32_t numResults;
resEntry_t* h_results;
resEntry_t* d_results;
} res_t;
typedef struct {
uint32_t totalSizeQueries;
uint32_t totalQueriesEntries;
uint32_t sizeQueries;
uint32_t numQueries;
uint32_t numCandidates;
float distance;
qryEntry_t *h_queries;
qryEntry_t *d_queries;
candInfo_t *h_candidates;
candInfo_t *d_candidates;
uint32_t *d_Pv;
uint32_t *d_Mv;
} qry_t;
extern "C"
static void HandleError( hipError_t err, const char *file, int line ) {
if (err != hipSuccess) {
printf( "%s in %s at line %d\n", hipGetErrorString(err), file, line );
exit( EXIT_FAILURE );
}
}
__global__ void myersKernel(const qryEntry_t *d_queries, const uint32_t __restrict *d_reference, const candInfo_t *d_candidates, resEntry_t *d_results,
const uint32_t sizeCandidate, const uint32_t sizeQueries, const uint32_t sizeRef, const uint32_t numEntriesPerQuery,
const uint32_t numCandidates)
{
__shared__
uint32_t s_Pv[CUDA_NUM_THREADS * N_ENTRIES], s_Mv[CUDA_NUM_THREADS * N_ENTRIES];
uint32_t *tmpPv, *tmpMv;
uint32_t Ph, Mh, Pv, Mv, Xv, Xh, Eq;
uint32_t initEntry, idEntry, idColumn, indexBase, aline, mask;
int8_t carry, nextCarry;
uint32_t candidate;
uint32_t idCandidate = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x;
if ((threadIdx.x < MAX_THREADS_PER_SM) && (idCandidate < numCandidates)){
uint32_t positionRef = d_candidates[idCandidate].position;
uint32_t entryRef = positionRef / BASES_PER_ENTRY;
int32_t score = sizeQueries, minScore = sizeQueries;
uint32_t minColumn = 0;
uint32_t finalMask = ((sizeQueries % SIZE_HW_WORD) == 0) ? HIGH_MASK_32 : 1 << ((sizeQueries % SIZE_HW_WORD) - 1);
uint32_t word = 0;
if((positionRef < sizeRef) && (sizeRef - positionRef) > sizeCandidate){
tmpPv = s_Pv + (threadIdx.x * numEntriesPerQuery);
tmpMv = s_Mv + (threadIdx.x * numEntriesPerQuery);
initEntry = d_candidates[idCandidate].query * numEntriesPerQuery;
for(idEntry = 0; idEntry < numEntriesPerQuery; idEntry++){
tmpPv[idEntry] = MAX_VALUE;
tmpMv[idEntry] = 0;
}
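/* The column loop below follows the Myers (1999) bit-parallel edit-distance scheme:
each bit of Pv/Mv marks whether the DP value increases/decreases by 1 down its row,
Eq is the match mask of the current reference base, and `carry` propagates the
+1/0/-1 horizontal delta across the 32-bit words of the pattern. `score` tracks the
last DP row, so the per-column minimum is the best score found for this candidate. */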
for(idColumn = 0; idColumn < sizeCandidate; idColumn++){
carry = 0;
aline = (positionRef % BASES_PER_ENTRY);
if((aline == 0) || (idColumn == 0)) {
candidate = d_reference[entryRef + word] >> (aline * NUM_BITS);
word++;
}
indexBase = candidate & 0x07;
for(idEntry = 0; idEntry < numEntriesPerQuery; idEntry++){
Pv = tmpPv[idEntry];
Mv = tmpMv[idEntry];
Eq = d_queries[initEntry + idEntry].bitmap[indexBase];
mask = (idEntry + 1 == numEntriesPerQuery) ? finalMask : HIGH_MASK_32;
Xv = Eq | Mv;
Eq |= (carry >> 1) & 1;
Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq;
Ph = Mv | ~(Xh | Pv);
Mh = Pv & Xh;
nextCarry = ((Ph & mask) != 0) - ((Mh & mask) != 0);
Ph <<= 1;
Mh <<= 1;
Mh |= (carry >> 1) & 1;
Ph |= (carry + 1) >> 1;
carry = nextCarry;
tmpPv[idEntry] = Mh | ~(Xv | Ph);
tmpMv[idEntry] = Ph & Xv;
}
candidate >>= 4;
positionRef++;
score += carry;
if(score < minScore){
minScore = score;
minColumn = idColumn;
}
}
d_results[idCandidate].column = minColumn;
d_results[idCandidate].score = minScore;
}
}
}
extern "C"
void computeAllQueriesGPU(void *reference, void *queries, void *results)
{
ref_t *ref = (ref_t *) reference;
qry_t *qry = (qry_t *) queries;
res_t *res = (res_t *) results;
uint32_t blocks, threads = MAX_THREADS_PER_SM;
uint32_t sizeCandidate = qry->sizeQueries * (1 + 2 * qry->distance);
uint32_t numEntriesPerQuery = (qry->sizeQueries / SIZE_HW_WORD) + ((qry->sizeQueries % SIZE_HW_WORD) ? 1 : 0);
uint32_t maxCandidates, numCandidates, lastCandidates, processedCandidates;
uint32_t numLaunches, kernelIdx, maxThreads;
/////////LAUNCH GPU KERNELS:
//LAUNCH KERNELS FOR KEPLERs GPUs
if(DEVICE == 0){
blocks = (qry->numCandidates / MAX_THREADS_PER_SM) + ((qry->numCandidates % MAX_THREADS_PER_SM) ? 1 : 0);
printf("KEPLER: LAUNCH KERNEL 0 -- Bloques: %d - Th_block %d - Th_sm %d\n", blocks, threads, MAX_THREADS_PER_SM);
hipLaunchKernelGGL(( myersKernel), dim3(blocks),dim3(threads), 0, 0, qry->d_queries, ref->d_reference, qry->d_candidates, res->d_results,
sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, qry->numCandidates);
hipDeviceSynchronize();
}
//LAUNCH KERNELS FOR FERMIs GPUs
if(DEVICE == 1){
maxThreads = threads * 65535;
numLaunches = (qry->numCandidates / maxThreads) + ((qry->numCandidates % maxThreads) ? 1 : 0);
lastCandidates = qry->numCandidates;
processedCandidates = 0;
for(kernelIdx=0; kernelIdx<numLaunches; kernelIdx++){
maxCandidates = maxThreads;
numCandidates = MIN(lastCandidates, maxCandidates);
blocks = (numCandidates / MAX_THREADS_PER_SM) + ((numCandidates % MAX_THREADS_PER_SM) ? 1 : 0);
printf("FERMI: LAUNCH KERNEL %d -- Bloques: %d - Th_block %d - Th_sm %d\n", kernelIdx, blocks, threads, MAX_THREADS_PER_SM);
hipLaunchKernelGGL(( myersKernel), dim3(blocks),dim3(threads), 0, 0, qry->d_queries, ref->d_reference, qry->d_candidates + processedCandidates, res->d_results + processedCandidates,
sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, numCandidates);
hipDeviceSynchronize();
lastCandidates -= numCandidates;
processedCandidates += numCandidates;
}
}
}
extern "C"
int transferCPUtoGPU(void *reference, void *queries, void *results)
{
ref_t *ref = (ref_t *) reference;
qry_t *qry = (qry_t *) queries;
res_t *res = (res_t *) results;
HANDLE_ERROR(hipSetDevice(DEVICE));
//allocate & transfer Binary Reference to GPU
HANDLE_ERROR(hipMalloc((void**) &ref->d_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t)));
HANDLE_ERROR(hipMemcpy(ref->d_reference, ref->h_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t), hipMemcpyHostToDevice));
//allocate & transfer Binary Queries to GPU
HANDLE_ERROR(hipMalloc((void**) &qry->d_queries, qry->totalQueriesEntries * sizeof(qryEntry_t)));
HANDLE_ERROR(hipMemcpy(qry->d_queries, qry->h_queries, qry->totalQueriesEntries * sizeof(qryEntry_t), hipMemcpyHostToDevice));
//allocate & transfer Candidates to GPU
HANDLE_ERROR(hipMalloc((void**) &qry->d_candidates, qry->numCandidates * sizeof(candInfo_t)));
HANDLE_ERROR(hipMemcpy(qry->d_candidates, qry->h_candidates, qry->numCandidates * sizeof(candInfo_t), hipMemcpyHostToDevice));
//allocate Results
HANDLE_ERROR(hipMalloc((void**) &res->d_results, res->numResults * sizeof(resEntry_t)));
HANDLE_ERROR(hipMemset(res->d_results, 0, res->numResults * sizeof(resEntry_t)));
return (0);
}
extern "C"
int transferGPUtoCPU(void *results)
{
res_t *res = (res_t *) results;
HANDLE_ERROR(hipMemcpy(res->h_results, res->d_results, res->numResults * sizeof(resEntry_t), hipMemcpyDeviceToHost));
return (0);
}
extern "C"
int freeReferenceGPU(void *reference)
{
ref_t *ref = (ref_t *) reference;
if(ref->d_reference != NULL){
hipFree(ref->d_reference);
ref->d_reference=NULL;
}
return(0);
}
extern "C"
int freeQueriesGPU(void *queries)
{
qry_t *qry = (qry_t *) queries;
if(qry->d_queries != NULL){
hipFree(qry->d_queries);
qry->d_queries=NULL;
}
if(qry->d_candidates != NULL){
hipFree(qry->d_candidates);
qry->d_candidates = NULL;
}
return(0);
}
extern "C"
int freeResultsGPU(void *results)
{
res_t *res = (res_t *) results;
if(res->d_results != NULL){
hipFree(res->d_results);
res->d_results=NULL;
}
return(0);
}
| 2099c901891b0be6d51b5cb3a7611fbad418a505.cu | #include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define NUM_BITS 4
#define NUM_BASES 5
#define SIZE_HW_WORD 32
#define MAX_VALUE 0xFFFFFFFF
#define HIGH_MASK_32 0x80000000
#define LOW_MASK_32 0x00000001
#define BASES_PER_ENTRY 8
#define HANDLE_ERROR(error) (HandleError(error, __FILE__, __LINE__ ))
#ifndef MIN
#define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b))
#endif
typedef struct {
uint32_t bitmap[NUM_BASES];
} qryEntry_t;
typedef struct {
uint32_t column;
uint32_t score;
} resEntry_t;
typedef struct {
uint32_t query;
uint32_t position;
} candInfo_t;
typedef struct {
uint32_t size;
uint32_t numEntries;
uint32_t *h_reference;
uint32_t *d_reference;
} ref_t;
typedef struct {
uint32_t numResults;
resEntry_t* h_results;
resEntry_t* d_results;
} res_t;
typedef struct {
uint32_t totalSizeQueries;
uint32_t totalQueriesEntries;
uint32_t sizeQueries;
uint32_t numQueries;
uint32_t numCandidates;
float distance;
qryEntry_t *h_queries;
qryEntry_t *d_queries;
candInfo_t *h_candidates;
candInfo_t *d_candidates;
uint32_t *d_Pv;
uint32_t *d_Mv;
} qry_t;
extern "C"
static void HandleError( cudaError_t err, const char *file, int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString(err), file, line );
exit( EXIT_FAILURE );
}
}
__global__ void myersKernel(const qryEntry_t *d_queries, const uint32_t __restrict *d_reference, const candInfo_t *d_candidates, resEntry_t *d_results,
const uint32_t sizeCandidate, const uint32_t sizeQueries, const uint32_t sizeRef, const uint32_t numEntriesPerQuery,
const uint32_t numCandidates)
{
__shared__
uint32_t s_Pv[CUDA_NUM_THREADS * N_ENTRIES], s_Mv[CUDA_NUM_THREADS * N_ENTRIES];
uint32_t *tmpPv, *tmpMv;
uint32_t Ph, Mh, Pv, Mv, Xv, Xh, Eq;
uint32_t initEntry, idEntry, idColumn, indexBase, aline, mask;
int8_t carry, nextCarry;
uint32_t candidate;
uint32_t idCandidate = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x;
if ((threadIdx.x < MAX_THREADS_PER_SM) && (idCandidate < numCandidates)){
uint32_t positionRef = d_candidates[idCandidate].position;
uint32_t entryRef = positionRef / BASES_PER_ENTRY;
int32_t score = sizeQueries, minScore = sizeQueries;
uint32_t minColumn = 0;
uint32_t finalMask = ((sizeQueries % SIZE_HW_WORD) == 0) ? HIGH_MASK_32 : 1 << ((sizeQueries % SIZE_HW_WORD) - 1);
uint32_t word = 0;
if((positionRef < sizeRef) && (sizeRef - positionRef) > sizeCandidate){
tmpPv = s_Pv + (threadIdx.x * numEntriesPerQuery);
tmpMv = s_Mv + (threadIdx.x * numEntriesPerQuery);
initEntry = d_candidates[idCandidate].query * numEntriesPerQuery;
for(idEntry = 0; idEntry < numEntriesPerQuery; idEntry++){
tmpPv[idEntry] = MAX_VALUE;
tmpMv[idEntry] = 0;
}
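/* The column loop below follows the Myers (1999) bit-parallel edit-distance scheme:
each bit of Pv/Mv marks whether the DP value increases/decreases by 1 down its row,
Eq is the match mask of the current reference base, and `carry` propagates the
+1/0/-1 horizontal delta across the 32-bit words of the pattern. `score` tracks the
last DP row, so the per-column minimum is the best score found for this candidate. */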
for(idColumn = 0; idColumn < sizeCandidate; idColumn++){
carry = 0;
aline = (positionRef % BASES_PER_ENTRY);
if((aline == 0) || (idColumn == 0)) {
candidate = d_reference[entryRef + word] >> (aline * NUM_BITS);
word++;
}
indexBase = candidate & 0x07;
for(idEntry = 0; idEntry < numEntriesPerQuery; idEntry++){
Pv = tmpPv[idEntry];
Mv = tmpMv[idEntry];
Eq = d_queries[initEntry + idEntry].bitmap[indexBase];
mask = (idEntry + 1 == numEntriesPerQuery) ? finalMask : HIGH_MASK_32;
Xv = Eq | Mv;
Eq |= (carry >> 1) & 1;
Xh = (((Eq & Pv) + Pv) ^ Pv) | Eq;
Ph = Mv | ~(Xh | Pv);
Mh = Pv & Xh;
nextCarry = ((Ph & mask) != 0) - ((Mh & mask) != 0);
Ph <<= 1;
Mh <<= 1;
Mh |= (carry >> 1) & 1;
Ph |= (carry + 1) >> 1;
carry = nextCarry;
tmpPv[idEntry] = Mh | ~(Xv | Ph);
tmpMv[idEntry] = Ph & Xv;
}
candidate >>= 4;
positionRef++;
score += carry;
if(score < minScore){
minScore = score;
minColumn = idColumn;
}
}
d_results[idCandidate].column = minColumn;
d_results[idCandidate].score = minScore;
}
}
}
extern "C"
void computeAllQueriesGPU(void *reference, void *queries, void *results)
{
ref_t *ref = (ref_t *) reference;
qry_t *qry = (qry_t *) queries;
res_t *res = (res_t *) results;
uint32_t blocks, threads = MAX_THREADS_PER_SM;
uint32_t sizeCandidate = qry->sizeQueries * (1 + 2 * qry->distance);
uint32_t numEntriesPerQuery = (qry->sizeQueries / SIZE_HW_WORD) + ((qry->sizeQueries % SIZE_HW_WORD) ? 1 : 0);
uint32_t maxCandidates, numCandidates, lastCandidates, processedCandidates;
uint32_t numLaunches, kernelIdx, maxThreads;
/////////LAUNCH GPU KERNELS:
//LAUNCH KERNELS FOR KEPLERs GPUs
if(DEVICE == 0){
blocks = (qry->numCandidates / MAX_THREADS_PER_SM) + ((qry->numCandidates % MAX_THREADS_PER_SM) ? 1 : 0);
printf("KEPLER: LAUNCH KERNEL 0 -- Bloques: %d - Th_block %d - Th_sm %d\n", blocks, threads, MAX_THREADS_PER_SM);
myersKernel<<<blocks,threads>>>(qry->d_queries, ref->d_reference, qry->d_candidates, res->d_results,
sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, qry->numCandidates);
cudaThreadSynchronize();
}
//LAUNCH KERNELS FOR FERMIs GPUs
if(DEVICE == 1){
maxThreads = threads * 65535;
numLaunches = (qry->numCandidates / maxThreads) + ((qry->numCandidates % maxThreads) ? 1 : 0);
lastCandidates = qry->numCandidates;
processedCandidates = 0;
for(kernelIdx=0; kernelIdx<numLaunches; kernelIdx++){
maxCandidates = maxThreads;
numCandidates = MIN(lastCandidates, maxCandidates);
blocks = (numCandidates / MAX_THREADS_PER_SM) + ((numCandidates % MAX_THREADS_PER_SM) ? 1 : 0);
printf("FERMI: LAUNCH KERNEL %d -- Bloques: %d - Th_block %d - Th_sm %d\n", kernelIdx, blocks, threads, MAX_THREADS_PER_SM);
myersKernel<<<blocks,threads>>>(qry->d_queries, ref->d_reference, qry->d_candidates + processedCandidates, res->d_results + processedCandidates,
sizeCandidate, qry->sizeQueries, ref->size, numEntriesPerQuery, numCandidates);
cudaThreadSynchronize();
lastCandidates -= numCandidates;
processedCandidates += numCandidates;
}
}
}
extern "C"
int transferCPUtoGPU(void *reference, void *queries, void *results)
{
ref_t *ref = (ref_t *) reference;
qry_t *qry = (qry_t *) queries;
res_t *res = (res_t *) results;
HANDLE_ERROR(cudaSetDevice(DEVICE));
//allocate & transfer Binary Reference to GPU
HANDLE_ERROR(cudaMalloc((void**) &ref->d_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t)));
HANDLE_ERROR(cudaMemcpy(ref->d_reference, ref->h_reference, ((uint64_t) ref->numEntries) * sizeof(uint32_t), cudaMemcpyHostToDevice));
//allocate & transfer Binary Queries to GPU
HANDLE_ERROR(cudaMalloc((void**) &qry->d_queries, qry->totalQueriesEntries * sizeof(qryEntry_t)));
HANDLE_ERROR(cudaMemcpy(qry->d_queries, qry->h_queries, qry->totalQueriesEntries * sizeof(qryEntry_t), cudaMemcpyHostToDevice));
//allocate & transfer Candidates to GPU
HANDLE_ERROR(cudaMalloc((void**) &qry->d_candidates, qry->numCandidates * sizeof(candInfo_t)));
HANDLE_ERROR(cudaMemcpy(qry->d_candidates, qry->h_candidates, qry->numCandidates * sizeof(candInfo_t), cudaMemcpyHostToDevice));
//allocate Results
HANDLE_ERROR(cudaMalloc((void**) &res->d_results, res->numResults * sizeof(resEntry_t)));
HANDLE_ERROR(cudaMemset(res->d_results, 0, res->numResults * sizeof(resEntry_t)));
return (0);
}
extern "C"
int transferGPUtoCPU(void *results)
{
res_t *res = (res_t *) results;
HANDLE_ERROR(cudaMemcpy(res->h_results, res->d_results, res->numResults * sizeof(resEntry_t), cudaMemcpyDeviceToHost));
return (0);
}
extern "C"
int freeReferenceGPU(void *reference)
{
ref_t *ref = (ref_t *) reference;
if(ref->d_reference != NULL){
cudaFree(ref->d_reference);
ref->d_reference=NULL;
}
return(0);
}
extern "C"
int freeQueriesGPU(void *queries)
{
qry_t *qry = (qry_t *) queries;
if(qry->d_queries != NULL){
cudaFree(qry->d_queries);
qry->d_queries=NULL;
}
if(qry->d_candidates != NULL){
cudaFree(qry->d_candidates);
qry->d_candidates = NULL;
}
return(0);
}
extern "C"
int freeResultsGPU(void *results)
{
res_t *res = (res_t *) results;
if(res->d_results != NULL){
cudaFree(res->d_results);
res->d_results=NULL;
}
return(0);
}
|
335d6666f8b2c17f4264ee3b1a13adf1c3f848c9.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zaicc_chow_csr_s.cu normal z -> c, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#include "../include/magmasparse_c.h"
#include "../../include/magma.h"
// includes CUDA
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include <cusparse_v2.h>
#include "sm_32_intrinsics.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 64
#define PRECISION_c
// every row is handled by one threadblock
__global__ void
magma_caic_csr_s_kernel( magma_int_t num_rows,
magma_int_t nnz,
const magmaFloatComplex * __restrict__ A_val,
magmaFloatComplex *val,
magma_index_t *rowptr,
magma_index_t *rowidx,
magma_index_t *colidx,
magmaFloatComplex *A2 ){
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x);// % nnz;
magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex s, sp;
int il, iu, jl, ju;
if (k < nnz)
{
i = rowidx[k];
j = colidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1])
{
sp = zero;
jl = colidx[il];
ju = colidx[iu];
// avoid branching
sp = ( jl == ju ) ? val[il] * val[iu] : sp;
s = ( jl == ju ) ? s-sp : s;
il = ( jl <= ju ) ? il+1 : il;
iu = ( jl >= ju ) ? iu+1 : iu;
/*
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else
{
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
*/
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
// modify entry
if (i == j)
A2[k] = MAGMA_C_MAKE(sqrt(abs(MAGMA_C_REAL(s))), 0.0);
else
A2[k] = s / val[rowptr[j+1]-1];
}
}// kernel
/**
Purpose
-------
This routine computes the IC approximation of a matrix iteratively.
The idea is according to Edmond Chow's presentation at SIAM 2014.
The input format of the matrix is Magma_CSRCOO.
Arguments
---------
@param
A magma_c_sparse_matrix
input matrix A - initial guess (lower triangular)
@param
A_CSR magma_c_sparse_matrix
input/output matrix containing the IC approximation
@ingroup magmasparse_csygpuk
********************************************************************/
extern "C" magma_int_t
magma_caic_csr_s( magma_c_sparse_matrix A,
magma_c_sparse_matrix A_CSR ){
int blocksize1 = 1;
int blocksize2 = 1;
int dimgrid1 = ( A.nnz + blocksize1 -1 ) / blocksize1;
int dimgrid2 = 1;
int dimgrid3 = 1;
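// Launch-shape note: with blocksize1 = blocksize2 = 1 the grid has one single-thread
// block per nonzero, so each thread k of the kernel above updates exactly one entry
// of the CSR-COO matrix.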
magma_c_vector A2;
// init DEV vectors
magma_c_vinit( &A2, Magma_DEV, A.nnz, MAGMA_C_ONE );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
hipLaunchKernelGGL(( magma_caic_csr_s_kernel), dim3(grid), dim3(block), 0, magma_stream ,
A.num_rows, A.nnz, A.val, A_CSR.val, A_CSR.row,
A_CSR.rowidx, A_CSR.col, A2.val );
magma_ccopy( A.nnz, A2.val, 1, A_CSR.val, 1 ); // rr = b
magma_c_vfree(&A2);
return MAGMA_SUCCESS;
}
| 335d6666f8b2c17f4264ee3b1a13adf1c3f848c9.cu | /*
-- MAGMA (version 1.5.0-beta3) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date July 2014
@generated from zaicc_chow_csr_s.cu normal z -> c, Fri Jul 18 17:34:28 2014
*/
#include "common_magma.h"
#include "../include/magmasparse_c.h"
#include "../../include/magma.h"
// includes CUDA
#include <cuda_runtime_api.h>
#include <cublas.h>
#include <cusparse_v2.h>
#include "sm_32_intrinsics.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 64
#define PRECISION_c
// every row is handled by one threadblock
__global__ void
magma_caic_csr_s_kernel( magma_int_t num_rows,
magma_int_t nnz,
const magmaFloatComplex * __restrict__ A_val,
magmaFloatComplex *val,
magma_index_t *rowptr,
magma_index_t *rowidx,
magma_index_t *colidx,
magmaFloatComplex *A2 ){
int i, j;
int k = (blockDim.x * blockIdx.x + threadIdx.x);// % nnz;
magmaFloatComplex zero = MAGMA_C_MAKE(0.0, 0.0);
magmaFloatComplex s, sp;
int il, iu, jl, ju;
if (k < nnz)
{
i = rowidx[k];
j = colidx[k];
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
s = __ldg( A_val+k );
#else
s = A_val[k];
#endif
il = rowptr[i];
iu = rowptr[j];
while (il < rowptr[i+1] && iu < rowptr[j+1])
{
sp = zero;
jl = colidx[il];
ju = colidx[iu];
// avoid branching
sp = ( jl == ju ) ? val[il] * val[iu] : sp;
s = ( jl == ju ) ? s-sp : s;
il = ( jl <= ju ) ? il+1 : il;
iu = ( jl >= ju ) ? iu+1 : iu;
/*
if (jl < ju)
il++;
else if (ju < jl)
iu++;
else
{
// we are going to modify this u entry
sp = val[il] * val[iu];
s -= sp;
il++;
iu++;
}
*/
}
// undo the last operation (it must be the last)
s += sp;
__syncthreads();
// modify entry
if (i == j)
A2[k] = MAGMA_C_MAKE(sqrt(abs(MAGMA_C_REAL(s))), 0.0);
else
A2[k] = s / val[rowptr[j+1]-1];
}
}// kernel
/**
Purpose
-------
This routine computes the IC approximation of a matrix iteratively.
The idea is according to Edmond Chow's presentation at SIAM 2014.
The input format of the matrix is Magma_CSRCOO.
Arguments
---------
@param
A magma_c_sparse_matrix
input matrix A - initial guess (lower triangular)
@param
A_CSR magma_c_sparse_matrix
input/output matrix containing the IC approximation
@ingroup magmasparse_csygpuk
********************************************************************/
extern "C" magma_int_t
magma_caic_csr_s( magma_c_sparse_matrix A,
magma_c_sparse_matrix A_CSR ){
int blocksize1 = 1;
int blocksize2 = 1;
int dimgrid1 = ( A.nnz + blocksize1 -1 ) / blocksize1;
int dimgrid2 = 1;
int dimgrid3 = 1;
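// Launch-shape note: with blocksize1 = blocksize2 = 1 the grid has one single-thread
// block per nonzero, so each thread k of the kernel above updates exactly one entry
// of the CSR-COO matrix.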
magma_c_vector A2;
// init DEV vectors
magma_c_vinit( &A2, Magma_DEV, A.nnz, MAGMA_C_ONE );
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
magma_caic_csr_s_kernel<<< grid, block, 0, magma_stream >>>
( A.num_rows, A.nnz, A.val, A_CSR.val, A_CSR.row,
A_CSR.rowidx, A_CSR.col, A2.val );
magma_ccopy( A.nnz, A2.val, 1, A_CSR.val, 1 ); // rr = b
magma_c_vfree(&A2);
return MAGMA_SUCCESS;
}
|
9991e11c96cec03e8981a73627bd0ee6de57c333.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
static void HandleError(hipError_t err,
const char *file,
int line) {
if (err != hipSuccess) {
printf("%s in %s at line %d\n",
hipGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
int getThreadNum() {
hipDeviceProp_t prop;
int count;
HANDLE_ERROR(hipGetDeviceCount(&count));
printf("gpu num %d\n", count);
HANDLE_ERROR(hipGetDeviceProperties(&prop, 0));
printf("max thread num: %d\n", prop.maxThreadsPerBlock);
printf("max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv(float *img, float *kernel, float *result, int width, int height, int kernelSize) {
int ti = threadIdx.x;
int bi = blockIdx.x;
int id = ti + bi * blockDim.x;
int row = id / width;
int col = id % width;
if (id >= width * height) {
return;
}
for (int i = 0; i < kernelSize; ++i) {
for (int j = 0; j < kernelSize; ++j) {
float imgValue = 0;
int curRow = row - kernelSize / 2 + i;
int curCol = col - kernelSize / 2 + j;
if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
;
}
else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[i + kernelSize * j] * imgValue;
}
}
}
int main(int argc, char* argv[]) {
int width = 1920;
int height = 1080;
// int threadNum = getThreadNum();
float *img = new float[width * height];
float *result = new float[width * height];
for (int row = 0; row < height; ++row) {
for (int col=0; col < width; ++col) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 3;
float *kernel = new float[kernelSize * kernelSize];
for (int i = 0; i < kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float *imgGpu;
float *kernelGpu;
float *resultGpu;
HANDLE_ERROR(hipMalloc((void**)&imgGpu, width * height * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&kernelGpu, kernelSize * kernelSize * sizeof(float)));
HANDLE_ERROR(hipMalloc((void**)&resultGpu, width * height * sizeof(float)));
HANDLE_ERROR(hipMemcpy(imgGpu, img,
width * height * sizeof(float),
hipMemcpyHostToDevice));
HANDLE_ERROR(hipMemcpy(kernelGpu, kernel,
kernelSize * kernelSize * sizeof(float),
hipMemcpyHostToDevice));
int threadNum = getThreadNum();
printf("threadNum is: %d\n", threadNum);
int blockNum = (width * height - 0.5) / threadNum + 1;
hipLaunchKernelGGL(( conv), dim3(blockNum), dim3(threadNum), 0, 0, imgGpu, kernelGpu, resultGpu, width, height, kernelSize);
HANDLE_ERROR(hipMemcpy(result, resultGpu,
width * height * sizeof(float),
hipMemcpyDeviceToHost));
// visualization
printf("img\n");
for (int row = 0; row < 10; ++row) {
for (int col = 0; col < 10; ++col) {
printf("%2.0f ", img[col + row * width]);
}
printf("\n");
}
printf("kernel\n");
for (int row = 0; row < kernelSize; ++row) {
for (int col = 0; col < kernelSize; ++col) {
printf("%2.0f ", kernel[col + row * kernelSize]);
}
printf("\n");
}
printf("conv result\n");
for (int row = 0; row < 10; ++row) {
for (int col = 0; col < 10; ++col) {
printf("%2.0f ", result[col + row * width]);
}
printf("\n");
}
return 0;
}
| 9991e11c96cec03e8981a73627bd0ee6de57c333.cu | #include <stdio.h>
static void HandleError(cudaError_t err,
const char *file,
int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n",
cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))
int getThreadNum() {
cudaDeviceProp prop;
int count;
HANDLE_ERROR(cudaGetDeviceCount(&count));
printf("gpu num %d\n", count);
HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
printf("max thread num: %d\n", prop.maxThreadsPerBlock);
printf("max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
return prop.maxThreadsPerBlock;
}
__global__ void conv(float *img, float *kernel, float *result, int width, int height, int kernelSize) {
int ti = threadIdx.x;
int bi = blockIdx.x;
int id = ti + bi * blockDim.x;
int row = id / width;
int col = id % width;
if (id >= width * height) {
return;
}
for (int i = 0; i < kernelSize; ++i) {
for (int j = 0; j < kernelSize; ++j) {
float imgValue = 0;
int curRow = row - kernelSize / 2 + i;
int curCol = col - kernelSize / 2 + j;
if (curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
;
}
else {
imgValue = img[curRow * width + curCol];
}
result[id] += kernel[i + kernelSize * j] * imgValue;
}
}
}
int main(int argc, char* argv[]) {
int width = 1920;
int height = 1080;
// int threadNum = getThreadNum();
float *img = new float[width * height];
float *result = new float[width * height];
for (int row = 0; row < height; ++row) {
for (int col=0; col < width; ++col) {
img[col + row * width] = (col + row) % 256;
}
}
int kernelSize = 3;
float *kernel = new float[kernelSize * kernelSize];
for (int i = 0; i < kernelSize * kernelSize; ++i) {
kernel[i] = i % kernelSize - 1;
}
float *imgGpu;
float *kernelGpu;
float *resultGpu;
HANDLE_ERROR(cudaMalloc((void**)&imgGpu, width * height * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&kernelGpu, kernelSize * kernelSize * sizeof(float)));
HANDLE_ERROR(cudaMalloc((void**)&resultGpu, width * height * sizeof(float)));
HANDLE_ERROR(cudaMemcpy(imgGpu, img,
width * height * sizeof(float),
cudaMemcpyHostToDevice));
HANDLE_ERROR(cudaMemcpy(kernelGpu, kernel,
kernelSize * kernelSize * sizeof(float),
cudaMemcpyHostToDevice));
int threadNum = getThreadNum();
printf("threadNum is: %d\n", threadNum);
int blockNum = (width * height - 0.5) / threadNum + 1;
conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height, kernelSize);
HANDLE_ERROR(cudaMemcpy(result, resultGpu,
width * height * sizeof(float),
cudaMemcpyDeviceToHost));
// visualization
printf("img\n");
for (int row = 0; row < 10; ++row) {
for (int col = 0; col < 10; ++col) {
printf("%2.0f ", img[col + row * width]);
}
printf("\n");
}
printf("kernel\n");
for (int row = 0; row < kernelSize; ++row) {
for (int col = 0; col < kernelSize; ++col) {
printf("%2.0f ", kernel[col + row * kernelSize]);
}
printf("\n");
}
printf("conv result\n");
for (int row = 0; row < 10; ++row) {
for (int col = 0; col < 10; ++col) {
printf("%2.0f ", result[col + row * width]);
}
printf("\n");
}
return 0;
}
|
d66f9c5ee6f6f1c54ed77221719335b924046ee6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include "dot.h"
__global__ void kernel(unsigned int rows, unsigned int cols , float* ddata,float* vdata ,float *results){
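// Each thread computes one output element: the dot product of row `tid` of the
// column-major rows x cols matrix ddata with the vector vdata. There is no bounds
// check on tid, so the launch is assumed to use exactly `rows` threads in total.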
int i;
float dp =0;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for(i =0; i<cols ;i++ )
{
dp+= ddata[i*rows+tid]*vdata[i];
}
results[tid] = dp;
}
| d66f9c5ee6f6f1c54ed77221719335b924046ee6.cu | #include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include "dot.h"
__global__ void kernel(unsigned int rows, unsigned int cols , float* ddata,float* vdata ,float *results){
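// Each thread computes one output element: the dot product of row `tid` of the
// column-major rows x cols matrix ddata with the vector vdata. There is no bounds
// check on tid, so the launch is assumed to use exactly `rows` threads in total.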
int i;
float dp =0;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
for(i =0; i<cols ;i++ )
{
dp+= ddata[i*rows+tid]*vdata[i];
}
results[tid] = dp;
}
|
4d0595d9fba220acf83fb85b8a48bd4db2b5f1b5.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Simplified simulation of high-energy particle bombardment
*
* Parallel Computing (Computer Science Degree)
* 2017/2018
*
* (c) 2018 Arturo Gonzalez Escribano
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cputils.h>
#define PI 3.14159f
#define UMBRAL 0.001f
/* Structure to store the data of a particle storm */
typedef struct {
int size;
int *posval;
} Storm;
/* THIS FUNCTION MAY BE MODIFIED */
/* Function to update one position of the layer */
void actualiza( float *layer, int k, int pos, float energia ) {
/* 1. Compute the absolute value of the distance between the
impact point and point k of the layer */
int distancia = pos - k;
if ( distancia < 0 ) distancia = - distancia;
/* 2. The impact point has distance 1 */
distancia = distancia + 1;
/* 3. Square root of the distance */
//float atenuacion = (float)distancia*distancia;
//float atenuacion = (float)distancia / PI;
float atenuacion = sqrtf( (float)distancia );
/* 4. Compute the attenuated energy */
float energia_k = energia / atenuacion;
/* 5. Do not add it if the absolute value is below the threshold */
if ( energia_k >= UMBRAL || energia_k <= -UMBRAL )
layer[k] = layer[k] + energia_k;
}
/* AUXILIARY FUNCTIONS: not used inside the timed section, leave them as they are */
/* DEBUG function: print the state of the layer */
void debug_print(int layer_size, float *layer, int *posiciones, float *maximos, int num_storms ) {
int i,k;
if ( layer_size <= 35 ) {
/* Traverse the layer */
for( k=0; k<layer_size; k++ ) {
/* Write the value of the point */
printf("%10.4f |", layer[k] );
/* Compute the number of characters, normalized with the maximum at 60 */
int ticks = (int)( 60 * layer[k] / maximos[num_storms-1] );
/* Write all the characters except the last one */
for (i=0; i<ticks-1; i++ ) printf("o");
/* For local maxima write a special last character */
if ( k>0 && k<layer_size-1 && layer[k] > layer[k-1] && layer[k] > layer[k+1] )
printf("x");
else
printf("o");
/* If the point is one of the special maxima, add a mark */
for (i=0; i<num_storms; i++)
if ( posiciones[i] == k ) printf(" M%d", i );
/* End of line */
printf("\n");
}
}
}
/*
* Function: Read a file with particle storm data
*/
Storm read_storm_file( char *fname ) {
FILE *fstorm = cp_abrir_fichero( fname );
if ( fstorm == NULL ) {
fprintf(stderr,"Error: Opening storm file %s\n", fname );
exit( EXIT_FAILURE );
}
Storm storm;
int ok = fscanf(fstorm, "%d", &(storm.size) );
if ( ok != 1 ) {
fprintf(stderr,"Error: Reading size of storm file %s\n", fname );
exit( EXIT_FAILURE );
}
storm.posval = (int *)malloc( sizeof(int) * storm.size * 2 );
if ( storm.posval == NULL ) {
fprintf(stderr,"Error: Allocating memory for storm file %s, with size %d\n", fname, storm.size );
exit( EXIT_FAILURE );
}
int elem;
for ( elem=0; elem<storm.size; elem++ ) {
ok = fscanf(fstorm, "%d %d\n",
&(storm.posval[elem*2]),
&(storm.posval[elem*2+1]) );
if ( ok != 2 ) {
fprintf(stderr,"Error: Reading element %d in storm file %s\n", elem, fname );
exit( EXIT_FAILURE );
}
}
fclose( fstorm );
return storm;
}
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,k;
/* 1.1. Read arguments */
if (argc<3) {
fprintf(stderr,"Usage: %s <size> <storm_1_file> [ <storm_i_file> ] ... \n", argv[0] );
exit( EXIT_FAILURE );
}
int layer_size = atoi( argv[1] );
int num_storms = argc-2;
Storm storms[ num_storms ];
/* 1.2. Read the storm data */
for( i=2; i<argc; i++ )
storms[i-2] = read_storm_file( argv[i] );
/* 1.3. Initialize maxima to zero */
float maximos[ num_storms ];
int posiciones[ num_storms ];
for (i=0; i<num_storms; i++) {
maximos[i] = 0.0f;
posiciones[i] = 0;
}
/* 2. Start the time measurement */
hipSetDevice(0);
hipDeviceSynchronize();
double ttotal = cp_Wtime();
/* START: do not optimize/parallelize main above this point */
/* 3. Allocate memory for the layers and initialize to zero */
float *layer = (float *)malloc( sizeof(float) * layer_size );
float *layer_copy = (float *)malloc( sizeof(float) * layer_size );
if ( layer == NULL || layer_copy == NULL ) {
fprintf(stderr,"Error: Allocating the layer memory\n");
exit( EXIT_FAILURE );
}
for( k=0; k<layer_size; k++ ) layer[k] = 0.0f;
for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f;
/* 4. Bombardment phase */
for( i=0; i<num_storms; i++) {
printf("Valores de tormenta: %f %d\n",storms[i].posval[1],storms[i].posval[0]);
/* 4.1. Add the energy of the impacts */
/* For each particle */
for( j=0; j<storms[i].size; j++ ) {
/* Impact energy (in thousandths) */
float energia = (float)storms[i].posval[j*2+1] / 1000;
/* Impact position */
int posicion = storms[i].posval[j*2];
/* For each position of the layer */
for( k=0; k<layer_size; k++ ) {
/* Update the position */
actualiza( layer, k, posicion, energia );
}
}
/* 4.2. Relaxation between particle storms */
/* 4.2.1. Copy values to the auxiliary layer */
for( k=0; k<layer_size; k++ )
layer_copy[k] = layer[k];
/* 4.2.2. Update the layer, except the ends, using the values of the auxiliary array */
for( k=1; k<layer_size-1; k++ )
layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
/* 4.3. Locate the maximum */
for( k=1; k<layer_size-1; k++ ) {
/* Check only local maxima */
if ( layer[k] > layer[k-1] && layer[k] > layer[k+1] ) {
if ( layer[k] > maximos[i] ) {
maximos[i] = layer[k];
posiciones[i] = k;
}
}
}
}
/* END: do not optimize/parallelize below this point */
/* 6. End of the time measurement */
hipDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 7. DEBUG: Draw the result (only for layers with up to 35 points) */
#ifdef DEBUG
debug_print( layer_size, layer, posiciones, maximos, num_storms );
#endif
/* 8. Output of results for the leaderboard */
printf("\n");
/* 8.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 8.2. Write the maxima */
printf("Result:");
for (i=0; i<num_storms; i++)
printf(" %d %f", posiciones[i], maximos[i] );
printf("\n");
/* 9. Free resources */
for( i=0; i<argc-2; i++ )
free( storms[i].posval );
/* 10. Successful termination */
return 0;
}
| 4d0595d9fba220acf83fb85b8a48bd4db2b5f1b5.cu | /*
* Simplified simulation of high-energy particle bombardment
*
* Parallel Computing (Computer Science Degree)
* 2017/2018
*
* (c) 2018 Arturo Gonzalez Escribano
*/
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cputils.h>
#define PI 3.14159f
#define UMBRAL 0.001f
/* Structure to store the data of a particle storm */
typedef struct {
int size;
int *posval;
} Storm;
/* THIS FUNCTION MAY BE MODIFIED */
/* Function to update one position of the layer */
void actualiza( float *layer, int k, int pos, float energia ) {
/* 1. Compute the absolute value of the distance between the
impact point and point k of the layer */
int distancia = pos - k;
if ( distancia < 0 ) distancia = - distancia;
/* 2. The impact point has distance 1 */
distancia = distancia + 1;
/* 3. Square root of the distance */
//float atenuacion = (float)distancia*distancia;
//float atenuacion = (float)distancia / PI;
float atenuacion = sqrtf( (float)distancia );
/* 4. Compute the attenuated energy */
float energia_k = energia / atenuacion;
/* 5. Do not add it if the absolute value is below the threshold */
if ( energia_k >= UMBRAL || energia_k <= -UMBRAL )
layer[k] = layer[k] + energia_k;
}
/* AUXILIARY FUNCTIONS: not used inside the timed section, leave them as they are */
/* DEBUG function: print the state of the layer */
void debug_print(int layer_size, float *layer, int *posiciones, float *maximos, int num_storms ) {
int i,k;
if ( layer_size <= 35 ) {
/* Traverse the layer */
for( k=0; k<layer_size; k++ ) {
/* Write the value of the point */
printf("%10.4f |", layer[k] );
/* Compute the number of characters, normalized with the maximum at 60 */
int ticks = (int)( 60 * layer[k] / maximos[num_storms-1] );
/* Write all the characters except the last one */
for (i=0; i<ticks-1; i++ ) printf("o");
/* For local maxima write a special last character */
if ( k>0 && k<layer_size-1 && layer[k] > layer[k-1] && layer[k] > layer[k+1] )
printf("x");
else
printf("o");
/* If the point is one of the special maxima, add a mark */
for (i=0; i<num_storms; i++)
if ( posiciones[i] == k ) printf(" M%d", i );
/* End of line */
printf("\n");
}
}
}
/*
* Function: Read a file with particle storm data
*/
Storm read_storm_file( char *fname ) {
FILE *fstorm = cp_abrir_fichero( fname );
if ( fstorm == NULL ) {
fprintf(stderr,"Error: Opening storm file %s\n", fname );
exit( EXIT_FAILURE );
}
Storm storm;
int ok = fscanf(fstorm, "%d", &(storm.size) );
if ( ok != 1 ) {
fprintf(stderr,"Error: Reading size of storm file %s\n", fname );
exit( EXIT_FAILURE );
}
storm.posval = (int *)malloc( sizeof(int) * storm.size * 2 );
if ( storm.posval == NULL ) {
fprintf(stderr,"Error: Allocating memory for storm file %s, with size %d\n", fname, storm.size );
exit( EXIT_FAILURE );
}
int elem;
for ( elem=0; elem<storm.size; elem++ ) {
ok = fscanf(fstorm, "%d %d\n",
&(storm.posval[elem*2]),
&(storm.posval[elem*2+1]) );
if ( ok != 2 ) {
fprintf(stderr,"Error: Reading element %d in storm file %s\n", elem, fname );
exit( EXIT_FAILURE );
}
}
fclose( fstorm );
return storm;
}
/*
* MAIN PROGRAM
*/
int main(int argc, char *argv[]) {
int i,j,k;
/* 1.1. Read arguments */
if (argc<3) {
fprintf(stderr,"Usage: %s <size> <storm_1_file> [ <storm_i_file> ] ... \n", argv[0] );
exit( EXIT_FAILURE );
}
int layer_size = atoi( argv[1] );
int num_storms = argc-2;
Storm storms[ num_storms ];
/* 1.2. Read the storm data */
for( i=2; i<argc; i++ )
storms[i-2] = read_storm_file( argv[i] );
/* 1.3. Initialize maxima to zero */
float maximos[ num_storms ];
int posiciones[ num_storms ];
for (i=0; i<num_storms; i++) {
maximos[i] = 0.0f;
posiciones[i] = 0;
}
/* 2. Start the time measurement */
cudaSetDevice(0);
cudaDeviceSynchronize();
double ttotal = cp_Wtime();
/* START: do not optimize/parallelize main above this point */
/* 3. Allocate memory for the layers and initialize to zero */
float *layer = (float *)malloc( sizeof(float) * layer_size );
float *layer_copy = (float *)malloc( sizeof(float) * layer_size );
if ( layer == NULL || layer_copy == NULL ) {
fprintf(stderr,"Error: Allocating the layer memory\n");
exit( EXIT_FAILURE );
}
for( k=0; k<layer_size; k++ ) layer[k] = 0.0f;
for( k=0; k<layer_size; k++ ) layer_copy[k] = 0.0f;
/* 4. Bombardment phase */
for( i=0; i<num_storms; i++) {
printf("Valores de tormenta: %f %d\n",storms[i].posval[1],storms[i].posval[0]);
/* 4.1. Add the energy of the impacts */
/* For each particle */
for( j=0; j<storms[i].size; j++ ) {
/* Impact energy (in thousandths) */
float energia = (float)storms[i].posval[j*2+1] / 1000;
/* Impact position */
int posicion = storms[i].posval[j*2];
/* For each position of the layer */
for( k=0; k<layer_size; k++ ) {
/* Update the position */
actualiza( layer, k, posicion, energia );
}
}
/* 4.2. Relaxation between particle storms */
/* 4.2.1. Copy values to the auxiliary layer */
for( k=0; k<layer_size; k++ )
layer_copy[k] = layer[k];
/* 4.2.2. Update the layer, except the ends, using the values of the auxiliary array */
for( k=1; k<layer_size-1; k++ )
layer[k] = ( layer_copy[k-1] + layer_copy[k] + layer_copy[k+1] ) / 3;
/* 4.3. Locate the maximum */
for( k=1; k<layer_size-1; k++ ) {
/* Check only local maxima */
if ( layer[k] > layer[k-1] && layer[k] > layer[k+1] ) {
if ( layer[k] > maximos[i] ) {
maximos[i] = layer[k];
posiciones[i] = k;
}
}
}
}
/* END: do not optimize/parallelize below this point */
/* 6. End of the time measurement */
cudaDeviceSynchronize();
ttotal = cp_Wtime() - ttotal;
/* 7. DEBUG: Draw the result (only for layers with up to 35 points) */
#ifdef DEBUG
debug_print( layer_size, layer, posiciones, maximos, num_storms );
#endif
/* 8. Output of results for the leaderboard */
printf("\n");
/* 8.1. Total computation time */
printf("Time: %lf\n", ttotal );
/* 8.2. Write the maxima */
printf("Result:");
for (i=0; i<num_storms; i++)
printf(" %d %f", posiciones[i], maximos[i] );
printf("\n");
/* 9. Free resources */
for( i=0; i<argc-2; i++ )
free( storms[i].posval );
/* 10. Successful termination */
return 0;
}
|
280d93a8c0ac85575d169a49597e3c212c6365a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "GradientAverageKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *D = NULL;
hipMalloc(&D, XSIZE*YSIZE);
float4 *TD = NULL;
hipMalloc(&TD, XSIZE*YSIZE);
unsigned int *NEIGHBOR = NULL;
hipMalloc(&NEIGHBOR, XSIZE*YSIZE);
unsigned int *NBOFFSETS = NULL;
hipMalloc(&NBOFFSETS, XSIZE*YSIZE);
unsigned int *nNeighbors = NULL;
hipMalloc(&nNeighbors, XSIZE*YSIZE);
unsigned int nVertices = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
GradientAverageKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, D,TD,NEIGHBOR,NBOFFSETS,nNeighbors,nVertices);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
GradientAverageKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, D,TD,NEIGHBOR,NBOFFSETS,nNeighbors,nVertices);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
GradientAverageKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, D,TD,NEIGHBOR,NBOFFSETS,nNeighbors,nVertices);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 280d93a8c0ac85575d169a49597e3c212c6365a2.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "GradientAverageKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *D = NULL;
cudaMalloc(&D, XSIZE*YSIZE);
float4 *TD = NULL;
cudaMalloc(&TD, XSIZE*YSIZE);
unsigned int *NEIGHBOR = NULL;
cudaMalloc(&NEIGHBOR, XSIZE*YSIZE);
unsigned int *NBOFFSETS = NULL;
cudaMalloc(&NBOFFSETS, XSIZE*YSIZE);
unsigned int *nNeighbors = NULL;
cudaMalloc(&nNeighbors, XSIZE*YSIZE);
unsigned int nVertices = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
GradientAverageKernel<<<gridBlock,threadBlock>>>(D,TD,NEIGHBOR,NBOFFSETS,nNeighbors,nVertices);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
GradientAverageKernel<<<gridBlock,threadBlock>>>(D,TD,NEIGHBOR,NBOFFSETS,nNeighbors,nVertices);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
GradientAverageKernel<<<gridBlock,threadBlock>>>(D,TD,NEIGHBOR,NBOFFSETS,nNeighbors,nVertices);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
855fbe9d34ce321de333738424c2e419a458557f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
namespace ann {
// CUDA2
}
__global__ void kernel_calc_gjL_2( int layer_id, int *l, int *s_ext, int *sw_ext, float *z_ext_arr, float *a_ext_arr, float *t_arr, float *gjl_ext, float *w_ext_arr ){
int idx = threadIdx.y + blockDim.y*blockIdx.y;
int h = blockDim.x;
int pidx = threadIdx.y;
int lidx = threadIdx.x;
extern __shared__ int sm[];
float *sm_g = (float*)&sm[0];
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count-1) return;
float sum = 0;
for (int k = lidx; k < neuron_count_next-1; k+=h) {
sum += w_ext_arr[sw_ext[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl_ext[s_ext[layer_id + 1] + k];
}
sm_g[pidx*h + lidx] = sum;
__syncthreads();
if(lidx == 0){
float z = z_ext_arr[s_ext[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv = expf(-z) / (tmp*tmp);
sum = 0;
for(int i = 0; i < h; i++)
sum += sm_g[pidx*h + i];
gjl_ext[s_ext[layer_id] + idx] = f_deriv*sum;
}
} | 855fbe9d34ce321de333738424c2e419a458557f.cu | #include "includes.h"
namespace ann {
// CUDA2
}
__global__ void kernel_calc_gjL_2( int layer_id, int *l, int *s_ext, int *sw_ext, float *z_ext_arr, float *a_ext_arr, float *t_arr, float *gjl_ext, float *w_ext_arr ){
int idx = threadIdx.y + blockDim.y*blockIdx.y;
int h = blockDim.x;
int pidx = threadIdx.y;
int lidx = threadIdx.x;
extern __shared__ int sm[];
float *sm_g = (float*)&sm[0];
int neuron_count = l[layer_id];
int neuron_count_next = l[layer_id+1];
if(idx >= neuron_count-1) return;
float sum = 0;
for (int k = lidx; k < neuron_count_next-1; k+=h) {
sum += w_ext_arr[sw_ext[layer_id] + idx*(l[layer_id + 1] - 1) + k] * gjl_ext[s_ext[layer_id + 1] + k];
}
sm_g[pidx*h + lidx] = sum;
__syncthreads();
if(lidx == 0){
float z = z_ext_arr[s_ext[layer_id] + idx];
float tmp = 1 + expf(-z);
float f_deriv = expf(-z) / (tmp*tmp);
sum = 0;
for(int i = 0; i < h; i++)
sum += sm_g[pidx*h + i];
gjl_ext[s_ext[layer_id] + idx] = f_deriv*sum;
}
} |
7f626d3d2d98dedfe2882a50eaaf142748b6aa7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/batch_norm_kernel.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/fluid/operators/norm_utils.cu.h"
#include "paddle/phi/kernels/funcs/norm_utils.h"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/layout_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/kernels/gpu/batch_norm_utils.h"
#ifdef __HIPCC__
#define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim)
#else
#define LAUNCH_BOUNDS(BlockDim)
#endif
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace phi {
template <typename T>
using CudnnDataType = paddle::platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy,
const T *x,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
const double epsilon,
const int N,
const int C,
const int HxW,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
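  // Grid-stride loop over channels: each block reduces dy and dy*(x - mean)
  // over the N*HxW elements of channel i, then writes
  // dscale[i] = sum(dy*(x - mean)) / sqrt(var + eps) and dbias[i] = sum(dy).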
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
template <typename T, phi::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon,
const int C,
const int HxW,
const int num,
T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
template <typename T>
static __global__ void KeBNRestoreData(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y,
int grid2,
const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x,
y,
phi::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
hipLaunchKernelGGL(( KeBNRestoreData), dim3(grid2), dim3(block), 0, stream,
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
  __shared__ typename BlockReduce::TempStorage variance_storage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, hipcub::Sum());
x_square_sum =
          BlockReduce(variance_storage).Reduce(x_square_sum, hipcub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, hipcub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
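    // Standard batch-norm input gradient:
    //   dx = scale * inv_std * (dy - mean(dy) - (x - mu) * inv_std^2 * mean(dy*(x - mu)))
    // with both means taken over the N*HxW elements of this channel.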
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C,
const int N,
const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef hipcub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, hipcub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, hipcub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
template <typename T, typename Context>
void BatchNormGradRawKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
bool is_inplace,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const auto *d_y = &y_grad;
auto *d_x = x_grad;
auto *d_scale = scale_grad;
auto *d_bias = bias_grad;
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
phi::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(),
x_dims));
int N, C, H, W, D;
phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
if (d_x) {
ctx.template Alloc<T>(d_x);
}
if (d_scale && d_bias) {
ctx.template Alloc<BatchNormParamType<T>>(d_scale);
ctx.template Alloc<BatchNormParamType<T>>(d_bias);
}
PADDLE_ENFORCE_EQ(
scale.dims().size(),
1UL,
phi::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale.dims().size(),
scale.dims()));
PADDLE_ENFORCE_EQ(
scale.dims()[0],
C,
phi::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C,
scale.dims()[0]));
auto dtype = paddle::platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format =
data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF &&
FLAGS_cudnn_batchnorm_spatial_persistent &&
(reserve_space.get_ptr() != nullptr);
auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
DenseTensor transformed_x(x.type());
DenseTensor transformed_d_y(d_y->type());
DenseTensor transformed_d_x;
if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW &&
x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x);
ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
if (d_x) {
ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x);
}
} else {
transformed_x.ShareDataWith(x);
transformed_d_y.ShareDataWith(*d_y);
if (d_x) {
transformed_d_x.ShareDataWith(*d_x);
}
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
const int num = transformed_x.numel();
#ifdef __HIPCC__
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
  const int max_blocks = std::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
  int grid2 = std::min(C, max_blocks);
auto stream = ctx.stream();
InplaceHelper<T> inplace_functor;
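  // Two paths follow: without use_global_stats the backward pass runs on the
  // saved batch statistics (cuDNN BatchNormalizationBackward[Ex] or the block
  // reduction kernels above); with use_global_stats the KeBNBackward* kernels
  // that read the running mean/variance are launched instead.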
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
if (d_x) {
paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
}
phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor;
functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(
&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
    epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_,
CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4,
dims.data(),
strides.data()));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_, mode_));
#endif
const auto *saved_mean_data =
saved_mean.template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_variance.template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format,
transformed_x.data<T>(),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
epsilon,
C,
H * W * D,
num,
transformed_x.data<T>(),
grid2,
block,
stream);
}
// This branch calls CUDNN APIs
if (d_x && d_scale && d_bias) {
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
DenseTensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_tensor.Resize({static_cast<int64_t>(workspace_size)});
workspace_ptr =
static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/ctx.template Alloc<T>(&transformed_d_x),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale.template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/ctx.template Alloc<BatchNormParamType<T>>(
d_scale),
/*dBnBiasData=*/ctx.template Alloc<BatchNormParamType<T>>(d_bias),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<uint8_t *>(
reserve_space->template data<uint8_t>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
hipLaunchKernelGGL(( BNBackward<T,
block,
DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
} else {
hipLaunchKernelGGL(( BNBackward<T,
block,
DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, ctx.stream(),
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackward(
ctx.cudnn_handle(),
mode_,
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
data_desc_,
transformed_x.template data<T>(),
data_desc_,
transformed_d_y.template data<T>(),
data_desc_,
ctx.template Alloc<T>(&transformed_d_x),
bn_param_desc_,
scale.template data<BatchNormParamType<T>>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias),
epsilon,
saved_mean_data,
saved_var_data));
#endif
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x);
}
} else {
      // This branch calls CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<
T,
block,
phi::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, ctx.stream(),
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( BNBackwardData<
T,
block,
phi::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, ctx.stream(),
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(
bn_param_desc_));
#endif
} else {
const auto *running_mean = mean.get_ptr();
const auto *running_var = variance.get_ptr();
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = x;
inplace_functor(data_layout,
ctx.template Alloc<T>(&px),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
running_mean_data,
running_var_data,
epsilon,
C,
H * W * D,
num,
x.data<T>(),
grid2,
block,
stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<T,
phi::DataLayout::kNCHW>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
hipLaunchKernelGGL(( KeBNBackwardData<T,
phi::DataLayout::kNHWC>), dim3(grid1), dim3(block), 0, stream,
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
hipLaunchKernelGGL(( KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC>), dim3(grid2), dim3(block), 0, stream,
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
template <typename T, typename Context>
void BatchNormGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon,
const std::string &data_layout,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
BatchNormGradRawKernel<T, Context>(dev_ctx,
x,
scale,
bias,
mean,
variance,
saved_mean,
saved_variance,
reserve_space,
y_grad,
momentum,
epsilon,
data_layout,
is_test,
use_global_stats,
trainable_statistics,
fuse_with_relu,
false,
x_grad,
scale_grad,
bias_grad);
}
template <typename T, typename Context>
void BatchNormDoubleGradKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const DenseTensor &y_grad,
const DenseTensor &x_grad_grad,
const DenseTensor &scale_grad_grad,
const DenseTensor &bias_grad_grad,
float momentum,
float epsilon,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *y_grad_grad) {
PADDLE_ENFORCE_EQ(is_test,
false,
phi::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const DenseTensor *running_mean = nullptr;
const DenseTensor *running_variance = nullptr;
if (use_global_stats) {
running_mean = mean.get_ptr();
running_variance = variance.get_ptr();
}
paddle::operators::NormDoubleGradFunctor<Context, T>(ctx,
data_layout,
&x,
&scale,
&y_grad,
&saved_mean,
&saved_variance,
running_mean,
running_variance,
epsilon,
use_global_stats,
&x_grad_grad,
&scale_grad_grad,
&bias_grad_grad,
x_grad,
scale_grad,
y_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
phi::dtype::float16) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
#endif
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#endif
| 7f626d3d2d98dedfe2882a50eaaf142748b6aa7d.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/batch_norm_kernel.h"
#include "paddle/phi/kernels/funcs/eigen/common.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/fluid/operators/norm_utils.cu.h"
#include "paddle/phi/kernels/funcs/norm_utils.h"
#include "paddle/fluid/framework/data_layout.h"
#include "paddle/fluid/operators/layout_utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/fluid/platform/flags.h"
#include "paddle/phi/kernels/gpu/batch_norm_utils.h"
#ifdef __HIPCC__
#define LAUNCH_BOUNDS(BlockDim) __launch_bounds__(BlockDim)
#else
#define LAUNCH_BOUNDS(BlockDim)
#endif
DECLARE_bool(cudnn_batchnorm_spatial_persistent);
namespace phi {
template <typename T>
using CudnnDataType = paddle::platform::CudnnDataType<T>;
template <typename T>
using BatchNormParamType = typename CudnnDataType<T>::BatchNormParamType;
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void KeBNBackwardScaleBias(
const T *dy,
const T *x,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
const double epsilon,
const int N,
const int C,
const int HxW,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> inv_var_i = 1.0 / sqrt(variance[i] + epsilon);
BatchNormParamType<T> mean_i = mean[i];
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
ds_sum += static_cast<BatchNormParamType<T>>(dy[index]) *
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
db_sum += static_cast<BatchNormParamType<T>>(dy[index]);
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale[i] = ds_sum * inv_var_i;
dbias[i] = db_sum;
}
__syncthreads();
}
}
template <typename T, phi::DataLayout layout>
static __global__ void KeBNBackwardData(const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *variance,
const double epsilon,
const int C,
const int HxW,
const int num,
T *dx) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? i / HxW % C : i % C;
BatchNormParamType<T> inv_var = 1.0 / sqrt(variance[c] + epsilon);
dx[i] = static_cast<T>(static_cast<BatchNormParamType<T>>(dy[i]) *
scale[c] * inv_var);
}
}
template <typename T>
static __global__ void KeBNRestoreData(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = gid; i < num; i += stride) {
const int c = layout == phi::DataLayout::kNCHW ? (i / M) % C : i % C;
auto y_i = static_cast<BatchNormParamType<T>>(y[i]);
auto x_i = (y_i - bias[c]) / scale[c] / variance[c] + mean[c];
x[i] = static_cast<T>(x_i);
}
}
template <typename T>
class InplaceHelper {
public:
void operator()(const phi::DataLayout layout,
T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *bias,
const BatchNormParamType<T> *mean,
const BatchNormParamType<T> *variance,
double epsilon,
int C,
int M,
const int num,
const T *y,
int grid2,
const int block,
const gpuStream_t &stream) {
PADDLE_ENFORCE_EQ(x,
y,
phi::errors::InvalidArgument(
"X and Y should be inplaced in inplace mode"));
KeBNRestoreData<<<grid2, block, 0, stream>>>(
layout, x, scale, bias, mean, variance, epsilon, C, M, num, y);
}
};
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackward(
const T *dy,
const T *x,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *saved_mean,
const BatchNormParamType<T> *saved_inv_variance,
const int C,
const int N,
const int HxW,
const double epsilon,
T *dx,
BatchNormParamType<T> *dscale,
BatchNormParamType<T> *dbias) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage ds_storage;
__shared__ typename BlockReduce::TempStorage db_storage;
__shared__ typename BlockReduce::TempStorage mean_storage;
  __shared__ typename BlockReduce::TempStorage variance_storage;
__shared__ BatchNormParamType<T> inv_var_val;
__shared__ BatchNormParamType<T> mean_val;
__shared__ BatchNormParamType<T> dscale_val;
__shared__ BatchNormParamType<T> dbias_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> ds_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> db_sum = static_cast<BatchNormParamType<T>>(0);
if (saved_mean && saved_inv_variance) {
if (threadIdx.x == 0) {
inv_var_val = saved_inv_variance[i];
mean_val = saved_mean[i];
}
} else {
BatchNormParamType<T> x_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> x_square_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> x_i =
static_cast<BatchNormParamType<T>>(x[index]);
x_sum += x_i;
x_square_sum += x_i * x_i;
}
x_sum = BlockReduce(mean_storage).Reduce(x_sum, cub::Sum());
x_square_sum =
          BlockReduce(variance_storage).Reduce(x_square_sum, cub::Sum());
if (threadIdx.x == 0) {
mean_val = x_sum / inner_size;
inv_var_val =
1 / sqrt(x_square_sum / inner_size - mean_val * mean_val + epsilon);
}
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
ds_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_val);
db_sum += dy_i;
}
ds_sum = BlockReduce(ds_storage).Reduce(ds_sum, cub::Sum());
db_sum = BlockReduce(db_storage).Reduce(db_sum, cub::Sum());
if (threadIdx.x == 0) {
dscale_val = ds_sum * inv_var_val;
dbias_val = db_sum;
dscale[i] = dscale_val;
dbias[i] = dbias_val;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] = scale[i] * inv_var_val *
(static_cast<BatchNormParamType<T>>(dy[index]) -
dbias_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_val) *
inv_var_val * dscale_val / inner_size);
}
}
}
template <typename T, int BlockDim, phi::DataLayout layout>
static __global__ LAUNCH_BOUNDS(BlockDim) void BNBackwardData(
const T *dy,
const BatchNormParamType<T> *scale,
const BatchNormParamType<T> *mean,
const T *x,
const BatchNormParamType<T> *variance,
const int C,
const int N,
const int HxW,
T *dx) {
const int outer_size = C;
const int inner_size = N * HxW;
typedef cub::BlockReduce<BatchNormParamType<T>, BlockDim> BlockReduce;
__shared__ typename BlockReduce::TempStorage dy_storage;
__shared__ typename BlockReduce::TempStorage dy_x_sub_mean_storage;
__shared__ BatchNormParamType<T> dy_sum_val;
__shared__ BatchNormParamType<T> dy_x_sub_mean_sum_val;
for (int i = blockIdx.x; i < outer_size; i += gridDim.x) {
BatchNormParamType<T> inv_var_i = variance[i];
BatchNormParamType<T> mean_i = mean[i];
BatchNormParamType<T> dy_sum = static_cast<BatchNormParamType<T>>(0);
BatchNormParamType<T> dy_x_sub_mean_sum =
static_cast<BatchNormParamType<T>>(0);
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
BatchNormParamType<T> dy_i =
static_cast<BatchNormParamType<T>>(dy[index]);
dy_sum += dy_i;
dy_x_sub_mean_sum +=
dy_i * (static_cast<BatchNormParamType<T>>(x[index]) - mean_i);
}
dy_sum = BlockReduce(dy_storage).Reduce(dy_sum, cub::Sum());
dy_x_sub_mean_sum = BlockReduce(dy_x_sub_mean_storage)
.Reduce(dy_x_sub_mean_sum, cub::Sum());
if (threadIdx.x == 0) {
dy_sum_val = dy_sum;
dy_x_sub_mean_sum_val = dy_x_sub_mean_sum;
}
__syncthreads();
for (int j = threadIdx.x; j < inner_size; j += blockDim.x) {
const int index = layout == phi::DataLayout::kNCHW
? (j / HxW * C + i) * HxW + j % HxW
: j * outer_size + i;
dx[index] =
(static_cast<BatchNormParamType<T>>(dy[index]) -
dy_sum_val / static_cast<BatchNormParamType<T>>(inner_size) -
(static_cast<BatchNormParamType<T>>(x[index]) - mean_i) *
dy_x_sub_mean_sum_val * inv_var_i * inv_var_i / inner_size) *
scale[i] * inv_var_i;
}
}
}
template <typename T, typename Context>
void BatchNormGradRawKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon_f,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
bool is_inplace,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
double epsilon = static_cast<double>(epsilon_f);
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const auto *d_y = &y_grad;
auto *d_x = x_grad;
auto *d_scale = scale_grad;
auto *d_bias = bias_grad;
use_global_stats = is_test || use_global_stats;
const auto &x_dims = x.dims();
PADDLE_ENFORCE_EQ(
x_dims.size() >= 2 && x_dims.size() <= 5,
true,
phi::errors::InvalidArgument(
"The size of input's dimensions should be between 2 and 5."
"But received: the size of input's dimensions is [%d],"
"the dimensions of input is [%s]",
x_dims.size(),
x_dims));
int N, C, H, W, D;
phi::funcs::ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D);
// init output
if (d_x) {
ctx.template Alloc<T>(d_x);
}
if (d_scale && d_bias) {
ctx.template Alloc<BatchNormParamType<T>>(d_scale);
ctx.template Alloc<BatchNormParamType<T>>(d_bias);
}
PADDLE_ENFORCE_EQ(
scale.dims().size(),
1UL,
phi::errors::InvalidArgument(
"The size of scale's dimensions must equal to 1. But received: "
"the size of scale's dimensions is [%d], the dimensions of scale "
"is [%s].",
scale.dims().size(),
scale.dims()));
PADDLE_ENFORCE_EQ(
scale.dims()[0],
C,
phi::errors::InvalidArgument(
"The first dimension of scale must equal to Channels[%d]. But "
"received: the first dimension of scale is [%d]",
C,
scale.dims()[0]));
auto dtype = paddle::platform::CudnnDataType<T>::type;
#ifdef PADDLE_WITH_HIP
auto compute_format =
data_layout == DataLayout::kNHWC ? DataLayout::kNHWC : DataLayout::kNCHW;
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// HIP do not support compute format of NHWC
// auto compute_format = DataLayout::kNCHW;
#else
const bool fast_nhwc_batch_norm = dtype == CUDNN_DATA_HALF &&
FLAGS_cudnn_batchnorm_spatial_persistent &&
(reserve_space.get_ptr() != nullptr);
auto compute_format = fast_nhwc_batch_norm && data_layout == DataLayout::kNHWC
? DataLayout::kNHWC
: DataLayout::kNCHW;
#endif
DenseTensor transformed_x(x.type());
DenseTensor transformed_d_y(d_y->type());
DenseTensor transformed_d_x;
if (data_layout == DataLayout::kNHWC && compute_format == DataLayout::kNCHW &&
x_dims.size() > 2) {
VLOG(3) << "Transform input tensor from NHWC to NCHW.";
ResizeToChannelFirst<Context, T>(ctx, &x, &transformed_x);
TransToChannelFirst<Context, T>(ctx, &x, &transformed_x);
ResizeToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
TransToChannelFirst<Context, T>(ctx, d_y, &transformed_d_y);
if (d_x) {
ResizeToChannelFirst<Context, T>(ctx, d_x, &transformed_d_x);
}
} else {
transformed_x.ShareDataWith(x);
transformed_d_y.ShareDataWith(*d_y);
if (d_x) {
transformed_d_x.ShareDataWith(*d_x);
}
}
std::vector<int> dims;
std::vector<int> strides;
if (compute_format == DataLayout::kNCHW) {
dims = {N, C, H, W, D};
strides = {C * H * W * D, H * W * D, W * D, D, 1};
} else {
dims = {N, C, H, W, D};
strides = {H * W * C * D, 1, W * D * C, D * C, C};
}
const int num = transformed_x.numel();
#ifdef __HIPCC__
const int block = 256;
#else
const int block = 512;
#endif
int max_threads = ctx.GetMaxPhysicalThreadCount();
const int max_blocks = std::max(max_threads / block, 1);
int grid1 = (num + block - 1) / block;
int grid2 = std::min(C, max_blocks);
auto stream = ctx.stream();
InplaceHelper<T> inplace_functor;
if (!use_global_stats) {
if ((N * H * W * D) == 1) {
if (d_x) {
paddle::framework::TensorCopy(*d_y, ctx.GetPlace(), d_x);
}
phi::funcs::SetConstant<Context, BatchNormParamType<T>> functor;
functor(ctx, d_scale, static_cast<BatchNormParamType<T>>(0));
functor(ctx, d_bias, static_cast<BatchNormParamType<T>>(0));
return;
}
// ------------------- cudnn descriptors ---------------------
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// miopenTensorDescriptor_t data_desc_;
// miopenTensorDescriptor_t bn_param_desc_;
// miopenBatchNormMode_t mode_;
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenCreateTensorDescriptor(&bn_param_desc_));
#else
cudnnTensorDescriptor_t data_desc_;
cudnnTensorDescriptor_t bn_param_desc_;
cudnnBatchNormMode_t mode_;
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(&data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnCreateTensorDescriptor(
&bn_param_desc_));
#endif
if (epsilon <= CUDNN_BN_MIN_EPSILON - FLT_EPSILON) {
LOG(ERROR) << "Provided epsilon is smaller than "
<< "CUDNN_BN_MIN_EPSILON. Setting it to "
<< "CUDNN_BN_MIN_EPSILON instead.";
}
epsilon = std::max(epsilon, CUDNN_BN_MIN_EPSILON);
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// mode_ = miopenBNSpatial;
#elif CUDNN_VERSION_MIN(7, 0, 1)
if (FLAGS_cudnn_batchnorm_spatial_persistent) {
mode_ = CUDNN_BATCHNORM_SPATIAL_PERSISTENT;
} else if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#else
if (H == 1 && W == 1) {
mode_ = CUDNN_BATCHNORM_PER_ACTIVATION;
} else {
mode_ = CUDNN_BATCHNORM_SPATIAL;
}
#endif // CUDNN_VERSION_MIN(7, 0, 1)
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(platform::dynload::miopenSetTensorDescriptor(
// data_desc_, CudnnDataType<T>::type,
// x_dims.size() > 3 ? x_dims.size() : 4, const_cast<int *>(dims.data()),
// const_cast<int *>(strides.data())));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDeriveBNTensorDescriptor(bn_param_desc_,
// data_desc_, mode_));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnSetTensorNdDescriptor(
data_desc_,
CudnnDataType<T>::type,
x_dims.size() > 3 ? x_dims.size() : 4,
dims.data(),
strides.data()));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDeriveBNTensorDescriptor(
bn_param_desc_, data_desc_, mode_));
#endif
const auto *saved_mean_data =
saved_mean.template data<BatchNormParamType<T>>();
const auto *saved_var_data =
saved_variance.template data<BatchNormParamType<T>>();
if (is_inplace) {
inplace_functor(compute_format,
transformed_x.data<T>(),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
epsilon,
C,
H * W * D,
num,
transformed_x.data<T>(),
grid2,
block,
stream);
}
// This branch calls CUDNN APIs
if (d_x && d_scale && d_bias) {
bool called = false;
#if CUDNN_VERSION_MIN(7, 4, 1)
called = true;
size_t workspace_size = 0;
void *workspace_ptr = nullptr;
DenseTensor workspace_tensor;
auto reserve_space_size = reserve_space->memory_size();
// --------------- cudnn batchnorm workspace ---------------
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::
cudnnGetBatchNormalizationBackwardExWorkspaceSize(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnIps=*/CUDNN_BATCHNORM_OPS_BN,
/*xDesc=*/data_desc_,
/*yDesc=*/data_desc_,
/*dyDesc=*/data_desc_,
/*dzDesc=*/nullptr,
/*dxDesc=*/data_desc_,
/*bnScaleBiasMeanVarDesc=*/bn_param_desc_,
/*activationDesc=*/nullptr,
/*sizeInBytes=*/&workspace_size));
workspace_tensor.Resize({static_cast<int64_t>(workspace_size)});
workspace_ptr =
static_cast<void *>(ctx.template Alloc<uint8_t>(&workspace_tensor));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackwardEx(
/*handle=*/ctx.cudnn_handle(),
/*mode=*/mode_,
/*bnOps=*/CUDNN_BATCHNORM_OPS_BN,
/*alphaDataDiff=*/CudnnDataType<T>::kOne(),
/*betaDataDiff=*/CudnnDataType<T>::kZero(),
/*alphaParamDiff=*/CudnnDataType<T>::kOne(),
/*betaParamDiff=*/CudnnDataType<T>::kZero(),
/*xDesc=*/data_desc_,
/*xData=*/transformed_x.template data<T>(),
/*yDesc=*/nullptr,
/*yData=*/nullptr,
/*dyDesc=*/data_desc_,
/*dyData=*/transformed_d_y.template data<T>(),
/*dzDesc=*/nullptr,
/*dzData=*/nullptr,
/*dxDesc=*/data_desc_,
/*dxData=*/ctx.template Alloc<T>(&transformed_d_x),
/*dBnScaleBiasDesc=*/bn_param_desc_,
/*bnScaleData=*/scale.template data<BatchNormParamType<T>>(),
/*bnBiasData=*/nullptr,
/*dBnScaleData=*/ctx.template Alloc<BatchNormParamType<T>>(
d_scale),
/*dBnBiasData=*/ctx.template Alloc<BatchNormParamType<T>>(d_bias),
/*epsilon=*/epsilon,
/*savedMean=*/saved_mean_data,
/*savedInvVariance=*/saved_var_data,
/*activationDesc=*/nullptr,
/*workspace=*/workspace_ptr,
/*workSpaceSizeInBytes=*/workspace_size,
/*reserveSpace=*/const_cast<uint8_t *>(
reserve_space->template data<uint8_t>()),
/*reserveSpaceSizeInBytes=*/reserve_space_size));
#endif // CUDNN_VERSION_MIN(7, 4, 1)
if (!called) {
#ifdef PADDLE_WITH_HIP
if (compute_format == DataLayout::kNCHW) {
BNBackward<T,
block,
DataLayout::kNCHW><<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
} else {
BNBackward<T,
block,
DataLayout::kNHWC><<<grid2, block, 0, ctx.stream()>>>(
transformed_d_y.template data<T>(),
transformed_x.template data<T>(),
scale.template data<BatchNormParamType<T>>(),
saved_mean_data,
saved_var_data,
C,
N,
H * W * D,
epsilon,
transformed_d_x.template data<T>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias));
}
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenBatchNormalizationBackward(
// dev_ctx.cudnn_handle(), mode_, CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), CudnnDataType<T>::kOne(),
// CudnnDataType<T>::kZero(), data_desc_,
// transformed_x.template data<T>(), data_desc_,
// transformed_d_y.template data<T>(), data_desc_,
// transformed_d_x.template mutable_data<T>(ctx.GetPlace()),
// bn_param_desc_, scale->template data<BatchNormParamType<T>>(),
// d_scale->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// d_bias->template mutable_data<BatchNormParamType<T>>(
// ctx.GetPlace()),
// epsilon, saved_mean_data, saved_var_data));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnBatchNormalizationBackward(
ctx.cudnn_handle(),
mode_,
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
CudnnDataType<T>::kOne(),
CudnnDataType<T>::kZero(),
data_desc_,
transformed_x.template data<T>(),
data_desc_,
transformed_d_y.template data<T>(),
data_desc_,
ctx.template Alloc<T>(&transformed_d_x),
bn_param_desc_,
scale.template data<BatchNormParamType<T>>(),
ctx.template Alloc<BatchNormParamType<T>>(d_scale),
ctx.template Alloc<BatchNormParamType<T>>(d_bias),
epsilon,
saved_mean_data,
saved_var_data));
#endif
}
if (data_layout == DataLayout::kNHWC &&
compute_format == DataLayout::kNCHW) {
VLOG(3) << "Transform batchnorm output from NCHW to NHWC";
TransToChannelLast<Context, T>(ctx, &transformed_d_x, d_x);
}
} else {
      // This branch calls CUDA kernels
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
BNBackwardData<
T,
block,
phi::DataLayout::kNCHW><<<grid2, block, 0, ctx.stream()>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
BNBackwardData<
T,
block,
phi::DataLayout::kNHWC><<<grid2, block, 0, ctx.stream()>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
saved_mean_data,
x.data<T>(),
saved_var_data,
C,
N,
H * W * D,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
saved_mean_data,
saved_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
#ifdef PADDLE_WITH_HIP
// TODO(wangran16): wait for MIOpen to improve the performance of BN
// clean when exit.
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(data_desc_));
// PADDLE_ENFORCE_GPU_SUCCESS(
// platform::dynload::miopenDestroyTensorDescriptor(bn_param_desc_));
#else
// clean when exit.
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(data_desc_));
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnDestroyTensorDescriptor(
bn_param_desc_));
#endif
} else {
const auto *running_mean = mean.get_ptr();
const auto *running_var = variance.get_ptr();
const auto *running_mean_data =
running_mean->template data<BatchNormParamType<T>>();
const auto *running_var_data =
running_var->template data<BatchNormParamType<T>>();
if (is_inplace) {
auto px = x;
inplace_functor(data_layout,
ctx.template Alloc<T>(&px),
scale.template data<BatchNormParamType<T>>(),
bias.template data<BatchNormParamType<T>>(),
running_mean_data,
running_var_data,
epsilon,
C,
H * W * D,
num,
x.data<T>(),
grid2,
block,
stream);
}
if (compute_format == DataLayout::kNCHW) {
if (d_x) {
KeBNBackwardData<T,
phi::DataLayout::kNCHW><<<grid1, block, 0, stream>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNCHW><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
} else {
if (d_x) {
KeBNBackwardData<T,
phi::DataLayout::kNHWC><<<grid1, block, 0, stream>>>(
d_y->data<T>(),
scale.data<BatchNormParamType<T>>(),
running_var_data,
epsilon,
C,
H * W,
num,
d_x->data<T>());
}
if (d_scale && d_bias) {
KeBNBackwardScaleBias<
T,
block,
phi::DataLayout::kNHWC><<<grid2, block, 0, stream>>>(
d_y->data<T>(),
x.data<T>(),
running_mean_data,
running_var_data,
epsilon,
N,
C,
H * W * D,
d_scale->data<BatchNormParamType<T>>(),
d_bias->data<BatchNormParamType<T>>());
}
}
}
}
template <typename T, typename Context>
void BatchNormGradKernel(const Context &dev_ctx,
const DenseTensor &x,
const DenseTensor &scale,
const DenseTensor &bias,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const paddle::optional<DenseTensor> &reserve_space,
const DenseTensor &y_grad,
float momentum,
float epsilon,
const std::string &data_layout,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *bias_grad) {
BatchNormGradRawKernel<T, Context>(dev_ctx,
x,
scale,
bias,
mean,
variance,
saved_mean,
saved_variance,
reserve_space,
y_grad,
momentum,
epsilon,
data_layout,
is_test,
use_global_stats,
trainable_statistics,
fuse_with_relu,
false,
x_grad,
scale_grad,
bias_grad);
}
template <typename T, typename Context>
void BatchNormDoubleGradKernel(const Context &ctx,
const DenseTensor &x,
const DenseTensor &scale,
const paddle::optional<DenseTensor> &mean,
const paddle::optional<DenseTensor> &variance,
const DenseTensor &saved_mean,
const DenseTensor &saved_variance,
const DenseTensor &y_grad,
const DenseTensor &x_grad_grad,
const DenseTensor &scale_grad_grad,
const DenseTensor &bias_grad_grad,
float momentum,
float epsilon,
const std::string &data_layout_str,
bool is_test,
bool use_global_stats,
bool trainable_statistics,
bool fuse_with_relu,
DenseTensor *x_grad,
DenseTensor *scale_grad,
DenseTensor *y_grad_grad) {
PADDLE_ENFORCE_EQ(is_test,
false,
phi::errors::InvalidArgument(
"`is_test = True` CANNOT be used in train program. If "
"you want to use global status in pre_train model, "
"please set `use_global_stats = True`"));
const DataLayout data_layout =
paddle::framework::StringToDataLayout(data_layout_str);
const DenseTensor *running_mean = nullptr;
const DenseTensor *running_variance = nullptr;
if (use_global_stats) {
running_mean = mean.get_ptr();
running_variance = variance.get_ptr();
}
paddle::operators::NormDoubleGradFunctor<Context, T>(ctx,
data_layout,
&x,
&scale,
&y_grad,
&saved_mean,
&saved_variance,
running_mean,
running_variance,
epsilon,
use_global_stats,
&x_grad_grad,
&scale_grad_grad,
&bias_grad_grad,
x_grad,
scale_grad,
y_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
phi::dtype::float16) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormGradKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
PD_REGISTER_KERNEL(batch_norm_grad_raw,
GPU,
ALL_LAYOUT,
phi::BatchNormGradRawKernel,
float,
double,
phi::dtype::float16) {
if (kernel_key.dtype() == phi::DataType::FLOAT16) {
kernel->OutputAt(0).SetDataType(phi::DataType::FLOAT32); // x_grad
kernel->OutputAt(1).SetDataType(phi::DataType::FLOAT32); // scale_grad
kernel->OutputAt(2).SetDataType(phi::DataType::FLOAT32); // bias_grad
}
}
#endif
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#else
PD_REGISTER_KERNEL(batch_norm_grad_grad,
GPU,
ALL_LAYOUT,
phi::BatchNormDoubleGradKernel,
float,
double) {}
#endif
|
6825327b499085c175291f7dadc60d6a72b7bcb9.hip | // !!! This is a file automatically generated by hipify!!!
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_s
#include "gemv_template_kernel_batched_hip.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/***************************************************************************//**
Purpose
-------
SGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv_batched
*******************************************************************************/
extern "C" void
magmablas_sgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_ptr dA_array[], magma_int_t ldda,
magmaFloat_ptr dx_array[], magma_int_t incx,
float beta,
magmaFloat_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat matrix
if ( m <= 8)
{
gemvn_template_batched<float, version(N, 32)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 16)
{
gemvn_template_batched<float, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<float, version(N, 97)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 130)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall matrix
if ( n <= 16)
{
gemvn_template_batched<float, version(N, 118)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 16)
{
gemvn_template_batched<float, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<float, version(N, 103)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 64)
{
gemvn_template_batched<float, version(N, 126)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall or square matrix
if (m <= 256)
{
gemvn_template_batched<float, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) // small size
{
gemvc_template_batched<float, version(T, 46)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else // big size
{
if (m <= n) // Fat or square matrix
{
if (m <= 64)
{
gemvc_template_batched<float, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 133)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else// (m > n) Tall matrix
{
if (n <= 8)
{
gemvc_template_batched<float, version(T, 130)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 131)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
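// -------------------------------------------------------------------------------------
// Usage sketch (illustration only, not part of MAGMA): one way a caller might drive the
// batched kernel above for batchCount independent problems of identical size. The queue
// helpers (magma_queue_create / magma_queue_sync / magma_queue_destroy) are standard
// MAGMA API; the device pointer arrays dA_array/dx_array/dy_array are assumed to have
// been filled by the caller, each entry pointing at one problem's A (ldda x n),
// x (length n) and y (length m).
// -------------------------------------------------------------------------------------
void
example_sgemv_batched_notrans(
magma_int_t m, magma_int_t n, magma_int_t ldda,
magmaFloat_ptr dA_array[], magmaFloat_ptr dx_array[], magmaFloat_ptr dy_array[],
magma_int_t batchCount)
{
magma_queue_t queue;
magma_queue_create( 0, &queue ); // execution queue on device 0
// y_i := 1.0 * A_i * x_i + 0.0 * y_i for i = 0, ..., batchCount-1
magmablas_sgemv_batched( MagmaNoTrans, m, n,
1.0f, dA_array, ldda,
dx_array, 1,
0.0f, dy_array, 1,
batchCount, queue );
magma_queue_sync( queue ); // wait until the whole batch has finished
magma_queue_destroy( queue );
}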
| 6825327b499085c175291f7dadc60d6a72b7bcb9.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@author Tingxing Dong
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define PRECISION_s
#include "gemv_template_kernel_batched.cuh"
#include "gemv_config/gemvn_param.h"
#include "gemv_config/gemvt_param.h"
#define version(s,v) s ## _V_ ## v
/***************************************************************************//**
Purpose
-------
SGEMV performs one of the matrix-vector operations
y := alpha*A*x + beta*y, or
y := alpha*A**T*x + beta*y, or
y := alpha*A**H*x + beta*y,
where alpha and beta are scalars, x and y are vectors and A is an
m by n matrix.
Arguments
----------
@param[in]
trans magma_trans_t
On entry, TRANS specifies the operation to be performed as
follows:
- = MagmaNoTrans: y := alpha*A *x + beta*y
- = MagmaTrans: y := alpha*A^T*x + beta*y
- = MagmaConjTrans: y := alpha*A^H*x + beta*y
@param[in]
m INTEGER
On entry, m specifies the number of rows of the matrix A.
@param[in]
n INTEGER
On entry, n specifies the number of columns of the matrix A
@param[in]
alpha REAL
On entry, ALPHA specifies the scalar alpha.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a REAL array A of DIMENSION ( ldda, n ) on the GPU
@param[in]
ldda INTEGER
LDDA specifies the leading dimension of A.
@param[in]
dx_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
n if trans == MagmaNoTrans
m if trans == MagmaTrans or MagmaConjTrans
@param[in]
incx Specifies the increment for the elements of X.
INCX must not be zero.
@param[in]
beta REAL
On entry, BETA specifies the scalar beta. When BETA is
supplied as zero then Y need not be set on input.
@param[out]
dy_array Array of pointers, dimension (batchCount).
Each is a REAL array of dimension
m if trans == MagmaNoTrans
n if trans == MagmaTrans or MagmaConjTrans
@param[in]
incy Specifies the increment for the elements of Y.
INCY must not be zero.
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_gemv_batched
*******************************************************************************/
extern "C" void
magmablas_sgemv_batched(
magma_trans_t trans, magma_int_t m, magma_int_t n,
float alpha,
magmaFloat_ptr dA_array[], magma_int_t ldda,
magmaFloat_ptr dx_array[], magma_int_t incx,
float beta,
magmaFloat_ptr dy_array[], magma_int_t incy,
magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if ( trans != MagmaNoTrans && trans != MagmaTrans && trans != MagmaConjTrans )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < m )
info = -6;
else if ( incx == 0 )
info = -8;
else if ( incy == 0 )
info = -11;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( trans == MagmaNoTrans ) {
if (max(m, n) <= 96) { // small size
if (m < n) { // Fat matrix
if ( m <= 8)
{
gemvn_template_batched<float, version(N, 32)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 16)
{
gemvn_template_batched<float, version(N, 72)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 32)
{
gemvn_template_batched<float, version(N, 97)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if ( m <= 64)
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 130)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall matrix
if ( n <= 16)
{
gemvn_template_batched<float, version(N, 118)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 120)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
else { // big size
if (m < n) { // Fat matrix
if (m <= 16)
{
gemvn_template_batched<float, version(N, 79)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 32)
{
gemvn_template_batched<float, version(N, 103)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else if (m <= 64)
{
gemvn_template_batched<float, version(N, 126)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 135)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else { // Tall or square matrix
if (m <= 256)
{
gemvn_template_batched<float, version(N, 137)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvn_template_batched<float, version(N, 140)>
( m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}// big size
}
else {
if (max(m, n) <= 96) // small size
{
gemvc_template_batched<float, version(T, 46)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else // big size
{
if (m <= n) // Fat or square matrix
{
if (m <= 64)
{
gemvc_template_batched<float, version(T, 47)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 133)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
else// (m > n) Tall matrix
{
if (n <= 8)
{
gemvc_template_batched<float, version(T, 130)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
else
{
gemvc_template_batched<float, version(T, 131)>
( trans, m, n, alpha, dA_array, ldda, dx_array, incx, beta, dy_array, incy, batchCount, queue );
}
}
}
}
}
|
16ae1127cc39cd6ba98b973847a250dca53e11d8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "derive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *data = NULL;
hipMalloc(&data, XSIZE*YSIZE);
double *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int stride = 2;
int gsize = XSIZE*YSIZE;
double dx = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(derive, dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, stride, gsize, dx);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(derive, dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, stride, gsize, dx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(derive, dim3(gridBlock), dim3(threadBlock), 0, 0, data, out, stride, gsize, dx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 16ae1127cc39cd6ba98b973847a250dca53e11d8.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "derive.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *data = NULL;
cudaMalloc(&data, XSIZE*YSIZE);
double *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int stride = 2;
int gsize = XSIZE*YSIZE;
double dx = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
derive<<<gridBlock,threadBlock>>>(data,out,stride,gsize,dx);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
derive<<<gridBlock,threadBlock>>>(data,out,stride,gsize,dx);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
derive<<<gridBlock,threadBlock>>>(data,out,stride,gsize,dx);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
d9eb280258036149b4e6814bd990b51dfe73e2f4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <unitarization_links.h>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
#include <hipfft.h>
#ifdef GPU_GAUGE_ALG
#include <CUFFT_Plans.h>
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
//Comment if you don't want to use textures for Delta(x) and g(x)
#define GAUGEFIXING_SITE_MATRIX_LOAD_TEX
//UNCOMMENT THIS IF YOU WANT TO USE LESS MEMORY
#define GAUGEFIXING_DONT_USE_GX
//Without using the precalculation of g(x),
//we lose some performance, because Delta(x) is written in the normal lattice coordinates needed for the FFTs
//and the gauge array in even/odd format
#ifdef GAUGEFIXING_DONT_USE_GX
#warning Don't use precalculated g(x)
#else
#warning Using precalculated g(x)
#endif
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
texture<float2, 1, hipReadModeElementType> GXTexSingle;
texture<int4, 1, hipReadModeElementType> GXTexDouble;
//Delta is only stored using 12 real number parameters,
// (0,0), (0,1), (0,2), (1,1), (1,2) and (2,2)
// (0,0), (1,1) and (0,1) don't have real part, however we need a complex for the FFTs
texture<float2, 1, hipReadModeElementType> DELTATexSingle;
texture<int4, 1, hipReadModeElementType> DELTATexDouble;
template <class T>
inline __device__ T TEXTURE_GX(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_GX<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(GXTexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_GX<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(GXTexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
template <class T>
inline __device__ T TEXTURE_DELTA(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_DELTA<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(DELTATexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_DELTA<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(DELTATexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
static void BindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipBindTexture(0, GXTexSingle, gx, bytes);
#endif
hipBindTexture(0, DELTATexSingle, delta, bytes);
#endif
}
static void BindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipBindTexture(0, GXTexDouble, gx, bytes);
#endif
hipBindTexture(0, DELTATexDouble, delta, bytes);
#endif
}
static void UnBindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipUnbindTexture(GXTexSingle);
#endif
hipUnbindTexture(DELTATexSingle);
#endif
}
static void UnBindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
hipUnbindTexture(GXTexDouble);
#endif
hipUnbindTexture(DELTATexDouble);
#endif
}
template <typename Cmplx>
struct GaugeFixFFTRotateArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Cmplx *tmp0;
Cmplx *tmp1;
GaugeFixFFTRotateArg(const cudaGaugeField &data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
tmp0 = 0;
tmp1 = 0;
}
};
template <int direction, typename Cmplx>
__global__ void fft_rotate_kernel_2D2D(GaugeFixFFTRotateArg<Cmplx> arg){ //Cmplx *data_in, Cmplx *data_out){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
if ( direction == 0 ) {
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
int id = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
int id_out = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
if ( direction == 1 ) {
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
int id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
int id_out = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
}
template<typename Cmplx>
class GaugeFixFFTRotate : Tunable {
GaugeFixFFTRotateArg<Cmplx> arg;
unsigned int direction;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixFFTRotate(GaugeFixFFTRotateArg<Cmplx> &arg)
: arg(arg) {
direction = 0;
}
~GaugeFixFFTRotate () {
}
void setDirection(unsigned int dir, Cmplx *data_in, Cmplx *data_out){
direction = dir;
arg.tmp0 = data_in;
arg.tmp1 = data_out;
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if ( direction == 0 )
fft_rotate_kernel_2D2D<0, Cmplx><< < tp.grid, tp.block, 0, stream >> > (arg);
else if ( direction == 1 )
fft_rotate_kernel_2D2D<1, Cmplx><< < tp.grid, tp.block, 0, stream >> > (arg);
else
errorQuda("Error in GaugeFixFFTRotate option.\n");
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Cmplx) / 2);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 0;
}
long long bytes() const {
return 2LL * sizeof(Cmplx) * arg.threads;
}
};
template <typename Cmplx, typename Gauge>
struct GaugeFixQualityArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
Gauge dataOr;
Cmplx *delta;
GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data, Cmplx * delta)
: ReduceArg<double2>(), dataOr(dataOr), delta(delta) {
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = data.VolumeCB();
}
double getAction(){ return result_h[0].x; }
double getTheta(){ return result_h[0].y; }
};
template<int blockSize, unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix_quality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
double2 data = make_double2(0.0,0.0);
if ( idx < argQ.threads ) {
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, idx, argQ.X, parity);
Matrix<Cmplx,3> delta;
setZero(&delta);
//idx = linkIndex(x,X);
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),idx, mu, parity);
delta -= U;
}
//18*gauge_dir
data.x = -delta(0,0).x - delta(1,1).x - delta(2,2).x;
//2
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,argQ.X,mu), mu, 1 - parity);
delta += U;
}
//18*gauge_dir
delta -= conj(delta);
//18
//SAVE DELTA!!!!!
SubTraceUnit(delta);
idx = getIndexFull(idx, argQ.X, parity);
//Saving Delta
argQ.delta[idx] = delta(0,0);
argQ.delta[idx + 2 * argQ.threads] = delta(0,1);
argQ.delta[idx + 4 * argQ.threads] = delta(0,2);
argQ.delta[idx + 6 * argQ.threads] = delta(1,1);
argQ.delta[idx + 8 * argQ.threads] = delta(1,2);
argQ.delta[idx + 10 * argQ.threads] = delta(2,2);
//12
data.y = getRealTraceUVdagger(delta, delta);
//35
//T=36*gauge_dir+65
}
reduce2d<blockSize,2>(argQ, data);
}
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
class GaugeFixQuality : TunableLocalParity {
GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int minThreads() const { return argQ.threads; }
public:
GaugeFixQuality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> &argQ)
: argQ(argQ) {
}
~GaugeFixQuality () { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
argQ.result_h[0] = make_double2(0.0,0.0);
LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Elems, Float, Gauge, gauge_dir);
hipDeviceSynchronize();
argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads);
argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << argQ.X[0] << "x" << argQ.X[1] << "x" << argQ.X[2] << "x" << argQ.X[3];
sprintf(aux_string,"threads=%d,prec=%d,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return (36LL * gauge_dir + 65LL) * 2 * argQ.threads;
} // Only correct if there is no link reconstruction, no cub reduction accounted also
long long bytes() const {
return (2LL * gauge_dir + 2LL) * Elems * 2 * argQ.threads * sizeof(Float);
} //Not accounting the reduction!!!
};
template <typename Float>
struct GaugeFixArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
cudaGaugeField &data;
Float *invpsq;
typename ComplexTypeId<Float>::Type *delta;
typename ComplexTypeId<Float>::Type *gx;
GaugeFixArg( cudaGaugeField & data, const unsigned int Elems) : data(data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
invpsq = (Float*)device_malloc(sizeof(Float) * threads);
delta = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * 6);
#ifdef GAUGEFIXING_DONT_USE_GX
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads);
#else
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
#endif
BindTex(delta, gx, sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
}
void free(){
UnBindTex(delta, gx);
device_free(invpsq);
device_free(delta);
device_free(gx);
}
};
template <typename Float>
__global__ void kernel_gauge_set_invpsq(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
//id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
Float sx = sin( (Float)x0 * FL_UNITARIZE_PI / (Float)arg.X[0]);
Float sy = sin( (Float)x1 * FL_UNITARIZE_PI / (Float)arg.X[1]);
Float sz = sin( (Float)x2 * FL_UNITARIZE_PI / (Float)arg.X[2]);
Float st = sin( (Float)x3 * FL_UNITARIZE_PI / (Float)arg.X[3]);
Float sinsq = sx * sx + sy * sy + sz * sz + st * st;
Float prcfact = 0.0;
//The FFT normalization is done here
if ( sinsq > 0.00001 ) prcfact = 4.0 / (sinsq * (Float)arg.threads);
arg.invpsq[id] = prcfact;
}
template<typename Float>
class GaugeFixSETINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixSETINVPSP(GaugeFixArg<Float> &arg) : arg(arg) { }
~GaugeFixSETINVPSP () { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_set_invpsq<Float><< < tp.grid, tp.block, 0, stream >> > (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 21 * arg.threads;
}
long long bytes() const {
return sizeof(Float) * arg.threads;
}
};
template<typename Float>
__global__ void kernel_gauge_mult_norm_2D(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id < arg.threads ) arg.gx[id] = arg.gx[id] * arg.invpsq[id];
}
template<typename Float>
class GaugeFixINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixINVPSP(GaugeFixArg<Float> &arg)
: arg(arg){
hipFuncSetCacheConfig( kernel_gauge_mult_norm_2D<Float>, hipFuncCachePreferL1);
}
~GaugeFixINVPSP () {
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_mult_norm_2D<Float><< < tp.grid, tp.block, 0, stream >> > (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){
//since delta contents are irrelevant at this point, we can swap gx with delta
typename ComplexTypeId<Float>::Type *tmp = arg.gx;
arg.gx = arg.delta;
arg.delta = tmp;
}
void postTune(){
arg.gx = arg.delta;
}
long long flops() const {
return 2LL * arg.threads;
}
long long bytes() const {
return 5LL * sizeof(Float) * arg.threads;
}
};
template<typename Cmplx>
__device__ __host__ inline typename RealTypeId<Cmplx>::Type Abs2(const Cmplx & a){
return a.x * a.x + a.y * a.y;
}
template <typename Float>
__host__ __device__ inline void reunit_link( Matrix<typename ComplexTypeId<Float>::Type,3> &U ){
typedef typename ComplexTypeId<Float>::Type Cmplx;
Cmplx t2 = makeComplex((Float)0.0, (Float)0.0);
Float t1 = 0.0;
//first normalize first row
//sum of squares of row
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(0, c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(0,c) *= t1;
//6
#pragma unroll
for ( int c = 0; c < 3; c++ ) t2 += Conj(U(0,c)) * U(1,c);
//24
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c);
//24
//normalize second row
//sum of squares of row
t1 = 0.0;
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(1,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1, c) *= t1;
//6
//Reconstruct last row
U(2,0) = Conj(U(0,1) * U(1,2) - U(0,2) * U(1,1));
U(2,1) = Conj(U(0,2) * U(1,0) - U(0,0) * U(1,2));
U(2,2) = Conj(U(0,0) * U(1,1) - U(0,1) * U(1,0));
//42
//T=130
}
#ifdef GAUGEFIXING_DONT_USE_GX
template <typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO_NEW( GaugeFixArg<Float> arg, Gauge dataOr, Float half_alpha){
int id = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
if ( id >= arg.threads/2 ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, id, arg.X, parity);
int idx = ((x[3] * arg.X[2] + x[2]) * arg.X[1] + x[1]) * arg.X[0] + x[0];
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
idx = linkNormalIndexP1(x,arg.X,mu);
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
setIdentity(&g0);
g0 += de * half_alpha;
//36
reunit_link<Float>( g0 );
//130
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
}
template<typename Float, typename Gauge>
class GaugeFixNEW : TunableLocalParity {
GaugeFixArg<Float> arg;
Float half_alpha;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
// since GaugeFixArg is used by other kernels that don't use
// tunableLocalParity, arg.threads stores Volume and not VolumeCB
// so we need to divide by two
unsigned int minThreads() const { return arg.threads/2; }
public:
GaugeFixNEW(Gauge & dataOr, GaugeFixArg<Float> &arg, Float alpha)
: dataOr(dataOr), arg(arg) {
half_alpha = alpha * 0.5;
hipFuncSetCacheConfig( kernel_gauge_fix_U_EO_NEW<Float, Gauge>, hipFuncCachePreferL1);
}
~GaugeFixNEW () { }
void setAlpha(Float alpha){ half_alpha = alpha * 0.5; }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_fix_U_EO_NEW<Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 2414LL * arg.threads;
//Not accounting here the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return ( dataOr.Bytes() * 4LL + 5 * 12LL * sizeof(Float)) * arg.threads;
}
};
#else
template <unsigned int Elems, typename Float>
__global__ void kernel_gauge_GX(GaugeFixArg<Float> arg, Float half_alpha){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(id);
de(0,1) = TEXTURE_DELTA<Cmplx>(id + arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(id + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(id + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(id + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(id + 5 * arg.threads);
#else
de(0,0) = arg.delta[id];
de(0,1) = arg.delta[id + arg.threads];
de(0,2) = arg.delta[id + 2 * arg.threads];
de(1,1) = arg.delta[id + 3 * arg.threads];
de(1,2) = arg.delta[id + 4 * arg.threads];
de(2,2) = arg.delta[id + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
//gx is represented in even/odd order
//normal lattice index to even/odd index
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
id = (x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]) >> 1;
id += ((x0 + x1 + x2 + x3) & 1 ) * arg.threads / 2;
for ( int i = 0; i < Elems; i++ ) arg.gx[id + i * arg.threads] = g.data[i];
//T=166 for Elems 9
//T=208 for Elems 6
}
template<unsigned int Elems, typename Float>
class GaugeFix_GX : Tunable {
GaugeFixArg<Float> arg;
Float half_alpha;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix_GX(GaugeFixArg<Float> &arg, Float alpha)
: arg(arg) {
half_alpha = alpha * 0.5;
hipFuncSetCacheConfig( kernel_gauge_GX<Elems, Float>, hipFuncCachePreferL1);
}
~GaugeFix_GX () {
}
void setAlpha(Float alpha){
half_alpha = alpha * 0.5;
}
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_GX<Elems, Float><< < tp.grid, tp.block, 0, stream >> > (arg, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
if ( Elems == 6 ) return 208LL * arg.threads;
else return 166LL * arg.threads;
}
long long bytes() const {
return 4LL * Elems * sizeof(Float) * arg.threads;
}
};
template <unsigned int Elems, typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO( GaugeFixArg<Float> arg, Gauge dataOr){
int idd = threadIdx.x + blockIdx.x * blockDim.x;
if ( idd >= arg.threads ) return;
int parity = 0;
int id = idd;
if ( idd >= arg.threads / 2 ) {
parity = 1;
id -= arg.threads / 2;
}
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> g;
//for(int i = 0; i < Elems; i++) g.data[i] = arg.gx[idd + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g.data[i] = TEXTURE_GX<Cmplx>(idd + i * arg.threads);
#else
g.data[i] = arg.gx[idd + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g(2,0) = Conj(g(0,1) * g(1,2) - g(0,2) * g(1,1));
g(2,1) = Conj(g(0,2) * g(1,0) - g(0,0) * g(1,2));
g(2,2) = Conj(g(0,0) * g(1,1) - g(0,1) * g(1,0));
//42
}
int x[4];
getCoords(x, id, arg.X, parity);
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
int idm1 = linkIndexP1(x,arg.X,mu);
idm1 += (1 - parity) * arg.threads / 2;
//for(int i = 0; i < Elems; i++) g0.data[i] = arg.gx[idm1 + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g0.data[i] = TEXTURE_GX<Cmplx>(idm1 + i * arg.threads);
#else
g0.data[i] = arg.gx[idm1 + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g0(2,0) = Conj(g0(0,1) * g0(1,2) - g0(0,2) * g0(1,1));
g0(2,1) = Conj(g0(0,2) * g0(1,0) - g0(0,0) * g0(1,2));
g0(2,2) = Conj(g0(0,0) * g0(1,1) - g0(0,1) * g0(1,0));
//42
}
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
//T=42+4*(198*2+42) Elems=6
//T=4*(198*2) Elems=9
//Not accounting here the reconstruction of the gauge if 12 or 8!!!!!!
}
template<unsigned int Elems, typename Float, typename Gauge>
class GaugeFix : Tunable {
GaugeFixArg<Float> arg;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix(Gauge & dataOr, GaugeFixArg<Float> &arg)
: dataOr(dataOr), arg(arg) {
hipFuncSetCacheConfig( kernel_gauge_fix_U_EO<Elems, Float, Gauge>, hipFuncCachePreferL1);
}
~GaugeFix () { }
void apply(const hipStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_fix_U_EO<Elems, Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
if ( Elems == 6 ) return 1794LL * arg.threads;
else return 1536LL * arg.threads;
//Not accounting here the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return 26LL * Elems * sizeof(Float) * arg.threads;
}
};
#endif
//GAUGEFIXING_DONT_USE_GX
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, \
const unsigned int Nsteps, const unsigned int verbose_interval, \
const Float alpha0, const unsigned int autotune, const double tolerance, \
const unsigned int stopWtheta) {
TimeProfile profileInternalGaugeFixFFT("InternalGaugeFixQudaFFT", false);
profileInternalGaugeFixFFT.TPSTART(QUDA_PROFILE_COMPUTE);
Float alpha = alpha0;
std::cout << "\tAlpha parameter of the Steepest Descent Method: " << alpha << std::endl;
if ( autotune ) std::cout << "\tAuto tune active: yes" << std::endl;
else std::cout << "\tAuto tune active: no" << std::endl;
std::cout << "\tStop criterion: " << tolerance << std::endl;
if ( stopWtheta ) std::cout << "\tStop criterion method: theta" << std::endl;
else std::cout << "\tStop criterion method: Delta" << std::endl;
std::cout << "\tMaximum number of iterations: " << Nsteps << std::endl;
std::cout << "\tPrint convergence results at every " << verbose_interval << " steps" << std::endl;
typedef typename ComplexTypeId<Float>::Type Cmplx;
unsigned int delta_pad = data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3];
int4 size = make_int4( data.X()[0], data.X()[1], data.X()[2], data.X()[3] );
hipfftHandle plan_xy;
hipfftHandle plan_zt;
GaugeFixArg<Float> arg(data, Elems);
SetPlanFFT2DMany( plan_zt, size, 0, arg.delta); //for space and time ZT
SetPlanFFT2DMany( plan_xy, size, 1, arg.delta); //with space only XY
GaugeFixFFTRotateArg<Cmplx> arg_rotate(data);
GaugeFixFFTRotate<Cmplx> GFRotate(arg_rotate);
GaugeFixSETINVPSP<Float> setinvpsp(arg);
setinvpsp.apply(0);
GaugeFixINVPSP<Float> invpsp(arg);
#ifdef GAUGEFIXING_DONT_USE_GX
//without using GX, gx is still allocated, but only for the plane rotations and with a smaller size
GaugeFixNEW<Float, Gauge> gfixNew(dataOr, arg, alpha);
#else
//using GX
GaugeFix_GX<Elems, Float> calcGX(arg, alpha);
GaugeFix<Elems, Float, Gauge> gfix(dataOr, arg);
#endif
GaugeFixQualityArg<Cmplx, Gauge> argQ(dataOr, data, arg.delta);
GaugeFixQuality<Elems, Float, Gauge, gauge_dir> gfixquality(argQ);
gfixquality.apply(0);
double action0 = argQ.getAction();
printf("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta());
double diff = 0.0;
int iter = 0;
for ( iter = 0; iter < Nsteps; iter++ ) {
for ( int k = 0; k < 6; k++ ) {
//------------------------------------------------------------------------
// Set a pointer do the element k in lattice volume
// each element is stored with stride lattice volume
// it uses gx as temporary array!!!!!!
//------------------------------------------------------------------------
Cmplx *_array = arg.delta + k * delta_pad;
////// 2D FFT + 2D FFT
//------------------------------------------------------------------------
// Perform FFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, _array, arg.gx, HIPFFT_FORWARD);
//------------------------------------------------------------------------
// Rotate hypercube, xyzt -> ztxy
//------------------------------------------------------------------------
GFRotate.setDirection(0, arg.gx, _array);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform FFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, _array, arg.gx, HIPFFT_FORWARD);
//------------------------------------------------------------------------
// Normalize FFT and apply pmax^2/p^2
//------------------------------------------------------------------------
invpsp.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, arg.gx, _array, HIPFFT_BACKWARD);
//------------------------------------------------------------------------
// Rotate hypercube, ztxy -> xyzt
//------------------------------------------------------------------------
GFRotate.setDirection(1, _array, arg.gx);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, arg.gx, _array, HIPFFT_BACKWARD);
}
#ifdef GAUGEFIXING_DONT_USE_GX
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfixNew.apply(0);
#else
//------------------------------------------------------------------------
// Calculate g(x)
//------------------------------------------------------------------------
calcGX.apply(0);
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfix.apply(0);
#endif
//------------------------------------------------------------------------
// Measure gauge quality and recalculate new Delta(x)
//------------------------------------------------------------------------
gfixquality.apply(0);
double action = argQ.getAction();
diff = abs(action0 - action);
if ((iter % verbose_interval) == (verbose_interval - 1))
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
if ( autotune && ((action - action0) < -1e-14) ) {
if ( alpha > 0.01 ) {
alpha = 0.95 * alpha;
#ifdef GAUGEFIXING_DONT_USE_GX
gfixNew.setAlpha(alpha);
#else
calcGX.setAlpha(alpha);
#endif
printf(">>>>>>>>>>>>>> Warning: changing alpha down -> %.4e\n", alpha );
}
}
//------------------------------------------------------------------------
// Check gauge fix quality criterion
//------------------------------------------------------------------------
if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; }
else { if ( diff < tolerance ) break; }
action0 = action;
}
if ((iter % verbose_interval) != 0 )
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter, argQ.getAction(), argQ.getTheta(), diff);
// Reunitarize at end
const double unitarize_eps = 1e-14;
const double max_error = 1e-10;
const int reunit_allow_svd = 1;
const int reunit_svd_only = 0;
const double svd_rel_error = 1e-6;
const double svd_abs_error = 1e-6;
setUnitarizeLinksConstants(unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only,
svd_rel_error, svd_abs_error);
int num_failures = 0;
int* num_failures_dev;
hipMalloc((void**)&num_failures_dev, sizeof(int));
if ( num_failures_dev == NULL ) errorQuda("hipMalloc failed for dev_pointer\n");
hipMemset(num_failures_dev, 0, sizeof(int));
unitarizeLinksQuda(data, data, num_failures_dev);
hipMemcpy(&num_failures, num_failures_dev, sizeof(int), hipMemcpyDeviceToHost);
if ( num_failures > 0 ) {
hipFree(num_failures_dev);
errorQuda("Error in the unitarization\n");
exit(1);
}
hipFree(num_failures_dev);
// end reunitarize
arg.free();
CUFFT_SAFE_CALL(hipfftDestroy(plan_zt));
CUFFT_SAFE_CALL(hipfftDestroy(plan_xy));
checkCudaError();
hipDeviceSynchronize();
profileInternalGaugeFixFFT.TPSTOP(QUDA_PROFILE_COMPUTE);
if (getVerbosity() > QUDA_SUMMARIZE){
double secs = profileInternalGaugeFixFFT.Last(QUDA_PROFILE_COMPUTE);
double fftflop = 5.0 * (log2((double)( data.X()[0] * data.X()[1]) ) + log2( (double)(data.X()[2] * data.X()[3] )));
fftflop *= (double)( data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3] );
double gflops = setinvpsp.flops() + gfixquality.flops();
double gbytes = setinvpsp.bytes() + gfixquality.bytes();
double flop = invpsp.flops() * Elems;
double byte = invpsp.bytes() * Elems;
flop += (GFRotate.flops() + fftflop) * Elems * 2;
byte += GFRotate.bytes() * Elems * 4; //includes FFT reads, assuming 1 read and 1 write per site
#ifdef GAUGEFIXING_DONT_USE_GX
flop += gfixNew.flops();
byte += gfixNew.bytes();
#else
flop += calcGX.flops();
byte += calcGX.bytes();
flop += gfix.flops();
byte += gfix.bytes();
#endif
flop += gfixquality.flops();
byte += gfixquality.bytes();
gflops += flop * iter;
gbytes += byte * iter;
gflops += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; //Reunitarize at end
gbytes += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes() ; //Reunitarize at end
gflops = (gflops * 1e-9) / (secs);
gbytes = gbytes / (secs * 1e9);
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
}
}
template<unsigned int Elems, typename Float, typename Gauge>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
if ( gauge_dir != 3 ) {
printf("Starting Landau gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 4>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
else {
printf("Starting Coulomb gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 3>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
}
template<typename Float>
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
// Need to fix this!!
//9 and 6 are the numbers of complex elements used to store g(x) and Delta(x)
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
gaugefixingFFT<9, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
//printfQuda("QUDA_RECONSTRUCT_12\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
//printfQuda("QUDA_RECONSTRUCT_8\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/**
* @brief Gauge fixing with Steepest descent method with FFTs with support for single GPU only.
* @param[in,out] data, quda gauge field
* @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing
* @param[in] Nsteps, maximum number of steps to perform gauge fixing
* @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this
* @param[in] alpha, gauge fixing parameter of the method, most common value is 0.08
* @param[in] autotune, 1 to autotune the method, i.e., if the Fg inverts its tendency we decrease the alpha value
* @param[in] tolerance, tolerance value to stop the method; if this value is zero then the method stops only when the iteration count reaches the maximum number of steps defined by Nsteps
* @param[in] stopWtheta, 0 for MILC criterium and 1 to use the theta value
*/
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const double alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
#ifdef GPU_GAUGE_ALG
#ifdef MULTI_GPU
if(comm_dim_partitioned(0) || comm_dim_partitioned(1) || comm_dim_partitioned(2) || comm_dim_partitioned(3))
errorQuda("Gauge Fixing with FFTs in multi-GPU support NOT implemented yet!\n");
#endif
if ( data.Precision() == QUDA_HALF_PRECISION ) {
errorQuda("Half precision not supported\n");
}
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
gaugefixingFFT<float> (data, gauge_dir, Nsteps, verbose_interval, (float)alpha, autotune, tolerance, stopWtheta);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
gaugefixingFFT<double>(data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge fixing has not been built");
#endif
}
}
d9eb280258036149b4e6814bd990b51dfe73e2f4.cu
#include <quda_internal.h>
#include <quda_matrix.h>
#include <tune_quda.h>
#include <gauge_field.h>
#include <gauge_field_order.h>
#include <launch_kernel.cuh>
#include <unitarization_links.h>
#include <atomic.cuh>
#include <cub_helper.cuh>
#include <index_helper.cuh>
#include <cufft.h>
#ifdef GPU_GAUGE_ALG
#include <CUFFT_Plans.h>
#endif
namespace quda {
#ifdef GPU_GAUGE_ALG
//Comment if you don't want to use textures for Delta(x) and g(x)
#define GAUGEFIXING_SITE_MATRIX_LOAD_TEX
//UNCOMMENT THIS IF YOU WANT TO USE LESS MEMORY
#define GAUGEFIXING_DONT_USE_GX
//Without using the precalculation of g(x),
//we lose some performance, because Delta(x) is written in the normal lattice coordinates needed for the FFTs
//while the gauge array is in even/odd format
#ifdef GAUGEFIXING_DONT_USE_GX
#warning Not using precalculated g(x)
#else
#warning Using precalculated g(x)
#endif
#ifndef FL_UNITARIZE_PI
#define FL_UNITARIZE_PI 3.14159265358979323846
#endif
texture<float2, 1, cudaReadModeElementType> GXTexSingle;
texture<int4, 1, cudaReadModeElementType> GXTexDouble;
//Delta is stored using only 12 real parameters, i.e. the complex elements
// (0,0), (0,1), (0,2), (1,1), (1,2) and (2,2)
// (0,0), (1,1) and (2,2) have no real part, however we need a complex number for the FFTs
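//Layout sketch (an editor's reading of the stores in computeFix_quality and kernel_gauge_GX below,
//not an original comment): component k of Delta at full-lattice site x is kept at
//  delta[x + k * Volume]   with k = 0..5 ordered as (0,0),(0,1),(0,2),(1,1),(1,2),(2,2),
//so each of the six complex components forms one contiguous plane of size Volume that can be FFT'ed independently.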
texture<float2, 1, cudaReadModeElementType> DELTATexSingle;
texture<int4, 1, cudaReadModeElementType> DELTATexDouble;
template <class T>
inline __device__ T TEXTURE_GX(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_GX<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(GXTexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_GX<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(GXTexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
template <class T>
inline __device__ T TEXTURE_DELTA(int id){
return 0.0;
}
template <>
inline __device__ typename ComplexTypeId<float>::Type TEXTURE_DELTA<typename ComplexTypeId<float>::Type>(int id){
return tex1Dfetch(DELTATexSingle, id);
}
template <>
inline __device__ typename ComplexTypeId<double>::Type TEXTURE_DELTA<typename ComplexTypeId<double>::Type>(int id){
int4 u = tex1Dfetch(DELTATexDouble, id);
return makeComplex(__hiloint2double(u.y, u.x), __hiloint2double(u.w, u.z));
}
static void BindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaBindTexture(0, GXTexSingle, gx, bytes);
#endif
cudaBindTexture(0, DELTATexSingle, delta, bytes);
#endif
}
static void BindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx, size_t bytes){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaBindTexture(0, GXTexDouble, gx, bytes);
#endif
cudaBindTexture(0, DELTATexDouble, delta, bytes);
#endif
}
static void UnBindTex(typename ComplexTypeId<float>::Type *delta, typename ComplexTypeId<float>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaUnbindTexture(GXTexSingle);
#endif
cudaUnbindTexture(DELTATexSingle);
#endif
}
static void UnBindTex(typename ComplexTypeId<double>::Type *delta, typename ComplexTypeId<double>::Type *gx){
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
#ifndef GAUGEFIXING_DONT_USE_GX
cudaUnbindTexture(GXTexDouble);
#endif
cudaUnbindTexture(DELTATexDouble);
#endif
}
template <typename Cmplx>
struct GaugeFixFFTRotateArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
Cmplx *tmp0;
Cmplx *tmp1;
GaugeFixFFTRotateArg(const cudaGaugeField &data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
tmp0 = 0;
tmp1 = 0;
}
};
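//Editor's note (not an original comment): the 4D FFT is realised as a batched 2D FFT over the two
//fastest-running dimensions followed by a second batched 2D FFT, so between the two passes the
//kernel below reshuffles the data from xyzt order to ztxy order (direction 0) and back again
//(direction 1); tmp0 is the input buffer and tmp1 the output buffer.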
template <int direction, typename Cmplx>
__global__ void fft_rotate_kernel_2D2D(GaugeFixFFTRotateArg<Cmplx> arg){ //Cmplx *data_in, Cmplx *data_out){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
if ( direction == 0 ) {
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
int id = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
int id_out = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
if ( direction == 1 ) {
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
int id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
int id_out = x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0];
arg.tmp1[id_out] = arg.tmp0[id];
//data_out[id_out] = data_in[id];
}
}
template<typename Cmplx>
class GaugeFixFFTRotate : Tunable {
GaugeFixFFTRotateArg<Cmplx> arg;
unsigned int direction;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixFFTRotate(GaugeFixFFTRotateArg<Cmplx> &arg)
: arg(arg) {
direction = 0;
}
~GaugeFixFFTRotate () {
}
void setDirection(unsigned int dir, Cmplx *data_in, Cmplx *data_out){
direction = dir;
arg.tmp0 = data_in;
arg.tmp1 = data_out;
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
if ( direction == 0 )
fft_rotate_kernel_2D2D<0, Cmplx><< < tp.grid, tp.block, 0, stream >> > (arg);
else if ( direction == 1 )
fft_rotate_kernel_2D2D<1, Cmplx><< < tp.grid, tp.block, 0, stream >> > (arg);
else
errorQuda("Error in GaugeFixFFTRotate option.\n");
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Cmplx) / 2);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 0;
}
long long bytes() const {
return 2LL * sizeof(Cmplx) * arg.threads;
}
};
template <typename Cmplx, typename Gauge>
struct GaugeFixQualityArg : public ReduceArg<double2> {
int threads; // number of active threads required
int X[4]; // grid dimensions
Gauge dataOr;
Cmplx *delta;
GaugeFixQualityArg(const Gauge &dataOr, const cudaGaugeField &data, Cmplx * delta)
: ReduceArg<double2>(), dataOr(dataOr), delta(delta) {
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = data.VolumeCB();
}
double getAction(){ return result_h[0].x; }
double getTheta(){ return result_h[0].y; }
};
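//Editor's summary of the kernel below (derived from the code, not an original comment):
//for every even/odd site it accumulates Delta(x) = sum_mu [ U_mu(x-mu) - U_mu(x) ], takes its
//traceless anti-hermitian part, stores the six independent complex elements into argQ.delta for
//the FFTs, and reduces
//  data.x = Re Tr sum_mu U_mu(x)                  (gauge functional / action term)
//  data.y = Re Tr [ Delta(x) Delta(x)^dagger ]    (theta term)
//over the lattice; the volume and colour normalisation is applied afterwards in apply().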
template<int blockSize, unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
__global__ void computeFix_quality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
double2 data = make_double2(0.0,0.0);
if ( idx < argQ.threads ) {
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, idx, argQ.X, parity);
Matrix<Cmplx,3> delta;
setZero(&delta);
//idx = linkIndex(x,X);
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),idx, mu, parity);
delta -= U;
}
//18*gauge_dir
data.x = -delta(0,0).x - delta(1,1).x - delta(2,2).x;
//2
for ( int mu = 0; mu < gauge_dir; mu++ ) {
Matrix<Cmplx,3> U;
argQ.dataOr.load((Float*)(U.data),linkIndexM1(x,argQ.X,mu), mu, 1 - parity);
delta += U;
}
//18*gauge_dir
delta -= conj(delta);
//18
//SAVE DELTA!!!!!
SubTraceUnit(delta);
idx = getIndexFull(idx, argQ.X, parity);
//Saving Delta
argQ.delta[idx] = delta(0,0);
argQ.delta[idx + 2 * argQ.threads] = delta(0,1);
argQ.delta[idx + 4 * argQ.threads] = delta(0,2);
argQ.delta[idx + 6 * argQ.threads] = delta(1,1);
argQ.delta[idx + 8 * argQ.threads] = delta(1,2);
argQ.delta[idx + 10 * argQ.threads] = delta(2,2);
//12
data.y = getRealTraceUVdagger(delta, delta);
//35
//T=36*gauge_dir+65
}
reduce2d<blockSize,2>(argQ, data);
}
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
class GaugeFixQuality : TunableLocalParity {
GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> argQ;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int minThreads() const { return argQ.threads; }
public:
GaugeFixQuality(GaugeFixQualityArg<typename ComplexTypeId<Float>::Type, Gauge> &argQ)
: argQ(argQ) {
}
~GaugeFixQuality () { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
argQ.result_h[0] = make_double2(0.0,0.0);
LAUNCH_KERNEL_LOCAL_PARITY(computeFix_quality, tp, stream, argQ, Elems, Float, Gauge, gauge_dir);
cudaDeviceSynchronize();
argQ.result_h[0].x /= (double)(3 * gauge_dir * 2 * argQ.threads);
argQ.result_h[0].y /= (double)(3 * 2 * argQ.threads);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << argQ.X[0] << "x" << argQ.X[1] << "x" << argQ.X[2] << "x" << argQ.X[3];
sprintf(aux_string,"threads=%d,prec=%d,gaugedir=%d",argQ.threads, sizeof(Float),gauge_dir);
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return (36LL * gauge_dir + 65LL) * 2 * argQ.threads;
} // Only correct if there is no link reconstruction; the cub reduction is also not accounted for
long long bytes() const {
return (2LL * gauge_dir + 2LL) * Elems * 2 * argQ.threads * sizeof(Float);
} //Not accounting for the reduction!!!
};
template <typename Float>
struct GaugeFixArg {
int threads; // number of active threads required
int X[4]; // grid dimensions
cudaGaugeField &data;
Float *invpsq;
typename ComplexTypeId<Float>::Type *delta;
typename ComplexTypeId<Float>::Type *gx;
GaugeFixArg( cudaGaugeField & data, const unsigned int Elems) : data(data){
for ( int dir = 0; dir < 4; ++dir ) X[dir] = data.X()[dir];
threads = X[0] * X[1] * X[2] * X[3];
invpsq = (Float*)device_malloc(sizeof(Float) * threads);
delta = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * 6);
#ifdef GAUGEFIXING_DONT_USE_GX
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads);
#else
gx = (typename ComplexTypeId<Float>::Type *)device_malloc(sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
#endif
BindTex(delta, gx, sizeof(typename ComplexTypeId<Float>::Type) * threads * Elems);
}
void free(){
UnBindTex(delta, gx);
device_free(invpsq);
device_free(delta);
device_free(gx);
}
};
template <typename Float>
__global__ void kernel_gauge_set_invpsq(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
int x1 = id / (arg.X[2] * arg.X[3] * arg.X[0]);
int x0 = (id / (arg.X[2] * arg.X[3])) % arg.X[0];
int x3 = (id / arg.X[2]) % arg.X[3];
int x2 = id % arg.X[2];
//id = x2 + (x3 + (x0 + x1 * arg.X[0]) * arg.X[3]) * arg.X[2];
Float sx = sin( (Float)x0 * FL_UNITARIZE_PI / (Float)arg.X[0]);
Float sy = sin( (Float)x1 * FL_UNITARIZE_PI / (Float)arg.X[1]);
Float sz = sin( (Float)x2 * FL_UNITARIZE_PI / (Float)arg.X[2]);
Float st = sin( (Float)x3 * FL_UNITARIZE_PI / (Float)arg.X[3]);
Float sinsq = sx * sx + sy * sy + sz * sz + st * st;
Float prcfact = 0.0;
//The FFT normalization is done here
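//Editor's note (derived from the expression below, not an original comment):
//sinsq = sum_mu sin^2(pi x_mu / L_mu), so the lattice momentum squared is phat^2 = 4*sinsq and
//its maximum value is phat^2_max = 16; hence 4/sinsq = phat^2_max / phat^2, and the extra factor
//1/arg.threads (= 1/Volume) absorbs the normalisation of the unnormalised inverse cuFFT transform.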
if ( sinsq > 0.00001 ) prcfact = 4.0 / (sinsq * (Float)arg.threads);
arg.invpsq[id] = prcfact;
}
template<typename Float>
class GaugeFixSETINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
bool tuneSharedBytes() const {
return false;
} // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixSETINVPSP(GaugeFixArg<Float> &arg) : arg(arg) { }
~GaugeFixSETINVPSP () { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_set_invpsq<Float><< < tp.grid, tp.block, 0, stream >> > (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
return 21 * arg.threads;
}
long long bytes() const {
return sizeof(Float) * arg.threads;
}
};
template<typename Float>
__global__ void kernel_gauge_mult_norm_2D(GaugeFixArg<Float> arg){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id < arg.threads ) arg.gx[id] = arg.gx[id] * arg.invpsq[id];
}
template<typename Float>
class GaugeFixINVPSP : Tunable {
GaugeFixArg<Float> arg;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFixINVPSP(GaugeFixArg<Float> &arg)
: arg(arg){
cudaFuncSetCacheConfig( kernel_gauge_mult_norm_2D<Float>, cudaFuncCachePreferL1);
}
~GaugeFixINVPSP () {
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_mult_norm_2D<Float><< < tp.grid, tp.block, 0, stream >> > (arg);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
void preTune(){
//since delta contents are irrelevant at this point, we can swap gx with delta
typename ComplexTypeId<Float>::Type *tmp = arg.gx;
arg.gx = arg.delta;
arg.delta = tmp;
}
void postTune(){
arg.gx = arg.delta;
}
long long flops() const {
return 2LL * arg.threads;
}
long long bytes() const {
return 5LL * sizeof(Float) * arg.threads;
}
};
template<typename Cmplx>
__device__ __host__ inline typename RealTypeId<Cmplx>::Type Abs2(const Cmplx & a){
return a.x * a.x + a.y * a.y;
}
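//Editor's summary of reunit_link below (not an original comment): project a 3x3 complex matrix
//back onto SU(3) by a Gram-Schmidt step: normalise row 0, orthogonalise row 1 against row 0 and
//normalise it, then rebuild row 2 as the conjugate cross product of the first two rows.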
template <typename Float>
__host__ __device__ inline void reunit_link( Matrix<typename ComplexTypeId<Float>::Type,3> &U ){
typedef typename ComplexTypeId<Float>::Type Cmplx;
Cmplx t2 = makeComplex((Float)0.0, (Float)0.0);
Float t1 = 0.0;
//first normalize first row
//sum of squares of row
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(0, c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(0,c) *= t1;
//6
#pragma unroll
for ( int c = 0; c < 3; c++ ) t2 += Conj(U(0,c)) * U(1,c);
//24
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1,c) -= t2 * U(0,c);
//24
//normalize second row
//sum of squares of row
t1 = 0.0;
#pragma unroll
for ( int c = 0; c < 3; c++ ) t1 += Abs2(U(1,c));
t1 = (Float)1.0 / sqrt(t1);
//14
//used to normalize row
#pragma unroll
for ( int c = 0; c < 3; c++ ) U(1, c) *= t1;
//6
//Reconstruct last row
U(2,0) = Conj(U(0,1) * U(1,2) - U(0,2) * U(1,1));
U(2,1) = Conj(U(0,2) * U(1,0) - U(0,0) * U(1,2));
U(2,2) = Conj(U(0,0) * U(1,1) - U(0,1) * U(1,0));
//42
//T=130
}
#ifdef GAUGEFIXING_DONT_USE_GX
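//Editor's sketch of the kernel below (not an original comment): instead of storing g(x), the
//gauge transformation is rebuilt on the fly from the already FFT-filtered Delta field,
//  g(x) = reunit( 1 + half_alpha * Delta(x) ),
//and every link is updated as U_mu(x) <- g(x) * U_mu(x) * g(x+mu)^dagger.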
template <typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO_NEW( GaugeFixArg<Float> arg, Gauge dataOr, Float half_alpha){
int id = threadIdx.x + blockIdx.x * blockDim.x;
int parity = threadIdx.y;
if ( id >= arg.threads/2 ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
int x[4];
getCoords(x, id, arg.X, parity);
int idx = ((x[3] * arg.X[2] + x[2]) * arg.X[1] + x[1]) * arg.X[0] + x[0];
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
idx = linkNormalIndexP1(x,arg.X,mu);
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(idx + 0 * arg.threads);
de(0,1) = TEXTURE_DELTA<Cmplx>(idx + 1 * arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(idx + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(idx + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(idx + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(idx + 5 * arg.threads);
#else
de(0,0) = arg.delta[idx + 0 * arg.threads];
de(0,1) = arg.delta[idx + 1 * arg.threads];
de(0,2) = arg.delta[idx + 2 * arg.threads];
de(1,1) = arg.delta[idx + 3 * arg.threads];
de(1,2) = arg.delta[idx + 4 * arg.threads];
de(2,2) = arg.delta[idx + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
setIdentity(&g0);
g0 += de * half_alpha;
//36
reunit_link<Float>( g0 );
//130
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
}
template<typename Float, typename Gauge>
class GaugeFixNEW : TunableLocalParity {
GaugeFixArg<Float> arg;
Float half_alpha;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
// since GaugeFixArg is used by other kernels that don't use
// tunableLocalParity, arg.threads stores Volume and not VolumeCB
// so we need to divide by two
unsigned int minThreads() const { return arg.threads/2; }
public:
GaugeFixNEW(Gauge & dataOr, GaugeFixArg<Float> &arg, Float alpha)
: dataOr(dataOr), arg(arg) {
half_alpha = alpha * 0.5;
cudaFuncSetCacheConfig( kernel_gauge_fix_U_EO_NEW<Float, Gauge>, cudaFuncCachePreferL1);
}
~GaugeFixNEW () { }
void setAlpha(Float alpha){ half_alpha = alpha * 0.5; }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_fix_U_EO_NEW<Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x" << arg.X[1] << "x" << arg.X[2] << "x" << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
return 2414LL * arg.threads;
//Not accounting here for the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return ( dataOr.Bytes() * 4LL + 5 * 12LL * sizeof(Float)) * arg.threads;
}
};
#else
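//Editor's sketch (not an original comment): in this branch the transformation is split into two
//kernels: kernel_gauge_GX precomputes g(x) = reunit( 1 + half_alpha * Delta(x) ) for every site
//and stores it in even/odd order in arg.gx, and kernel_gauge_fix_U_EO then applies
//U_mu(x) <- g(x) * U_mu(x) * g(x+mu)^dagger reading g from that array.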
template <unsigned int Elems, typename Float>
__global__ void kernel_gauge_GX(GaugeFixArg<Float> arg, Float half_alpha){
int id = blockIdx.x * blockDim.x + threadIdx.x;
if ( id >= arg.threads ) return;
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> de;
//Read Delta
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
de(0,0) = TEXTURE_DELTA<Cmplx>(id);
de(0,1) = TEXTURE_DELTA<Cmplx>(id + arg.threads);
de(0,2) = TEXTURE_DELTA<Cmplx>(id + 2 * arg.threads);
de(1,1) = TEXTURE_DELTA<Cmplx>(id + 3 * arg.threads);
de(1,2) = TEXTURE_DELTA<Cmplx>(id + 4 * arg.threads);
de(2,2) = TEXTURE_DELTA<Cmplx>(id + 5 * arg.threads);
#else
de(0,0) = arg.delta[id];
de(0,1) = arg.delta[id + arg.threads];
de(0,2) = arg.delta[id + 2 * arg.threads];
de(1,1) = arg.delta[id + 3 * arg.threads];
de(1,2) = arg.delta[id + 4 * arg.threads];
de(2,2) = arg.delta[id + 5 * arg.threads];
#endif
de(1,0) = makeComplex(-de(0,1).x, de(0,1).y);
de(2,0) = makeComplex(-de(0,2).x, de(0,2).y);
de(2,1) = makeComplex(-de(1,2).x, de(1,2).y);
Matrix<Cmplx,3> g;
setIdentity(&g);
g += de * half_alpha;
//36
reunit_link<Float>( g );
//130
//gx is represented in even/odd order
//normal lattice index to even/odd index
int x3 = id / (arg.X[0] * arg.X[1] * arg.X[2]);
int x2 = (id / (arg.X[0] * arg.X[1])) % arg.X[2];
int x1 = (id / arg.X[0]) % arg.X[1];
int x0 = id % arg.X[0];
id = (x0 + (x1 + (x2 + x3 * arg.X[2]) * arg.X[1]) * arg.X[0]) >> 1;
id += ((x0 + x1 + x2 + x3) & 1 ) * arg.threads / 2;
for ( int i = 0; i < Elems; i++ ) arg.gx[id + i * arg.threads] = g.data[i];
//T=166 for Elems 9
//T=208 for Elems 6
}
template<unsigned int Elems, typename Float>
class GaugeFix_GX : Tunable {
GaugeFixArg<Float> arg;
Float half_alpha;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix_GX(GaugeFixArg<Float> &arg, Float alpha)
: arg(arg) {
half_alpha = alpha * 0.5;
cudaFuncSetCacheConfig( kernel_gauge_GX<Elems, Float>, cudaFuncCachePreferL1);
}
~GaugeFix_GX () {
}
void setAlpha(Float alpha){
half_alpha = alpha * 0.5;
}
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_GX<Elems, Float><< < tp.grid, tp.block, 0, stream >> > (arg, half_alpha);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
long long flops() const {
if ( Elems == 6 ) return 208LL * arg.threads;
else return 166LL * arg.threads;
}
long long bytes() const {
return 4LL * Elems * sizeof(Float) * arg.threads;
}
};
template <unsigned int Elems, typename Float, typename Gauge>
__global__ void kernel_gauge_fix_U_EO( GaugeFixArg<Float> arg, Gauge dataOr){
int idd = threadIdx.x + blockIdx.x * blockDim.x;
if ( idd >= arg.threads ) return;
int parity = 0;
int id = idd;
if ( idd >= arg.threads / 2 ) {
parity = 1;
id -= arg.threads / 2;
}
typedef typename ComplexTypeId<Float>::Type Cmplx;
Matrix<Cmplx,3> g;
//for(int i = 0; i < Elems; i++) g.data[i] = arg.gx[idd + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g.data[i] = TEXTURE_GX<Cmplx>(idd + i * arg.threads);
#else
g.data[i] = arg.gx[idd + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g(2,0) = Conj(g(0,1) * g(1,2) - g(0,2) * g(1,1));
g(2,1) = Conj(g(0,2) * g(1,0) - g(0,0) * g(1,2));
g(2,2) = Conj(g(0,0) * g(1,1) - g(0,1) * g(1,0));
//42
}
int x[4];
getCoords(x, id, arg.X, parity);
for ( int mu = 0; mu < 4; mu++ ) {
Matrix<Cmplx,3> U;
Matrix<Cmplx,3> g0;
dataOr.load((Float*)(U.data),id, mu, parity);
U = g * U;
//198
int idm1 = linkIndexP1(x,arg.X,mu);
idm1 += (1 - parity) * arg.threads / 2;
//for(int i = 0; i < Elems; i++) g0.data[i] = arg.gx[idm1 + i * arg.threads];
for ( int i = 0; i < Elems; i++ ) {
#ifdef GAUGEFIXING_SITE_MATRIX_LOAD_TEX
g0.data[i] = TEXTURE_GX<Cmplx>(idm1 + i * arg.threads);
#else
g0.data[i] = arg.gx[idm1 + i * arg.threads];
#endif
}
if ( Elems == 6 ) {
g0(2,0) = Conj(g0(0,1) * g0(1,2) - g0(0,2) * g0(1,1));
g0(2,1) = Conj(g0(0,2) * g0(1,0) - g0(0,0) * g0(1,2));
g0(2,2) = Conj(g0(0,0) * g0(1,1) - g0(0,1) * g0(1,0));
//42
}
U = U * conj(g0);
//198
dataOr.save((Float*)(U.data),id, mu, parity);
}
//T=42+4*(198*2+42) Elems=6
//T=4*(198*2) Elems=9
//Not accounting here for the reconstruction of the gauge if 12 or 8!!!!!!
}
template<unsigned int Elems, typename Float, typename Gauge>
class GaugeFix : Tunable {
GaugeFixArg<Float> arg;
Gauge dataOr;
mutable char aux_string[128]; // used as a label in the autotuner
private:
unsigned int sharedBytesPerThread() const {
return 0;
}
unsigned int sharedBytesPerBlock(const TuneParam ¶m) const {
return 0;
}
//bool tuneSharedBytes() const { return false; } // Don't tune shared memory
bool tuneGridDim() const {
return false;
} // Don't tune the grid dimensions.
unsigned int minThreads() const {
return arg.threads;
}
public:
GaugeFix(Gauge & dataOr, GaugeFixArg<Float> &arg)
: dataOr(dataOr), arg(arg) {
cudaFuncSetCacheConfig( kernel_gauge_fix_U_EO<Elems, Float, Gauge>, cudaFuncCachePreferL1);
}
~GaugeFix () { }
void apply(const cudaStream_t &stream){
TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
kernel_gauge_fix_U_EO<Elems, Float, Gauge><< < tp.grid, tp.block, 0, stream >> > (arg, dataOr);
}
TuneKey tuneKey() const {
std::stringstream vol;
vol << arg.X[0] << "x";
vol << arg.X[1] << "x";
vol << arg.X[2] << "x";
vol << arg.X[3];
sprintf(aux_string,"threads=%d,prec=%d",arg.threads, sizeof(Float));
return TuneKey(vol.str().c_str(), typeid(*this).name(), aux_string);
}
std::string paramString(const TuneParam ¶m) const {
std::stringstream ps;
ps << "block=(" << param.block.x << "," << param.block.y << "," << param.block.z << ")";
ps << "shared=" << param.shared_bytes;
return ps.str();
}
//need this
void preTune() {
arg.data.backup();
}
void postTune() {
arg.data.restore();
}
long long flops() const {
if ( Elems == 6 ) return 1794LL * arg.threads;
else return 1536LL * arg.threads;
//Not accounting here for the reconstruction of the gauge if 12 or 8!!!!!!
}
long long bytes() const {
return 26LL * Elems * sizeof(Float) * arg.threads;
}
};
#endif
//GAUGEFIXING_DONT_USE_GX
template<unsigned int Elems, typename Float, typename Gauge, int gauge_dir>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, \
const unsigned int Nsteps, const unsigned int verbose_interval, \
const Float alpha0, const unsigned int autotune, const double tolerance, \
const unsigned int stopWtheta) {
TimeProfile profileInternalGaugeFixFFT("InternalGaugeFixQudaFFT", false);
profileInternalGaugeFixFFT.TPSTART(QUDA_PROFILE_COMPUTE);
Float alpha = alpha0;
std::cout << "\tAlpha parameter of the Steepest Descent Method: " << alpha << std::endl;
if ( autotune ) std::cout << "\tAuto tune active: yes" << std::endl;
else std::cout << "\tAuto tune active: no" << std::endl;
std::cout << "\tStop criterion: " << tolerance << std::endl;
if ( stopWtheta ) std::cout << "\tStop criterion method: theta" << std::endl;
else std::cout << "\tStop criterion method: Delta" << std::endl;
std::cout << "\tMaximum number of iterations: " << Nsteps << std::endl;
std::cout << "\tPrint convergence results at every " << verbose_interval << " steps" << std::endl;
typedef typename ComplexTypeId<Float>::Type Cmplx;
unsigned int delta_pad = data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3];
int4 size = make_int4( data.X()[0], data.X()[1], data.X()[2], data.X()[3] );
cufftHandle plan_xy;
cufftHandle plan_zt;
GaugeFixArg<Float> arg(data, Elems);
SetPlanFFT2DMany( plan_zt, size, 0, arg.delta); //for space and time ZT
SetPlanFFT2DMany( plan_xy, size, 1, arg.delta); //with space only XY
GaugeFixFFTRotateArg<Cmplx> arg_rotate(data);
GaugeFixFFTRotate<Cmplx> GFRotate(arg_rotate);
GaugeFixSETINVPSP<Float> setinvpsp(arg);
setinvpsp.apply(0);
GaugeFixINVPSP<Float> invpsp(arg);
#ifdef GAUGEFIXING_DONT_USE_GX
//without using GX, gx is still created, but only as a smaller temporary buffer for the FFTs and plane rotation
GaugeFixNEW<Float, Gauge> gfixNew(dataOr, arg, alpha);
#else
//using GX
GaugeFix_GX<Elems, Float> calcGX(arg, alpha);
GaugeFix<Elems, Float, Gauge> gfix(dataOr, arg);
#endif
GaugeFixQualityArg<Cmplx, Gauge> argQ(dataOr, data, arg.delta);
GaugeFixQuality<Elems, Float, Gauge, gauge_dir> gfixquality(argQ);
gfixquality.apply(0);
double action0 = argQ.getAction();
printf("Step: %d\tAction: %.16e\ttheta: %.16e\n", 0, argQ.getAction(), argQ.getTheta());
double diff = 0.0;
int iter = 0;
for ( iter = 0; iter < Nsteps; iter++ ) {
for ( int k = 0; k < 6; k++ ) {
//------------------------------------------------------------------------
// Set a pointer to element k in the lattice volume
// each element is stored with a stride of the lattice volume
// gx is used as a temporary array!!!!!!
//------------------------------------------------------------------------
Cmplx *_array = arg.delta + k * delta_pad;
////// 2D FFT + 2D FFT
//------------------------------------------------------------------------
// Perform FFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, _array, arg.gx, CUFFT_FORWARD);
//------------------------------------------------------------------------
// Rotate hypercube, xyzt -> ztxy
//------------------------------------------------------------------------
GFRotate.setDirection(0, arg.gx, _array);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform FFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, _array, arg.gx, CUFFT_FORWARD);
//------------------------------------------------------------------------
// Normalize FFT and apply pmax^2/p^2
//------------------------------------------------------------------------
invpsp.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on zt plane
//------------------------------------------------------------------------
ApplyFFT(plan_zt, arg.gx, _array, CUFFT_INVERSE);
//------------------------------------------------------------------------
// Rotate hypercube, ztxy -> xyzt
//------------------------------------------------------------------------
GFRotate.setDirection(1, _array, arg.gx);
GFRotate.apply(0);
//------------------------------------------------------------------------
// Perform IFFT on xy plane
//------------------------------------------------------------------------
ApplyFFT(plan_xy, arg.gx, _array, CUFFT_INVERSE);
}
#ifdef GAUGEFIXING_DONT_USE_GX
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfixNew.apply(0);
#else
//------------------------------------------------------------------------
// Calculate g(x)
//------------------------------------------------------------------------
calcGX.apply(0);
//------------------------------------------------------------------------
// Apply gauge fix to current gauge field
//------------------------------------------------------------------------
gfix.apply(0);
#endif
//------------------------------------------------------------------------
// Measure gauge quality and recalculate new Delta(x)
//------------------------------------------------------------------------
gfixquality.apply(0);
double action = argQ.getAction();
diff = abs(action0 - action);
if ((iter % verbose_interval) == (verbose_interval - 1))
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter + 1, argQ.getAction(), argQ.getTheta(), diff);
if ( autotune && ((action - action0) < -1e-14) ) {
if ( alpha > 0.01 ) {
alpha = 0.95 * alpha;
#ifdef GAUGEFIXING_DONT_USE_GX
gfixNew.setAlpha(alpha);
#else
calcGX.setAlpha(alpha);
#endif
printf(">>>>>>>>>>>>>> Warning: changing alpha down -> %.4e\n", alpha );
}
}
//------------------------------------------------------------------------
// Check gauge fixing quality criterion
//------------------------------------------------------------------------
if ( stopWtheta ) { if ( argQ.getTheta() < tolerance ) break; }
else { if ( diff < tolerance ) break; }
action0 = action;
}
if ((iter % verbose_interval) != 0 )
printf("Step: %d\tAction: %.16e\ttheta: %.16e\tDelta: %.16e\n", iter, argQ.getAction(), argQ.getTheta(), diff);
// Reunitarize at end
const double unitarize_eps = 1e-14;
const double max_error = 1e-10;
const int reunit_allow_svd = 1;
const int reunit_svd_only = 0;
const double svd_rel_error = 1e-6;
const double svd_abs_error = 1e-6;
setUnitarizeLinksConstants(unitarize_eps, max_error,
reunit_allow_svd, reunit_svd_only,
svd_rel_error, svd_abs_error);
int num_failures = 0;
int* num_failures_dev;
cudaMalloc((void**)&num_failures_dev, sizeof(int));
if ( num_failures_dev == NULL ) errorQuda("cudaMalloc failed for num_failures_dev\n");
cudaMemset(num_failures_dev, 0, sizeof(int));
unitarizeLinksQuda(data, data, num_failures_dev);
cudaMemcpy(&num_failures, num_failures_dev, sizeof(int), cudaMemcpyDeviceToHost);
if ( num_failures > 0 ) {
cudaFree(num_failures_dev);
errorQuda("Error in the unitarization\n");
exit(1);
}
cudaFree(num_failures_dev);
// end reunitarize
arg.free();
CUFFT_SAFE_CALL(cufftDestroy(plan_zt));
CUFFT_SAFE_CALL(cufftDestroy(plan_xy));
checkCudaError();
cudaDeviceSynchronize();
profileInternalGaugeFixFFT.TPSTOP(QUDA_PROFILE_COMPUTE);
if (getVerbosity() > QUDA_SUMMARIZE){
double secs = profileInternalGaugeFixFFT.Last(QUDA_PROFILE_COMPUTE);
double fftflop = 5.0 * (log2((double)( data.X()[0] * data.X()[1]) ) + log2( (double)(data.X()[2] * data.X()[3] )));
fftflop *= (double)( data.X()[0] * data.X()[1] * data.X()[2] * data.X()[3] );
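// (editor's note: this appears to use the standard ~5*N*log2(N) flop estimate for a complex FFT of
// length N, applied once per xy-plane transform and once per zt-plane transform over the full volume)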
double gflops = setinvpsp.flops() + gfixquality.flops();
double gbytes = setinvpsp.bytes() + gfixquality.bytes();
double flop = invpsp.flops() * Elems;
double byte = invpsp.bytes() * Elems;
flop += (GFRotate.flops() + fftflop) * Elems * 2;
byte += GFRotate.bytes() * Elems * 4; //includes FFT reads, assuming 1 read and 1 write per site
#ifdef GAUGEFIXING_DONT_USE_GX
flop += gfixNew.flops();
byte += gfixNew.bytes();
#else
flop += calcGX.flops();
byte += calcGX.bytes();
flop += gfix.flops();
byte += gfix.bytes();
#endif
flop += gfixquality.flops();
byte += gfixquality.bytes();
gflops += flop * iter;
gbytes += byte * iter;
gflops += 4588.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3]; //Reunitarize at end
gbytes += 8.0 * data.X()[0]*data.X()[1]*data.X()[2]*data.X()[3] * dataOr.Bytes() ; //Reunitarize at end
gflops = (gflops * 1e-9) / (secs);
gbytes = gbytes / (secs * 1e9);
printfQuda("Time: %6.6f s, Gflop/s = %6.1f, GB/s = %6.1f\n", secs, gflops, gbytes);
}
}
template<unsigned int Elems, typename Float, typename Gauge>
void gaugefixingFFT( Gauge dataOr, cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
if ( gauge_dir != 3 ) {
printf("Starting Landau gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 4>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
else {
printf("Starting Coulomb gauge fixing with FFTs...\n");
gaugefixingFFT<Elems, Float, Gauge, 3>(dataOr, data, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
}
}
template<typename Float>
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const Float alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
// Switching to FloatNOrder for the gauge field in order to support RECONSTRUCT_12
// Need to fix this!!
//9 and 6 means the number of complex elements used to store g(x) and Delta(x)
if ( data.isNative() ) {
if ( data.Reconstruct() == QUDA_RECONSTRUCT_NO ) {
//printfQuda("QUDA_RECONSTRUCT_NO\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_NO>::type Gauge;
gaugefixingFFT<9, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_12 ) {
//printfQuda("QUDA_RECONSTRUCT_12\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_12>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else if ( data.Reconstruct() == QUDA_RECONSTRUCT_8 ) {
//printfQuda("QUDA_RECONSTRUCT_8\n");
typedef typename gauge_mapper<Float,QUDA_RECONSTRUCT_8>::type Gauge;
gaugefixingFFT<6, Float>(Gauge(data), data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Reconstruction type %d of gauge field not supported", data.Reconstruct());
}
} else {
errorQuda("Invalid Gauge Order\n");
}
}
#endif // GPU_GAUGE_ALG
/**
* @brief Gauge fixing with the steepest descent method using FFTs, with support for a single GPU only.
* @param[in,out] data, quda gauge field
* @param[in] gauge_dir, 3 for Coulomb gauge fixing, other for Landau gauge fixing
* @param[in] Nsteps, maximum number of steps to perform gauge fixing
* @param[in] verbose_interval, print gauge fixing info when iteration count is a multiple of this
* @param[in] alpha, gauge fixing parameter of the method, most common value is 0.08
* @param[in] autotune, 1 to autotune the method, i.e., if the gauge functional Fg reverses its tendency, the alpha value is decreased
* @param[in] tolerance, tolerance value used to stop the method; if this value is zero the method only stops when the iteration count reaches the maximum number of steps defined by Nsteps
* @param[in] stopWtheta, 0 for the MILC criterion and 1 to use the theta value
*/
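// Illustrative call (editor's addition, not from the original source; the parameter values below are
// only examples and `gauge` is assumed to be a valid single- or double-precision cudaGaugeField):
//   // Coulomb gauge fixing, alpha = 0.08, autotuning on, stop when theta < 1e-6
//   gaugefixingFFT(gauge, 3, 10000, 100, 0.08, 1, 1e-6, 1);
//   // Landau gauge fixing, stopping on the MILC-style Delta criterion instead of theta
//   gaugefixingFFT(gauge, 4, 10000, 100, 0.08, 1, 1e-6, 0);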
void gaugefixingFFT( cudaGaugeField& data, const unsigned int gauge_dir, \
const unsigned int Nsteps, const unsigned int verbose_interval, const double alpha, const unsigned int autotune, \
const double tolerance, const unsigned int stopWtheta) {
#ifdef GPU_GAUGE_ALG
#ifdef MULTI_GPU
if(comm_dim_partitioned(0) || comm_dim_partitioned(1) || comm_dim_partitioned(2) || comm_dim_partitioned(3))
errorQuda("Gauge fixing with FFTs is NOT yet implemented for multi-GPU!\n");
#endif
if ( data.Precision() == QUDA_HALF_PRECISION ) {
errorQuda("Half precision not supported\n");
}
if ( data.Precision() == QUDA_SINGLE_PRECISION ) {
gaugefixingFFT<float> (data, gauge_dir, Nsteps, verbose_interval, (float)alpha, autotune, tolerance, stopWtheta);
} else if ( data.Precision() == QUDA_DOUBLE_PRECISION ) {
gaugefixingFFT<double>(data, gauge_dir, Nsteps, verbose_interval, alpha, autotune, tolerance, stopWtheta);
} else {
errorQuda("Precision %d not supported", data.Precision());
}
#else
errorQuda("Gauge fixing has not been built");
#endif
}
}