hip_filename | hip_content | cuda_filename | cuda_content |
---|---|---|---|
9282b38836640e53ce1c0ba26bc485e070a403e5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
File: reduction.cu
Author: Terrence Alsup
Date: April 15, 2019
HPC 2019 : HW 4
Implement dot product and matrix-vector product in CUDA.
**/
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
/**
Compute the dot product of vectors a and b of length N and store the result
at c location. Computation is done on the CPU.
**/
void dotprod(long N, const double* a, const double* b, double* c) {
double sum = 0;
#pragma omp parallel for reduction(+:sum)
for(long i = 0; i < N; i++) {
sum += a[i] * b[i];
}
*c = sum;
}
/**
Compute the matrix-vector product of matrix A and vector b of dimensions NxN and
N. Store the result in the vector c. Computation is done on the CPU.
**/
void MvMult(long N, double* A, double* b, double* c) {
#pragma omp parallel for
for(long i = 0; i < N; i++) {
double sum = 0;
for(long j = 0; j < N; j++) {
sum += A[i*N + j] * b[j];
}
c[i] = sum;
}
}
void Check_CUDA_Error(const char *message){
hipError_t error = hipGetLastError();
if(error!=hipSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) );
exit(-1);
}
}
#define BLOCK_SIZE 1024
__global__ void reduction_kernel(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
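// Added note (not part of the original kernel): the folds above halve the active range each step.
// With per-thread values v[0..1023] loaded into smem, after the "< 512" step smem[i] = v[i] + v[i+512]
// for i = 0..511, after the "< 256" step smem[i] also absorbs v[i+256] and v[i+768], and so on down
// to the "+ 2" step, after which smem[0] and smem[1] together hold the block total; that is why
// thread 0 writes smem[0] + smem[1] instead of performing one last "+ 1" fold.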
/**
The kernel for the dot product.
**/
__global__ void dotprod_kernel(double *sum, const double *a, const double *b, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx] * b[idx]; // Now multiply a[i] * b[i].
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
/**
The kernel for the matrix-vector product Av = b.
**/
__global__ void matvec_kernel(double *b, const double *A, const double *v, long N) {
__shared__ double smem[BLOCK_SIZE];
smem[threadIdx.x] = 0;
for (long t = threadIdx.x; t < N; t += BLOCK_SIZE ) {
// Recall that blockIdx.x just corresponds to the row.
smem[threadIdx.x] += A[blockIdx.x * N + t] * v[t];
}
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) b[blockIdx.x] = smem[0] + smem[1];
}
}
/**
Compute the dot product <a,b> = sum using the GPU.
**/
void gpu_dot(long N, double *a, double *b, double *sum) {
// Allocate space on the GPU.
double *a_d, *b_d;
hipMalloc(&a_d, N * sizeof(double));
hipMalloc(&b_d, N * sizeof(double));
// Transfer data to GPU.
hipMemcpyAsync(a_d, a, N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(b_d, b, N * sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
double *y_d; // Store results from blocks.
long N_work = 1; // Number of blocks we will need.
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
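// Added worked example: with BLOCK_SIZE = 1024 and N = 2^21 the loop above visits i = 2048 and then
// i = 2, giving N_work = 1 + 2048 + 2 = 2051, exactly enough slots in y_d for the 2048 first-level
// partial sums, the 2 second-level partial sums, and the final single value produced below.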
hipMalloc(&y_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
double tt = omp_get_wtime(); // Record the time.
// Compute the number of blocks we need.
double *sum_d = y_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
// Compute the dot product for each block.
hipLaunchKernelGGL(( dotprod_kernel), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d, a_d, b_d, N);
// Now add the results from all of the blocks.
while (Nb > 1) {
long N_temp = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
hipLaunchKernelGGL(( reduction_kernel), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d + N_temp, sum_d, N_temp);
sum_d += N_temp;
}
// Transfer result back to CPU.
hipMemcpyAsync(sum, sum_d, 1*sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
double elapsed = omp_get_wtime() - tt;
printf("Vector-Vector Multiply GPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / elapsed/1e9);
// Free the memory allocated on the GPUs.
hipFree(a_d);
hipFree(b_d);
hipFree(y_d);
}
/**
Compute the matrix vector product Ax = b using the GPU.
**/
void gpu_matvec(long N, double *A, double *x, double *b) {
// Allocate space on the GPU.
double *A_d, *x_d;
hipMalloc(&A_d, N * N * sizeof(double));
hipMalloc(&x_d, N * sizeof(double));
// Transfer data to GPU.
hipMemcpyAsync(A_d, A, N * N * sizeof(double), hipMemcpyHostToDevice);
hipMemcpyAsync(x_d, x, N * sizeof(double), hipMemcpyHostToDevice);
hipDeviceSynchronize();
double *b_d; // Store results for each entry.
hipMalloc(&b_d, N * sizeof(double));
double tt = omp_get_wtime(); // Record the time.
// Compute the matrix-vector product using the GPU.
// Note that since A has size N-by-N we use N as the number of blocks.
hipLaunchKernelGGL(( matvec_kernel), dim3(N), dim3(BLOCK_SIZE), 0, 0, b_d, A_d, x_d, N);
// Transfer result back to CPU.
hipMemcpyAsync(b, b_d, N * sizeof(double), hipMemcpyDeviceToHost);
hipDeviceSynchronize();
double elapsed = omp_get_wtime() - tt;
printf("Matrix-Vector Multiply GPU Bandwidth = %f GB/s\n", (3*N + N*N) * sizeof(double) / (elapsed*1e9));
// Free the memory allocated on the GPUs.
hipFree(A_d);
hipFree(x_d);
hipFree(b_d);
}
int main() {
// Length of the vectors and size of the matrix.
long N = (1UL<<10); // N = 2^10
// Declare and allocate space on the CPU for the vectors and matrix as well
// as the reference solution of the matrix-vector multiplication and the GPU
// computed solution.
double *v1, *v2, *A, *prod_ref, *prod;
hipHostMalloc((void**)&v1, N * sizeof(double));
hipHostMalloc((void**)&v2, N * sizeof(double));
hipHostMalloc((void**)&A, N * N * sizeof(double));
hipHostMalloc((void**)&prod_ref, N * sizeof(double));
hipHostMalloc((void**)&prod, N * sizeof(double));
// Initialize the vectors.
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++) {
v1[i] = drand48();
v2[i] = drand48();
prod_ref[i] = 0;
prod[i] = 0;
}
// Initialize the matrix.
#pragma omp parallel for schedule(static)
for (long i = 0; i < N * N; i++) {
A[i] = drand48();
}
// Compute the reference solution for the dot product on the CPU.
double dot_ref = 0;
double tt = omp_get_wtime();
dotprod(N, v1, v2, &dot_ref);
double elapsed = omp_get_wtime() - tt;
printf("\nVector-Vector Multiply CPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / elapsed/1e9);
// Now compute the dot product on the GPU.
double dot = 0;
gpu_dot(N, v1, v2, &dot);
printf("Vector-Vector Multiply Error = %f\n\n", fabs(dot-dot_ref));
// Compute the reference solution for the matrix-vector product on the CPU.
tt = omp_get_wtime();
MvMult(N, A, v1, prod_ref);
elapsed = omp_get_wtime() - tt;
printf("Matrix-Vector Multiply CPU Bandwidth = %f GB/s\n", (3*N + N*N) * sizeof(double) / elapsed/1e9);
// Compute the matrix-vector multiplication on the GPU.
gpu_matvec(N, A, v1, prod);
// Compute the L2 error of the vectors.
double error = 0;
#pragma omp parallel for reduction(+:error)
for (long i = 0; i < N; i++) {
error += (prod[i] - prod_ref[i]) * (prod[i] - prod_ref[i]);
}
printf("Matrix-Vector Multiply Error = %f\n\n", error);
// Free all the memory now.
hipHostFree(v1); // The first vector.
hipHostFree(v2); // The second vector.
hipHostFree(A); // The matrix.
hipHostFree(prod_ref); // The vector that contained the product.
hipHostFree(prod); // The vector of the product computed with the GPU.
return 0;
}
| 9282b38836640e53ce1c0ba26bc485e070a403e5.cu | /**
File: reduction.cu
Author: Terrence Alsup
Date: April 15, 2019
HPC 2019 : HW 4
Implement dot product and matrix-vector product in CUDA.
**/
#include <algorithm>
#include <stdio.h>
#include <omp.h>
#include <string>
/**
Compute the dot product of vectors a and b of length N and store the result
at c location. Computation is done on the CPU.
**/
void dotprod(long N, const double* a, const double* b, double* c) {
double sum = 0;
#pragma omp parallel for reduction(+:sum)
for(long i = 0; i < N; i++) {
sum += a[i] * b[i];
}
*c = sum;
}
/**
Compute the matrix-vector product of matrix A and vector b of dimensions NxN and
N. Store the result in the vector c. Computation is done on the CPU.
**/
void MvMult(long N, double* A, double* b, double* c) {
#pragma omp parallel for
for(long i = 0; i < N; i++) {
double sum = 0;
for(long j = 0; j < N; j++) {
sum += A[i*N + j] * b[j];
}
c[i] = sum;
}
}
void Check_CUDA_Error(const char *message){
cudaError_t error = cudaGetLastError();
if(error!=cudaSuccess) {
fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) );
exit(-1);
}
}
#define BLOCK_SIZE 1024
__global__ void reduction_kernel(double* sum, const double* a, long N){
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx];
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
/**
The kernel for the dot product.
**/
__global__ void dotprod_kernel(double *sum, const double *a, const double *b, long N) {
__shared__ double smem[BLOCK_SIZE];
int idx = (blockIdx.x) * blockDim.x + threadIdx.x;
if (idx < N) smem[threadIdx.x] = a[idx] * b[idx]; // Now multiply a[i] * b[i].
else smem[threadIdx.x] = 0;
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1];
}
}
/**
The kernel for the matrix-vector product Av = b.
**/
__global__ void matvec_kernel(double *b, const double *A, const double *v, long N) {
__shared__ double smem[BLOCK_SIZE];
smem[threadIdx.x] = 0;
for (long t = threadIdx.x; t < N; t += BLOCK_SIZE ) {
// Recall that blockIdx.x just corresponds to the row.
smem[threadIdx.x] += A[blockIdx.x * N + t] * v[t];
}
__syncthreads();
if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512];
__syncthreads();
if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256];
__syncthreads();
if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128];
__syncthreads();
if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64];
__syncthreads();
if (threadIdx.x < 32) {
smem[threadIdx.x] += smem[threadIdx.x + 32];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 16];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 8];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 4];
__syncwarp();
smem[threadIdx.x] += smem[threadIdx.x + 2];
__syncwarp();
if (threadIdx.x == 0) b[blockIdx.x] = smem[0] + smem[1];
}
}
/**
Compute the dot product <a,b> = sum using the GPU.
**/
void gpu_dot(long N, double *a, double *b, double *sum) {
// Allocate space on the GPU.
double *a_d, *b_d;
cudaMalloc(&a_d, N * sizeof(double));
cudaMalloc(&b_d, N * sizeof(double));
// Transfer data to GPU.
cudaMemcpyAsync(a_d, a, N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(b_d, b, N * sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
double *y_d; // Store results from blocks.
long N_work = 1; // Number of blocks we will need.
for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i;
cudaMalloc(&y_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks
double tt = omp_get_wtime(); // Record the time.
// Compute the number of blocks we need.
double *sum_d = y_d;
long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE);
// Compute the dot product for each block.
dotprod_kernel<<<Nb,BLOCK_SIZE>>>(sum_d, a_d, b_d, N);
// Now add the results from all of the blocks.
while (Nb > 1) {
long N_temp = Nb;
Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE);
reduction_kernel<<<Nb,BLOCK_SIZE>>>(sum_d + N_temp, sum_d, N_temp);
sum_d += N_temp;
}
// Transfer result back to CPU.
cudaMemcpyAsync(sum, sum_d, 1*sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
double elapsed = omp_get_wtime() - tt;
printf("Vector-Vector Multiply GPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / elapsed/1e9);
// Free the memory allocated on the GPUs.
cudaFree(a_d);
cudaFree(b_d);
cudaFree(y_d);
}
/**
Compute the matrix vector product Ax = b using the GPU.
**/
void gpu_matvec(long N, double *A, double *x, double *b) {
// Allocate space on the GPU.
double *A_d, *x_d;
cudaMalloc(&A_d, N * N * sizeof(double));
cudaMalloc(&x_d, N * sizeof(double));
// Transfer data to GPU.
cudaMemcpyAsync(A_d, A, N * N * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpyAsync(x_d, x, N * sizeof(double), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
double *b_d; // Store results for each entry.
cudaMalloc(&b_d, N * sizeof(double));
double tt = omp_get_wtime(); // Record the time.
// Compute the matrix-vector product using the GPU.
// Note that since A has size N-by-N we use N as the number of blocks.
matvec_kernel<<<N, BLOCK_SIZE>>>(b_d, A_d, x_d, N);
// Transfer result back to CPU.
cudaMemcpyAsync(b, b_d, N * sizeof(double), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
double elapsed = omp_get_wtime() - tt;
printf("Matrix-Vector Multiply GPU Bandwidth = %f GB/s\n", (3*N + N*N) * sizeof(double) / (elapsed*1e9));
// Free the memory allocated on the GPUs.
cudaFree(A_d);
cudaFree(x_d);
cudaFree(b_d);
}
int main() {
// Length of the vectors and size of the matrix.
long N = (1UL<<10); // N = 2^10
// Declare and allocate space on the CPU for the vectors and matrix as well
// as the reference solution of the matrix-vector multiplication and the GPU
// computed solution.
double *v1, *v2, *A, *prod_ref, *prod;
cudaMallocHost((void**)&v1, N * sizeof(double));
cudaMallocHost((void**)&v2, N * sizeof(double));
cudaMallocHost((void**)&A, N * N * sizeof(double));
cudaMallocHost((void**)&prod_ref, N * sizeof(double));
cudaMallocHost((void**)&prod, N * sizeof(double));
// Initialize the vectors.
#pragma omp parallel for schedule(static)
for (long i = 0; i < N; i++) {
v1[i] = drand48();
v2[i] = drand48();
prod_ref[i] = 0;
prod[i] = 0;
}
// Initialize the matrix.
#pragma omp parallel for schedule(static)
for (long i = 0; i < N * N; i++) {
A[i] = drand48();
}
// Compute the reference solution for the dot product on the CPU.
double dot_ref = 0;
double tt = omp_get_wtime();
dotprod(N, v1, v2, &dot_ref);
double elapsed = omp_get_wtime() - tt;
printf("\nVector-Vector Multiply CPU Bandwidth = %f GB/s\n", 2*N*sizeof(double) / elapsed/1e9);
// Now compute the dot product on the GPU.
double dot = 0;
gpu_dot(N, v1, v2, &dot);
printf("Vector-Vector Multiply Error = %f\n\n", fabs(dot-dot_ref));
// Compute the reference solution for the matrix-vector product on the CPU.
tt = omp_get_wtime();
MvMult(N, A, v1, prod_ref);
elapsed = omp_get_wtime() - tt;
printf("Matrix-Vector Multiply CPU Bandwidth = %f GB/s\n", (3*N + N*N) * sizeof(double) / elapsed/1e9);
// Compute the matrix-vector multiplication on the GPU.
gpu_matvec(N, A, v1, prod);
// Compute the L2 error of the vectors.
double error = 0;
#pragma omp parallel for reduction(+:error)
for (long i = 0; i < N; i++) {
error += (prod[i] - prod_ref[i]) * (prod[i] - prod_ref[i]);
}
printf("Matrix-Vector Multiply Error = %f\n\n", error);
// Free all the memory now.
cudaFreeHost(v1); // The first vector.
cudaFreeHost(v2); // The second vector.
cudaFreeHost(A); // The matrix.
cudaFreeHost(prod_ref); // The vector that contained the product.
cudaFreeHost(prod); // The vector of the product computed with the GPU.
return 0;
}
|
d148b72f68b46fb898dba6ee7fa808aa61e5b6ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
hipMalloc(&d_a, bytes);
hipMalloc(&d_b, bytes);
hipMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ )
{
h_a[i] = i;
h_b[i] = i;
}
// Copy host vectors to device
hipMemcpy( d_a, h_a, bytes, hipMemcpyHostToDevice);
hipMemcpy( d_b, h_b, bytes, hipMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
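// Added note: an equivalent integer-only form is gridSize = (n + blockSize - 1) / blockSize,
// which avoids the float round-trip; for n = 100 and blockSize = 1024 both give gridSize = 1.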
// Execute the kernel
hipLaunchKernelGGL(( vecAdd), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);
// Copy array back to host
hipMemcpy( h_c, d_c, bytes, hipMemcpyDeviceToHost );
// Print each pair of inputs and their element-wise sum to verify the result (the averaged check below is left commented out)
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
//printf("final result: %f\n", sum/(double)n);
// Release device memory
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} | d148b72f68b46fb898dba6ee7fa808aa61e5b6ef.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
int main( int argc, char* argv[] )
{
// Size of vectors
int n = 100;
// Host input vectors
double *h_a;
double *h_b;
//Host output vector
double *h_c;
// Device input vectors
double *d_a;
double *d_b;
//Device output vector
double *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(double);
// Allocate memory for each vector on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each vector on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int i;
// Initialize vectors on host
for( i = 0; i < n; i++ )
{
h_a[i] = i;
h_b[i] = i;
}
// Copy host vectors to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid
gridSize = (int)ceil((float)n/blockSize);
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Print each pair of inputs and their element-wise sum to verify the result (the averaged check below is left commented out)
double sum = 0;
for(i=0; i<n; i++)
printf(" %f + %f =%f\n",h_a[i],h_b[i],h_c[i]);
//printf("final result: %f\n", sum/(double)n);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
c2ddb386c43a9340a36b1596259a7acd939c6d69.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "err.h"
#include <cstdio>
#include <hip/hip_vector_types.h>
#include <vector>
#include <algorithm>
#include <functional>
#include <iostream>
#include "cuda_math.h"
#include "tablet.hpp"
//#define USE_DOUBLE
#ifdef USE_DOUBLE
typedef double floatT;
typedef double3 floatT3;
#define make_floatT3 make_double3
#else//use float
typedef float floatT;
typedef float3 floatT3;
#define make_floatT3 make_float3
#endif
struct TriHexagon;
struct Tablet {// Base structure for storing and accessing the tablet data
floatT3* moments;// array of magnetic moments
float3* coords;// array of coordinates
float2* grid_xy;
float dt, Rtab;
int3 Nrays;// number of grid lines (rays) along each coordinate
TriHexagon* ids; // array of mesh-node indices in the three coordinate systems
float3 Hext;// external field
float3 Aani[3];// anisotropy axes
float Kani, zero;// anisotropy coefficient
float Jexch;// exchange interaction integral
float alpha, gamma; // precession and dissipation constants
int Nlayer3, Nmesh, Nmoment;// number of layer triples in the tablet, plus the number of mesh nodes and moments per tri-layer
int initPentagon(int na, int kerns[], int Ndl=1);
int initMoments();
void set();
void clear();
int getNmoms() { return 3*Nlayer3*Nmoment; }
float3 getMinBox();
float3 getMaxBox();
};
void PrintLastError(const char *file, int line) {
hipError_t err=hipGetLastError();
if(err!=hipSuccess) fprintf(stderr, "%s in %s at line %d\n", hipGetErrorString(err), file, line);
}
bool CheckError(hipError_t err, const char *file, int line) {
if(err==hipSuccess) return false;
fprintf(stderr, "%s in %s at line %d\n", hipGetErrorString(err), file, line);
return true;
}
void deviceDiagnostics(){
int deviceCount;
CHECK_ERROR( hipGetDeviceCount(&deviceCount) );
printf("GPU devices :: %d \n", deviceCount);
hipDeviceProp_t devProp[deviceCount];
for(int i = 0; i < deviceCount; ++i) {
printf("*** CUDA Device #%d ***", i);
CHECK_ERROR( hipGetDeviceProperties(&devProp[i], i) );
printf("%s ***\n", devProp[i].name);
printf("\t%d.%d compute capability\n", devProp[i].major, devProp[i].minor);
printf("\t%d multiprocessors\n", devProp[i].multiProcessorCount);
printf("\t%.2fGB max mem pitch of %.2fGB global memory\n", devProp[i].memPitch/(1024.*1024.*1024), devProp[i].totalGlobalMem/(1024.*1024.*1024));
printf("\t%.2fKB total shared memory per block\n", devProp[i].sharedMemPerBlock/1024.);
printf("\t%.2fKB total constant memory\n", devProp[i].totalConstMem/1024.);
printf("\t%.2fK registers per block\n", devProp[i].regsPerBlock/1024.);
printf("\t%d/%d threads per Warp/block\n", devProp[i].warpSize, devProp[i].maxThreadsPerBlock);
printf("\tClock rate: %.2fGHz\n", devProp[i].clockRate*1e-6);
printf("\tTexture alignment: %luB\n", devProp[i].textureAlignment);
printf("\tConcurrent copy and execution: %s\n", (devProp[i].deviceOverlap ? "Yes" : "No"));
printf("\tKernel execution timeout: %s\n", (devProp[i].kernelExecTimeoutEnabled ? "Yes" : "No"));
}
}
//#include "heap.hpp"
__forceinline__ __host__ __device__ floatT3 rotateQ(floatT3 v, floatT3 H) {
register floatT lH = length(H);
register floatT a = 0.5*lH;
if(a < floatT(1e-15)) return v;
floatT3 nH = H*(floatT(1.0)/lH);
#ifdef USE_DOUBLE
double sina, cosa; sincos(a, &sina, &cosa);
#else//use float
#if defined(__CUDA_ARCH__)
float sina, cosa; __sincosf(a, &sina, &cosa);
#else
float sina, cosa; sincosf(a, &sina, &cosa);
#endif
#endif
//register floatT3 u = sina*nH;
/* Quaternion formula :: (floatT(2.0)*dot(u, v))*u + (s*s-dot(u, u))*v + (floatT(2.0)*s)*cross(u,v); */
return make_floatT3( ( nH.x*nH.x*v.x + floatT(2.0)*nH.x*nH.y*v.y + floatT(2.0)*nH.x*nH.z*v.z - v.x - nH.y*nH.y*v.x-nH.z*nH.z*v.x)*sina*sina + v.x + floatT(2.0)*cosa*sina*(nH.y*v.z-nH.z*v.y),
(floatT(2.0)*nH.x*nH.y*v.x + nH.y*nH.y*v.y + floatT(2.0)*nH.y*nH.z*v.z - v.y - nH.x*nH.x*v.y-nH.z*nH.z*v.y)*sina*sina + v.y + floatT(2.0)*cosa*sina*(nH.z*v.x-nH.x*v.z),
(floatT(2.0)*nH.x*nH.z*v.x + floatT(2.0)*nH.y*nH.z*v.y + nH.z*nH.z*v.z - v.z - nH.x*nH.x*v.z-nH.y*nH.y*v.z)*sina*sina + v.z + floatT(2.0)*cosa*sina*(nH.x*v.y-nH.y*v.x) );
};/* rotateQ */
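// Added note: the expanded expression above is the Rodrigues rotation of v about the unit axis nH by
// the full angle |H| (the quaternion itself uses the half angle a = 0.5*|H|). It should agree, up to
// floating-point error, with v*cos|H| + cross(nH,v)*sin|H| + nH*dot(nH,v)*(1-cos|H|); rotateM below
// realizes the same rotation through an explicit rotation matrix, and rot_test compares the two.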
__forceinline__ __host__ __device__ floatT3 rotateM(floatT3 v, floatT3 H){
register floatT a = length(H);
if(a < 1e-15f) return v;
register floatT3 u = H*(floatT(1.0)/a);
#ifdef USE_DOUBLE
double sina, cosa; sincos(a, &sina, &cosa);
#else//use float
#if defined(__CUDA_ARCH__)
float sina, cosa; __sincosf( a , &sina , &cosa );
#else
float sina, cosa; sincosf(a, &sina, &cosa);
#endif
#endif
register floatT ucos = 1-cosa;
floatT rotM[3][3] = {
{cosa + u.x*u.x*ucos , u.x*u.y*ucos - u.z*sina, u.x*u.z*ucos + u.y*sina},
{u.y*u.x*ucos + u.z*sina, cosa + u.y*u.y*ucos , u.y*u.z*ucos - u.x*sina},
{u.z*u.x*ucos - u.y*sina, u.z*u.y*ucos + u.x*sina, cosa + u.z*u.z*ucos }
};
return make_floatT3(v.x*rotM[0][0] + v.y*rotM[0][1] + v.z*rotM[0][2],
v.x*rotM[1][0] + v.y*rotM[1][1] + v.z*rotM[1][2],
v.x*rotM[2][0] + v.y*rotM[2][1] + v.z*rotM[2][2]);
};/* rotateM */
int rot_test(double tMax, double tDrop) {
floatT3 H={0,1,1}, m1={1,0,0}, m2={1,0,0};
double t=0.0, dt=1.e-1;
while(t<tMax) {
printf("%g\t%g\t%g\t%g\t%g\t%g\t%g\n",t, m1.x,m1.y,m1.z, m2.x,m2.y,m2.z);
for(double tN=t+tDrop; t<tN; t+=dt) {
floatT3 tm1=rotateM(m1,(H+0*m2)*dt);
floatT3 tm2=rotateQ(m2,(H+0*m1)*dt);
m1 = tm1; m2 = tm2;
}
}
return m1.x+m2.x;
}
__constant__ Tablet tab;
/*
struct sortT {
unsigned int sort; // 3 * 5 * 2bit
__device__ __host__ inline void set(int il, int ia, int s) { sort &= ~(3<<(2*(il*5+ia))); sort |= s<<(2*(il*5+ia)); }
};*/
struct TriHexagon {
ushort4 idx;
__device__ __host__ inline bool present(int il, int ia) { return 1&(idx.w>>(il*5+ia)); }
__device__ __host__ inline void set(int il, int ia) { idx.w |= 1<<(il*5+ia); }
__device__ __host__ inline void clear(int il, int ia) { idx.w &= ~(1<<(il*5+ia)); }
__device__ __host__ inline int get_id(int i) { return ((unsigned short*)&idx)[i]; }
__device__ __host__ inline void set_id(int i, unsigned short v) { ((unsigned short*)&idx)[i] = v; }
__device__ void set() { idx = tab.ids[threadIdx.x].idx; }
void reset() { idx = make_ushort4(0,0,0,0x7FFF); }
};
__global__ void test_rotate(int Nt) {
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3], zero={0,0,0};
for(int il=0; il<3; il++) {
m[il][0] = pM[id.x];
m[il][1] = (id.w&2)?pM[ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&8)?pM[2*tab.Nmesh+id.z]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
#pragma unroll 1
for(int it=0; it<Nt; it++) {
for(int il=0; il<3; il++) {
for(int i=0; i<3; i++) {
floatT3 mij=m[il][i];
floatT3 Heff=tab.Hext+tab.zero*mij;
m[il][i] = rotateQ(mij, Heff*tab.dt);
}
}
}
id=tab.ids[threadIdx.x].idx; pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&8) pM[2*tab.Nmesh+id.z] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
}
__global__ void test_ani(int Nt) {
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3], zero={0,0,0};
for(int il=0; il<3; il++) {
m[il][0] = pM[id.x];
m[il][1] = (id.w&2)?pM[ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&16)?pM[2*tab.Nmesh+id.z+1]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
#pragma unroll 1
for(int it=0; it<Nt; it++) {
#pragma unroll 3
for(int il=0; il<3; il++) {
#pragma unroll 3
for(int i=0; i<3; i++) {
const int subl=(il+i)%3;
floatT3 mij=m[il][i];
floatT3 Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],mij)*tab.Aani[subl];
m[il][i] = rotateQ(mij, Heff*tab.dt);
}
}
}
id=tab.ids[threadIdx.x].idx; pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&16) pM[2*tab.Nmesh+id.z+1] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
}
/*=======================================
Runge-Kutta stencil 2nd order
=========================================
0: m1 = m0 (rotate) h*S(m0, H0)
m2 = m0 (rotate) 0.5*h*S(m0, H0)
H1 = H1(m1)
h/2: m0 = m2 (rotate) 0.5*h*S(m2, H1)
where S(m_i, H_j) = gamma*H_j + alpha * cross(m_i, H_j)
=======================================*/
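/* Added illustrative sketch (not part of the original file): a host-side, single-moment version of the
   two-stage scheme described above, written with the same helpers (rotateQ plus dot, cross and
   normalize, which cuda_math.h is assumed to provide for host code). The parameters mirror the Tablet
   members; the kernel below remains the authoritative implementation. */
inline floatT3 rk2_single_moment(floatT3 m0, floatT3 Hext, floatT3 Aani,
                                 float Kani, float gamma, float alpha, float dt) {
  // Stage 1: effective field at m0, full-step trial rotation m1 and half-step state m2.
  floatT3 H0 = Hext + Kani*dot(Aani, m0)*Aani;
  floatT3 S0 = gamma*H0 + alpha*cross(m0, H0);
  floatT3 m1 = rotateQ(m0, dt*S0);       // m1 = m0 (rotate) h*S(m0, H0)
  floatT3 m2 = rotateQ(m0, 0.5f*dt*S0);  // m2 = m0 (rotate) 0.5*h*S(m0, H0)
  // Field re-evaluated at the trial state: H1 = H1(m1).
  floatT3 H1 = Hext + Kani*dot(Aani, m1)*Aani;
  // Stage 2: advance m2 by the remaining half step using H1, then renormalize the moment.
  floatT3 S1 = gamma*H1 + alpha*cross(m2, H1);
  return normalize(rotateQ(m2, 0.5f*dt*S1));
}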
__global__ void stencil(int Nt) {
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3], zero={0,0,0};
for(int il=0; il<3; il++) {
m[il][0] = pM[id.x];
m[il][1] = (id.w&2)?pM[ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&16)?pM[2*tab.Nmesh+id.z+1]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
float3 m1, m2;
#pragma unroll 1
for(int it=0; it<Nt; it++) {
#pragma unroll 3
for(int il=0; il<3; il++) {
#pragma unroll 3
for(int i=0; i<3; i++) {
const int subl=(il+i)%3;
floatT3 m0=m[il][i];
floatT3 Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],m0)*tab.Aani[subl];
m1 = rotateQ(m0, tab.dt*(tab.gamma*Heff + tab.alpha*cross(m0,Heff)));
m2 = 0.5*m1;
Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],m2)*tab.Aani[subl];
m0 = rotateQ(m2, 0.5*tab.dt*(tab.gamma*Heff + tab.alpha*cross(m2,Heff)));
m[il][i] = normalize(m0);
}
}
//__syncthreads();
}
id=tab.ids[threadIdx.x].idx; pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&16) pM[2*tab.Nmesh+id.z+1] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
}
/*
__global__ void run(int Nt) {
__shared__ floatT3 shM[2][512], shH[2][512];
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3]={pM[id.x],pM[id.x+tab.Nmoment],pM[id.x+2*tab.Nmoment]}, zero={0,0,0};
for(int il=0; il<3; il++) {
const int ish=il*tab.Nmoment;
m[il][0] = pM[id.x+ish];
m[il][1] = (id.w&2)?pM[ish+ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&16)?pM[ish+2*tab.Nmesh+id.z+1]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
}
#pragma unroll 1
for(int it=0; it<Nt; it++) {
floatT3 Hexch[3] = {zero,zero,zero};
#pragma unroll 3
for(int il=0; il<3; il++) {
shM[0][id.x ] = m[il][1]; shH[0][id.x ] = H[1];
shM[1][id.z+1] = m[il][2]; shH[1][id.z+1] = H[2];
__syncthreads();
m[il][1] = shM[0][id.x ]; H[1] = shH[0][id.x ];
m[il][2] = shM[1][id.z+1]; H[2] = shH[1][id.z+1];
#pragma unroll 3
for(int i=0; i<3; i++) {
const int subl=(il+i)%3;
floatT3 mij=m[il][i];
floatT3 Hexch = tab.Jexch*(m[il][(i+1)%3]+m[il][(i+2)%3]);
floatT3 Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],mij)*tab.Aani[subl];
m[il][i] = rotateQ(mij, Heff*tab.dt);
}
pM += 3*tab.Nmesh;
}
pM -= 9*tab.Nmesh;
}
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&16) pM[2*tab.Nmesh+id.z+1] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += tab.Nmoment;
}
pM[id.x]=m0[0]; pM[id.x+tab.Nmoment]=m0[1]; pM[id.x+2*tab.Nmoment]=m0[2];
}
*/
__global__ void initMoments() {
//floatT3 mx={1.0, 0.0, 0.0}, my={0.0, 1.0, 0.0}, mz={0.0, 0.0, 1.0}, zero={0,0,0}, out={0,0,-1};
floatT3 mx=tab.Aani[0], my=tab.Aani[1], mz=tab.Aani[2], zero={0,0,0}, out={0,0,-1};
TriHexagon th; th.set(); int i;
floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
float z=blockIdx.x; float2 xy=tab.grid_xy[threadIdx.x];
const float H=0.5, A=2*H/sqrt(3.), h=A/2, a=2*H/3.;
float3* pC=&tab.coords[3*blockIdx.x*tab.Nmoment];
pM[th.idx.x] = mx; pC[th.idx.x] = {xy.x-h,xy.y,z};
i= tab.Nmesh+th.idx.x; if(th.present(0,1)) { pM[i] = my; pC[i] = {xy.x-A,xy.y-H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-A,xy.y-H,z-1}; }
i=2*tab.Nmesh+th.idx.z; if(th.present(0,3)) { pM[i] = mz; pC[i] = {xy.x-A,xy.y+H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-A,xy.y+H,z-1}; }
pM += tab.Nmoment; pC += tab.Nmoment;//Green
z += 1.0/3.0;
pM[th.idx.y] = my; pC[th.idx.y] = {xy.x,xy.y-a/2,z};
i= tab.Nmesh+th.idx.y; if(th.present(1,1)) { pM[i] = mz; pC[i] = {xy.x+A,xy.y-a/2 ,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x+A,xy.y-a/2 ,z-1}; }
i=2*tab.Nmesh+th.idx.x; if(th.present(1,3)) { pM[i] = mx; pC[i] = {xy.x-h,xy.y-a/2-H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-h,xy.y-a/2-H,z-1}; }
pM += tab.Nmoment;//Blue
pC += tab.Nmoment;//Green
z += 1.0/3.0;
pM[th.idx.z] = mz; pC[th.idx.z] = {xy.x,xy.y+a/2,z};
i= tab.Nmesh+th.idx.z; if(th.present(2,1)) { pM[i] = mx; pC[i] = {xy.x-h,xy.y+a/2+H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-h,xy.y+a/2+H,z-1}; }
i=2*tab.Nmesh+th.idx.y; if(th.present(2,3)) { pM[i] = my; pC[i] = {xy.x+A,xy.y+a/2 ,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x+A,xy.y+a/2 ,z-1}; }
}
// Helper structure used when initializing the tablet
struct ITH {// structure of moments symmetric about the base point (three hexagons | three X | six trigons)
int3 ray; // coordinates of the base point in the triple grid (grid-line number)
TriHexagon pid;
bool use; // flag marking whether this structure is in use
void set(int3 r) { use = true; ray = r; pid.reset(); }
int& get_ray(int i) { return ((int*)&ray)[i]; }
};
void setITH(int ic, std::vector<ITH*>& trigs, int num) {
int ip=(ic+1)%3;//, im=(ic+2)%3;
for(int i=0, _r=trigs[0]->get_ray(ic)-1; i<num; i++) {
ITH& th=*trigs[i];
th.pid.set_id(ic,i);
int r=th.get_ray(ic);
//for(int j=0; j<5; j++) th.pid.set(ic,j);
if(_r != r) {
th.pid.clear(ic,1); th.pid.clear(ip,3);
if(i>0) { ITH& _th=*trigs[i-1]; _th.pid.clear(ic,2); _th.pid.clear(ip,4); }
}
_r = r;
}
ITH& _th=*trigs[num-1]; _th.pid.clear(ic,2); _th.pid.clear(ip,4);
}
float3 Tablet::getMinBox() { return make_float3(-Rtab,-Rtab,0.); }
float3 Tablet::getMaxBox() { return make_float3(Rtab,Rtab,Nlayer3); }
int Tablet::initPentagon(int na, int kerns[], int Ndl) {
// Fill the full hexagon with trihexagons labelled by grid-line numbers, moving along a spiral from the center
int num6=3*na*(na-1)+1; // number of mesh nodes in a hexagon with side na
ITH* pgon = new ITH[num6],* p=pgon;
int3 ray=make_int3(na-1), ray0; p->set(ray); // the starting point of the spiral is the center of the hexagon
for(int ia=1; ia<na; ia++) {
ray.x--; ray.y++;
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y--; ray.z++; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y--; ray.x++; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.z--; ray.x++; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y++; ray.z--; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y++; ray.x--; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.z++; ray.x--; }
}
// Remove the corners and count how many trihexagons remain
int num=num6, nal=na-1, jNN=num6-1;
for(int i=0; i<6*Ndl; i++) {
int jM=-((kerns[i]+1)/2), jP=kerns[i]+jM, jN=jNN-(i%6)*nal;
for(int j=0; j<jP; j++) pgon[jN-j].use = false;
if(i%6==0) jN -= 6*nal;
for(int j=jM; j<0; j++) pgon[jN-j].use = false;
if((i+1)%6==0) { jNN -= 6*nal; nal--; }
num -= kerns[i];
}
Nmesh = num;
// Build a vector of trihexagons for the subsequent sorting
p = pgon;
std::vector<ITH*> trigs;
for(int i=0; i<num6; i++) {
if(p->use) trigs.push_back(p);
p++;
}
printf("#Nmesh: %d(theor) =?= %d(real) of %d in hexagon\n", num, int(trigs.size()), num6);
// Sort the trihexagons by coordinates (line number, trihexagon within the line) along each of the axes
std::sort(trigs.begin(), trigs.end(), [](ITH* a, ITH* b) { if(b->ray.x == a->ray.x) return b->ray.y < a->ray.y; return b->ray.x > a->ray.x; });
setITH(0, trigs, num); ray0.x = trigs[0]->ray.x; Nrays.x = trigs[num-1]->ray.x-ray0.x+1;
std::sort(trigs.begin(), trigs.end(), [](ITH* a, ITH* b) { if(b->ray.y == a->ray.y) return b->ray.z < a->ray.z; return b->ray.y > a->ray.y; });
setITH(1, trigs, num); ray0.y = trigs[0]->ray.y; Nrays.y = trigs[num-1]->ray.y-ray0.y+1;
std::sort(trigs.begin(), trigs.end(), [](ITH* a, ITH* b) { if(b->ray.z == a->ray.z) return b->ray.x < a->ray.x; return b->ray.z > a->ray.z; });
setITH(2, trigs, num); ray0.z = trigs[0]->ray.z; Nrays.z = trigs[num-1]->ray.z-ray0.z+1;
char fn[128];
sprintf(fn, "tab%d.dat",na);
FILE* fd=fopen(fn, "w");
fprintf(fd, "#Nrays: %d,%d,%d;\tray0: %d,%d,%d\n", Nrays.x, Nrays.y, Nrays.z, ray0.x, ray0.y, ray0.z);
p = pgon;
// Build the index array and print diagnostics
TriHexagon* pids=new TriHexagon[num];
float2* xy=new float2[num];
Rtab = sqrt(Nmesh)*5.0/8.0+1;
for(int i=0; i<num; i++) {
while(!p->use) p++;
pids[i].idx = p->pid.idx;
xy[i] = make_float2((p->ray.z-p->ray.x)/sqrt(3),na-1.0-p->ray.y);
fprintf(fd, "%d\t%.4g\t%.4g\t%d\t%d\t%d\t%x\n", i, xy[i].x,xy[i].y, p->pid.idx.x,p->pid.idx.y,p->pid.idx.z,p->pid.idx.w);
p++;
}
Nmoment = 3*Nmesh;//-2*(Nrays.x+Nrays.y+Nrays.z);
fprintf(fd, "# %d Moments\n", Nmoment);
size_t sz=Nmesh*sizeof(TriHexagon);
if(CHECK_ERROR(hipMalloc((void**) &ids, sz))) throw(-1);
if(CHECK_ERROR(hipMemcpy(ids, pids, sz, hipMemcpyHostToDevice))) throw(-1);
sz = num*sizeof(float2);
if(CHECK_ERROR(hipMalloc((void**) &grid_xy, sz))) throw(-1);
if(CHECK_ERROR(hipMemcpy(grid_xy, xy, sz, hipMemcpyHostToDevice))) throw(-1);
fclose(fd);
delete[] pids;
delete[] pgon;
delete[] xy;
return num;
}
void Tablet::set() {
size_t sz=3*Nlayer3*Nmoment*sizeof(floatT3);
printf("#Moments memory: %g M\n", sz*1e-6);
if(CHECK_ERROR(hipMalloc((void**) &moments, sz))) throw(-1);
if(CHECK_ERROR(hipMalloc((void**) &coords, sz))) throw(-1);
}
int Tablet::initMoments() {
Hext = make_float3(1,0,0);
Kani = 0.002;
Jexch=-1.0;
alpha = 0.1;
gamma = 1.0;
Aani[0] = make_float3( 0. , -sqrt(2./3.),sqrt(1./3.));
Aani[1] = make_float3( sqrt(1./2.),0.5*sqrt(2./3.),sqrt(1./3.));
Aani[2] = make_float3(-sqrt(1./2.),0.5*sqrt(2./3.),sqrt(1./3.));
dt =0.1;
if(CHECK_ERROR(hipMemcpyToSymbol(tab, this, sizeof(Tablet)))) throw(-1);
hipLaunchKernelGGL(( ::initMoments), dim3(Nlayer3),dim3(Nmesh), 0, 0, );
return 0;
}
//===========================
int TabletIF::run(int Nt) {
cudaTimer tm; tm.start();
//test_rotate<<<tablet->Nlayer3,tablet->Nmesh>>>(Nt);
//test_ani<<<tablet->Nlayer3,tablet->Nmesh>>>(Nt);
hipLaunchKernelGGL(( stencil), dim3(tablet->Nlayer3),dim3(tablet->Nmesh), 0, 0, Nt);
//::run<<<tablet->Nlayer3,tablet->Nmesh>>>(Nt);
double t=tm.stop()*1e-3; if(t==0.0) t=1;
printf("#Test Time: %g sec, rate=%g Gmom/sec\n", t, 1e-9*Nt*Nmoms/t);
return 1;
}
void Tablet::clear() {
if(CHECK_ERROR(hipFree(ids))) throw(-1);
if(CHECK_ERROR(hipFree(coords))) throw(-1);
if(CHECK_ERROR(hipFree(moments))) throw(-1);
}
void TabletIF::clear() {
delete tablet;
}
int TabletIF::set() {
tablet = new Tablet();
int rs=0;
//{int kerns[]={0,0,0,0,0,0}; rs=tablet->initPentagon(2, kerns);}
//{int kerns[]={0,1,1,1,1,1}; rs=tablet->initPentagon(4, kerns);}
//{int kerns[]={0,3,3,3,3,3}; rs=tablet->initPentagon(10, kerns);}
//{int kerns[]={10,9,10,9,10,9, 3,3,3,3,3,3}; rs=tablet->initPentagon(11, kerns, 2);}//256
{int kerns[]={5,5,5,5,5,5, 0,1,1,1,1,1}; rs=tablet->initPentagon(14, kerns, 2);}//512
//{int kerns[]={0,1,0,1,0,1}; rs=tablet->initPentagon(19, kerns);}//1024
//{int kerns[]={13,13,13,13,13,13, 5,7,5,7,5,7, 0,1,0,1,0,1}; rs=tablet->initPentagon(20, kerns,3);}//1024
//{int kerns[]={13,13,13,13,13,13, 5,6,5,6,5,6, 1,1,1,1,1,1}; rs=tablet->initPentagon(20, kerns,3);}//1024
tablet->Nlayer3 = 30;
Nmoms = 3*tablet->Nlayer3*tablet->Nmoment;
printf("%d moms in layer / %d total\n", rs, Nmoms);
return Nmoms;
}
int TabletIF::init(float* m, float* c) {
if(c) tablet->coords = (float3*)c;
if(m) tablet->moments = (floatT3*)m;
int rs=0;
rs=tablet->initMoments();
//rs=rot_test(1e7,1e4);
printf("Nmoms %d\n", Nmoms);
MinBox[0]=MinBox[1]=-tablet->Rtab; MinBox[2]=0;
MaxBox[0]=MaxBox[1]= tablet->Rtab; MaxBox[2]=1*tablet->Nlayer3;
return rs;
}
int main(int argc, char *argv[]){
int rs=0;
cudaTimer tm; tm.start();
TabletIF tab;
tab.set();
tab.tablet->set();
tab.init();
printf("#Init Time: %g sec, res=%d\n", tm.stop()*1e-3, rs);
tm.start();
int Nloop=10, Nt=1000;
for(int it=0; it<Nloop; it++) {
rs=tab.run(Nt);
//run<<<tab.Nlayer3,tab.Nmesh>>>(Nt);
}
double t=tm.stop()*1e-3; if(t==0.0) t=1;
printf("#Test Time: %g sec, rate=%g Gmom/sec\n", t, 1e-9*Nt*Nloop*tab.Nmoms/t);
tab.tablet->clear();
tab.clear();
return rs;
};
| c2ddb386c43a9340a36b1596259a7acd939c6d69.cu | #include "err.h"
#include <cstdio>
#include <vector_types.h>
#include <vector>
#include <algorithm>
#include <functional>
#include <iostream>
#include "cuda_math.h"
#include "tablet.hpp"
//#define USE_DOUBLE
#ifdef USE_DOUBLE
typedef double floatT;
typedef double3 floatT3;
#define make_floatT3 make_double3
#else//use float
typedef float floatT;
typedef float3 floatT3;
#define make_floatT3 make_float3
#endif
struct TriHexagon;
struct Tablet {// Base structure for storing and accessing the tablet data
floatT3* moments;// array of magnetic moments
float3* coords;// array of coordinates
float2* grid_xy;
float dt, Rtab;
int3 Nrays;// number of grid lines (rays) along each coordinate
TriHexagon* ids; // array of mesh-node indices in the three coordinate systems
float3 Hext;// external field
float3 Aani[3];// anisotropy axes
float Kani, zero;// anisotropy coefficient
float Jexch;// exchange interaction integral
float alpha, gamma; // precession and dissipation constants
int Nlayer3, Nmesh, Nmoment;// number of layer triples in the tablet, plus the number of mesh nodes and moments per tri-layer
int initPentagon(int na, int kerns[], int Ndl=1);
int initMoments();
void set();
void clear();
int getNmoms() { return 3*Nlayer3*Nmoment; }
float3 getMinBox();
float3 getMaxBox();
};
void PrintLastError(const char *file, int line) {
cudaError_t err=cudaGetLastError();
if(err!=cudaSuccess) fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
}
bool CheckError(cudaError_t err, const char *file, int line) {
if(err==cudaSuccess) return false;
fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
return true;
}
void deviceDiagnostics(){
int deviceCount;
CHECK_ERROR( cudaGetDeviceCount(&deviceCount) );
printf("GPU devices :: %d \n", deviceCount);
cudaDeviceProp devProp[deviceCount];
for(int i = 0; i < deviceCount; ++i) {
printf("*** CUDA Device #%d ***", i);
CHECK_ERROR( cudaGetDeviceProperties(&devProp[i], i) );
printf("%s ***\n", devProp[i].name);
printf("\t%d.%d compute capability\n", devProp[i].major, devProp[i].minor);
printf("\t%d multiprocessors\n", devProp[i].multiProcessorCount);
printf("\t%.2fGB max mem pitch of %.2fGB global memory\n", devProp[i].memPitch/(1024.*1024.*1024), devProp[i].totalGlobalMem/(1024.*1024.*1024));
printf("\t%.2fKB total shared memory per block\n", devProp[i].sharedMemPerBlock/1024.);
printf("\t%.2fKB total constant memory\n", devProp[i].totalConstMem/1024.);
printf("\t%.2fK registers per block\n", devProp[i].regsPerBlock/1024.);
printf("\t%d/%d threads per Warp/block\n", devProp[i].warpSize, devProp[i].maxThreadsPerBlock);
printf("\tClock rate: %.2fGHz\n", devProp[i].clockRate*1e-6);
printf("\tTexture alignment: %luB\n", devProp[i].textureAlignment);
printf("\tConcurrent copy and execution: %s\n", (devProp[i].deviceOverlap ? "Yes" : "No"));
printf("\tKernel execution timeout: %s\n", (devProp[i].kernelExecTimeoutEnabled ? "Yes" : "No"));
}
}
//#include "heap.hpp"
__forceinline__ __host__ __device__ floatT3 rotateQ(floatT3 v, floatT3 H) {
register floatT lH = length(H);
register floatT a = 0.5*lH;
if(a < floatT(1e-15)) return v;
floatT3 nH = H*(floatT(1.0)/lH);
#ifdef USE_DOUBLE
double sina, cosa; sincos(a, &sina, &cosa);
#else//use float
#if defined(__CUDA_ARCH__)
float sina, cosa; __sincosf(a, &sina, &cosa);
#else
float sina, cosa; sincosf(a, &sina, &cosa);
#endif
#endif
//register floatT3 u = sina*nH;
/* Quaternion formula :: (floatT(2.0)*dot(u, v))*u + (s*s-dot(u, u))*v + (floatT(2.0)*s)*cross(u,v); */
return make_floatT3( ( nH.x*nH.x*v.x + floatT(2.0)*nH.x*nH.y*v.y + floatT(2.0)*nH.x*nH.z*v.z - v.x - nH.y*nH.y*v.x-nH.z*nH.z*v.x)*sina*sina + v.x + floatT(2.0)*cosa*sina*(nH.y*v.z-nH.z*v.y),
(floatT(2.0)*nH.x*nH.y*v.x + nH.y*nH.y*v.y + floatT(2.0)*nH.y*nH.z*v.z - v.y - nH.x*nH.x*v.y-nH.z*nH.z*v.y)*sina*sina + v.y + floatT(2.0)*cosa*sina*(nH.z*v.x-nH.x*v.z),
(floatT(2.0)*nH.x*nH.z*v.x + floatT(2.0)*nH.y*nH.z*v.y + nH.z*nH.z*v.z - v.z - nH.x*nH.x*v.z-nH.y*nH.y*v.z)*sina*sina + v.z + floatT(2.0)*cosa*sina*(nH.x*v.y-nH.y*v.x) );
};/* rotateQ */
__forceinline__ __host__ __device__ floatT3 rotateM(floatT3 v, floatT3 H){
register floatT a = length(H);
if(a < 1e-15f) return v;
register floatT3 u = H*(floatT(1.0)/a);
#ifdef USE_DOUBLE
double sina, cosa; sincos(a, &sina, &cosa);
#else//use float
#if defined(__CUDA_ARCH__)
float sina, cosa; __sincosf( a , &sina , &cosa );
#else
float sina, cosa; sincosf(a, &sina, &cosa);
#endif
#endif
register floatT ucos = 1-cosa;
floatT rotM[3][3] = {
{cosa + u.x*u.x*ucos , u.x*u.y*ucos - u.z*sina, u.x*u.z*ucos + u.y*sina},
{u.y*u.x*ucos + u.z*sina, cosa + u.y*u.y*ucos , u.y*u.z*ucos - u.x*sina},
{u.z*u.x*ucos - u.y*sina, u.z*u.y*ucos + u.x*sina, cosa + u.z*u.z*ucos }
};
return make_floatT3(v.x*rotM[0][0] + v.y*rotM[0][1] + v.z*rotM[0][2],
v.x*rotM[1][0] + v.y*rotM[1][1] + v.z*rotM[1][2],
v.x*rotM[2][0] + v.y*rotM[2][1] + v.z*rotM[2][2]);
};/* rotateM */
int rot_test(double tMax, double tDrop) {
floatT3 H={0,1,1}, m1={1,0,0}, m2={1,0,0};
double t=0.0, dt=1.e-1;
while(t<tMax) {
printf("%g\t%g\t%g\t%g\t%g\t%g\t%g\n",t, m1.x,m1.y,m1.z, m2.x,m2.y,m2.z);
for(double tN=t+tDrop; t<tN; t+=dt) {
floatT3 tm1=rotateM(m1,(H+0*m2)*dt);
floatT3 tm2=rotateQ(m2,(H+0*m1)*dt);
m1 = tm1; m2 = tm2;
}
}
return m1.x+m2.x;
}
__constant__ Tablet tab;
/*
struct sortT {
unsigned int sort; // 3 * 5 * 2bit
__device__ __host__ inline void set(int il, int ia, int s) { sort &= ~(3<<(2*(il*5+ia))); sort |= s<<(2*(il*5+ia)); }
};*/
struct TriHexagon {
ushort4 idx;
__device__ __host__ inline bool present(int il, int ia) { return 1&(idx.w>>(il*5+ia)); }
__device__ __host__ inline void set(int il, int ia) { idx.w |= 1<<(il*5+ia); }
__device__ __host__ inline void clear(int il, int ia) { idx.w &= ~(1<<(il*5+ia)); }
__device__ __host__ inline int get_id(int i) { return ((unsigned short*)&idx)[i]; }
__device__ __host__ inline void set_id(int i, unsigned short v) { ((unsigned short*)&idx)[i] = v; }
__device__ void set() { idx = tab.ids[threadIdx.x].idx; }
void reset() { idx = make_ushort4(0,0,0,0x7FFF); }
};
__global__ void test_rotate(int Nt) {
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3], zero={0,0,0};
for(int il=0; il<3; il++) {
m[il][0] = pM[id.x];
m[il][1] = (id.w&2)?pM[ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&8)?pM[2*tab.Nmesh+id.z]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
#pragma unroll 1
for(int it=0; it<Nt; it++) {
for(int il=0; il<3; il++) {
for(int i=0; i<3; i++) {
floatT3 mij=m[il][i];
floatT3 Heff=tab.Hext+tab.zero*mij;
m[il][i] = rotateQ(mij, Heff*tab.dt);
}
}
}
id=tab.ids[threadIdx.x].idx; pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&8) pM[2*tab.Nmesh+id.z] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
}
__global__ void test_ani(int Nt) {
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3], zero={0,0,0};
for(int il=0; il<3; il++) {
m[il][0] = pM[id.x];
m[il][1] = (id.w&2)?pM[ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&16)?pM[2*tab.Nmesh+id.z+1]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
#pragma unroll 1
for(int it=0; it<Nt; it++) {
#pragma unroll 3
for(int il=0; il<3; il++) {
#pragma unroll 3
for(int i=0; i<3; i++) {
const int subl=(il+i)%3;
floatT3 mij=m[il][i];
floatT3 Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],mij)*tab.Aani[subl];
m[il][i] = rotateQ(mij, Heff*tab.dt);
}
}
}
id=tab.ids[threadIdx.x].idx; pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&16) pM[2*tab.Nmesh+id.z+1] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
}
/*=======================================
Runge-Kutta stencil 2nd order
=========================================
0: m1 = m0 (rotate) h*S(m0, H0)
m2 = m0 (rotate) 0.5*h*S(m0, H0)
H1 = H1(m1)
h/2: m0 = m2 (rotate) 0.5*h*S(m2, H1)
where S(m_i, H_j) = gamma*H_j + alpha * cross(m_i, H_j)
=======================================*/
__global__ void stencil(int Nt) {
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3], zero={0,0,0};
for(int il=0; il<3; il++) {
m[il][0] = pM[id.x];
m[il][1] = (id.w&2)?pM[ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&16)?pM[2*tab.Nmesh+id.z+1]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
float3 m1, m2;
#pragma unroll 1
for(int it=0; it<Nt; it++) {
#pragma unroll 3
for(int il=0; il<3; il++) {
#pragma unroll 3
for(int i=0; i<3; i++) {
const int subl=(il+i)%3;
floatT3 m0=m[il][i];
floatT3 Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],m0)*tab.Aani[subl];
m1 = rotateQ(m0, tab.dt*(tab.gamma*Heff + tab.alpha*cross(m0,Heff)));
m2 = 0.5*m1;
Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],m2)*tab.Aani[subl];
m0 = rotateQ(m2, 0.5*tab.dt*(tab.gamma*Heff + tab.alpha*cross(m2,Heff)));
m[il][i] = normalize(m0);
}
}
//__syncthreads();
}
id=tab.ids[threadIdx.x].idx; pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&16) pM[2*tab.Nmesh+id.z+1] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += 3*tab.Nmesh;
}
}
/*
__global__ void run(int Nt) {
__shared__ floatT3 shM[2][512], shH[2][512];
ushort4 id=tab.ids[threadIdx.x].idx; floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
floatT3 m[3][3]={pM[id.x],pM[id.x+tab.Nmoment],pM[id.x+2*tab.Nmoment]}, zero={0,0,0};
for(int il=0; il<3; il++) {
const int ish=il*tab.Nmoment;
m[il][0] = pM[id.x+ish];
m[il][1] = (id.w&2)?pM[ish+ tab.Nmesh+id.x]:zero;
m[il][2] = (id.w&16)?pM[ish+2*tab.Nmesh+id.z+1]:zero;
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
}
#pragma unroll 1
for(int it=0; it<Nt; it++) {
floatT3 Hexch[3] = {zero,zero,zero};
#pragma unroll 3
for(int il=0; il<3; il++) {
shM[0][id.x ] = m[il][1]; shH[0][id.x ] = H[1];
shM[1][id.z+1] = m[il][2]; shH[1][id.z+1] = H[2];
__syncthreads();
m[il][1] = shM[0][id.x ]; H[1] = shH[0][id.x ];
m[il][2] = shM[1][id.z+1]; H[2] = shH[1][id.z+1];
#pragma unroll 3
for(int i=0; i<3; i++) {
const int subl=(il+i)%3;
floatT3 mij=m[il][i];
floatT3 Hexch = tab.Jexch*(m[il][(i+1)%3]+m[il][(i+2)%3]);
floatT3 Heff=tab.Hext+tab.Kani*dot(tab.Aani[subl],mij)*tab.Aani[subl];
m[il][i] = rotateQ(mij, Heff*tab.dt);
}
pM += 3*tab.Nmesh;
}
pM -= 9*tab.Nmesh;
}
for(int il=0; il<3; il++) {
pM[id.x] = m[il][0];
if(id.w&2) pM[ tab.Nmesh+id.x] = m[il][1];
if(id.w&16) pM[2*tab.Nmesh+id.z+1] = m[il][2];
id = make_ushort4(id.y,id.z,id.x,((id.w&31)<<10)|(id.w>>5));
pM += tab.Nmoment;
}
pM[id.x]=m0[0]; pM[id.x+tab.Nmoment]=m0[1]; pM[id.x+2*tab.Nmoment]=m0[2];
}
*/
__global__ void initMoments() {
//floatT3 mx={1.0, 0.0, 0.0}, my={0.0, 1.0, 0.0}, mz={0.0, 0.0, 1.0}, zero={0,0,0}, out={0,0,-1};
floatT3 mx=tab.Aani[0], my=tab.Aani[1], mz=tab.Aani[2], zero={0,0,0}, out={0,0,-1};
TriHexagon th; th.set(); int i;
floatT3* pM=&tab.moments[3*blockIdx.x*tab.Nmoment];//Red
float z=blockIdx.x; float2 xy=tab.grid_xy[threadIdx.x];
const float H=0.5, A=2*H/sqrt(3.), h=A/2, a=2*H/3.;
float3* pC=&tab.coords[3*blockIdx.x*tab.Nmoment];
pM[th.idx.x] = mx; pC[th.idx.x] = {xy.x-h,xy.y,z};
i= tab.Nmesh+th.idx.x; if(th.present(0,1)) { pM[i] = my; pC[i] = {xy.x-A,xy.y-H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-A,xy.y-H,z-1}; }
i=2*tab.Nmesh+th.idx.z; if(th.present(0,3)) { pM[i] = mz; pC[i] = {xy.x-A,xy.y+H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-A,xy.y+H,z-1}; }
pM += tab.Nmoment; pC += tab.Nmoment;//Green
z += 1.0/3.0;
pM[th.idx.y] = my; pC[th.idx.y] = {xy.x,xy.y-a/2,z};
i= tab.Nmesh+th.idx.y; if(th.present(1,1)) { pM[i] = mz; pC[i] = {xy.x+A,xy.y-a/2 ,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x+A,xy.y-a/2 ,z-1}; }
i=2*tab.Nmesh+th.idx.x; if(th.present(1,3)) { pM[i] = mx; pC[i] = {xy.x-h,xy.y-a/2-H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-h,xy.y-a/2-H,z-1}; }
pM += tab.Nmoment;//Blue
pC += tab.Nmoment;//Green
z += 1.0/3.0;
pM[th.idx.z] = mz; pC[th.idx.z] = {xy.x,xy.y+a/2,z};
i= tab.Nmesh+th.idx.z; if(th.present(2,1)) { pM[i] = mx; pC[i] = {xy.x-h,xy.y+a/2+H,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x-h,xy.y+a/2+H,z-1}; }
i=2*tab.Nmesh+th.idx.y; if(th.present(2,3)) { pM[i] = my; pC[i] = {xy.x+A,xy.y+a/2 ,z}; } else { pM[i] = zero; pC[i] = out; }//{xy.x+A,xy.y+a/2 ,z-1}; }
}
// Helper structure used when initializing the tablet
struct ITH {// structure of moments symmetric about the base point (three hexagons | three X | six trigons)
int3 ray; // coordinates of the base point in the triple grid (grid-line number)
TriHexagon pid;
bool use; // flag marking whether this structure is in use
void set(int3 r) { use = true; ray = r; pid.reset(); }
int& get_ray(int i) { return ((int*)&ray)[i]; }
};
void setITH(int ic, std::vector<ITH*>& trigs, int num) {
int ip=(ic+1)%3;//, im=(ic+2)%3;
for(int i=0, _r=trigs[0]->get_ray(ic)-1; i<num; i++) {
ITH& th=*trigs[i];
th.pid.set_id(ic,i);
int r=th.get_ray(ic);
//for(int j=0; j<5; j++) th.pid.set(ic,j);
if(_r != r) {
th.pid.clear(ic,1); th.pid.clear(ip,3);
if(i>0) { ITH& _th=*trigs[i-1]; _th.pid.clear(ic,2); _th.pid.clear(ip,4); }
}
_r = r;
}
ITH& _th=*trigs[num-1]; _th.pid.clear(ic,2); _th.pid.clear(ip,4);
}
float3 Tablet::getMinBox() { return make_float3(-Rtab,-Rtab,0.); }
float3 Tablet::getMaxBox() { return make_float3(Rtab,Rtab,Nlayer3); }
int Tablet::initPentagon(int na, int kerns[], int Ndl) {
// Fill the full hexagon with trihexagons labelled by grid-line numbers, moving along a spiral from the center
int num6=3*na*(na-1)+1; // number of mesh nodes in a hexagon with side na
ITH* pgon = new ITH[num6],* p=pgon;
int3 ray=make_int3(na-1), ray0; p->set(ray); // the starting point of the spiral is the center of the hexagon
for(int ia=1; ia<na; ia++) {
ray.x--; ray.y++;
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y--; ray.z++; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y--; ray.x++; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.z--; ray.x++; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y++; ray.z--; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.y++; ray.x--; }
for(int i=0; i<ia; i++) { p++; p->set(ray); ray.z++; ray.x--; }
}
// Remove the corners and count how many trihexagons remain
int num=num6, nal=na-1, jNN=num6-1;
for(int i=0; i<6*Ndl; i++) {
int jM=-((kerns[i]+1)/2), jP=kerns[i]+jM, jN=jNN-(i%6)*nal;
for(int j=0; j<jP; j++) pgon[jN-j].use = false;
if(i%6==0) jN -= 6*nal;
for(int j=jM; j<0; j++) pgon[jN-j].use = false;
if((i+1)%6==0) { jNN -= 6*nal; nal--; }
num -= kerns[i];
}
Nmesh = num;
// Build a vector of trihexagons for the subsequent sorting
p = pgon;
std::vector<ITH*> trigs;
for(int i=0; i<num6; i++) {
if(p->use) trigs.push_back(p);
p++;
}
printf("#Nmesh: %d(theor) =?= %d(real) of %d in hexagon\n", num, int(trigs.size()), num6);
// Sort the trihexagons by coordinates (line number, trihexagon within the line) along each of the axes
std::sort(trigs.begin(), trigs.end(), [](ITH* a, ITH* b) { if(b->ray.x == a->ray.x) return b->ray.y < a->ray.y; return b->ray.x > a->ray.x; });
setITH(0, trigs, num); ray0.x = trigs[0]->ray.x; Nrays.x = trigs[num-1]->ray.x-ray0.x+1;
std::sort(trigs.begin(), trigs.end(), [](ITH* a, ITH* b) { if(b->ray.y == a->ray.y) return b->ray.z < a->ray.z; return b->ray.y > a->ray.y; });
setITH(1, trigs, num); ray0.y = trigs[0]->ray.y; Nrays.y = trigs[num-1]->ray.y-ray0.y+1;
std::sort(trigs.begin(), trigs.end(), [](ITH* a, ITH* b) { if(b->ray.z == a->ray.z) return b->ray.x < a->ray.x; return b->ray.z > a->ray.z; });
setITH(2, trigs, num); ray0.z = trigs[0]->ray.z; Nrays.z = trigs[num-1]->ray.z-ray0.z+1;
char fn[128];
sprintf(fn, "tab%d.dat",na);
FILE* fd=fopen(fn, "w");
fprintf(fd, "#Nrays: %d,%d,%d;\tray0: %d,%d,%d\n", Nrays.x, Nrays.y, Nrays.z, ray0.x, ray0.y, ray0.z);
p = pgon;
//Build the index array and write out diagnostics
TriHexagon* pids=new TriHexagon[num];
float2* xy=new float2[num];
Rtab = sqrt(Nmesh)*5.0/8.0+1;
for(int i=0; i<num; i++) {
while(!p->use) p++;
pids[i].idx = p->pid.idx;
xy[i] = make_float2((p->ray.z-p->ray.x)/sqrt(3),na-1.0-p->ray.y);
fprintf(fd, "%d\t%.4g\t%.4g\t%d\t%d\t%d\t%x\n", i, xy[i].x,xy[i].y, p->pid.idx.x,p->pid.idx.y,p->pid.idx.z,p->pid.idx.w);
p++;
}
Nmoment = 3*Nmesh;//-2*(Nrays.x+Nrays.y+Nrays.z);
fprintf(fd, "# %d Moments\n", Nmoment);
size_t sz=Nmesh*sizeof(TriHexagon);
if(CHECK_ERROR(cudaMalloc((void**) &ids, sz))) throw(-1);
if(CHECK_ERROR(cudaMemcpy(ids, pids, sz, cudaMemcpyHostToDevice))) throw(-1);
sz = num*sizeof(float2);
if(CHECK_ERROR(cudaMalloc((void**) &grid_xy, sz))) throw(-1);
if(CHECK_ERROR(cudaMemcpy(grid_xy, xy, sz, cudaMemcpyHostToDevice))) throw(-1);
fclose(fd);
delete[] pids;
delete[] pgon;
delete[] xy;
return num;
}
void Tablet::set() {
size_t sz=3*Nlayer3*Nmoment*sizeof(floatT3);
printf("#Moments memory: %g M\n", sz*1e-6);
if(CHECK_ERROR(cudaMalloc((void**) &moments, sz))) throw(-1);
if(CHECK_ERROR(cudaMalloc((void**) &coords, sz))) throw(-1);
}
int Tablet::initMoments() {
Hext = make_float3(1,0,0);
Kani = 0.002;
Jexch=-1.0;
alpha = 0.1;
gamma = 1.0;
Aani[0] = make_float3( 0. , -sqrt(2./3.),sqrt(1./3.));
Aani[1] = make_float3( sqrt(1./2.),0.5*sqrt(2./3.),sqrt(1./3.));
Aani[2] = make_float3(-sqrt(1./2.),0.5*sqrt(2./3.),sqrt(1./3.));
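// The three easy axes above are unit vectors with a common z-component sqrt(1/3)
// (about 54.7 degrees from z), spaced 120 degrees apart in the xy-plane.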
dt =0.1;
if(CHECK_ERROR(cudaMemcpyToSymbol(tab, this, sizeof(Tablet)))) throw(-1);
::initMoments<<<Nlayer3,Nmesh>>>();
return 0;
}
//===========================
int TabletIF::run(int Nt) {
cudaTimer tm; tm.start();
//test_rotate<<<tablet->Nlayer3,tablet->Nmesh>>>(Nt);
//test_ani<<<tablet->Nlayer3,tablet->Nmesh>>>(Nt);
stencil<<<tablet->Nlayer3,tablet->Nmesh>>>(Nt);
//::run<<<tablet->Nlayer3,tablet->Nmesh>>>(Nt);
double t=tm.stop()*1e-3; if(t==0.0) t=1;
printf("#Test Time: %g sec, rate=%g Gmom/sec\n", t, 1e-9*Nt*Nmoms/t);
return 1;
}
void Tablet::clear() {
if(CHECK_ERROR(cudaFree(ids))) throw(-1);
if(CHECK_ERROR(cudaFree(coords))) throw(-1);
if(CHECK_ERROR(cudaFree(moments))) throw(-1);
}
void TabletIF::clear() {
delete tablet;
}
int TabletIF::set() {
tablet = new Tablet();
int rs=0;
//{int kerns[]={0,0,0,0,0,0}; rs=tablet->initPentagon(2, kerns);}
//{int kerns[]={0,1,1,1,1,1}; rs=tablet->initPentagon(4, kerns);}
//{int kerns[]={0,3,3,3,3,3}; rs=tablet->initPentagon(10, kerns);}
//{int kerns[]={10,9,10,9,10,9, 3,3,3,3,3,3}; rs=tablet->initPentagon(11, kerns, 2);}//256
{int kerns[]={5,5,5,5,5,5, 0,1,1,1,1,1}; rs=tablet->initPentagon(14, kerns, 2);}//512
//{int kerns[]={0,1,0,1,0,1}; rs=tablet->initPentagon(19, kerns);}//1024
//{int kerns[]={13,13,13,13,13,13, 5,7,5,7,5,7, 0,1,0,1,0,1}; rs=tablet->initPentagon(20, kerns,3);}//1024
//{int kerns[]={13,13,13,13,13,13, 5,6,5,6,5,6, 1,1,1,1,1,1}; rs=tablet->initPentagon(20, kerns,3);}//1024
tablet->Nlayer3 = 30;
Nmoms = 3*tablet->Nlayer3*tablet->Nmoment;
printf("%d moms in layer / %d total\n", rs, Nmoms);
return Nmoms;
}
int TabletIF::init(float* m, float* c) {
if(c) tablet->coords = (float3*)c;
if(m) tablet->moments = (floatT3*)m;
int rs=0;
rs=tablet->initMoments();
//rs=rot_test(1e7,1e4);
printf("Nmoms %d\n", Nmoms);
MinBox[0]=MinBox[1]=-tablet->Rtab; MinBox[2]=0;
MaxBox[0]=MaxBox[1]= tablet->Rtab; MaxBox[2]=1*tablet->Nlayer3;
return rs;
}
int main(int argc, char *argv[]){
int rs=0;
cudaTimer tm; tm.start();
TabletIF tab;
tab.set();
tab.tablet->set();
tab.init();
printf("#Init Time: %g sec, res=%d\n", tm.stop()*1e-3, rs);
tm.start();
int Nloop=10, Nt=1000;
for(int it=0; it<Nloop; it++) {
rs=tab.run(Nt);
//run<<<tab.Nlayer3,tab.Nmesh>>>(Nt);
}
double t=tm.stop()*1e-3; if(t==0.0) t=1;
printf("#Test Time: %g sec, rate=%g Gmom/sec\n", t, 1e-9*Nt*Nloop*tab.Nmoms/t);
tab.tablet->clear();
tab.clear();
return rs;
}
|
1304b41f68c4e4e7ea5c7856a9a46d037e2115f8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/prelu_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"
#include "paddle/phi/kernels/gpu/prelu_funcs.h"
#include "paddle/phi/kernels/primitive/functor_primitives.h"
namespace phi {
enum PRELU_MODE { Element, ChannelFirst, ChannelLast, PRELU_Scalar };
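// PReluOpGradKernel below computes, per element:
//   x_grad     = out_grad            if x > 0, else scale * out_grad
//   alpha_grad = 0                   if x > 0, else x * out_grad
// where `scale` is the alpha entry selected by PRELU_MODE (per element, per channel in
// NCHW/NHWC layout, or a single scalar); the per-element alpha gradient is reduced over
// the remaining axes afterwards in PReluGradKernel.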
template <typename T>
__global__ void PReluOpGradKernel(const T* x_ptr,
const T* alpha_ptr,
const T* out_grad_ptr,
T* x_grad_ptr,
T* alpha_grad_ptr,
size_t channel_num,
size_t plane_size,
size_t spatial_size,
size_t numel,
PRELU_MODE mode) {
CUDA_KERNEL_LOOP(index, numel) {
T scale;
if (mode == Element) {
size_t element_index = index % spatial_size;
scale = alpha_ptr[element_index];
} else if (mode == ChannelFirst) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
scale = alpha_ptr[channel_index];
} else if (mode == ChannelLast) {
size_t channel_index = index % channel_num;
scale = alpha_ptr[channel_index];
} else {
scale = alpha_ptr[0];
}
T x = x_ptr[index];
T out_grad = out_grad_ptr[index];
T zero = static_cast<T>(0);
if (x_grad_ptr != nullptr)
x_grad_ptr[index] = (x > zero) ? out_grad : scale * out_grad;
if (alpha_grad_ptr != nullptr)
alpha_grad_ptr[index] = (x > zero) ? zero : x * out_grad;
}
}
template <typename T>
class PreluOpGradFunctor {
public:
void operator()(gpuStream_t stream,
const T* x,
const T* alpha,
const T* out_grad,
T* x_grad,
T* alpha_grad,
const DDim& input_dims,
PRELU_MODE mode) {
size_t numel = 1;
for (size_t i = 0; i < input_dims.size(); ++i) {
numel *= input_dims[i];
}
size_t plane_size;
size_t spatial_size;
size_t channel;
if (mode == PRELU_Scalar) {
plane_size = 1;
spatial_size = 1;
channel = 1;
} else {
plane_size = numel / input_dims[0] / input_dims[1];
spatial_size = numel / input_dims[0];
channel = mode == ChannelLast ? input_dims[input_dims.size() - 1]
: input_dims[1];
}
hipLaunchKernelGGL(( PReluOpGradKernel<T>)
, dim3(PADDLE_GET_BLOCKS(numel)), dim3(CUDA_NUM_THREADS), 0, stream,
x,
alpha,
out_grad,
x_grad,
alpha_grad,
channel,
plane_size,
spatial_size,
numel,
mode);
}
};
template <typename T, typename Context>
void PReluGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const DenseTensor& out_grad,
const std::string& data_format,
const std::string& mode,
DenseTensor* x_grad,
DenseTensor* alpha_grad) {
dev_ctx.template Alloc<T>(x_grad);
const T* x_ptr = x.data<T>();
const T* alpha_ptr = alpha.data<T>();
const T* out_grad_ptr = out_grad.data<T>();
T* x_grad_ptr = x_grad ? dev_ctx.template Alloc<T>(x_grad) : nullptr;
T* alpha_grad_ptr =
alpha_grad ? dev_ctx.template Alloc<T>(alpha_grad) : nullptr;
if (!x_grad && !alpha_grad) return;
int numel = x.numel();
auto dim = x.dims();
auto x_rank = dim.size();
auto stream = dev_ctx.stream();
T* alpha_grad_tmp_ptr;
DenseTensor alpha_grad_tmp;
if (alpha_grad_ptr == nullptr) {
alpha_grad_tmp_ptr = alpha_grad_ptr;
} else {
DenseTensorMeta alpha_grad_meta(
alpha_grad->dtype(), dim, alpha_grad->layout());
alpha_grad_tmp = phi::Empty(dev_ctx, std::move(alpha_grad_meta));
alpha_grad_tmp_ptr = alpha_grad_tmp.data<T>();
}
PRELU_MODE m;
bool channel_last = false;
if (mode == "element") {
m = Element;
} else if (mode == "channel") {
channel_last = data_format == "NHWC";
m = channel_last ? ChannelLast : ChannelFirst;
} else {
m = PRELU_Scalar;
}
PreluOpGradFunctor<T> prelu_grad;
prelu_grad(stream,
x_ptr,
alpha_ptr,
out_grad_ptr,
x_grad_ptr,
alpha_grad_tmp_ptr,
dim,
m);
if (alpha_grad_tmp_ptr == nullptr) return;
std::vector<int> reduce_dims;
for (size_t i = 0; i < dim.size(); i++) {
if (mode == "channel" && !channel_last && i == 1) continue;
if (mode == "channel" && channel_last && i == dim.size() - 1) continue;
if (mode == "element" && i != 0) continue;
reduce_dims.push_back(i);
}
phi::funcs::ReduceKernel<T, T, kps::AddFunctor, kps::IdentityFunctor<T>>(
static_cast<const phi::GPUContext&>(dev_ctx),
alpha_grad_tmp,
alpha_grad,
kps::IdentityFunctor<T>(),
reduce_dims);
}
} // namespace phi
PD_REGISTER_KERNEL(prelu_grad,
GPU,
ALL_LAYOUT,
phi::PReluGradKernel,
float,
phi::dtype::float16,
double) {}
| 1304b41f68c4e4e7ea5c7856a9a46d037e2115f8.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/prelu_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/core/tensor_meta.h"
#include "paddle/phi/kernels/empty_kernel.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"
#include "paddle/phi/kernels/gpu/prelu_funcs.h"
#include "paddle/phi/kernels/primitive/functor_primitives.h"
namespace phi {
enum PRELU_MODE { Element, ChannelFirst, ChannelLast, PRELU_Scalar };
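// PReluOpGradKernel below computes, per element:
//   x_grad     = out_grad            if x > 0, else scale * out_grad
//   alpha_grad = 0                   if x > 0, else x * out_grad
// where `scale` is the alpha entry selected by PRELU_MODE (per element, per channel in
// NCHW/NHWC layout, or a single scalar); the per-element alpha gradient is reduced over
// the remaining axes afterwards in PReluGradKernel.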
template <typename T>
__global__ void PReluOpGradKernel(const T* x_ptr,
const T* alpha_ptr,
const T* out_grad_ptr,
T* x_grad_ptr,
T* alpha_grad_ptr,
size_t channel_num,
size_t plane_size,
size_t spatial_size,
size_t numel,
PRELU_MODE mode) {
CUDA_KERNEL_LOOP(index, numel) {
T scale;
if (mode == Element) {
size_t element_index = index % spatial_size;
scale = alpha_ptr[element_index];
} else if (mode == ChannelFirst) {
size_t temp = index / plane_size;
size_t channel_index = temp % channel_num;
scale = alpha_ptr[channel_index];
} else if (mode == ChannelLast) {
size_t channel_index = index % channel_num;
scale = alpha_ptr[channel_index];
} else {
scale = alpha_ptr[0];
}
T x = x_ptr[index];
T out_grad = out_grad_ptr[index];
T zero = static_cast<T>(0);
if (x_grad_ptr != nullptr)
x_grad_ptr[index] = (x > zero) ? out_grad : scale * out_grad;
if (alpha_grad_ptr != nullptr)
alpha_grad_ptr[index] = (x > zero) ? zero : x * out_grad;
}
}
template <typename T>
class PreluOpGradFunctor {
public:
void operator()(gpuStream_t stream,
const T* x,
const T* alpha,
const T* out_grad,
T* x_grad,
T* alpha_grad,
const DDim& input_dims,
PRELU_MODE mode) {
size_t numel = 1;
for (size_t i = 0; i < input_dims.size(); ++i) {
numel *= input_dims[i];
}
size_t plane_size;
size_t spatial_size;
size_t channel;
if (mode == PRELU_Scalar) {
plane_size = 1;
spatial_size = 1;
channel = 1;
} else {
plane_size = numel / input_dims[0] / input_dims[1];
spatial_size = numel / input_dims[0];
channel = mode == ChannelLast ? input_dims[input_dims.size() - 1]
: input_dims[1];
}
PReluOpGradKernel<T>
<<<PADDLE_GET_BLOCKS(numel), CUDA_NUM_THREADS, 0, stream>>>(
x,
alpha,
out_grad,
x_grad,
alpha_grad,
channel,
plane_size,
spatial_size,
numel,
mode);
}
};
template <typename T, typename Context>
void PReluGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& alpha,
const DenseTensor& out_grad,
const std::string& data_format,
const std::string& mode,
DenseTensor* x_grad,
DenseTensor* alpha_grad) {
dev_ctx.template Alloc<T>(x_grad);
const T* x_ptr = x.data<T>();
const T* alpha_ptr = alpha.data<T>();
const T* out_grad_ptr = out_grad.data<T>();
T* x_grad_ptr = x_grad ? dev_ctx.template Alloc<T>(x_grad) : nullptr;
T* alpha_grad_ptr =
alpha_grad ? dev_ctx.template Alloc<T>(alpha_grad) : nullptr;
if (!x_grad && !alpha_grad) return;
int numel = x.numel();
auto dim = x.dims();
auto x_rank = dim.size();
auto stream = dev_ctx.stream();
T* alpha_grad_tmp_ptr;
DenseTensor alpha_grad_tmp;
if (alpha_grad_ptr == nullptr) {
alpha_grad_tmp_ptr = alpha_grad_ptr;
} else {
DenseTensorMeta alpha_grad_meta(
alpha_grad->dtype(), dim, alpha_grad->layout());
alpha_grad_tmp = phi::Empty(dev_ctx, std::move(alpha_grad_meta));
alpha_grad_tmp_ptr = alpha_grad_tmp.data<T>();
}
PRELU_MODE m;
bool channel_last = false;
if (mode == "element") {
m = Element;
} else if (mode == "channel") {
channel_last = data_format == "NHWC";
m = channel_last ? ChannelLast : ChannelFirst;
} else {
m = PRELU_Scalar;
}
PreluOpGradFunctor<T> prelu_grad;
prelu_grad(stream,
x_ptr,
alpha_ptr,
out_grad_ptr,
x_grad_ptr,
alpha_grad_tmp_ptr,
dim,
m);
if (alpha_grad_tmp_ptr == nullptr) return;
std::vector<int> reduce_dims;
for (size_t i = 0; i < dim.size(); i++) {
if (mode == "channel" && !channel_last && i == 1) continue;
if (mode == "channel" && channel_last && i == dim.size() - 1) continue;
if (mode == "element" && i != 0) continue;
reduce_dims.push_back(i);
}
phi::funcs::ReduceKernel<T, T, kps::AddFunctor, kps::IdentityFunctor<T>>(
static_cast<const phi::GPUContext&>(dev_ctx),
alpha_grad_tmp,
alpha_grad,
kps::IdentityFunctor<T>(),
reduce_dims);
}
} // namespace phi
PD_REGISTER_KERNEL(prelu_grad,
GPU,
ALL_LAYOUT,
phi::PReluGradKernel,
float,
phi::dtype::float16,
double) {}
|
75e23f3f4afd735c163fd1c9e5b539befc1aed60.hip | // !!! This is a file automatically generated by hipify!!!
/*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
 * This file has the necessary functions to perform X-ray parallel projection
 * operation given a geometry, angles and image. It uses the 3D texture
 * memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include "ray_interpolated_projection_parallel.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("TIGRE:Ax:interpolated_parallel",hipGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 8
#define PIXEL_SIZE_BLOCK 8
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureParallelInterp(float* image,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,hipStream_t* stream);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
__constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device
__global__ void kernelPixelDetector_parallel_interpolated( Geometry geo,
float* detector,
const int currProjSetNumber, const int totalNoOfProjections, hipTextureObject_t tex)
{
// Point3D source ,
// Point3D deltaU,
// Point3D deltaV,
// Point3D uvOrigin,
// float DSO,
// float maxdist){
unsigned long long u = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long v = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long long projNumber=threadIdx.z;
if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK)
return;
int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array
#if IS_FOR_MATLAB_TIGRE
size_t idx = (size_t)(u * (unsigned long long)geo.nDetecV + v)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#else
size_t idx = (size_t)(v * (unsigned long long)geo.nDetecU + u)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#endif
if(indAlpha>=totalNoOfProjections)
return;
Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
float DSO = projFloatsArrayDev[2*projNumber+0];
float maxdist = projFloatsArrayDev[2*projNumber+1];
/////// Get coordinates XYZ of pixel UV
unsigned long pixelV = geo.nDetecV-v-1;
unsigned long pixelU = u;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
Point3D S;
S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
double length=sqrtf((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z));
//now length is the integer number of samples required on this line
length=ceilf(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -S.x)/(length);
vectY=(P.y -S.y)/(length);
vectZ=(P.z -S.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy < length)
length=ceilf((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy);
//Length is not actually a length, but the number of memory reads at the given accuracy ("samples per voxel")
for (i=floorf(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+S.x;
ty=vectY*i+S.y;
tz=vectZ*i+S.z;
sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time.
}
float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+
(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
int interpolation_projection_parallel(float * img, Geometry geo, float** result,float const * const angles,int nangles, const GpuIds& gpuids){
size_t num_bytes = geo.nDetecU*geo.nDetecV *PROJ_PER_BLOCK* sizeof(float);
float** dProjection=(float **)malloc(2*sizeof(float *));
for (int i = 0; i < 2; ++i){
hipMalloc((void**)&dProjection[i], num_bytes);
cudaCheckErrors("hipMalloc projections fail");
}
// allocate streams for memory and compute
int nStreams=2;
hipStream_t* stream=(hipStream_t*)malloc(nStreams*sizeof(hipStream_t));;
for (int i = 0; i < 2; ++i){
hipStreamCreate(&stream[i]);
}
// Texture object variables
hipTextureObject_t *texImg = 0;
hipArray **d_cuArrTex = 0;
texImg =(hipTextureObject_t*)malloc(1*sizeof(hipTextureObject_t));
d_cuArrTex =(hipArray**)malloc(1*sizeof(hipArray*));
CreateTextureParallelInterp(img,geo,&d_cuArrTex[0], &texImg[0],stream);
cudaCheckErrors("Texture allocation fail");
//Done! Image put into texture memory.
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost;
hipHostMalloc((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
float* projFloatsArrayHost;
hipHostMalloc((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float));
// 16x16 gave the best performance empirically
// Funnily that makes it compatible with most GPUs.....
int divU,divV,divangle;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 numBlocks((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 threadsPerBlock(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
unsigned int noOfKernelCalls = (nangles+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
unsigned int i;
float maxdist;
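// The two dProjection buffers are used in ping-pong fashion: iteration i computes
// PROJ_PER_BLOCK projections into one buffer on stream[0], while the buffer filled by
// iteration i-1 is copied back to the host asynchronously on stream[1].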
for ( i=0; i<noOfKernelCalls; i++){
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=i*PROJ_PER_BLOCK+j;
if (proj_global>=nangles)
break;
geo.alpha=angles[proj_global*3];
geo.theta=angles[proj_global*3+1];
geo.psi =angles[proj_global*3+2];
//precomute distances for faster execution
maxdist=maxdistanceCuboid(geo,proj_global);
//Precompute per angle constant stuff for speed
computeDeltas_parallel(geo,geo.alpha,proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
projFloatsArrayHost[2*j]=geo.DSO[proj_global];
projFloatsArrayHost[2*j+1]=floor(maxdist);
}
hipMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[0]);
hipMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,hipMemcpyHostToDevice,stream[0]);
hipStreamSynchronize(stream[0]);
hipLaunchKernelGGL(( kernelPixelDetector_parallel_interpolated), dim3(numBlocks),dim3(threadsPerBlock),0,stream[0], geo,dProjection[(int)i%2==0],i,nangles,texImg[0]);
// copy result to host
if (i>0)
hipMemcpyAsync(result[i*PROJ_PER_BLOCK-PROJ_PER_BLOCK],dProjection[(int)i%2!=0], num_bytes, hipMemcpyDeviceToHost,stream[1]);
}
hipDeviceSynchronize();
int lastangles=nangles-(i-1)*PROJ_PER_BLOCK;
hipMemcpyAsync(result[(i-1)*PROJ_PER_BLOCK],dProjection[(int)(i-1)%2==0], lastangles*geo.nDetecV*geo.nDetecU*sizeof(float), hipMemcpyDeviceToHost,stream[1]);
hipDestroyTextureObject(texImg[0]);
hipFreeArray(d_cuArrTex[0]);
free(texImg); texImg = 0;
free(d_cuArrTex); d_cuArrTex = 0;
cudaCheckErrors("Unbind fail");
hipFree(dProjection[0]);
hipFree(dProjection[1]);
free(dProjection);
hipHostFree(projParamsArrayHost);
hipHostFree(projFloatsArrayHost);
cudaCheckErrors("hipFree d_imagedata fail");
for (int i = 0; i < 2; ++i){
hipStreamDestroy(stream[i]);
}
// hipDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_parallel(Geometry geo, float alpha,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);
S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
//Now lets translate the points where they should be:
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
S.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&S);
//Now lets translate the points where they should be:
S.x=S.x+geo.DSO[i];
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//2: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//5. apply COR. Wherever everything was, now it is offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
void CreateTextureParallelInterp(float* image,Geometry geo,hipArray** d_cuArrTex, hipTextureObject_t *texImage,hipStream_t* stream){ //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const hipExtent extent = make_hipExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
//hipArray Descriptor
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
//cuda Array
hipMalloc3DArray(&d_cuArrTex[0], &channelDesc, extent);
hipMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_hipPitchedPtr((void *)image, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[0];
copyParams.extent = extent;
copyParams.kind = hipMemcpyHostToDevice;
hipMemcpy3DAsync(©Params,stream[1]);
//Array creation End
hipResourceDesc texRes;
memset(&texRes, 0, sizeof(hipResourceDesc));
texRes.resType = hipResourceTypeArray;
texRes.res.array.array = d_cuArrTex[0];
hipTextureDesc texDescr;
memset(&texDescr, 0, sizeof(hipTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = hipFilterModeLinear;
texDescr.addressMode[0] = hipAddressModeBorder;
texDescr.addressMode[1] = hipAddressModeBorder;
texDescr.addressMode[2] = hipAddressModeBorder;
texDescr.readMode = hipReadModeElementType;
hipCreateTextureObject(&texImage[0], &texRes, &texDescr, NULL);
} | 75e23f3f4afd735c163fd1c9e5b539befc1aed60.cu | /*-------------------------------------------------------------------------
*
* CUDA functions for texture-memory interpolation based projection
*
 * This file has the necessary functions to perform X-ray parallel projection
 * operation given a geometry, angles and image. It uses the 3D texture
 * memory linear interpolation to uniformly sample a path to integrate the
* X-rays.
*
* CODE by Ander Biguri
* Sepideh Hatamikia (arbitrary rotation)
* ---------------------------------------------------------------------------
* ---------------------------------------------------------------------------
* Copyright (c) 2015, University of Bath and CERN- European Organization for
* Nuclear Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* ---------------------------------------------------------------------------
*
* Contact: [email protected]
* Codes : https://github.com/CERN/TIGRE
* ---------------------------------------------------------------------------
*/
#include <algorithm>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include "ray_interpolated_projection_parallel.hpp"
#include "TIGRE_common.hpp"
#include <math.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
mexPrintf("%s \n",msg);\
mexErrMsgIdAndTxt("TIGRE:Ax:interpolated_parallel",cudaGetErrorString(__err));\
} \
} while (0)
#define MAXTREADS 1024
#define PROJ_PER_BLOCK 8
#define PIXEL_SIZE_BLOCK 8
/*GEOMETRY DEFINITION
*
* Detector plane, behind
* |-----------------------------|
* | |
* | |
* | |
* | |
* | +--------+ |
* | / /| |
* A Z | / / |*D |
* | | +--------+ | |
* | | | | | |
* | | | *O | + |
* --->y | | | / |
* / | | |/ |
* V X | +--------+ |
* |-----------------------------|
*
* *S
*
*
*
*
*
**/
void CreateTextureParallelInterp(float* image,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,cudaStream_t* stream);
__constant__ Point3D projParamsArrayDev[4*PROJ_PER_BLOCK]; // Dev means it is on device
__constant__ float projFloatsArrayDev[2*PROJ_PER_BLOCK]; // Dev means it is on device
__global__ void kernelPixelDetector_parallel_interpolated( Geometry geo,
float* detector,
const int currProjSetNumber, const int totalNoOfProjections, cudaTextureObject_t tex)
{
// Point3D source ,
// Point3D deltaU,
// Point3D deltaV,
// Point3D uvOrigin,
// float DSO,
// float maxdist){
unsigned long long u = blockIdx.x * blockDim.x + threadIdx.x;
unsigned long long v = blockIdx.y * blockDim.y + threadIdx.y;
unsigned long long projNumber=threadIdx.z;
if (u>= geo.nDetecU || v>= geo.nDetecV || projNumber>=PROJ_PER_BLOCK)
return;
int indAlpha = currProjSetNumber*PROJ_PER_BLOCK+projNumber; // This is the ABSOLUTE projection number in the projection array
#if IS_FOR_MATLAB_TIGRE
size_t idx = (size_t)(u * (unsigned long long)geo.nDetecV + v)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#else
size_t idx = (size_t)(v * (unsigned long long)geo.nDetecU + u)+ projNumber*(unsigned long long)geo.nDetecV *(unsigned long long)geo.nDetecU ;
#endif
if(indAlpha>=totalNoOfProjections)
return;
Point3D uvOrigin = projParamsArrayDev[4*projNumber]; // 6*projNumber because we have 6 Point3D values per projection
Point3D deltaU = projParamsArrayDev[4*projNumber+1];
Point3D deltaV = projParamsArrayDev[4*projNumber+2];
Point3D source = projParamsArrayDev[4*projNumber+3];
float DSO = projFloatsArrayDev[2*projNumber+0];
float maxdist = projFloatsArrayDev[2*projNumber+1];
/////// Get coordinates XYZ of pixel UV
unsigned long pixelV = geo.nDetecV-v-1;
unsigned long pixelU = u;
float vectX,vectY,vectZ;
Point3D P;
P.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
P.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
P.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
Point3D S;
S.x=(source.x+pixelU*deltaU.x+pixelV*deltaV.x);
S.y=(source.y+pixelU*deltaU.y+pixelV*deltaV.y);
S.z=(source.z+pixelU*deltaU.z+pixelV*deltaV.z);
// Length is the ray length in normalized space
double length=sqrtf((S.x-P.x)*(S.x-P.x)+(S.y-P.y)*(S.y-P.y)+(S.z-P.z)*(S.z-P.z));
//now length is the integer number of samples required on this line
length=ceilf(length/geo.accuracy);//Divide the directional vector by an integer
vectX=(P.x -S.x)/(length);
vectY=(P.y -S.y)/(length);
vectZ=(P.z -S.z)/(length);
// //Integrate over the line
float tx,ty,tz;
float sum=0;
float i;
// limit the amount of mem access after the cube, but before the detector.
if ((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy < length)
length=ceilf((2*DSO/geo.dVoxelX+maxdist)/geo.accuracy);
//Length is not actually a length, but the number of memory reads at the given accuracy ("samples per voxel")
for (i=floorf(maxdist/geo.accuracy); i<=length; i=i+1){
tx=vectX*i+S.x;
ty=vectY*i+S.y;
tz=vectZ*i+S.z;
sum += tex3D<float>(tex, tx+0.5f, ty+0.5f, tz+0.5f); // this line is 94% of time.
}
float deltalength=sqrtf((vectX*geo.dVoxelX)*(vectX*geo.dVoxelX)+
(vectY*geo.dVoxelY)*(vectY*geo.dVoxelY)+
(vectZ*geo.dVoxelZ)*(vectZ*geo.dVoxelZ) );
detector[idx]=sum*deltalength;
}
int interpolation_projection_parallel(float * img, Geometry geo, float** result,float const * const angles,int nangles, const GpuIds& gpuids){
size_t num_bytes = geo.nDetecU*geo.nDetecV *PROJ_PER_BLOCK* sizeof(float);
float** dProjection=(float **)malloc(2*sizeof(float *));
for (int i = 0; i < 2; ++i){
cudaMalloc((void**)&dProjection[i], num_bytes);
cudaCheckErrors("cudaMalloc projections fail");
}
// allocate streams for memory and compute
int nStreams=2;
cudaStream_t* stream=(cudaStream_t*)malloc(nStreams*sizeof(cudaStream_t));;
for (int i = 0; i < 2; ++i){
cudaStreamCreate(&stream[i]);
}
// Texture object variables
cudaTextureObject_t *texImg = 0;
cudaArray **d_cuArrTex = 0;
texImg =(cudaTextureObject_t*)malloc(1*sizeof(cudaTextureObject_t));
d_cuArrTex =(cudaArray**)malloc(1*sizeof(cudaArray*));
CreateTextureParallelInterp(img,geo,&d_cuArrTex[0], &texImg[0],stream);
cudaCheckErrors("Texture allocation fail");
//Done! Image put into texture memory.
Point3D source, deltaU, deltaV, uvOrigin;
Point3D* projParamsArrayHost;
cudaMallocHost((void**)&projParamsArrayHost,4*PROJ_PER_BLOCK*sizeof(Point3D));
float* projFloatsArrayHost;
cudaMallocHost((void**)&projFloatsArrayHost,2*PROJ_PER_BLOCK*sizeof(float));
// 16x16 gave the best performance empirically
// Funnily that makes it compatible with most GPUs.....
int divU,divV,divangle;
divU=PIXEL_SIZE_BLOCK;
divV=PIXEL_SIZE_BLOCK;
dim3 numBlocks((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1);
dim3 threadsPerBlock(divU,divV,PROJ_PER_BLOCK);
unsigned int proj_global;
unsigned int noOfKernelCalls = (nangles+PROJ_PER_BLOCK-1)/PROJ_PER_BLOCK; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_BLOCK
unsigned int i;
float maxdist;
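// The two dProjection buffers are used in ping-pong fashion: iteration i computes
// PROJ_PER_BLOCK projections into one buffer on stream[0], while the buffer filled by
// iteration i-1 is copied back to the host asynchronously on stream[1].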
for ( i=0; i<noOfKernelCalls; i++){
for(unsigned int j=0; j<PROJ_PER_BLOCK; j++){
proj_global=i*PROJ_PER_BLOCK+j;
if (proj_global>=nangles)
break;
geo.alpha=angles[proj_global*3];
geo.theta=angles[proj_global*3+1];
geo.psi =angles[proj_global*3+2];
//precomute distances for faster execution
maxdist=maxdistanceCuboid(geo,proj_global);
//Precompute per angle constant stuff for speed
computeDeltas_parallel(geo,geo.alpha,proj_global, &uvOrigin, &deltaU, &deltaV, &source);
//Ray tracing!
projParamsArrayHost[4*j]=uvOrigin; // 6*j because we have 6 Point3D values per projection
projParamsArrayHost[4*j+1]=deltaU;
projParamsArrayHost[4*j+2]=deltaV;
projParamsArrayHost[4*j+3]=source;
projFloatsArrayHost[2*j]=geo.DSO[proj_global];
projFloatsArrayHost[2*j+1]=floor(maxdist);
}
cudaMemcpyToSymbolAsync(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*4*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[0]);
cudaMemcpyToSymbolAsync(projFloatsArrayDev, projFloatsArrayHost, sizeof(float)*2*PROJ_PER_BLOCK,0,cudaMemcpyHostToDevice,stream[0]);
cudaStreamSynchronize(stream[0]);
kernelPixelDetector_parallel_interpolated<<<numBlocks,threadsPerBlock,0,stream[0]>>>(geo,dProjection[(int)i%2==0],i,nangles,texImg[0]);
// copy result to host
if (i>0)
cudaMemcpyAsync(result[i*PROJ_PER_BLOCK-PROJ_PER_BLOCK],dProjection[(int)i%2!=0], num_bytes, cudaMemcpyDeviceToHost,stream[1]);
}
cudaDeviceSynchronize();
int lastangles=nangles-(i-1)*PROJ_PER_BLOCK;
cudaMemcpyAsync(result[(i-1)*PROJ_PER_BLOCK],dProjection[(int)(i-1)%2==0], lastangles*geo.nDetecV*geo.nDetecU*sizeof(float), cudaMemcpyDeviceToHost,stream[1]);
cudaDestroyTextureObject(texImg[0]);
cudaFreeArray(d_cuArrTex[0]);
free(texImg); texImg = 0;
free(d_cuArrTex); d_cuArrTex = 0;
cudaCheckErrors("Unbind fail");
cudaFree(dProjection[0]);
cudaFree(dProjection[1]);
free(dProjection);
cudaFreeHost(projParamsArrayHost);
cudaFreeHost(projFloatsArrayHost);
cudaCheckErrors("cudaFree d_imagedata fail");
for (int i = 0; i < 2; ++i){
cudaStreamDestroy(stream[i]);
}
// cudaDeviceReset();
return 0;
}
/* This code precomputes the location of the source and the delta U and delta V (in the warped space)
 * used to compute the locations of the x-rays. While it seems verbose and overly-optimized,
 * it saves about 30% of each of the kernel calls. That's something!
**/
void computeDeltas_parallel(Geometry geo, float alpha,unsigned int i, Point3D* uvorigin, Point3D* deltaU, Point3D* deltaV, Point3D* source){
Point3D S;
S.x=geo.DSO[i];
S.y=geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5);
S.z=geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
//End point
Point3D P,Pu0,Pv0;
P.x =-(geo.DSD[i]-geo.DSO[i]); P.y = geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); P.z = geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pu0.x=-(geo.DSD[i]-geo.DSO[i]); Pu0.y= geo.dDetecU*(1-((float)geo.nDetecU/2)+0.5); Pu0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-0);
Pv0.x=-(geo.DSD[i]-geo.DSO[i]); Pv0.y= geo.dDetecU*(0-((float)geo.nDetecU/2)+0.5); Pv0.z= geo.dDetecV*(((float)geo.nDetecV/2)-0.5-1);
// Geometric transformations:
P.x=0;Pu0.x=0;Pv0.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&P);
rollPitchYaw(geo,i,&Pu0);
rollPitchYaw(geo,i,&Pv0);
//Now lets translate the points where they should be:
P.x=P.x-(geo.DSD[i]-geo.DSO[i]);
Pu0.x=Pu0.x-(geo.DSD[i]-geo.DSO[i]);
Pv0.x=Pv0.x-(geo.DSD[i]-geo.DSO[i]);
S.x=0;
// Roll pitch yaw
rollPitchYaw(geo,i,&S);
//Now lets translate the points where they should be:
S.x=S.x+geo.DSO[i];
//1: Offset detector
//P.x
P.y =P.y +geo.offDetecU[i]; P.z =P.z +geo.offDetecV[i];
Pu0.y=Pu0.y+geo.offDetecU[i]; Pu0.z=Pu0.z+geo.offDetecV[i];
Pv0.y=Pv0.y+geo.offDetecU[i]; Pv0.z=Pv0.z+geo.offDetecV[i];
//S doesn't need to change
//3: Rotate (around z)!
Point3D Pfinal, Pfinalu0, Pfinalv0;
Pfinal.x =P.x;
Pfinal.y =P.y +geo.offDetecU[i]; Pfinal.z =P.z +geo.offDetecV[i];
Pfinalu0.x=Pu0.x;
Pfinalu0.y=Pu0.y +geo.offDetecU[i]; Pfinalu0.z =Pu0.z +geo.offDetecV[i];
Pfinalv0.x=Pv0.x;
Pfinalv0.y=Pv0.y +geo.offDetecU[i]; Pfinalv0.z =Pv0.z +geo.offDetecV[i];
eulerZYZ(geo,&Pfinal);
eulerZYZ(geo,&Pfinalu0);
eulerZYZ(geo,&Pfinalv0);
eulerZYZ(geo,&S);
//2: Offset image (instead of offsetting the image, -offset everything else)
Pfinal.x =Pfinal.x-geo.offOrigX[i]; Pfinal.y =Pfinal.y-geo.offOrigY[i]; Pfinal.z =Pfinal.z-geo.offOrigZ[i];
Pfinalu0.x=Pfinalu0.x-geo.offOrigX[i]; Pfinalu0.y=Pfinalu0.y-geo.offOrigY[i]; Pfinalu0.z=Pfinalu0.z-geo.offOrigZ[i];
Pfinalv0.x=Pfinalv0.x-geo.offOrigX[i]; Pfinalv0.y=Pfinalv0.y-geo.offOrigY[i]; Pfinalv0.z=Pfinalv0.z-geo.offOrigZ[i];
S.x=S.x-geo.offOrigX[i]; S.y=S.y-geo.offOrigY[i]; S.z=S.z-geo.offOrigZ[i];
// As we want the (0,0,0) to be in a corner of the image, we need to translate everything (after rotation);
Pfinal.x =Pfinal.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinal.y =Pfinal.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinal.z =Pfinal.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalu0.x=Pfinalu0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalu0.y=Pfinalu0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalu0.z=Pfinalu0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
Pfinalv0.x=Pfinalv0.x+geo.sVoxelX/2-geo.dVoxelX/2; Pfinalv0.y=Pfinalv0.y+geo.sVoxelY/2-geo.dVoxelY/2; Pfinalv0.z=Pfinalv0.z+geo.sVoxelZ/2-geo.dVoxelZ/2;
S.x =S.x+geo.sVoxelX/2-geo.dVoxelX/2; S.y =S.y+geo.sVoxelY/2-geo.dVoxelY/2; S.z =S.z +geo.sVoxelZ/2-geo.dVoxelZ/2;
//4. Scale everything so dVoxel==1
Pfinal.x =Pfinal.x/geo.dVoxelX; Pfinal.y =Pfinal.y/geo.dVoxelY; Pfinal.z =Pfinal.z/geo.dVoxelZ;
Pfinalu0.x=Pfinalu0.x/geo.dVoxelX; Pfinalu0.y=Pfinalu0.y/geo.dVoxelY; Pfinalu0.z=Pfinalu0.z/geo.dVoxelZ;
Pfinalv0.x=Pfinalv0.x/geo.dVoxelX; Pfinalv0.y=Pfinalv0.y/geo.dVoxelY; Pfinalv0.z=Pfinalv0.z/geo.dVoxelZ;
S.x =S.x/geo.dVoxelX; S.y =S.y/geo.dVoxelY; S.z =S.z/geo.dVoxelZ;
//5. apply COR. Wherever everything was, now it is offset by a bit
float CORx, CORy;
CORx=-geo.COR[i]*sin(geo.alpha)/geo.dVoxelX;
CORy= geo.COR[i]*cos(geo.alpha)/geo.dVoxelY;
Pfinal.x+=CORx; Pfinal.y+=CORy;
Pfinalu0.x+=CORx; Pfinalu0.y+=CORy;
Pfinalv0.x+=CORx; Pfinalv0.y+=CORy;
S.x+=CORx; S.y+=CORy;
// return
*uvorigin=Pfinal;
deltaU->x=Pfinalu0.x-Pfinal.x;
deltaU->y=Pfinalu0.y-Pfinal.y;
deltaU->z=Pfinalu0.z-Pfinal.z;
deltaV->x=Pfinalv0.x-Pfinal.x;
deltaV->y=Pfinalv0.y-Pfinal.y;
deltaV->z=Pfinalv0.z-Pfinal.z;
*source=S;
}
void CreateTextureParallelInterp(float* image,Geometry geo,cudaArray** d_cuArrTex, cudaTextureObject_t *texImage,cudaStream_t* stream){ //size_t size_image=geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ;
const cudaExtent extent = make_cudaExtent(geo.nVoxelX, geo.nVoxelY, geo.nVoxelZ);
//cudaArray Descriptor
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
//cuda Array
cudaMalloc3DArray(&d_cuArrTex[0], &channelDesc, extent);
cudaMemcpy3DParms copyParams = {0};
//Array creation
copyParams.srcPtr = make_cudaPitchedPtr((void *)image, extent.width*sizeof(float), extent.width, extent.height);
copyParams.dstArray = d_cuArrTex[0];
copyParams.extent = extent;
copyParams.kind = cudaMemcpyHostToDevice;
cudaMemcpy3DAsync(©Params,stream[1]);
//Array creation End
cudaResourceDesc texRes;
memset(&texRes, 0, sizeof(cudaResourceDesc));
texRes.resType = cudaResourceTypeArray;
texRes.res.array.array = d_cuArrTex[0];
cudaTextureDesc texDescr;
memset(&texDescr, 0, sizeof(cudaTextureDesc));
texDescr.normalizedCoords = false;
texDescr.filterMode = cudaFilterModeLinear;
texDescr.addressMode[0] = cudaAddressModeBorder;
texDescr.addressMode[1] = cudaAddressModeBorder;
texDescr.addressMode[2] = cudaAddressModeBorder;
texDescr.readMode = cudaReadModeElementType;
cudaCreateTextureObject(&texImage[0], &texRes, &texDescr, NULL);
} |
e111a0de87578faafda49bcf9cfb61b6c8a92e16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <math.h>
#include <time.h>
#include <vector>
#include <iomanip>
#include <algorithm>
#include <string>
#include <map>
#include <stdint.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
using namespace std;
#define BLOCK_SIZE 32 // Number of threads in x and y direction - Maximum Number of threads per block = 32 * 32 = 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
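// Temperature_solver: one explicit Euler step of the energy equation on interior nodes,
//   T_new = T + dt * ( -u*dT/dx - v*dT/dy + 1/(Re*Pr) * (d2T/dx2 + d2T/dy2) ),
// using central differences; the staggered face velocities are averaged onto the T node.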
__global__ void Temperature_solver(int nx, int ny, int wu, int wv, int wT, float dx, float dy, float dt, float Re, float Pr, float *u, float *v, float *Told, float *T)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 0 && i < nx && j > 0 && j < ny){
Told[i * wT + j] = T[i * wT + j];
T[i * wT + j] = T[i * wT + j] + dt*(-0.5*(u[i * wu + j] + u[(i - 1) * wu + j])*(1.0 / (2.0*dx)*(T[(i + 1) * wT + j] - T[(i - 1) * wT + j])) - 0.5*(v[i * wv + j] + v[i * wv + j - 1])*(1.0 / (2.0*dy)*(T[i * wT + j + 1] - T[i * wT + j - 1])) + 1 / (Re*Pr)*(1 / pow(dx, 2.0f)*(T[(i + 1) * wT + j] - 2.0*T[i * wT + j] + T[(i - 1) * wT + j]) + 1 / pow(dy, 2.0f)*(T[i * wT + j + 1] - 2 * T[i * wT + j] + T[i * wT + j - 1])));
}
__syncthreads();
}
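// PressureSolve: one Jacobi sweep of the pressure Poisson equation
//   lap(p) = (1/dt) * div(u*, v*), reading p_old and writing p_d.
// The squared point-wise change is stored in abs_d so the host can form the convergence
// norm with a single reduction. Note that all four neighbours are weighted by 1/dx^2,
// which assumes dx ~= dy (approximately true for the mesh set up in main()).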
__global__ void PressureSolve(float * p_d, const float * p_old, float * abs_d, const float * us_d, const float * vs_d, int p_xlength, int p_ylength, int wp, int wu, int wv, float dx, float dy, float dt)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int j = threadIdx.y + blockDim.y*blockIdx.y;
if (i > 0 && i < p_xlength && j > 0 && j < p_ylength)
{
// __syncthreads();
p_d[i * wp + j] = pow(dx, 2.0f)*pow(dy, 2.0f) / (-2.0*(pow(dx, 2.0f) + pow(dy, 2.0f)))*(-1.0 / pow(dx, 2.0f)*(p_old[(i + 1) * wp + j] + p_old[(i - 1) * wp + j] + p_old[i * wp + j + 1] + p_old[i * wp + j - 1]) + 1.0 / dt*(1.0 / dx*(us_d[i * wu + j] - us_d[(i - 1) * wu + j]) + 1.0 / dy*(vs_d[i * wv + j] - vs_d[i * wv + j - 1])));
__syncthreads();
abs_d[i * wp + j] = p_d[i * wp + j] - p_old[i * wp + j];
__syncthreads();
abs_d[i * wp + j] = abs_d[i * wp + j] * abs_d[i * wp + j];
//__syncthreads();
} // end if
} // end global
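// PressureBC: zero-gradient conditions (copy the adjacent interior value) on the bottom,
// top and left boundaries and on the right wall below y = 2; on the right outlet
// (y >= 2) the ghost value is minus the interior value, pinning the static pressure
// to approximately zero there.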
__global__ void PressureBC(float * p_d, float * p_ref, int nx, int ny, float dy, int wp)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int j = threadIdx.y + blockDim.y*blockIdx.y;
if (i >= 0 && i < nx + 1 && j == 0){
p_d[i * wp + j] = p_ref[i * wp + j + 1]; // bottom wall - Final
}
__syncthreads();
if (i >= 0 && i < nx + 1 && j == ny){
p_d[i * wp + j] = p_ref[i * wp + j - 1]; // Upper - no flux
}
__syncthreads();
if (j >= 0 && j < ny + 1 && i == 0){
p_d[i * wp + j] = p_ref[(i + 1) * wp + j]; // left wall - not the inlet - Final
}
__syncthreads();
if (j >= 0 && j < ny + 1 && i == nx && j*dy < 2.0){
p_d[i * wp + j] = p_ref[(i - 1) * wp + j]; // right wall - not the outlet - Final
// printf("POSITIVE ");
}
__syncthreads();
if (j >= 0 && j < ny + 1 && i == nx && j*dy >= 2.0){
p_d[i * wp + j] = -p_ref[(i - 1) * wp + j]; // pressure outlet - static pressure is zero - Final
// printf("NEGATIVE ");
}
//__syncthreads();
}
int main()
{
try
{
// output format
float start_clock = clock();
ofstream f("result_gpu.txt"); // Solution Results
f.setf(ios::fixed | ios::showpoint);
f << setprecision(5);
ofstream g("convergence_gpu.txt"); // Convergence history
g.setf(ios::fixed | ios::showpoint);
g << setprecision(5);
cout.setf(ios::fixed | ios::showpoint);
cout << setprecision(5);
//ofstream file_p_before("p_before_gpu_BC.txt");
//file_p_before.setf(ios::fixed | ios::showpoint);
//file_p_before << setprecision(3);
//ofstream file_p_after("p_after_gpu_BC.txt");
//file_p_after.setf(ios::fixed | ios::showpoint);
//file_p_after << setprecision(3);
// Input parameters
float Re, Pr, Fr, T_L, T_0, T_amb, dx, dy, t, eps, /* beta, */ iter, maxiter, tf, st, counter, column, u_wind, T_R, Lx, Ly;
Lx = 4.0; Ly = 5.0; // Domain dimensions
int ni = 10.0; // Number of nodes per unit length in x direction
int nj = 10.0; // Number of nodes per unit length in y direction
int nx = Lx * ni; int ny = Ly * nj; // Number of Nodes in each direction
u_wind = 1; // Reference velocity
st = 0.00005 * 2; // Total variance criteria
eps = 0.001; // Pressure convergence criteria
tf = 100; // Final time step
Pr = 0.5*(0.709 + 0.711); // Prandtl number
Re = 250.0; Fr = 0.3; // Non-dimensional numbers for inflow conditions
dx = Lx / (nx - 1); dy = Ly / (ny - 1); // dx and dy
//beta = 1; // Successive over relaxation factor (SOR)
t = 0; // Initial time step
T_L = 100.0; // Left wall temperature (C)
T_R = 50.0; // Right wall temperature (C)
T_amb = 25.0; // Ambient air temperature (C)
T_0 = 50.0; // Initial air temperature
T_L = T_L + 273.15; T_0 = T_0 + 273.15; T_amb = T_amb + 273.15; T_R = T_R + 273.15;// Unit conversion to (K)
maxiter = 500; // Maximum iteration at each time step
counter = 0; // initial row for output monitoring
column = 1; // Column number for output display
// Records number of clicks a step takes
std::map<string, uint32_t> stepTimingAccumulator;
// Host Vectors
thrust::host_vector<float> u(nx * (ny + 1));
thrust::host_vector<float> us(nx*(ny + 1));
thrust::host_vector<float> uold(nx * (ny + 1));
int wu = ny + 1;
thrust::host_vector<float> v((nx + 1) * ny);
thrust::host_vector<float> vs((nx + 1) * ny);
thrust::host_vector<float> vold((nx + 1) * ny);
int wv = ny;
thrust::host_vector<float> p((nx + 1) * (ny + 1));
// thrust::host_vector<float> abs((nx + 1) * (ny + 1));
int wp = ny + 1;
thrust::host_vector<float> T((nx + 1) * (ny + 1));
int wT = ny + 1;
thrust::host_vector<float> Told((nx + 1) * (ny + 1));
thrust::host_vector<float> om(nx * ny);
thrust::host_vector<float> vc(nx * ny);
thrust::host_vector<float> uc(nx * ny);
thrust::host_vector<float> pc(nx * ny);
thrust::host_vector<float> Tc(nx*ny);
// thrust::host_vector<float> abs_h((nx+1) * (ny + 1));
int wc = ny;
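// All fields are stored flattened in row-major order: entry (i, j) of an array with
// j-stride w lives at index i * w + j (wu = ny + 1 for u, wv = ny for v,
// wp = wT = ny + 1 for p and T, wc = ny for the cell-centred output arrays).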
hipFree(0);
thrust::device_vector<float> us_d(nx*(ny + 1));
thrust::device_vector<float> vs_d((nx + 1) * ny);
thrust::device_vector<float> p_d((nx + 1) * (ny + 1), 0);
thrust::device_vector<float> p_old((nx + 1) * (ny + 1), 0);
thrust::device_vector<float> p_ref((nx + 1) * (ny + 1));
thrust::device_vector<float> abs_d((nx + 1) * (ny + 1));
gpuErrchk( hipPeekAtLastError() );
// Time step size stability criterion
float mt1 = 0.25*pow(dx, 2.0) / (1.0 / Re); float Rer = 1.0 / Re; float mt2 = 0.25*pow(dy, 2.0) / (1.0 / Re);
float dt;
if (mt1 > Rer)
{
dt = Rer;
}
else
{
dt = mt1;
}
if (dt > mt2)
{
dt = mt2;
}
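// Effectively dt = min(0.25*Re*dx^2, 0.25*Re*dy^2, 1/Re), the explicit diffusive
// stability limit with kinematic viscosity 1/Re.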
//......................................................................................
// Step 0 - It can be parallelized
// Initializing the flow variable (Temperature)
// Boundary conditions for T (Initialization)
int step0_start = clock();
for (int i = 0; i < nx + 1; i++)
{
for (int j = 0; j < ny + 1; j++)
{
T[i * wT + j] = T_0 / T_amb;
} // end for j
} // end for i
//......................................................................................
int step0_end = clock();
stepTimingAccumulator["Step 0, Initializing Temperature"] += step0_end - step0_start;
//......................................................................................
// Marching in Time - Outermost loop
while (t <= tf)
{
iter = 0;
int stepi1_start = clock();
//........................................................................................
// Step i1 - it can be parallelized
// boundary conditions for u velocity
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny + 1; j++)
{
if (i == 0 && j > 0 && j < ny)
{
if (j*dy < 2.0)
{
u[i * wu + j] = 0; // left wall - Final
}
else
{
u[i * wu + j] = u_wind; // left inlet - Final
}
}
else if (i == nx - 1 && j>0 && j < ny)
{
if (j*dy < 2.0)
{
u[i * wu + j] = 0; // Right wall has 0 horizontal velocity - Final
}
else
{
u[i * wu + j] = u[(i - 1) * wu + j]; // right outlet - no velocity change
}
}
else if (j == 0)
{
u[i * wu + j] = -u[i * wu + j + 1]; // bottom ghost - Final
}
else if (j == ny)
{
u[i * wu + j] = u[i * wu + j - 1]; // upper ghost - Final
}
} // end for j
} // end for i
int stepi1_end = clock();
stepTimingAccumulator["Step i1 - Set Horizontal Velocity Boundary Conditions"] += stepi1_end - stepi1_start;
//...............................................................................................
//.........................................................................................
// Step i2 - it can be parallelized
// boundary conditions for v velocity
int stepi2_start = clock();
for (int i = 0; i < nx + 1; i++)
{
for (int j = 0; j < ny; j++)
{
if (j == 0 && i > 0 && i < nx)
{
v[i * wv + j] = 0; // bottom wall - Final
}
else if (j == ny - 1 && i > 0 && i < nx)
{
v[i * wv + j] = v[i * wv + j - 1]; // upper wall - Final
}
else if (i == 0)
{
v[i * wv + j] = -v[(i + 1) * wv + j]; // left ghost (left wall and inlet have 0 vertical velocity) - Final
}
else if (i == nx)
{
if (j*dy < 2.0)
{
v[i * wv + j] = -v[(i - 1) * wv + j]; // right ghost (Right wall has 0 vertical velocity) - Final
}
else
{
v[i * wv + j] = v[(i - 1) * wv + j]; // right outlet - no velocity gradient
}
}
} // end for j
} // end for I
int stepi2_end = clock();
stepTimingAccumulator["Step i2 - Set Vertical Velocity Boundary Conditions"] += stepi2_end - stepi2_start;
//...............................................................................................
//...............................................................................................
int step1_start = clock();
//.........................................................................................
// Step 1 - it can be parallelized - Solve for intermediate velocity values
// u - us - vh - a
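// Predictor for u: us = u + dt * ( -u*du/dx - vh*du/dy + (1/Re)*lap(u) ),
// where vh is v averaged from the four surrounding v faces onto the u node.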
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny; j++)
{
float vh = 1.0 / 4.0*(v[i * wv + j] + v[(i + 1) * wv + j] + v[i * wv + j - 1] + v[(i + 1) * wv + j - 1]); // v hat
float a = u[i * wu + j] * 1.0 / (2.0*dx)*(u[(i + 1) * wu + j] - u[(i - 1) * wu + j]) + vh*1.0 / (2.0*dy)*(u[i * wu + j + 1] - u[i * wu + j - 1]); // a
us[i * wu + j] = dt / Re*(1.0 / pow(dx, 2.0)*(u[(i + 1) * wu + j] - 2.0*u[i * wu + j] + u[(i - 1) * wu + j]) + 1.0 / pow(dy, 2.0)*(u[i * wu + j + 1] - 2.0*u[i * wu + j] + u[i * wu + j - 1])) - a*dt + u[i * wu + j]; // u star
} // end for j
} // end for i
//..........................................................................................
// Step 1 - it can be parallelized
// v - vs - uh - b
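// Predictor for v: vs = v + dt * ( -uh*dv/dx - v*dv/dy + (1/Re)*lap(v) + (1/Fr^2)*(theta-1)/theta ),
// where uh is u averaged onto the v node and theta is the temperature averaged onto the
// same node; the last term is the buoyancy source.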
for (int i = 1; i < nx; i++)
{
for (int j = 1; j < ny - 1; j++)
{
float uh = 1.0 / 4.0*(u[i * wu + j] + u[i * wu + j + 1] + u[(i - 1) * wu + j] + u[(i - 1) * wu + j + 1]);
float b = uh*1.0 / (2.0*dx)*(v[(i + 1) * wv + j] - v[(i - 1) * wv + j]) + v[i * wv + j] * 1.0 / (2.0*dy)*(v[i * wv + j + 1] - v[i * wv + j - 1]); // b
vs[i * wv + j] = dt / Re*(1.0 / pow(dx, 2.0)*(v[(i + 1) * wv + j] - 2.0*v[i * wv + j] + v[(i - 1) * wv + j]) + 1.0 / pow(dy, 2.0)*(v[i * wv + j + 1] - 2.0*v[i * wv + j] + v[i * wv + j - 1])) + dt / pow(Fr, 2.0)*(0.5*(T[i * wT + j] + T[i * wT + j - 1]) - 1) / (0.5*(T[i * wT + j] + T[i * wT + j - 1])) - b*dt + v[i * wv + j]; // v
} // end for j
} // end for i
//...........................................................................................
// vs and us on Boundary conditions
for (int i = 0; i < nx; i++)
{
us[i * wu + 0] = -us[i * wu + 1]; // bottom ghost - Final
} // end for i
//...........................................................................................
for (int j = 0; j < ny + 1; j++)
{
if (j*dy < 2.0)
{
us[0 * wu + j] = 0; // left wall - Final
us[(nx - 1) * wu + j] = 0; // right wall - Final
}
else
{
us[0 * wu + j] = u_wind; // left inlet - Final
}
}
//...........................................................................................
for (int j = 0; j < ny; j++)
{
vs[0 * wv + j] = -vs[1 * wv + j]; // left ghost (Both wall and inlet have 0 vs) - Final
if (j*dy < 2.0)
{
vs[nx * wv + j] = -vs[(nx - 1) * wv + j]; // right ghost (only the right wall) - Final
}
else
{
vs[nx * wv + j] = vs[(nx - 1) * wv + j]; // right outlet - no flux
}
}
//............................................................................................
for (int i = 0; i < nx + 1; i++)
{
vs[i * wv + 0] = 0; // Bottom wall - Final
} // end for i
//............................................................................................
int step1_end = clock();
stepTimingAccumulator["Step 1 - Solve for intermediate velocities"] += step1_end - step1_start;
//...............................................................................................
// Step 2 - Parallel GPU version
// Poisson equation for pressure
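// Jacobi-style pressure iteration on the device: every sweep stores the current
// field in p_old, launches PressureSolve to rebuild each interior node from its
// four neighbours and the divergence of the intermediate velocities, and then sums
// the per-cell squared change (abs_d) with thrust::reduce; the square root of that
// sum is the residual tested against eps below.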
int step2_start = clock();
// Cuda set up
int p_xlength = nx;
int p_ylength = ny;
float *ptr_us = thrust::raw_pointer_cast(&us_d[0]);
float *ptr_vs = thrust::raw_pointer_cast(&vs_d[0]);
float *ptr_p = thrust::raw_pointer_cast(&p_d[0]);
float *ptr_p_old = thrust::raw_pointer_cast(&p_old[0]);
float *ptr_abs = thrust::raw_pointer_cast(&abs_d[0]);
float *ptr_p_ref = thrust::raw_pointer_cast(&p_ref[0]);
float error = 1.0; iter = 0;
// float diffp = 0;
us_d = us;
vs_d = vs;
cout << t << endl;
// Begin Jacobi loop
while (error > eps){
gpuErrchk( hipPeekAtLastError() );
//error = 0.0;
// p_d = p;
p_old = p_d;
// SOR pressure solver
hipLaunchKernelGGL(( PressureSolve), dim3(dim3( (ny+1)/BLOCK_SIZE + 1, (nx+1)/BLOCK_SIZE + 1, 1)) , dim3(dim3(BLOCK_SIZE,BLOCK_SIZE,1)), 0, 0, ptr_p, ptr_p_old, ptr_abs, ptr_us, ptr_vs, p_xlength, p_ylength, wp, wu, wv, dx, dy, dt);
hipDeviceSynchronize();
// p = p_d;
p_ref = p_d;
error = thrust::reduce(abs_d.begin(), abs_d.end());
/* for (int i = 1; i < nx; i++)
{
for (int j = 1; j < ny; j++)
{
diffp = pow((p[i * wp + j] - p_old[i * wp + j]), 2.0);
error = error + diffp;
} // end for j
} // end for i
*/
/* for(int i = 0; i < nx + 1; ++i)
{
for(int j = 0; j < ny + 1; ++j)
{
file_p_before << p[i * wp + j] << "\t";
}
file_p_before << endl;
}
*/
// Apply boundary conditions
hipLaunchKernelGGL(( PressureBC), dim3(dim3( (ny+1)/BLOCK_SIZE + 1, (nx+1)/BLOCK_SIZE + 1, 1)) , dim3(dim3(BLOCK_SIZE,BLOCK_SIZE,1)), 0, 0, ptr_p, ptr_p_ref, nx, ny, dy, wp);
hipDeviceSynchronize();
// p = p_d;
//file_p_after << p.size() << endl;
/* for(int i = 0; i < nx + 1; ++i)
{
for(int j = 0; j < ny + 1; ++j)
{
file_p_after << p[i * wp + j] << "\t";
}
file_p_after << endl;
} */
error = pow(error, 0.5);
iter = iter + 1;
if (iter > maxiter){
break;
}
} // end while eps
p = p_d;
/*
break;
error = pow(error, 0.5);
iter = iter + 1;
if (iter == maxiter){
break;
}
} // end while eps
*/
int step2_end = clock();
stepTimingAccumulator["Step 2 - Solve for pressure until tolerance or max iterations"] += step2_end - step2_start;
//.................................................................................................
// Step 3 - It can be parallelized
// velocity update - projection method
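// Corrector (projection) step: the intermediate velocities are corrected by the
// discrete pressure gradient, u = us - dt/dx*(dp) and v = vs - dt/dy*(dp), pushing
// the updated field toward zero divergence; the previous values are saved in uold
// and vold (vold feeds the steady-state check in step 5).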
int step3_start = clock();
// u
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny; j++)
{
uold[i * wu + j] = u[i * wu + j];
u[i * wu + j] = us[i * wu + j] - dt / dx*(p[(i + 1) * wp + j] - p[i * wp + j]);
} // end for j
} // end for i
//................................................
// v
for (int i = 1; i < nx; i++)
{
for (int j = 1; j < ny - 1; j++)
{
vold[i * wv + j] = v[i * wv + j];
v[i * wv + j] = vs[i * wv + j] - dt / dy*(p[i * wp + j + 1] - p[i * wp + j]);
} // end for j
} // end for i
int step3_end = clock();
stepTimingAccumulator["Step 3 - Velocity Update"] += step3_end - step3_start;
//...............................................................................................
//...............................................................................................
// Step 4 - It can be parallelized
// Solving for temperature
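// The temperature update runs on the GPU: T, Told, u and v are copied into device
// vectors, Temperature_solver advances the interior nodes one time step, and the
// results are copied back to the host before the boundary-condition pass in step i3.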
int step4_start = clock();
thrust::device_vector<float> d_T = T;
thrust::device_vector<float> d_Told = Told;
thrust::device_vector<float> d_u = u;
thrust::device_vector<float> d_v = v;
int gridsize_x = nx/BLOCK_SIZE + 1;
int gridsize_y = ny/BLOCK_SIZE + 1;
dim3 dimgrid(gridsize_x, gridsize_y, 1); // The grid has #gridsize blocks in x and 1 block in y and 1 block in z direction
dim3 dimblock(BLOCK_SIZE, BLOCK_SIZE, 1);
float *ptr_u = thrust::raw_pointer_cast(&d_u[0]);
float *ptr_v = thrust::raw_pointer_cast(&d_v[0]);
float *ptr_T = thrust::raw_pointer_cast(&d_T[0]);
float *ptr_Told = thrust::raw_pointer_cast(&d_Told[0]);
hipLaunchKernelGGL(( Temperature_solver), dim3(dimgrid), dim3(dimblock), 0, 0, nx, ny, wu, wv, wT, dx, dy, dt, Re, Pr, ptr_u, ptr_v, ptr_Told, ptr_T);
thrust::copy(d_Told.begin(), d_Told.end(), Told.begin());
thrust::copy(d_T.begin(), d_T.end(), T.begin());
int step4_end = clock();
stepTimingAccumulator["Step 4 - Solving for temperature"] += step4_end - step4_start;
//................................................................................................
//...............................................................................................
// Step i3 - Initializing boundary conditions for temperature
// boundary conditions for Temperature
int stepi3_start = clock();
for (int i = 0; i < nx + 1; i++)
{
for (int j = 0; j < ny + 1; j++)
{
if (j == 0)
{
T[i * wT + j] = T[i * wT + j + 1]; // bottom wall - Insulated - no flux - Final
}
else if (j == ny)
{
T[i * wT + j] = 2.0*(T_0) / T_amb - T[i * wT + j - 1]; // upper boundary - lid with ambient temperature (as air) - Final
}
else if (i == 0)
{
if (j*dy < 2.0)
{
T[i * wT + j] = 2.0*T_L / T_amb - T[(i + 1) * wT + j]; // left wall at T_L - Constant Temperature - Final
}
else
{
T[i * wT + j] = 2.0*T_0 / T_amb - T[(i + 1) * wT + j]; // left inlet at T_0 (initial temperature) - Final
}
}
else if (i == nx)
{
if (j*dy < 2.0)
{
T[i * wT + j] = 2.0*T_R / T_amb - T[(i - 1) * wT + j]; // right wall at T_R - Final
}
}
} // end for j
} // end for i
int stepi3_end = clock();
stepTimingAccumulator["Step i3 - Initializing boundary conditions for temperature"] += stepi3_end - stepi3_start;
//...............................................................................................
//...............................................................................................
// Step 5 - Checking if solution reached steady state
// Checking the steady state condition
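// TV is the mean absolute change of v over the interior nodes for this time step;
// the simulation is declared steady (and the time loop exits) once TV drops below
// st while the pressure residual is simultaneously below eps.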
int step5_start = clock();
float TV, diffv; TV = 0;
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 2; j++)
{
diffv = v[i * wv + j] - vold[i * wv + j];
TV = TV + pow(pow(diffv, 2), 0.5);
} // end for j
} // end for i
TV = TV / ((nx - 1)*(ny - 2));
if (TV < st && error < eps)
{
cout << "Steady state time = " << t << " (s) " << endl;
break;
}
counter = counter + 1;
if (fmod(counter, 10) == 0 || counter == 1)
{
//cout << "" << endl;
//cout << "Column" << setw(30) << "time(s)" << setw(30) << "Iterations on Pressure" << setw(30) << "Pressure Residual" << setw(30) << "Total Variance" << endl;
} // end if
int step5_end = clock();
stepTimingAccumulator["Step 5 - Check for steady state"] += step5_end - step5_start;
//...............................................................................................
//cout << column << setw(30) << t << setw(30) << iter << setw(30) << error << setw(30) << TV << endl;
g << column << setw(30) << t << setw(30) << iter << setw(30) << error << setw(30) << TV << endl;
t = t + dt;
column = column + 1;
} // end while time
//........................................................................................................
// Step 6
// Co-locate the staggered grid points
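// Face- and node-stored quantities are averaged back to cell centres (uc, vc, pc,
// Tc) and the vorticity om is formed from finite differences of the face
// velocities, so the output file below holds everything on one co-located grid.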
int step6_start = clock();
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
vc[i * wc + j] = 1.0 / 2.0*(v[(i + 1) * wv + j] + v[i * wv + j]);
pc[i * wc + j] = 1.0 / 4.0*(p[i * wp + j] + p[(i + 1) * wp + j] + p[i * wp + j + 1] + p[(i + 1) * wp + j + 1]);
uc[i * wc + j] = 1.0 / 2.0*(u[i*wu + j] + u[i * wu + j + 1]);
om[i * wc + j] = 1.0 / dx*(v[(i + 1) * wv + j] - v[i * wv + j]) - 1.0 / dy*(u[i * wu + j + 1] - u[i * wu + j]);
Tc[i * wc + j] = 1.0 / 4.0*(T[i * wT + j] + T[(i + 1) * wT + j] + T[i * wT + j + 1] + T[(i + 1) * wT + j + 1]);
} // end for j
} // end for i
//........................................................................................................
int step6_end = clock();
stepTimingAccumulator["Step 6 - Co-locate staggered grid points"] += step6_end - step6_start;
// Steady state results
for (int j = 0; j < ny; j++)
{
for (int i = 0; i < nx; i++)
{
f << setw(15) << t - dt << setw(15) << i*dx << setw(15) << j*dy << setw(15) << uc[i * wc + j] << setw(15) << vc[i * wc + j] << setw(15) << pc[i * wc + j] << setw(15) << Tc[i * ny + j] * T_amb - 273.15 << setw(15) << om[i * wc + j] << endl;
} // end for i
} // end for j
//.........................................................................................................
float end_clock = clock();
cout << "CPU time = " << (end_clock - start_clock) / CLOCKS_PER_SEC << " (s)" << endl;
//cout << "Re = " << Re << endl;
//cout << "Fr = " << Fr << endl;
for (auto it = stepTimingAccumulator.begin(); it != stepTimingAccumulator.end(); it++)
{
float seconds = (float)it->second / CLOCKS_PER_SEC;
std::cout << it->first << "\t" << seconds << endl;
}
}
catch(thrust::system_error e)
{
std::cerr << e.what() << std::endl;
}
return 0;
} // end main
| e111a0de87578faafda49bcf9cfb61b6c8a92e16.cu | #include <iostream>
#include <fstream>
#include <math.h>
#include <time.h>
#include <vector>
#include <iomanip>
#include <algorithm>
#include <string>
#include <map>
#include <stdint.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
using namespace std;
#define BLOCK_SIZE 32 // Number of threads in x and y direction - Maximum Number of threads per block = 32 * 32 = 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void Temperature_solver(int nx, int ny, int wu, int wv, int wT, float dx, float dy, float dt, float Re, float Pr, float *u, float *v, float *Told, float *T)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 0 && i < nx && j > 0 && j < ny){
Told[i * wT + j] = T[i * wT + j];
T[i * wT + j] = T[i * wT + j] + dt*(-0.5*(u[i * wu + j] + u[(i - 1) * wu + j])*(1.0 / (2.0*dx)*(T[(i + 1) * wT + j] - T[(i - 1) * wT + j])) - 0.5*(v[i * wv + j] + v[i * wv + j - 1])*(1.0 / (2.0*dy)*(T[i * wT + j + 1] - T[i * wT + j - 1])) + 1 / (Re*Pr)*(1 / pow(dx, 2.0f)*(T[(i + 1) * wT + j] - 2.0*T[i * wT + j] + T[(i - 1) * wT + j]) + 1 / pow(dy, 2.0f)*(T[i * wT + j + 1] - 2 * T[i * wT + j] + T[i * wT + j - 1])));
}
__syncthreads();
}
__global__ void PressureSolve(float * p_d, const float * p_old, float * abs_d, const float * us_d, const float * vs_d, int p_xlength, int p_ylength, int wp, int wu, int wv, float dx, float dy, float dt)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int j = threadIdx.y + blockDim.y*blockIdx.y;
if (i > 0 && i < p_xlength && j > 0 && j < p_ylength)
{
// __syncthreads();
p_d[i * wp + j] = pow(dx, 2.0f)*pow(dy, 2.0f) / (-2.0*(pow(dx, 2.0f) + pow(dy, 2.0f)))*(-1.0 / pow(dx, 2.0f)*(p_old[(i + 1) * wp + j] + p_old[(i - 1) * wp + j] + p_old[i * wp + j + 1] + p_old[i * wp + j - 1]) + 1.0 / dt*(1.0 / dx*(us_d[i * wu + j] - us_d[(i - 1) * wu + j]) + 1.0 / dy*(vs_d[i * wv + j] - vs_d[i * wv + j - 1])));
__syncthreads();
abs_d[i * wp + j] = p_d[i * wp + j] - p_old[i * wp + j];
__syncthreads();
abs_d[i * wp + j] = abs_d[i * wp + j] * abs_d[i * wp + j];
//__syncthreads();
} // end if
} // end global
__global__ void PressureBC(float * p_d, float * p_ref, int nx, int ny, float dy, int wp)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int j = threadIdx.y + blockDim.y*blockIdx.y;
if (i >= 0 && i < nx + 1 && j == 0){
p_d[i * wp + j] = p_ref[i * wp + j + 1]; // bottom wall - Final
}
__syncthreads();
if (i >= 0 && i < nx + 1 && j == ny){
p_d[i * wp + j] = p_ref[i * wp + j - 1]; // Upper - no flux
}
__syncthreads();
if (j >= 0 && j < ny + 1 && i == 0){
p_d[i * wp + j] = p_ref[(i + 1) * wp + j]; // left wall - not the inlet - Final
}
__syncthreads();
if (j >= 0 && j < ny + 1 && i == nx && j*dy < 2.0){
p_d[i * wp + j] = p_ref[(i - 1) * wp + j]; // right wall - not the outlet - Final
// printf("POSITIVE ");
}
__syncthreads();
if (j >= 0 && j < ny + 1 && i == nx && j*dy >= 2.0){
p_d[i * wp + j] = -p_ref[(i - 1) * wp + j]; // pressure outlet - static pressure is zero - Final
// printf("NEGATIVE ");
}
//__syncthreads();
}
int main()
{
try
{
// output format
float start_clock = clock();
ofstream f("result_gpu.txt"); // Solution Results
f.setf(ios::fixed | ios::showpoint);
f << setprecision(5);
ofstream g("convergence_gpu.txt"); // Convergence history
g.setf(ios::fixed | ios::showpoint);
g << setprecision(5);
cout.setf(ios::fixed | ios::showpoint);
cout << setprecision(5);
//ofstream file_p_before("p_before_gpu_BC.txt");
//file_p_before.setf(ios::fixed | ios::showpoint);
//file_p_before << setprecision(3);
//ofstream file_p_after("p_after_gpu_BC.txt");
//file_p_after.setf(ios::fixed | ios::showpoint);
//file_p_after << setprecision(3);
// Input parameters
float Re, Pr, Fr, T_L, T_0, T_amb, dx, dy, t, eps, /* beta, */ iter, maxiter, tf, st, counter, column, u_wind, T_R, Lx, Ly;
Lx = 4.0; Ly = 5.0; // Domain dimensions
int ni = 10.0; // Number of nodes per unit length in x direction
int nj = 10.0; // Number of nodes per unit length in y direction
int nx = Lx * ni; int ny = Ly * nj; // Number of Nodes in each direction
u_wind = 1; // Reference velocity
st = 0.00005 * 2; // Total variance criteria
eps = 0.001; // Pressure convergence criteria
tf = 100; // Final time step
Pr = 0.5*(0.709 + 0.711); // Prandtl number
Re = 250.0; Fr = 0.3; // Non-dimensional numbers for inflow conditions
dx = Lx / (nx - 1); dy = Ly / (ny - 1); // dx and dy
//beta = 1; // Successive over relaxation factor (SOR)
t = 0; // Initial time step
T_L = 100.0; // Left wall temperature (C)
T_R = 50.0; // Right wall temperature (C)
T_amb = 25.0; // Ambient air temperature (C)
T_0 = 50.0; // Initial air temperature
T_L = T_L + 273.15; T_0 = T_0 + 273.15; T_amb = T_amb + 273.15; T_R = T_R + 273.15;// Unit conversion to (K)
maxiter = 500; // Maximum iteration at each time step
counter = 0; // initial row for output monitoring
column = 1; // Column number for output display
// Records number of clicks a step takes
std::map<string, uint32_t> stepTimingAccumulator;
// Host Vectors
thrust::host_vector<float> u(nx * (ny + 1));
thrust::host_vector<float> us(nx*(ny + 1));
thrust::host_vector<float> uold(nx * (ny + 1));
int wu = ny + 1;
thrust::host_vector<float> v((nx + 1) * ny);
thrust::host_vector<float> vs((nx + 1) * ny);
thrust::host_vector<float> vold((nx + 1) * ny);
int wv = ny;
thrust::host_vector<float> p((nx + 1) * (ny + 1));
// thrust::host_vector<float> abs((nx + 1) * (ny + 1));
int wp = ny + 1;
thrust::host_vector<float> T((nx + 1) * (ny + 1));
int wT = ny + 1;
thrust::host_vector<float> Told((nx + 1) * (ny + 1));
thrust::host_vector<float> om(nx * ny);
thrust::host_vector<float> vc(nx * ny);
thrust::host_vector<float> uc(nx * ny);
thrust::host_vector<float> pc(nx * ny);
thrust::host_vector<float> Tc(nx*ny);
// thrust::host_vector<float> abs_h((nx+1) * (ny + 1));
int wc = ny;
cudaFree(0);
thrust::device_vector<float> us_d(nx*(ny + 1));
thrust::device_vector<float> vs_d((nx + 1) * ny);
thrust::device_vector<float> p_d((nx + 1) * (ny + 1), 0);
thrust::device_vector<float> p_old((nx + 1) * (ny + 1), 0);
thrust::device_vector<float> p_ref((nx + 1) * (ny + 1));
thrust::device_vector<float> abs_d((nx + 1) * (ny + 1));
gpuErrchk( cudaPeekAtLastError() );
// Time step size stability criterion
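// dt is the most restrictive of three explicit limits: 0.25*dx^2*Re, 1/Re and
// 0.25*dy^2*Re (mt1, Rer and mt2 below), which keeps the explicit diffusive update stable.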
float mt1 = 0.25*pow(dx, 2.0) / (1.0 / Re); float Rer = 1.0 / Re; float mt2 = 0.25*pow(dy, 2.0) / (1.0 / Re);
float dt;
if (mt1 > Rer)
{
dt = Rer;
}
else
{
dt = mt1;
}
if (dt > mt2)
{
dt = mt2;
}
//......................................................................................
// Step 0 - It can be parallelized
// Initializing the flow variable (Temperature)
// Boundary conditions for T (Initialization)
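// Temperature is carried in non-dimensional form (scaled by T_amb), so the whole
// field simply starts at the uniform value T_0 / T_amb.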
int step0_start = clock();
for (int i = 0; i < nx + 1; i++)
{
for (int j = 0; j < ny + 1; j++)
{
T[i * wT + j] = T_0 / T_amb;
} // end for j
} // end for i
//......................................................................................
int step0_end = clock();
stepTimingAccumulator["Step 0, Initializing Temperature"] += step0_end - step0_start;
//......................................................................................
// Marching in Time - Outermost loop
while (t <= tf)
{
iter = 0;
int stepi1_start = clock();
//........................................................................................
// Step i1 - it can be parallelized
// boundary conditions for u velocity
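// Left boundary: below y = 2 it is a no-slip wall (u = 0), above it the inlet
// imposes u = u_wind. Right boundary: wall below y = 2, zero-gradient outlet above.
// The bottom row is a ghost layer enforcing no slip via a sign-reversed copy, and
// the top row copies the adjacent interior value.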
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny + 1; j++)
{
if (i == 0 && j > 0 && j < ny)
{
if (j*dy < 2.0)
{
u[i * wu + j] = 0; // left wall - Final
}
else
{
u[i * wu + j] = u_wind; // left inlet - Final
}
}
else if (i == nx - 1 && j>0 && j < ny)
{
if (j*dy < 2.0)
{
u[i * wu + j] = 0; // Right wall has 0 horizontal velocity - Final
}
else
{
u[i * wu + j] = u[(i - 1) * wu + j]; // right outlet - no velocity change
}
}
else if (j == 0)
{
u[i * wu + j] = -u[i * wu + j + 1]; // bottom ghost - Final
}
else if (j == ny)
{
u[i * wu + j] = u[i * wu + j - 1]; // upper ghost - Final
}
} // end for j
} // end for i
int stepi1_end = clock();
stepTimingAccumulator["Step i1 - Set Horizontal Velocity Boundary Conditions"] += stepi1_end - stepi1_start;
//...............................................................................................
//.........................................................................................
// Step i2 - it can be parallelized
// boundary conditions for v velocity
int stepi2_start = clock();
for (int i = 0; i < nx + 1; i++)
{
for (int j = 0; j < ny; j++)
{
if (j == 0 && i > 0 && i < nx)
{
v[i * wv + j] = 0; // bottom wall - Final
}
else if (j == ny - 1 && i > 0 && i < nx)
{
v[i * wv + j] = v[i * wv + j - 1]; // upper wall - Final
}
else if (i == 0)
{
v[i * wv + j] = -v[(i + 1) * wv + j]; // left ghost (Left Wall and inlet has 0 vertical velocity) - Final
}
else if (i == nx)
{
if (j*dy < 2.0)
{
v[i * wv + j] = -v[(i - 1) * wv + j]; // right ghost (Right wall has 0 vertical velocity) - Final
}
else
{
v[i * wv + j] = v[(i - 1) * wv + j]; // right outlet - no velocity gradient
}
}
} // end for j
} // end for i
int stepi2_end = clock();
stepTimingAccumulator["Step i2 - Set Vertical Velocity Boundary Conditions"] += stepi2_end - stepi2_start;
//...............................................................................................
//...............................................................................................
int step1_start = clock();
//.........................................................................................
// Step 1 - it can be parallelized - Solve for intermediate velocity values
// u - us - vh - a
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny; j++)
{
float vh = 1.0 / 4.0*(v[i * wv + j] + v[(i + 1) * wv + j] + v[i * wv + j - 1] + v[(i + 1) * wv + j - 1]); // v hat
float a = u[i * wu + j] * 1.0 / (2.0*dx)*(u[(i + 1) * wu + j] - u[(i - 1) * wu + j]) + vh*1.0 / (2.0*dy)*(u[i * wu + j + 1] - u[i * wu + j - 1]); // a
us[i * wu + j] = dt / Re*(1.0 / pow(dx, 2.0)*(u[(i + 1) * wu + j] - 2.0*u[i * wu + j] + u[(i - 1) * wu + j]) + 1.0 / pow(dy, 2.0)*(u[i * wu + j + 1] - 2.0*u[i * wu + j] + u[i * wu + j - 1])) - a*dt + u[i * wu + j]; // u star
} // end for j
} // end for i
//..........................................................................................
// Step 1 - it can be parallelized
// v - vs - uh - b
for (int i = 1; i < nx; i++)
{
for (int j = 1; j < ny - 1; j++)
{
float uh = 1.0 / 4.0*(u[i * wu + j] + u[i * wu + j + 1] + u[(i - 1) * wu + j] + u[(i - 1) * wu + j + 1]);
float b = uh*1.0 / (2.0*dx)*(v[(i + 1) * wv + j] - v[(i - 1) * wv + j]) + v[i * wv + j] * 1.0 / (2.0*dy)*(v[i * wv + j + 1] - v[i * wv + j - 1]); // b
vs[i * wv + j] = dt / Re*(1.0 / pow(dx, 2.0)*(v[(i + 1) * wv + j] - 2.0*v[i * wv + j] + v[(i - 1) * wv + j]) + 1.0 / pow(dy, 2.0)*(v[i * wv + j + 1] - 2.0*v[i * wv + j] + v[i * wv + j - 1])) + dt / pow(Fr, 2.0)*(0.5*(T[i * wT + j] + T[i * wT + j - 1]) - 1) / (0.5*(T[i * wT + j] + T[i * wT + j - 1])) - b*dt + v[i * wv + j]; // v
} // end for j
} // end for i
//...........................................................................................
// vs and us on Boundary conditions
for (int i = 0; i < nx; i++)
{
us[i * wu + 0] = -us[i * wu + 1]; // bottom ghost - Final
} // end for i
//...........................................................................................
for (int j = 0; j < ny + 1; j++)
{
if (j*dy < 2.0)
{
us[0 * wu + j] = 0; // left wall - Final
us[(nx - 1) * wu + j] = 0; // right wall - Final
}
else
{
us[0 * wu + j] = u_wind; // left inlet - Final
}
}
//...........................................................................................
for (int j = 0; j < ny; j++)
{
vs[0 * wv + j] = -vs[1 * wv + j]; // left ghost (Both wall and inlet have 0 vs) - Final
if (j*dy < 2.0)
{
vs[nx * wv + j] = -vs[(nx - 1) * wv + j]; // right ghost (only the right wall) - Final
}
else
{
vs[nx * wv + j] = vs[(nx - 1) * wv + j]; // right outlet - no flux
}
}
//............................................................................................
for (int i = 0; i < nx + 1; i++)
{
vs[i * wv + 0] = 0; // Bottom wall - Final
} // end for i
//............................................................................................
int step1_end = clock();
stepTimingAccumulator["Step 1 - Solve for intermediate velocities"] += step1_end - step1_start;
//...............................................................................................
// Step 2 - Parallel GPU version
// Poisson equation for pressure
int step2_start = clock();
// Cuda set up
int p_xlength = nx;
int p_ylength = ny;
float *ptr_us = thrust::raw_pointer_cast(&us_d[0]);
float *ptr_vs = thrust::raw_pointer_cast(&vs_d[0]);
float *ptr_p = thrust::raw_pointer_cast(&p_d[0]);
float *ptr_p_old = thrust::raw_pointer_cast(&p_old[0]);
float *ptr_abs = thrust::raw_pointer_cast(&abs_d[0]);
float *ptr_p_ref = thrust::raw_pointer_cast(&p_ref[0]);
float error = 1.0; iter = 0;
// float diffp = 0;
us_d = us;
vs_d = vs;
cout << t << endl;
// Begin Jacobi loop
while (error > eps){
gpuErrchk( cudaPeekAtLastError() );
//error = 0.0;
// p_d = p;
p_old = p_d;
// SOR pressure solver
PressureSolve<<< dim3( (ny+1)/BLOCK_SIZE + 1, (nx+1)/BLOCK_SIZE + 1, 1) , dim3(BLOCK_SIZE,BLOCK_SIZE,1)>>>(ptr_p, ptr_p_old, ptr_abs, ptr_us, ptr_vs, p_xlength, p_ylength, wp, wu, wv, dx, dy, dt);
cudaDeviceSynchronize();
// p = p_d;
p_ref = p_d;
error = thrust::reduce(abs_d.begin(), abs_d.end());
/* for (int i = 1; i < nx; i++)
{
for (int j = 1; j < ny; j++)
{
diffp = pow((p[i * wp + j] - p_old[i * wp + j]), 2.0);
error = error + diffp;
} // end for j
} // end for i
*/
/* for(int i = 0; i < nx + 1; ++i)
{
for(int j = 0; j < ny + 1; ++j)
{
file_p_before << p[i * wp + j] << "\t";
}
file_p_before << endl;
}
*/
// Apply boundary conditions
PressureBC<<< dim3( (ny+1)/BLOCK_SIZE + 1, (nx+1)/BLOCK_SIZE + 1, 1) , dim3(BLOCK_SIZE,BLOCK_SIZE,1)>>>(ptr_p, ptr_p_ref, nx, ny, dy, wp);
cudaDeviceSynchronize();
// p = p_d;
//file_p_after << p.size() << endl;
/* for(int i = 0; i < nx + 1; ++i)
{
for(int j = 0; j < ny + 1; ++j)
{
file_p_after << p[i * wp + j] << "\t";
}
file_p_after << endl;
} */
error = pow(error, 0.5);
iter = iter + 1;
if (iter > maxiter){
break;
}
} // end while eps
p = p_d;
/*
break;
error = pow(error, 0.5);
iter = iter + 1;
if (iter == maxiter){
break;
}
} // end while eps
*/
int step2_end = clock();
stepTimingAccumulator["Step 2 - Solve for pressure until tolerance or max iterations"] += step2_end - step2_start;
//.................................................................................................
// Step 3 - It can be parallelized
// velocity update - projection method
int step3_start = clock();
// u
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny; j++)
{
uold[i * wu + j] = u[i * wu + j];
u[i * wu + j] = us[i * wu + j] - dt / dx*(p[(i + 1) * wp + j] - p[i * wp + j]);
} // end for j
} // end for i
//................................................
// v
for (int i = 1; i < nx; i++)
{
for (int j = 1; j < ny - 1; j++)
{
vold[i * wv + j] = v[i * wv + j];
v[i * wv + j] = vs[i * wv + j] - dt / dy*(p[i * wp + j + 1] - p[i * wp + j]);
} // end for j
} // end for i
int step3_end = clock();
stepTimingAccumulator["Step 3 - Velocity Update"] += step3_end - step3_start;
//...............................................................................................
//...............................................................................................
// Step 4 - It can be parallelized
// Solving for temperature
int step4_start = clock();
thrust::device_vector<float> d_T = T;
thrust::device_vector<float> d_Told = Told;
thrust::device_vector<float> d_u = u;
thrust::device_vector<float> d_v = v;
int gridsize_x = nx/BLOCK_SIZE + 1;
int gridsize_y = ny/BLOCK_SIZE + 1;
dim3 dimgrid(gridsize_x, gridsize_y, 1); // The grid has #gridsize blocks in x and 1 block in y and 1 block in z direction
dim3 dimblock(BLOCK_SIZE, BLOCK_SIZE, 1);
float *ptr_u = thrust::raw_pointer_cast(&d_u[0]);
float *ptr_v = thrust::raw_pointer_cast(&d_v[0]);
float *ptr_T = thrust::raw_pointer_cast(&d_T[0]);
float *ptr_Told = thrust::raw_pointer_cast(&d_Told[0]);
Temperature_solver<<<dimgrid, dimblock>>>(nx, ny, wu, wv, wT, dx, dy, dt, Re, Pr, ptr_u, ptr_v, ptr_Told, ptr_T);
thrust::copy(d_Told.begin(), d_Told.end(), Told.begin());
thrust::copy(d_T.begin(), d_T.end(), T.begin());
int step4_end = clock();
stepTimingAccumulator["Step 4 - Solving for temperature"] += step4_end - step4_start;
//................................................................................................
//...............................................................................................
// Step i3 - Initializing boundary conditions for temperature
// boundary conditions for Temperature
int stepi3_start = clock();
for (int i = 0; i < nx + 1; i++)
{
for (int j = 0; j < ny + 1; j++)
{
if (j == 0)
{
T[i * wT + j] = T[i * wT + j + 1]; // bottom wall - Insulated - no flux - Final
}
else if (j == ny)
{
T[i * wT + j] = 2.0*(T_0) / T_amb - T[i * wT + j - 1]; // upper boundary - lid with ambient temperature (as air) - Final
}
else if (i == 0)
{
if (j*dy < 2.0)
{
T[i * wT + j] = 2.0*T_L / T_amb - T[(i + 1) * wT + j]; // left wall at T_L - Constant Temperature - Final
}
else
{
T[i * wT + j] = 2.0*T_0 / T_amb - T[(i + 1) * wT + j]; // left inlet at T_0 (initial temperature) - Final
}
}
else if (i == nx)
{
if (j*dy < 2.0)
{
T[i * wT + j] = 2.0*T_R / T_amb - T[(i - 1) * wT + j]; // right wall at T_R - Final
}
}
} // end for j
} // end for i
int stepi3_end = clock();
stepTimingAccumulator["Step i3 - Initializing boundary conditions for temperature"] += stepi3_end - stepi3_start;
//...............................................................................................
//...............................................................................................
// Step 5 - Checking if solution reached steady state
// Checking the steady state condition
int step5_start = clock();
float TV, diffv; TV = 0;
for (int i = 1; i < nx - 1; i++)
{
for (int j = 1; j < ny - 2; j++)
{
diffv = v[i * wv + j] - vold[i * wv + j];
TV = TV + pow(pow(diffv, 2), 0.5);
} // end for j
} // end for i
TV = TV / ((nx - 1)*(ny - 2));
if (TV < st && error < eps)
{
cout << "Steady state time = " << t << " (s) " << endl;
break;
}
counter = counter + 1;
if (fmod(counter, 10) == 0 || counter == 1)
{
//cout << "" << endl;
//cout << "Column" << setw(30) << "time(s)" << setw(30) << "Iterations on Pressure" << setw(30) << "Pressure Residual" << setw(30) << "Total Variance" << endl;
} // end if
int step5_end = clock();
stepTimingAccumulator["Step 5 - Check for steady state"] += step5_end - step5_start;
//...............................................................................................
//cout << column << setw(30) << t << setw(30) << iter << setw(30) << error << setw(30) << TV << endl;
g << column << setw(30) << t << setw(30) << iter << setw(30) << error << setw(30) << TV << endl;
t = t + dt;
column = column + 1;
} // end while time
//........................................................................................................
// Step 6
// Co-locate the staggered grid points
int step6_start = clock();
for (int i = 0; i < nx; i++)
{
for (int j = 0; j < ny; j++)
{
vc[i * wc + j] = 1.0 / 2.0*(v[(i + 1) * wv + j] + v[i * wv + j]);
pc[i * wc + j] = 1.0 / 4.0*(p[i * wp + j] + p[(i + 1) * wp + j] + p[i * wp + j + 1] + p[(i + 1) * wp + j + 1]);
uc[i * wc + j] = 1.0 / 2.0*(u[i*wu + j] + u[i * wu + j + 1]);
om[i * wc + j] = 1.0 / dx*(v[(i + 1) * wv + j] - v[i * wv + j]) - 1.0 / dy*(u[i * wu + j + 1] - u[i * wu + j]);
Tc[i * wc + j] = 1.0 / 4.0*(T[i * wT + j] + T[(i + 1) * wT + j] + T[i * wT + j + 1] + T[(i + 1) * wT + j + 1]);
} // end for j
} // end for i
//........................................................................................................
int step6_end = clock();
stepTimingAccumulator["Step 6 - Co-locate staggered grid points"] += step6_end - step6_start;
// Steady state results
for (int j = 0; j < ny; j++)
{
for (int i = 0; i < nx; i++)
{
f << setw(15) << t - dt << setw(15) << i*dx << setw(15) << j*dy << setw(15) << uc[i * wc + j] << setw(15) << vc[i * wc + j] << setw(15) << pc[i * wc + j] << setw(15) << Tc[i * ny + j] * T_amb - 273.15 << setw(15) << om[i * wc + j] << endl;
} // end for i
} // end for j
//.........................................................................................................
float end_clock = clock();
cout << "CPU time = " << (end_clock - start_clock) / CLOCKS_PER_SEC << " (s)" << endl;
//cout << "Re = " << Re << endl;
//cout << "Fr = " << Fr << endl;
for (auto it = stepTimingAccumulator.begin(); it != stepTimingAccumulator.end(); it++)
{
float seconds = (float)it->second / CLOCKS_PER_SEC;
std::cout << it->first << "\t" << seconds << endl;
}
}
catch(thrust::system_error e)
{
std::cerr << e.what() << std::endl;
}
return 0;
} // end main
|
f5501f760ef05d119ce835637d3adc142ea2aed4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// You can use any other block size you wish.
#define BLOCK_SIZE 512
#define BLOCK_DUB 1024
//Works for power of 2 elements
#define DEFAULT_NUM_ELEMENTS 1048576
#define MAX_RAND 2
typedef float REAL;
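// Work-efficient (Blelloch-style) exclusive scan: prescan performs the up-sweep
// phase, reducing pairs of strided elements in shared memory and writing the
// partial sums back to global memory in place; downsweep later zeroes the last
// element and walks back down the tree, converting the partial sums into an
// exclusive prefix sum.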
__global__ void prescan(REAL *odata, REAL *idata, int num)
{
volatile __shared__ REAL temp[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
int ofs = 1;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
//Kind of ugly. Load to shared memory
if (top < DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
temp[2*ti+1] = idata[top];
} else {
temp[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
} else {
temp[2*ti] = 0;
}
}
//Perform the actual reduction
for (int i = BLOCK_SIZE; i>0; i>>=1)
{
__syncthreads();
if (ti<i)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
temp[bi] += temp[ai];
}
ofs <<= 1;
}
__syncthreads();
//Kind of ugly. Write back to global.
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
idata[top] = temp[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
}
}
}
__global__ void downsweep(REAL *odata, REAL *idata, int num, int last, int first)
{
volatile __shared__ REAL tempd[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
//int ofs = BLOCK_DUB;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
//Kind of ugly. Load to shared memory
if (top < DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
tempd[2*ti+1] = idata[top];
} else {
tempd[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
} else {
tempd[2*ti] = 0;
}
}
//Set the last item to 0
if (first == 1)
{
if (bid == gridDim.x-1 && ti == 0)
{
tempd[num-1] = 0;
}
}
int ofs = BLOCK_DUB;
//Perform the actual reduction.
int cap = num;
if (num > BLOCK_DUB) cap = BLOCK_DUB;
for (int j = 1; j<cap; j<<=1)
{
ofs >>= 1;
__syncthreads();
if (ti < j)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
REAL temp2 = tempd[ai];
tempd[ai] = tempd[bi];
tempd[bi] += temp2;
}
}
__syncthreads();
//Kind of ugly. Write back to global.
if (last == 1) {
if (top < DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
odata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
}
}
} else {
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
idata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
}
}
}
}
// **===-------- Modify the body of this function -----------===**
// You may need to make multiple kernel calls.
void prescanArray(REAL *outArray, REAL *inArray, int numElements)
{
//Use kernel to compute the reduction
int blocksx, blocksy, blocks;
int threads = BLOCK_SIZE;
int nestElements = numElements;
int lastElements;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid(blocksx,blocksy);
while(nestElements > 1)
{
// Recursive implementation to compute the reduction
hipLaunchKernelGGL(( prescan) , dim3(dimGrid),dim3(threads), 0, 0, outArray, inArray, nestElements);
lastElements = nestElements;
nestElements = blocks;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dimGrid = dim3(blocksx, blocksy); // update the launch grid for the next, smaller pass
}
//Now move on to the downsweep
nestElements = lastElements;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid2(blocksx,blocksy);
int first = 1;
while(nestElements <= DEFAULT_NUM_ELEMENTS)
{
//printf("%d \n",nestElements);
// Recursive implementation to compute the downsweep
if (nestElements == DEFAULT_NUM_ELEMENTS) {
hipLaunchKernelGGL(( downsweep) , dim3(dimGrid2),dim3(threads), 0, 0, outArray, inArray, nestElements, 1, first);
nestElements = DEFAULT_NUM_ELEMENTS+1; // past the limit: forces the while loop to stop after this full-size pass
} else {
hipLaunchKernelGGL(( downsweep) , dim3(dimGrid2),dim3(threads), 0, 0, outArray, inArray, nestElements, 0, first);
nestElements = BLOCK_DUB*nestElements; // next pass covers BLOCK_DUB times as many elements
}
first = 0;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dimGrid2 = dim3(blocksx, blocksy); // update the launch grid for the next, larger pass
}
//downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements);
}
// **===-----------------------------------------------------------===**
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const REAL* reference, const REAL* data,
const unsigned int len);
extern "C"
void computeGold( REAL* reference, REAL* idata, const unsigned int len);
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
float device_time;
float host_time;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( REAL) * num_elements;
REAL* h_data = (REAL*) malloc( mem_size);
switch(argc-1)
{
case 0:
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
default:
num_elements = atoi(argv[1]);
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
hipEvent_t time_start;
hipEvent_t time_end;
hipEventCreate(&time_start);
hipEventCreate(&time_end);
// compute reference solution
REAL* reference = (REAL*) malloc( mem_size);
// cutStartTimer(timer);
hipEventRecord(time_start, 0);
computeGold( reference, h_data, num_elements);
hipEventRecord(time_end, 0);
hipEventSynchronize(time_end);
hipEventElapsedTime(&host_time, time_start, time_end);
// cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", host_time);
// allocate device memory input and output arrays
REAL* d_idata = NULL;
REAL* d_odata = NULL;
hipMalloc( (void**) &d_idata, mem_size);
hipMalloc( (void**) &d_odata, mem_size);
// copy host memory to device input array
hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice);
// initialize all the other device arrays to be safe
hipMemcpy( d_odata, h_data, mem_size, hipMemcpyHostToDevice);
// **===-------- Allocate data structure here -----------===**
// preallocBlockSums(num_elements);
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
//prescanArray(d_odata, d_idata, 16);
// Run the prescan
// CUT_SAFE_CALL(cutCreateTimer(&timer));
// cutStartTimer(timer);
hipEventRecord(time_start, 0);
// **===-------- Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, num_elements);
// **===-----------------------------------------------------------===**
hipDeviceSynchronize();
hipEventRecord(time_end, 0);
hipEventSynchronize(time_end);
hipEventElapsedTime(&device_time, time_start, time_end);
hipEventDestroy(time_start);
hipEventDestroy(time_end);
// cutStopTimer(timer);
printf("CUDA Processing time: %g (ms)\n", device_time);
// device_time = cutGetTimerValue(timer);
// printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Deallocate data structure here -----------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
hipMemcpy( h_data, d_odata, sizeof(REAL) * num_elements,
hipMemcpyDeviceToHost);
// Check if the result is equivalent to the expected solution
unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7);
printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED");
// cleanup memory
free( h_data);
free( reference);
hipFree( d_odata);
hipFree( d_idata);
}
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err) {
int i;
int diff_count = 0;
for (i = 0; i < num_elements; i++) {
REAL diff = fabs(reference[i] - h_data[i]);
REAL denominator = 1.f;
if (denominator < fabs(reference[i])) {
denominator = fabs(reference[i]);
}
if (!(diff / denominator < err)) {
diff_count ++;
}
}
if (diff_count > 0) {
printf("Number of difference: %d\n", diff_count);
return 0;
} else {
return 1;
}
}
| f5501f760ef05d119ce835637d3adc142ea2aed4.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// You can use any other block size you wish.
#define BLOCK_SIZE 512
#define BLOCK_DUB 1024
//Works for power of 2 elements
#define DEFAULT_NUM_ELEMENTS 1048576
#define MAX_RAND 2
typedef float REAL;
__global__ void prescan(REAL *odata, REAL *idata, int num)
{
volatile __shared__ REAL temp[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
int ofs = 1;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
//Kind of ugly. Load to shared memory
if (top < DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
temp[2*ti+1] = idata[top];
} else {
temp[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
temp[2*ti] = idata[2*index*mult+mult-1];
} else {
temp[2*ti] = 0;
}
}
//Perform the actual reduction
for (int i = BLOCK_SIZE; i>0; i>>=1)
{
__syncthreads();
if (ti<i)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
temp[bi] += temp[ai];
}
ofs <<= 1;
}
__syncthreads();
//Kind of ugly. Write back to global.
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
idata[top] = temp[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = temp[2*ti];
}
}
}
__global__ void downsweep(REAL *odata, REAL *idata, int num, int last, int first)
{
volatile __shared__ REAL tempd[BLOCK_DUB];
//Set up some convenient variables
int ti = threadIdx.x;
int bid = blockIdx.x + blockIdx.y*gridDim.x;
int index = bid*blockDim.x + ti;
//int ofs = BLOCK_DUB;
int mult = DEFAULT_NUM_ELEMENTS/num;
int top = mult*(2*(index+1))-1;
//Kind of ugly. Load to shared memory
if (top < DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
tempd[2*ti+1] = idata[top];
} else {
tempd[2*ti+1] = 0;
if (top == DEFAULT_NUM_ELEMENTS)
{
tempd[2*ti] = idata[2*index*mult+mult-1];
} else {
tempd[2*ti] = 0;
}
}
//Set the last item to 0
if (first == 1)
{
if (bid == gridDim.x-1 && ti == 0)
{
tempd[num-1] = 0;
}
}
int ofs = BLOCK_DUB;
//Perform the actual reduction.
int cap = num;
if (num > BLOCK_DUB) cap = BLOCK_DUB;
for (int j = 1; j<cap; j<<=1)
{
ofs >>= 1;
__syncthreads();
if (ti < j)
{
int ai = ofs*(2*ti+1)-1;
int bi = ofs*(2*ti+2)-1;
REAL temp2 = tempd[ai];
tempd[ai] = tempd[bi];
tempd[bi] += temp2;
}
}
__syncthreads();
//Kind of ugly. Write back to global.
if (last == 1) {
if (top < DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
odata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
odata[2*index*mult+mult-1] = tempd[2*ti];
}
}
} else {
if (top < DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
idata[top] = tempd[2*ti+1];
} else {
if (top == DEFAULT_NUM_ELEMENTS)
{
idata[2*index*mult+mult-1] = tempd[2*ti];
}
}
}
}
// **===-------- Modify the body of this function -----------===**
// You may need to make multiple kernel calls.
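// prescanArray chains the two kernels: repeated up-sweep passes shrink the number
// of active partial sums by a factor of BLOCK_DUB (1024) per pass, then the
// downsweep passes replay those levels in reverse order, growing the span by the
// same factor each time until the full array has been scanned.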
void prescanArray(REAL *outArray, REAL *inArray, int numElements)
{
//Use kernel to compute the reduction
int blocksx, blocksy, blocks;
int threads = BLOCK_SIZE;
int nestElements = numElements;
int lastElements;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid(blocksx,blocksy);
while(nestElements > 1)
{
// Recursive implementation to compute the reduction
prescan <<<dimGrid,threads>>> (outArray, inArray, nestElements);
lastElements = nestElements;
nestElements = blocks;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dimGrid = dim3(blocksx, blocksy); // update the launch grid for the next, smaller pass
}
//Now move on to the downsweep
nestElements = lastElements;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dim3 dimGrid2(blocksx,blocksy);
int first = 1;
while(nestElements <= DEFAULT_NUM_ELEMENTS)
{
//printf("%d \n",nestElements);
// Recursive implementation to compute the downsweep
if (nestElements == DEFAULT_NUM_ELEMENTS) {
downsweep <<<dimGrid2,threads>>> (outArray, inArray, nestElements, 1, first);
nestElements = DEFAULT_NUM_ELEMENTS+1; // past the limit: forces the while loop to stop after this full-size pass
} else {
downsweep <<<dimGrid2,threads>>> (outArray, inArray, nestElements, 0, first);
nestElements = BLOCK_DUB*nestElements; // next pass covers BLOCK_DUB times as many elements
}
first = 0;
blocksx = (nestElements+BLOCK_DUB-1)/(threads*2);
blocks = blocksx;
blocksy = 1;
if (blocksx > 65535) {
blocksy = (blocksx+65534)/65535;
blocksx = 65535;
}
dimGrid2 = dim3(blocksx, blocksy); // update the launch grid for the next, larger pass
}
//downsweep <<<1,BLOCK_SIZE>>>(outArray, inArray, numElements);
}
// **===-----------------------------------------------------------===**
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
extern "C"
unsigned int compare( const REAL* reference, const REAL* data,
const unsigned int len);
extern "C"
void computeGold( REAL* reference, REAL* idata, const unsigned int len);
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
float device_time;
float host_time;
int num_elements = 0; // Must support large, non-power-of-2 arrays
// allocate host memory to store the input data
unsigned int mem_size = sizeof( REAL) * num_elements;
REAL* h_data = (REAL*) malloc( mem_size);
switch(argc-1)
{
case 0:
num_elements = DEFAULT_NUM_ELEMENTS;
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
default:
num_elements = atoi(argv[1]);
// allocate host memory to store the input data
mem_size = sizeof( REAL) * num_elements;
h_data = (REAL*) malloc( mem_size);
// initialize the input data on the host
for( unsigned int i = 0; i < num_elements; ++i)
{
// h_data[i] = 1.0f;
h_data[i] = (int)(rand() % MAX_RAND);
}
break;
}
cudaEvent_t time_start;
cudaEvent_t time_end;
cudaEventCreate(&time_start);
cudaEventCreate(&time_end);
// compute reference solution
REAL* reference = (REAL*) malloc( mem_size);
// cutStartTimer(timer);
cudaEventRecord(time_start, 0);
computeGold( reference, h_data, num_elements);
cudaEventRecord(time_end, 0);
cudaEventSynchronize(time_end);
cudaEventElapsedTime(&host_time, time_start, time_end);
// cutStopTimer(timer);
printf("\n\n**===-------------------------------------------------===**\n");
printf("Processing %d elements...\n", num_elements);
printf("Host CPU Processing time: %f (ms)\n", host_time);
// allocate device memory input and output arrays
REAL* d_idata = NULL;
REAL* d_odata = NULL;
cudaMalloc( (void**) &d_idata, mem_size);
cudaMalloc( (void**) &d_odata, mem_size);
// copy host memory to device input array
cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice);
// initialize all the other device arrays to be safe
cudaMemcpy( d_odata, h_data, mem_size, cudaMemcpyHostToDevice);
// **===-------- Allocate data structure here -----------===**
// preallocBlockSums(num_elements);
// **===-----------------------------------------------------------===**
// Run just once to remove startup overhead for more accurate performance
// measurement
//prescanArray(d_odata, d_idata, 16);
// Run the prescan
// CUT_SAFE_CALL(cutCreateTimer(&timer));
// cutStartTimer(timer);
cudaEventRecord(time_start, 0);
// **===-------- Modify the body of this function -----------===**
prescanArray(d_odata, d_idata, num_elements);
// **===-----------------------------------------------------------===**
cudaThreadSynchronize();
cudaEventRecord(time_end, 0);
cudaEventSynchronize(time_end);
cudaEventElapsedTime(&device_time, time_start, time_end);
cudaEventDestroy(time_start);
cudaEventDestroy(time_end);
// cutStopTimer(timer);
printf("CUDA Processing time: %g (ms)\n", device_time);
// device_time = cutGetTimerValue(timer);
// printf("Speedup: %fX\n", host_time/device_time);
// **===-------- Deallocate data structure here -----------===**
// deallocBlockSums();
// **===-----------------------------------------------------------===**
// copy result from device to host
cudaMemcpy( h_data, d_odata, sizeof(REAL) * num_elements,
cudaMemcpyDeviceToHost);
// Check if the result is equivalent to the expected soluion
unsigned int result_regtest = cutComparef( reference, h_data, num_elements, 1e-7);
printf( "Test %s\n", (0 == result_regtest) ? "FAILED" : "PASSED");
// cleanup memory
free( h_data);
free( reference);
cudaFree( d_odata);
cudaFree( d_idata);
}
unsigned int cutComparef( REAL *reference, REAL *h_data, int num_elements, REAL err) {
int i;
int diff_count = 0;
for (i = 0; i < num_elements; i++) {
REAL diff = fabs(reference[i] - h_data[i]);
REAL denominator = 1.f;
if (denominator < fabs(reference[i])) {
denominator = fabs(reference[i]);
}
if (!(diff / denominator < err)) {
diff_count ++;
}
}
if (diff_count > 0) {
printf("Number of difference: %d\n", diff_count);
return 0;
} else {
return 1;
}
}
|
a7295fdb33996acfa5744d5ed945da7f88df429d.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <ctime>
#include <chrono>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <stb/stb_image_write.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <Color.cuh>
#include <Ray.cuh>
#include <Camera.cuh>
#include <HittableList.cuh>
#include <Material.cuh>
#include <AARect.cuh>
#include <helperUtils.cuh>
#include <hiprand/hiprand_kernel.h>
using namespace TinyRT;
constexpr int objNum = 6;
__device__ void cornellBox(Hittable** hittablePtrList, Texture** extraTexturePtrList) {
size_t objIdx = 0;
size_t textureIdx = 0;
Texture* redTex = new SolidColor(0.65f, 0.05f, 0.05f);
extraTexturePtrList[textureIdx++] = redTex;
Texture* whiteTex = new SolidColor(0.73f, 0.73f, 0.73f);
extraTexturePtrList[textureIdx++] = whiteTex;
Texture* greenTex = new SolidColor(0.12f, 0.45f, 0.15f);
extraTexturePtrList[textureIdx++] = greenTex;
Texture* lightTex = new SolidColor(15.0f, 15.0f, 15.0f);
extraTexturePtrList[textureIdx] = lightTex;
hittablePtrList[objIdx++] = new YZRect(0.0f, 555.0f, 0.0f, 555.0f, 555.0f, new Lambertian(greenTex));
hittablePtrList[objIdx++] = new YZRect(0.0f, 555.0f, 0.0f, 555.0f, 0.0f, new Lambertian(redTex));
hittablePtrList[objIdx++] = new XZRect(213.0f, 343.0f, 227.0f, 332.0f, 554.0f, new DiffuseLight(lightTex));
hittablePtrList[objIdx++] = new XZRect(0.0f, 555.0f, 0.0f, 555.0f, 0.0f, new Lambertian(whiteTex));
hittablePtrList[objIdx++] = new XZRect(0.0f, 555.0f, 0.0f, 555.0f, 555.0f, new Lambertian(whiteTex));
hittablePtrList[objIdx] = new XYRect(0.0f, 555.0f, 0.0f, 555.0f, 555.0f, new Lambertian(whiteTex));
}
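// rayColor is an iterative (loop-based) path tracer: attenuation and emitted light
// are accumulated along up to maxDepth bounces; a miss returns the accumulated
// emission plus the attenuated background, and running out of bounces returns black.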
__device__ Color rayColor(const Ray& r, const Color& background, Hittable** hittablePtr, const int maxDepth, hiprandState_t* const randStatePtr) {
Ray curRay = r;
Vec3 curAttenuation(1.0f, 1.0f, 1.0f);
Color curEmitted(0.0f, 0.0f, 0.0f);
for (size_t i = 0; i < maxDepth; ++i) {
HitRecord rec;
if ((*hittablePtr)->hit(curRay, 0.001f, M_FLOAT_INFINITY, rec)) {
Ray scattered;
Vec3 attenuation;
Color emitted = rec.matPtr->emitted(rec.u, rec.v, rec.point);
if (rec.matPtr->scatter(curRay, rec, attenuation, scattered, randStatePtr)) {
curRay = scattered;
curEmitted += curAttenuation * emitted;
curAttenuation *= attenuation;
} else {
return curEmitted + curAttenuation * emitted;
}
} else {
return curEmitted + curAttenuation * background;
}
}
// exceed max depth
return { 0.0f, 0.0f, 0.0f };
}
__global__ void renderInit(const int imageWidth, const int imageHeight, hiprandState_t* const randStateList, unsigned int seed) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if ((col >= imageWidth) || (row >= imageHeight))
return;
const int idx = row * imageWidth + col;
// init random numbers for anti-aliasing
// each thread gets its own special seed, fixed sequence number, fixed offset
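	// Using a distinct per-pixel seed with sequence number 0 avoids the costly sequence skip-ahead
	// that curand/hiprand initialization performs for large sequence numbers; the weaker statistical
	// independence between pixels is generally acceptable for this kind of sampling.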
hiprand_init(seed + idx, 0, 0, &randStateList[idx]);
}
__global__ void render(
Color* const pixelBuffer,
const int imageWidth,
const int imageHeight,
Camera** const camera,
const Color background,
hiprandState_t* const pixelRandStateList,
const int samplesPerPixel,
const int maxDepth,
Hittable** const hittablePtrList) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col >= imageWidth || row >= imageHeight)
return;
const int idx = row * imageWidth + col;
hiprandState_t randState = pixelRandStateList[idx];
Color pixelColor(0.0f, 0.0f, 0.0f);
for (size_t s = 0; s < samplesPerPixel; ++s) {
const auto u = (static_cast<float>(col) + randomFloat(&randState)) / static_cast<float>(imageWidth - 1);
const auto v = 1.0 - (static_cast<float>(row) + randomFloat(&randState)) / static_cast<float>(imageHeight - 1);
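			// v is flipped (1 - row/(H-1)) so buffer row 0 maps to the top of the image, matching
			// stbi_write_png's top-to-bottom row order (assuming the usual v-up camera convention).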
const Ray r = (*camera)->getRay(u, v, &randState);
pixelColor += rayColor(r, background, hittablePtrList, maxDepth, &randState);
}
pixelColor /= samplesPerPixel;
pixelColor.gammaCorrect();
pixelBuffer[idx] = pixelColor;
}
__global__ void createInit(hiprandState_t* const randStatePtr, unsigned int seed) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
// init a random number for sphere generating
hiprand_init(seed, 0, 0, randStatePtr);
}
}
__global__ void createWorld(Camera** camera, float aspectRatio, Hittable** hittablePtrList, Hittable** hittableWorldObjListPtr, Texture** extraTexturePtrList) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
const Point3 lookFrom(278.0, 278.0, -800.0f);
const Point3 lookAt(278.0f, 278.0f, 0.0f);
const Vec3 vUp(0.0f, 1.0f, 0.0f);
const float vFov = 40.0f;
const float aperture = 0.0f;
const float distToFocus = 10.0f;
const float time0 = 0.0f;
const float time1 = 0.0f;
*camera = new Camera(lookFrom, lookAt, vUp, vFov, aspectRatio, aperture, distToFocus, time0, time1);
cornellBox(hittablePtrList, extraTexturePtrList);
*hittableWorldObjListPtr = new HittableList(hittablePtrList, objNum);
}
}
__global__ void freeWorld(Camera** camera, Hittable** hittableList, size_t hittableNum, Hittable** hittableWorldObjList, Texture** extraTexturePtrList, size_t extraTexturePtrNum) {
delete* camera;
for (int i = 0; i < hittableNum; ++i) {
// delete random material instances
delete hittableList[i]->matPtr();
// delete object instances
delete hittableList[i];
}
for (int i = 0; i < extraTexturePtrNum; ++i) {
// delete extra texture instances
delete extraTexturePtrList[i];
}
delete* hittableWorldObjList;
}
int main() {
/* image config */
constexpr float aspectRatio = 1.0f;
constexpr int imageWidth = 800;
constexpr int imageHeight = static_cast<int>(imageWidth / aspectRatio);
constexpr int samplesPerPixel = 200;
constexpr int maxDepth = 50;
const Color background(0.0f, 0.0f, 0.0f);
/* image output file */
const std::string fileName("output.png");
/* thread block config */
constexpr int threadBlockWidth = 16;
constexpr int threadBlockHeight = 16;
// preparation
constexpr int channelNum = 3; // rgb
constexpr int pixelNum = imageWidth * imageHeight;
constexpr size_t pixelBufferBytes = pixelNum * sizeof(Color);
constexpr size_t randStateListBytes = pixelNum * sizeof(hiprandState_t);
// allocate memory for pixel buffer
const auto pixelBufferPtr = cudaManagedUniquePtr<Color>(pixelBufferBytes);
// allocate random state
const auto seed = static_cast<unsigned int>(std::chrono::system_clock::now().time_since_epoch().count());
const auto objRandStatePtr = cudaUniquePtr<hiprandState_t>(sizeof(hiprandState_t));
const auto pixelRandStateListPtr = cudaUniquePtr<hiprandState_t>(randStateListBytes);
// create world of hittable objects and the camera
const auto cameraPtr = cudaUniquePtr<Camera*>(sizeof(Camera*));
const auto hittablePtrList = cudaUniquePtr<Hittable*>(objNum * sizeof(Hittable*));
const auto hittableWorldObjListPtr = cudaUniquePtr<Hittable*>(sizeof(Hittable*));
	constexpr size_t extraTexturePtrNum = 4;	// cornellBox stores four SolidColor textures into this array; sizing it at 1 under-allocates the device buffer
const auto extraTexturePtrList = cudaUniquePtr<Texture*>(extraTexturePtrNum * sizeof(Texture*));
hipLaunchKernelGGL(( createInit), dim3(1), dim3(1), 0, 0, objRandStatePtr.get(), seed);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
hipLaunchKernelGGL(( createWorld), dim3(1), dim3(1), 0, 0, cameraPtr.get(), aspectRatio, hittablePtrList.get(), hittableWorldObjListPtr.get(), extraTexturePtrList.get());
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// start timer
const clock_t start = clock();
const dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1);
const dim3 threadDim(threadBlockWidth, threadBlockHeight);
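	// Note: this host-side `blockDim` is the *grid* size (blocks per axis); the +1 guarantees the
	// 16x16 tiles cover the whole image (at the cost of one extra block row/column when the size
	// divides evenly), and out-of-range threads simply return inside the kernels.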
// render init
hipLaunchKernelGGL(( renderInit), dim3(blockDim), dim3(threadDim), 0, 0, imageWidth, imageHeight, pixelRandStateListPtr.get(), seed);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// render the image into buffer
hipLaunchKernelGGL(( render), dim3(blockDim), dim3(threadDim), 0, 0,
pixelBufferPtr.get(),
imageWidth,
imageHeight,
cameraPtr.get(),
background,
pixelRandStateListPtr.get(),
samplesPerPixel,
maxDepth,
hittableWorldObjListPtr.get()
);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
// stop timer
const clock_t stop = clock();
// measure rendering time
const auto renderingMillisecond = stop - start;
// other image writer arguments
constexpr int imageSize = pixelNum * channelNum;
constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char);
const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]);
// store the pixel data into writing buffer as 8bit color
for (int pixelIdx = 0, dataIdx = 0; pixelIdx < pixelNum; ++pixelIdx) {
const Color color = pixelBufferPtr.get()[pixelIdx];
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.r8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.g8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.b8bit());
}
// print rendering time
std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl;
// write pixel data to output file
stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes);
// free world of hittable objects
hipLaunchKernelGGL(( freeWorld), dim3(1), dim3(1), 0, 0, cameraPtr.get(), hittablePtrList.get(), objNum, hittableWorldObjListPtr.get(), extraTexturePtrList.get(), extraTexturePtrNum);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
return 0;
} | a7295fdb33996acfa5744d5ed945da7f88df429d.cu | #include <iostream>
#include <fstream>
#include <ctime>
#include <chrono>
#define STB_IMAGE_WRITE_IMPLEMENTATION
#include <stb/stb_image_write.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <Color.cuh>
#include <Ray.cuh>
#include <Camera.cuh>
#include <HittableList.cuh>
#include <Material.cuh>
#include <AARect.cuh>
#include <helperUtils.cuh>
#include <curand_kernel.h>
using namespace TinyRT;
constexpr int objNum = 6;
__device__ void cornellBox(Hittable** hittablePtrList, Texture** extraTexturePtrList) {
size_t objIdx = 0;
size_t textureIdx = 0;
Texture* redTex = new SolidColor(0.65f, 0.05f, 0.05f);
extraTexturePtrList[textureIdx++] = redTex;
Texture* whiteTex = new SolidColor(0.73f, 0.73f, 0.73f);
extraTexturePtrList[textureIdx++] = whiteTex;
Texture* greenTex = new SolidColor(0.12f, 0.45f, 0.15f);
extraTexturePtrList[textureIdx++] = greenTex;
Texture* lightTex = new SolidColor(15.0f, 15.0f, 15.0f);
extraTexturePtrList[textureIdx] = lightTex;
hittablePtrList[objIdx++] = new YZRect(0.0f, 555.0f, 0.0f, 555.0f, 555.0f, new Lambertian(greenTex));
hittablePtrList[objIdx++] = new YZRect(0.0f, 555.0f, 0.0f, 555.0f, 0.0f, new Lambertian(redTex));
hittablePtrList[objIdx++] = new XZRect(213.0f, 343.0f, 227.0f, 332.0f, 554.0f, new DiffuseLight(lightTex));
hittablePtrList[objIdx++] = new XZRect(0.0f, 555.0f, 0.0f, 555.0f, 0.0f, new Lambertian(whiteTex));
hittablePtrList[objIdx++] = new XZRect(0.0f, 555.0f, 0.0f, 555.0f, 555.0f, new Lambertian(whiteTex));
hittablePtrList[objIdx] = new XYRect(0.0f, 555.0f, 0.0f, 555.0f, 555.0f, new Lambertian(whiteTex));
}
__device__ Color rayColor(const Ray& r, const Color& background, Hittable** hittablePtr, const int maxDepth, curandState* const randStatePtr) {
Ray curRay = r;
Vec3 curAttenuation(1.0f, 1.0f, 1.0f);
Color curEmitted(0.0f, 0.0f, 0.0f);
for (size_t i = 0; i < maxDepth; ++i) {
HitRecord rec;
if ((*hittablePtr)->hit(curRay, 0.001f, M_FLOAT_INFINITY, rec)) {
Ray scattered;
Vec3 attenuation;
Color emitted = rec.matPtr->emitted(rec.u, rec.v, rec.point);
if (rec.matPtr->scatter(curRay, rec, attenuation, scattered, randStatePtr)) {
curRay = scattered;
curEmitted += curAttenuation * emitted;
curAttenuation *= attenuation;
} else {
return curEmitted + curAttenuation * emitted;
}
} else {
return curEmitted + curAttenuation * background;
}
}
// exceed max depth
return { 0.0f, 0.0f, 0.0f };
}
__global__ void renderInit(const int imageWidth, const int imageHeight, curandState* const randStateList, unsigned int seed) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if ((col >= imageWidth) || (row >= imageHeight))
return;
const int idx = row * imageWidth + col;
// init random numbers for anti-aliasing
// each thread gets its own special seed, fixed sequence number, fixed offset
curand_init(seed + idx, 0, 0, &randStateList[idx]);
}
__global__ void render(
Color* const pixelBuffer,
const int imageWidth,
const int imageHeight,
Camera** const camera,
const Color background,
curandState* const pixelRandStateList,
const int samplesPerPixel,
const int maxDepth,
Hittable** const hittablePtrList) {
const int col = threadIdx.x + blockIdx.x * blockDim.x;
const int row = threadIdx.y + blockIdx.y * blockDim.y;
if (col >= imageWidth || row >= imageHeight)
return;
const int idx = row * imageWidth + col;
curandState randState = pixelRandStateList[idx];
Color pixelColor(0.0f, 0.0f, 0.0f);
for (size_t s = 0; s < samplesPerPixel; ++s) {
const auto u = (static_cast<float>(col) + randomFloat(&randState)) / static_cast<float>(imageWidth - 1);
const auto v = 1.0 - (static_cast<float>(row) + randomFloat(&randState)) / static_cast<float>(imageHeight - 1);
const Ray r = (*camera)->getRay(u, v, &randState);
pixelColor += rayColor(r, background, hittablePtrList, maxDepth, &randState);
}
pixelColor /= samplesPerPixel;
pixelColor.gammaCorrect();
pixelBuffer[idx] = pixelColor;
}
__global__ void createInit(curandState* const randStatePtr, unsigned int seed) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
// init a random number for sphere generating
curand_init(seed, 0, 0, randStatePtr);
}
}
__global__ void createWorld(Camera** camera, float aspectRatio, Hittable** hittablePtrList, Hittable** hittableWorldObjListPtr, Texture** extraTexturePtrList) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
const Point3 lookFrom(278.0, 278.0, -800.0f);
const Point3 lookAt(278.0f, 278.0f, 0.0f);
const Vec3 vUp(0.0f, 1.0f, 0.0f);
const float vFov = 40.0f;
const float aperture = 0.0f;
const float distToFocus = 10.0f;
const float time0 = 0.0f;
const float time1 = 0.0f;
*camera = new Camera(lookFrom, lookAt, vUp, vFov, aspectRatio, aperture, distToFocus, time0, time1);
cornellBox(hittablePtrList, extraTexturePtrList);
*hittableWorldObjListPtr = new HittableList(hittablePtrList, objNum);
}
}
__global__ void freeWorld(Camera** camera, Hittable** hittableList, size_t hittableNum, Hittable** hittableWorldObjList, Texture** extraTexturePtrList, size_t extraTexturePtrNum) {
delete* camera;
for (int i = 0; i < hittableNum; ++i) {
// delete random material instances
delete hittableList[i]->matPtr();
// delete object instances
delete hittableList[i];
}
for (int i = 0; i < extraTexturePtrNum; ++i) {
// delete extra texture instances
delete extraTexturePtrList[i];
}
delete* hittableWorldObjList;
}
int main() {
/* image config */
constexpr float aspectRatio = 1.0f;
constexpr int imageWidth = 800;
constexpr int imageHeight = static_cast<int>(imageWidth / aspectRatio);
constexpr int samplesPerPixel = 200;
constexpr int maxDepth = 50;
const Color background(0.0f, 0.0f, 0.0f);
/* image output file */
const std::string fileName("output.png");
/* thread block config */
constexpr int threadBlockWidth = 16;
constexpr int threadBlockHeight = 16;
// preparation
constexpr int channelNum = 3; // rgb
constexpr int pixelNum = imageWidth * imageHeight;
constexpr size_t pixelBufferBytes = pixelNum * sizeof(Color);
constexpr size_t randStateListBytes = pixelNum * sizeof(curandState);
// allocate memory for pixel buffer
const auto pixelBufferPtr = cudaManagedUniquePtr<Color>(pixelBufferBytes);
// allocate random state
const auto seed = static_cast<unsigned int>(std::chrono::system_clock::now().time_since_epoch().count());
const auto objRandStatePtr = cudaUniquePtr<curandState>(sizeof(curandState));
const auto pixelRandStateListPtr = cudaUniquePtr<curandState>(randStateListBytes);
// create world of hittable objects and the camera
const auto cameraPtr = cudaUniquePtr<Camera*>(sizeof(Camera*));
const auto hittablePtrList = cudaUniquePtr<Hittable*>(objNum * sizeof(Hittable*));
const auto hittableWorldObjListPtr = cudaUniquePtr<Hittable*>(sizeof(Hittable*));
	constexpr size_t extraTexturePtrNum = 4;	// cornellBox stores four SolidColor textures into this array; sizing it at 1 under-allocates the device buffer
const auto extraTexturePtrList = cudaUniquePtr<Texture*>(extraTexturePtrNum * sizeof(Texture*));
createInit<<<1, 1>>>(objRandStatePtr.get(), seed);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
createWorld<<<1, 1>>>(cameraPtr.get(), aspectRatio, hittablePtrList.get(), hittableWorldObjListPtr.get(), extraTexturePtrList.get());
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// start timer
const clock_t start = clock();
const dim3 blockDim(imageWidth / threadBlockWidth + 1, imageHeight / threadBlockHeight + 1);
const dim3 threadDim(threadBlockWidth, threadBlockHeight);
// render init
renderInit<<<blockDim, threadDim>>>(imageWidth, imageHeight, pixelRandStateListPtr.get(), seed);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// render the image into buffer
render<<<blockDim, threadDim>>>(
pixelBufferPtr.get(),
imageWidth,
imageHeight,
cameraPtr.get(),
background,
pixelRandStateListPtr.get(),
samplesPerPixel,
maxDepth,
hittableWorldObjListPtr.get()
);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
// stop timer
const clock_t stop = clock();
// measure rendering time
const auto renderingMillisecond = stop - start;
// other image writer arguments
constexpr int imageSize = pixelNum * channelNum;
constexpr size_t strideBytes = imageWidth * channelNum * sizeof(unsigned char);
const std::unique_ptr<unsigned char[]> pixelDataPtr(new unsigned char[imageSize]);
// store the pixel data into writing buffer as 8bit color
for (int pixelIdx = 0, dataIdx = 0; pixelIdx < pixelNum; ++pixelIdx) {
const Color color = pixelBufferPtr.get()[pixelIdx];
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.r8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.g8bit());
pixelDataPtr[dataIdx++] = static_cast<unsigned char>(color.b8bit());
}
// print rendering time
std::cout << "Complete!\n" << "The rendering took " << renderingMillisecond << "ms" << std::endl;
// write pixel data to output file
stbi_write_png(fileName.c_str(), imageWidth, imageHeight, channelNum, pixelDataPtr.get(), strideBytes);
// free world of hittable objects
freeWorld<<<1, 1>>>(cameraPtr.get(), hittablePtrList.get(), objNum, hittableWorldObjListPtr.get(), extraTexturePtrList.get(), extraTexturePtrNum);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
return 0;
} |
9fce8f738247571ac445372f34b82ff0ebc5afda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T* input_ptr, const int64_t elements, T* max_ptr,
T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
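  // Minima are tracked as maxima of negated inputs, so min_ptr ends up holding -(true minimum);
  // that is why both buffers are initialized to -FLT_MAX here and why CalScaleZeroPointAffine
  // negates min_ptr before using it.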
while (gid < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = (blockDim.x * blockIdx.x) + tid;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && gid < elements) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(max_ptr, shared_max[0]);
cuda::atomic::Max(min_ptr, shared_min[0]);
}
}
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T* input_ptr, const int64_t elements,
const int64_t num_channels, const int64_t panel_size,
T* max_ptr, T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t cur_channel = blockIdx.x;
int64_t tid = threadIdx.x;
while (cur_channel < num_channels) {
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
int64_t index = (panel_size * cur_channel) + tid;
int64_t end = panel_size * (cur_channel + 1);
while (index < end && index < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[index]);
shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
index += blockDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
}
// __syncthreads();
cur_channel += gridDim.x;
}
}
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T* max_ptr, T* min_ptr) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
max_ptr[gid] = -FLT_MAX;
min_ptr[gid] = -FLT_MAX;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = weight_max / denominator;
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointAffine(const T* max_ptr, const T* min_ptr, const int64_t elements,
const double quantization_bit, T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T min = -min_ptr[gid];
T s = (max_ptr[gid] - min) / denominator;
scale[gid] = s;
zero_point[gid] = -nearbyint(min / s);
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointCambricon(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
// T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = floor(log2(weight_max)) - (quantization_bit - 2);
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
hipLaunchKernelGGL(( func), dim3(SMBlocksNum4ThreadsNum(thread_num)), dim3(kCudaThreadsNumPerBlock), shared_mem_size, \
(device_ctx_ptr)->cuda_stream(), __VA_ARGS__)
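// Illustrative expansion: LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, ...)
// launches InitMaxMin<T> with SMBlocksNum4ThreadsNum(channel) blocks of kCudaThreadsNumPerBlock
// threads, zero dynamic shared memory, on the device context's stream (both helper names are
// presumed to come from the included cuda_util.h).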
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
public:
GpuMinMaxObserverKernel() = default;
~GpuMinMaxObserverKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape().elem_cnt();
const int64_t channel = scale->shape().At(0);
const int64_t panel_size = elements / channel;
T* max_ptr = tmp_buffer->mut_dptr<T>();
T* min_ptr = max_ptr + channel;
LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
if (per_layer_quantization) {
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
min_ptr);
} else { // per-channel quantization
// NOTE(Liang Depeng): each block of threads will be responsible for
// computing the max and min values of the whole channel.
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
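      // ReduceMaxMinPerChannel walks channels with a grid-stride loop (cur_channel += gridDim.x),
      // so correctness does not depend on the launch providing exactly one block per channel.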
}
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else { // quantization_scheme == "affine"
LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
}
} else if (quantization_formula == "cambricon") {
if (!per_layer_quantization) {
UNIMPLEMENTED() << " per-channel mode is not supported in cambricon scheme";
}
LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape& in_shape = ctx->InputShape("in", 0); \
tmp_buffer_size = in_shape.At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
| 9fce8f738247571ac445372f34b82ff0ebc5afda.cu | /*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/device/cuda_util.h"
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/cuda/atomic.cuh"
#include <float.h>
namespace oneflow {
namespace {
// NOTE(Liang Depeng): refer to
// https://stackoverflow.com/questions/17371275/implementing-max-reduce-in-cuda
template<typename T>
__global__ void ReduceMaxMinPerLayer(const T* input_ptr, const int64_t elements, T* max_ptr,
T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
while (gid < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[gid]);
shared_min[tid] = max(shared_min[tid], -input_ptr[gid]);
gid += gridDim.x * blockDim.x;
}
__syncthreads();
gid = (blockDim.x * blockIdx.x) + tid;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && gid < elements) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(max_ptr, shared_max[0]);
cuda::atomic::Max(min_ptr, shared_min[0]);
}
}
template<typename T>
__global__ void ReduceMaxMinPerChannel(const T* input_ptr, const int64_t elements,
const int64_t num_channels, const int64_t panel_size,
T* max_ptr, T* min_ptr) {
extern __shared__ unsigned char shared_max_min_memory[];
T* shared_max = reinterpret_cast<T*>(shared_max_min_memory);
T* shared_min = shared_max + blockDim.x;
int64_t cur_channel = blockIdx.x;
int64_t tid = threadIdx.x;
while (cur_channel < num_channels) {
shared_max[tid] = -FLT_MAX;
shared_min[tid] = -FLT_MAX;
int64_t index = (panel_size * cur_channel) + tid;
int64_t end = panel_size * (cur_channel + 1);
while (index < end && index < elements) {
shared_max[tid] = max(shared_max[tid], input_ptr[index]);
shared_min[tid] = max(shared_min[tid], -input_ptr[index]);
index += blockDim.x;
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s) {
shared_max[tid] = max(shared_max[tid], shared_max[tid + s]);
shared_min[tid] = max(shared_min[tid], shared_min[tid + s]);
}
__syncthreads();
}
if (tid == 0) {
cuda::atomic::Max(&max_ptr[cur_channel], shared_max[0]);
cuda::atomic::Max(&min_ptr[cur_channel], shared_min[0]);
}
// __syncthreads();
cur_channel += gridDim.x;
}
}
template<typename T>
__global__ void InitMaxMin(const int64_t elements, T* max_ptr, T* min_ptr) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
max_ptr[gid] = -FLT_MAX;
min_ptr[gid] = -FLT_MAX;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointSymmetric(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = weight_max / denominator;
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointAffine(const T* max_ptr, const T* min_ptr, const int64_t elements,
const double quantization_bit, T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T denominator = static_cast<T>(pow(2.0, quantization_bit)) - 1;
T min = -min_ptr[gid];
T s = (max_ptr[gid] - min) / denominator;
scale[gid] = s;
zero_point[gid] = -nearbyint(min / s);
gid += gridDim.x * blockDim.x;
}
}
template<typename T>
__global__ void CalScaleZeroPointCambricon(const T* max_ptr, const T* min_ptr,
const int64_t elements, const double quantization_bit,
T* scale, T* zero_point) {
int64_t tid = threadIdx.x;
int64_t gid = (blockDim.x * blockIdx.x) + tid;
while (gid < elements) {
T weight_max = max(fabs(max_ptr[gid]), fabs(min_ptr[gid]));
// T denominator = static_cast<T>(pow(2.0, quantization_bit - 1)) - 1;
scale[gid] = floor(log2(weight_max)) - (quantization_bit - 2);
zero_point[gid] = 0;
gid += gridDim.x * blockDim.x;
}
}
} // namespace
#define LAUNCH_CUDA_KERNEL(func, device_ctx_ptr, thread_num, shared_mem_size, ...) \
func<<<SMBlocksNum4ThreadsNum(thread_num), kCudaThreadsNumPerBlock, shared_mem_size, \
(device_ctx_ptr)->cuda_stream()>>>(__VA_ARGS__)
template<typename T>
class GpuMinMaxObserverKernel final : public user_op::OpKernel {
public:
GpuMinMaxObserverKernel() = default;
~GpuMinMaxObserverKernel() = default;
private:
using user_op::OpKernel::Compute;
void Compute(user_op::KernelComputeContext* ctx) const override {
const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0);
user_op::Tensor* scale = ctx->Tensor4ArgNameAndIndex("scale", 0);
user_op::Tensor* zero_point = ctx->Tensor4ArgNameAndIndex("zero_point", 0);
user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0);
const std::string quantization_scheme = ctx->Attr<std::string>("quantization_scheme");
const int32_t quantization_bit = ctx->Attr<int32_t>("quantization_bit");
const bool per_layer_quantization = ctx->Attr<bool>("per_layer_quantization");
const std::string quantization_formula = ctx->Attr<std::string>("quantization_formula");
const int64_t elements = in->shape().elem_cnt();
const int64_t channel = scale->shape().At(0);
const int64_t panel_size = elements / channel;
T* max_ptr = tmp_buffer->mut_dptr<T>();
T* min_ptr = max_ptr + channel;
LAUNCH_CUDA_KERNEL((InitMaxMin<T>), ctx->device_ctx(), channel, 0, channel, max_ptr, min_ptr);
if (per_layer_quantization) {
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerLayer<T>), ctx->device_ctx(), elements,
kCudaThreadsNumPerBlock * 2 * sizeof(T), in->dptr<T>(), elements, max_ptr,
min_ptr);
} else { // per-channel quantization
// NOTE(Liang Depeng): each block of threads will be responsible for
// computing the max and min values of the whole channel.
LAUNCH_CUDA_KERNEL((ReduceMaxMinPerChannel<T>), ctx->device_ctx(),
channel * kCudaThreadsNumPerBlock, kCudaThreadsNumPerBlock * 2 * sizeof(T),
in->dptr<T>(), elements, channel, panel_size, max_ptr, min_ptr);
}
if (quantization_formula == "google") {
if (quantization_scheme == "symmetric") {
LAUNCH_CUDA_KERNEL((CalScaleZeroPointSymmetric<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else { // quantization_scheme == "affine"
LAUNCH_CUDA_KERNEL((CalScaleZeroPointAffine<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
}
} else if (quantization_formula == "cambricon") {
if (!per_layer_quantization) {
UNIMPLEMENTED() << " per-channel mode is not supported in cambricon scheme";
}
LAUNCH_CUDA_KERNEL((CalScaleZeroPointCambricon<T>), ctx->device_ctx(), channel, 0, max_ptr,
min_ptr, channel, static_cast<double>(quantization_bit),
scale->mut_dptr<T>(), zero_point->mut_dptr<T>());
} else {
UNIMPLEMENTED();
}
}
bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; }
};
#define REGISTER_MIN_MAX_OBSERVER_KERNEL(dtype) \
REGISTER_USER_KERNEL("min_max_observer") \
.SetCreateFn<GpuMinMaxObserverKernel<dtype>>() \
.SetIsMatchedHob((user_op::HobDeviceTag() == DeviceType::kGPU) \
& (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \
.SetInferTmpSizeFn([](user_op::InferContext* ctx) -> size_t { \
size_t tmp_buffer_size = 1; \
if (ctx->Attr<bool>("per_layer_quantization") == false) { \
const Shape& in_shape = ctx->InputShape("in", 0); \
tmp_buffer_size = in_shape.At(0); \
} \
return 2 * tmp_buffer_size * sizeof(dtype); \
})
REGISTER_MIN_MAX_OBSERVER_KERNEL(float);
REGISTER_MIN_MAX_OBSERVER_KERNEL(double);
} // namespace oneflow
|
a97c15af38eb935b4079e8e1adf537d5b195a6d9.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
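// ILP: each thread handles 4 elements per iteration of the chunk loop (spaced blockDim.x apart),
// loading them in one unrolled pass and doing the axpby + finiteness check in a second.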
template<typename x_t, typename y_t, typename out_t>
struct AxpbyFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
float a,
float b,
int arg_to_check)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
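    // As a consequence noop_gmem is write-only in this kernel: non-finite inputs still produce
    // (non-finite) outputs, and the flag merely records that something non-finite was seen,
    // presumably so the caller can skip the subsequent optimizer step.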
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
y_t* y = (y_t*)tl.addresses[1][tensor_loc];
y += chunk_idx*chunk_size;
out_t* out = (out_t*)tl.addresses[2][tensor_loc];
out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// Non-divergent exit condition for __syncthreads, not necessary here
float xs[ILP];
float ys[ILP];
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
xs[ii] = 0;
ys[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
xs[ii] = static_cast<float>(x[i]);
ys[ii] = static_cast<float>(y[i]);
}
}
// see note in multi_tensor_scale_kernel.cu
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
out[i] = static_cast<out_t>(a*xs[ii] + b*ys[ii]);
bool finite = true;
if(arg_to_check == -1)
finite = (isfinite(xs[ii]) && isfinite(ys[ii]));
if(arg_to_check == 0)
finite = isfinite(xs[ii]);
if(arg_to_check == 1)
finite = isfinite(ys[ii]);
if(!finite)
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
}
}
}
}
};
void multi_tensor_axpby_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float a,
float b,
int arg_to_check)
{
using namespace at;
// The output (downscaled) type is always float.
// If build times suffer, think about where to put this dispatch,
// and what logic should be moved out of multi_tensor_apply.
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[2][0].scalar_type(), 2, "multi_tensor_axpby_cuda",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AxpbyFunctor<scalar_t_0, scalar_t_1, scalar_t_2>(),
a,
b,
arg_to_check); )))
AT_CUDA_CHECK(hipGetLastError());
// AT_CUDA_CHECK(hipDeviceSynchronize());
}
| a97c15af38eb935b4079e8e1adf537d5b195a6d9.cu | #include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>
#include <assert.h>
#include "type_shim.h"
#include "multi_tensor_apply.cuh"
#define BLOCK_SIZE 512
#define ILP 4
template<typename x_t, typename y_t, typename out_t>
struct AxpbyFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int* noop_gmem,
TensorListMetadata<3>& tl,
float a,
float b,
int arg_to_check)
{
// I'd like this kernel to propagate infs/nans.
// if(*noop_gmem == 1)
// return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
x_t* x = (x_t*)tl.addresses[0][tensor_loc];
x += chunk_idx*chunk_size;
y_t* y = (y_t*)tl.addresses[1][tensor_loc];
y += chunk_idx*chunk_size;
out_t* out = (out_t*)tl.addresses[2][tensor_loc];
out += chunk_idx*chunk_size;
n -= chunk_idx*chunk_size;
// Non-divergent exit condition for __syncthreads, not necessary here
float xs[ILP];
float ys[ILP];
for(int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x*ILP)
{
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
xs[ii] = 0;
ys[ii] = 0;
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
xs[ii] = static_cast<float>(x[i]);
ys[ii] = static_cast<float>(y[i]);
}
}
// see note in multi_tensor_scale_kernel.cu
#pragma unroll
for(int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii*blockDim.x;
if(i < n && i < chunk_size)
{
out[i] = static_cast<out_t>(a*xs[ii] + b*ys[ii]);
bool finite = true;
if(arg_to_check == -1)
finite = (isfinite(xs[ii]) && isfinite(ys[ii]));
if(arg_to_check == 0)
finite = isfinite(xs[ii]);
if(arg_to_check == 1)
finite = isfinite(ys[ii]);
if(!finite)
*noop_gmem = 1; // Blindly fire off a write. These will race but that's ok.
}
}
}
}
};
void multi_tensor_axpby_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float a,
float b,
int arg_to_check)
{
using namespace at;
// The output (downscaled) type is always float.
// If build times suffer, think about where to put this dispatch,
// and what logic should be moved out of multi_tensor_apply.
DISPATCH_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), 0, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[1][0].scalar_type(), 1, "multi_tensor_axpby_cuda",
DISPATCH_FLOAT_AND_HALF(tensor_lists[2][0].scalar_type(), 2, "multi_tensor_axpby_cuda",
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
AxpbyFunctor<scalar_t_0, scalar_t_1, scalar_t_2>(),
a,
b,
arg_to_check); )))
AT_CUDA_CHECK(cudaGetLastError());
// AT_CUDA_CHECK(cudaDeviceSynchronize());
}
|
fa9a8ddf752975fd2265961a12600b032bd8acfd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_minus_4_back;
int xdim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_minus_4_back;
int ydim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_minus_4_back;
int xdim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_minus_4_back;
int ydim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_minus_4_back * (y) + \
xdim0_update_halo_kernel2_zvel_minus_4_back * \
ydim0_update_halo_kernel2_zvel_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_minus_4_back * (y) + \
xdim1_update_halo_kernel2_zvel_minus_4_back * \
ydim1_update_halo_kernel2_zvel_minus_4_back * (z))
// user function
__device__
inline void
update_halo_kernel2_zvel_minus_4_back_gpu(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = -zvel0[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = -zvel1[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_minus_4_back(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back *
ydim0_update_halo_kernel2_zvel_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back *
ydim1_update_halo_kernel2_zvel_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_minus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_zvel_minus_4_back(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 100))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(100, "update_halo_kernel2_zvel_minus_4_back");
OPS_kernels[100].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
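  // start/end now hold this process's locally owned slice of the requested iteration range
  // (in block-local coordinates under MPI, or simply the full range in a serial build).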
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_back_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_minus_4_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_minus_4_back_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_minus_4_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_minus_4_back_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_minus_4_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_minus_4_back_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_minus_4_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[100].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_4_back), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[100].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[100].mpi_time += t2 - t1;
OPS_kernels[100].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[100].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| fa9a8ddf752975fd2265961a12600b032bd8acfd.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_minus_4_back;
int xdim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_minus_4_back;
int ydim0_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_minus_4_back;
int xdim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_minus_4_back;
int ydim1_update_halo_kernel2_zvel_minus_4_back_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel2_zvel_minus_4_back * (y) + \
xdim0_update_halo_kernel2_zvel_minus_4_back * \
ydim0_update_halo_kernel2_zvel_minus_4_back * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel2_zvel_minus_4_back * (y) + \
xdim1_update_halo_kernel2_zvel_minus_4_back * \
ydim1_update_halo_kernel2_zvel_minus_4_back * (z))
// user function
__device__
inline void
update_halo_kernel2_zvel_minus_4_back_gpu(double *zvel0, double *zvel1,
const int *fields) {
if (fields[FIELD_ZVEL0] == 1)
zvel0[OPS_ACC0(0, 0, 0)] = -zvel0[OPS_ACC0(0, 0, 4)];
if (fields[FIELD_ZVEL1] == 1)
zvel1[OPS_ACC1(0, 0, 0)] = -zvel1[OPS_ACC1(0, 0, 4)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_minus_4_back(
double *__restrict arg0, double *__restrict arg1,
const int *__restrict arg2, int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back +
idx_z * 1 * 1 * xdim0_update_halo_kernel2_zvel_minus_4_back *
ydim0_update_halo_kernel2_zvel_minus_4_back;
arg1 += idx_x * 1 * 1 +
idx_y * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back +
idx_z * 1 * 1 * xdim1_update_halo_kernel2_zvel_minus_4_back *
ydim1_update_halo_kernel2_zvel_minus_4_back;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_minus_4_back_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel2_zvel_minus_4_back(
char const *name, ops_block block, int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 100))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(100, "update_halo_kernel2_zvel_minus_4_back");
OPS_kernels[100].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_minus_4_back_h ||
ydim0 != ydim0_update_halo_kernel2_zvel_minus_4_back_h ||
xdim1 != xdim1_update_halo_kernel2_zvel_minus_4_back_h ||
ydim1 != ydim1_update_halo_kernel2_zvel_minus_4_back_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel2_zvel_minus_4_back, &xdim0,
sizeof(int));
xdim0_update_halo_kernel2_zvel_minus_4_back_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel2_zvel_minus_4_back, &ydim0,
sizeof(int));
ydim0_update_halo_kernel2_zvel_minus_4_back_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel2_zvel_minus_4_back, &xdim1,
sizeof(int));
xdim1_update_halo_kernel2_zvel_minus_4_back_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel2_zvel_minus_4_back, &ydim1,
sizeof(int));
ydim1_update_halo_kernel2_zvel_minus_4_back_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[100].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel2_zvel_minus_4_back<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[100].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[100].mpi_time += t2 - t1;
OPS_kernels[100].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[100].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
590f7bc249501f78324faa88ca719a717fce1033.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ADD-BY-LEETEN 12/07/2009-BEGIN
__global__
static
void
_FlowDiffusion2D_kernel
(
// INPUT
float fAttenuation,
int iVolumeWidth,
int iVolumeHeight,
hipPitchedPtr cDstPitchedPtr,
// ADD-BY-LEETEN 2009/11/25-BEGIN
hipPitchedPtr cErrorPitchedPtr
// ADD-BY-LEETEN 2009/11/25-END
)
{
int iVoxelX = blockIdx.x * blockDim.x + threadIdx.x;
int iVoxelY = blockIdx.y * blockDim.y + threadIdx.y;
	// compute the central difference
float4 f4Value = tex2D(t2dSrc, iVoxelX, iVoxelY);
float4 f4PX = tex2D(t2dSrc, min(iVoxelX + 1, iVolumeWidth - 1), iVoxelY);
float4 f4NX = tex2D(t2dSrc, max(iVoxelX - 1, 0), iVoxelY);
float4 f4PY = tex2D(t2dSrc, iVoxelX, min(iVoxelY + 1, iVolumeHeight - 1));
float4 f4NY = tex2D(t2dSrc, iVoxelX, max(iVoxelY - 1, 0) );
float4 f4WeightOffset = tex3D(t3dWeightOffset, iVoxelX, iVoxelY, 0);
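	// Jacobi-style diffusion update: new = w.w * old + attenuation * (4-neighbour Laplacian) + w.xyz,
	// with out-of-range neighbours clamped to the boundary by the min()/max() indexing above.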
float4 f4Result;
f4Result = make_float4(
f4WeightOffset.w * f4Value.x + (f4PX.x + f4NX.x + f4PY.x + f4NY.x - 4.0f * f4Value.x) * fAttenuation + f4WeightOffset.x,
f4WeightOffset.w * f4Value.y + (f4PX.y + f4NX.y + f4PY.y + f4NY.y - 4.0f * f4Value.y) * fAttenuation + f4WeightOffset.y,
f4WeightOffset.w * f4Value.z + (f4PX.z + f4NX.z + f4PY.z + f4NY.z - 4.0f * f4Value.z) * fAttenuation + f4WeightOffset.z,
0);
if( iVoxelX < iVolumeWidth && iVoxelY < iVolumeHeight )
{
*ADDRESS_2D(
float4, cDstPitchedPtr.ptr,
sizeof(float4), cDstPitchedPtr.pitch,
iVoxelX, iVoxelY) = f4Result;
// ADD-BY-LEETEN 12/16/2009-BEGIN
#if CHECK_ERROR_CONVERGENCE_BY_CUDPP
// ADD-BY-LEETEN 12/16/2009-END
float4 f4Diff;
f4Diff.x = f4Value.x - f4Result.x;
f4Diff.y = f4Value.y - f4Result.y;
f4Diff.z = f4Value.z - f4Result.z;
float fDiff = f4Diff.x * f4Diff.x + f4Diff.y * f4Diff.y + f4Diff.z * f4Diff.z;
*ADDRESS_2D(
float, cErrorPitchedPtr.ptr,
sizeof(float), cErrorPitchedPtr.pitch,
iVoxelX, iVoxelY) = fDiff;
// ADD-BY-LEETEN 12/16/2009-BEGIN
#endif // #if CHECK_ERROR_CONVERGENCE_BY_CUDPP
// ADD-BY-LEETEN 12/16/2009-END
}
}
/*
$Log: not supported by cvs2svn $
*/
| 590f7bc249501f78324faa88ca719a717fce1033.cu | // ADD-BY-LEETEN 12/07/2009-BEGIN
__global__
static
void
_FlowDiffusion2D_kernel
(
// INPUT
float fAttenuation,
int iVolumeWidth,
int iVolumeHeight,
cudaPitchedPtr cDstPitchedPtr,
// ADD-BY-LEETEN 2009/11/25-BEGIN
cudaPitchedPtr cErrorPitchedPtr
// ADD-BY-LEETEN 2009/11/25-END
)
{
int iVoxelX = blockIdx.x * blockDim.x + threadIdx.x;
int iVoxelY = blockIdx.y * blockDim.y + threadIdx.y;
// compute the central differnece
float4 f4Value = tex2D(t2dSrc, iVoxelX, iVoxelY);
float4 f4PX = tex2D(t2dSrc, min(iVoxelX + 1, iVolumeWidth - 1), iVoxelY);
float4 f4NX = tex2D(t2dSrc, max(iVoxelX - 1, 0), iVoxelY);
float4 f4PY = tex2D(t2dSrc, iVoxelX, min(iVoxelY + 1, iVolumeHeight - 1));
float4 f4NY = tex2D(t2dSrc, iVoxelX, max(iVoxelY - 1, 0) );
float4 f4WeightOffset = tex3D(t3dWeightOffset, iVoxelX, iVoxelY, 0);
float4 f4Result;
f4Result = make_float4(
f4WeightOffset.w * f4Value.x + (f4PX.x + f4NX.x + f4PY.x + f4NY.x - 4.0f * f4Value.x) * fAttenuation + f4WeightOffset.x,
f4WeightOffset.w * f4Value.y + (f4PX.y + f4NX.y + f4PY.y + f4NY.y - 4.0f * f4Value.y) * fAttenuation + f4WeightOffset.y,
f4WeightOffset.w * f4Value.z + (f4PX.z + f4NX.z + f4PY.z + f4NY.z - 4.0f * f4Value.z) * fAttenuation + f4WeightOffset.z,
0);
if( iVoxelX < iVolumeWidth && iVoxelY < iVolumeHeight )
{
*ADDRESS_2D(
float4, cDstPitchedPtr.ptr,
sizeof(float4), cDstPitchedPtr.pitch,
iVoxelX, iVoxelY) = f4Result;
// ADD-BY-LEETEN 12/16/2009-BEGIN
#if CHECK_ERROR_CONVERGENCE_BY_CUDPP
// ADD-BY-LEETEN 12/16/2009-END
float4 f4Diff;
f4Diff.x = f4Value.x - f4Result.x;
f4Diff.y = f4Value.y - f4Result.y;
f4Diff.z = f4Value.z - f4Result.z;
float fDiff = f4Diff.x * f4Diff.x + f4Diff.y * f4Diff.y + f4Diff.z * f4Diff.z;
*ADDRESS_2D(
float, cErrorPitchedPtr.ptr,
sizeof(float), cErrorPitchedPtr.pitch,
iVoxelX, iVoxelY) = fDiff;
// ADD-BY-LEETEN 12/16/2009-BEGIN
#endif // #if CHECK_ERROR_CONVERGENCE_BY_CUDPP
// ADD-BY-LEETEN 12/16/2009-END
}
}
/*
$Log: not supported by cvs2svn $
*/
|
d9bc2b2e05ae5014cb09648b47b318aa45763abf.hip | // !!! This is a file automatically generated by hipify!!!
//Notes: 1- infoVecs.nodeCellRankBehind and infoVecs.nodeCellRankFront are assigned sequential values corresponding to their actual values in the function SceNodes::allocSpaceForNodes.
// 2- The adhesion algorithm won't work if there is no apical node.
// 3- maxNumAdh is given inside the code as a parameter in the .cu file. It should become an input, or a function should be written to detect it automatically.
// 4- In SceNodes::NumAdhAfter and SceNodes::NumAdhBefore the number of lateral nodes is given manually inside the code; it should be calculated automatically from the input parameters.
#include "SceNodes.h"
#include "SceCells.h"
__constant__ double sceInterPara[5];
__constant__ double sceIntraPara[5];
// parameter set for cells that are going to divide
__constant__ double sceIntraParaDiv[5];
__constant__ double sceDivProPara;
__constant__ double sceCartPara[5];
__constant__ double sceInterDiffPara[5];
__constant__ double sceProfilePara[7];
__constant__ double sceECMPara[5];
__constant__ double sceDiffPara[5];
__constant__ double cartGrowDirVec[3];
__constant__ uint ProfilebeginPos;
__constant__ uint ECMbeginPos;
__constant__ uint cellNodeBeginPos;
__constant__ uint nodeCountPerECM;
__constant__ uint nodeCountPerCell;
//
__constant__ uint cellNodeBeginPos_M;
__constant__ uint allNodeCountPerCell_M;
__constant__ uint membrThreshold_M;
__constant__ double sceInterBPara_M[5];
__constant__ int sceInterBPara_Jones_On_M ; //Ali
__constant__ double sceInterBPara_Jones_M[3] ; //Ali
__constant__ double sceIntnlBPara_M[5];
__constant__ double sceIntraPara_M[5];
__constant__ double sceIntraParaDiv_M[5];
__constant__ double growthPrgrCriVal_M;
__constant__ double maxAdhBondLen_M;
__constant__ double minAdhBondLen_M;
__constant__ double bondStiff_M;
__constant__ double bondStiff_Mitotic;
__constant__ double bondAdhCriLen_M;
// #define DebugMode
// This template method expands an input sequence by
// replicating each element a variable number of times. For example,
//
// expand([2,2,2],[A,B,C]) -> [A,A,B,B,C,C]
// expand([3,0,1],[A,B,C]) -> [A,A,A,C]
// expand([1,3,2],[A,B,C]) -> [A,B,B,B,C,C]
//
// The element counts are assumed to be non-negative integers
template<typename InputIterator1, typename InputIterator2,
typename OutputIterator>
OutputIterator expand(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator output) {
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0),
thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(),
output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output;
thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2,
output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
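// A minimal usage sketch of expand() (illustrative only; the vectors below are
// hypothetical and not part of this class):
//   thrust::device_vector<int> counts(3, 2);    // [2, 2, 2]
//   int v[3] = {7, 8, 9};
//   thrust::device_vector<int> values(v, v + 3);
//   thrust::device_vector<int> out(thrust::reduce(counts.begin(), counts.end()));
//   expand(counts.begin(), counts.end(), values.begin(), out.begin());
//   // out now holds [7, 7, 8, 8, 9, 9]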
SceNodes::SceNodes() {
readDomainPara();
}
int SceNodes::NumAdhBefore(int cellRank,ECellType eCellType) {
if (eCellType==peri) {
return 28 ;
}
if (eCellType==bc) {
return 30 ;
}
if (eCellType==pouch) {
if (cellRank==0){
return 36 ;
}
else if ( cellRank==1) {
return 108 ;
}
else if ( cellRank==64) {
return 108 ;
}
else {
return 180 ;
}
}
}
int SceNodes::NumAdhAfter(int cellRank,ECellType eCellType) {
if ( eCellType==peri) {
return 28 ;
}
if (eCellType==bc) {
return 30 ;
}
if (eCellType==pouch) {
if (cellRank==64){
return 36 ;
}
else if ( cellRank==63) {
return 108 ;
}
else if ( cellRank==0) {
return 108 ;
}
else {
return 180 ;
}
}
}
void SceNodes::readDomainPara() {
domainPara.minX = globalConfigVars.getConfigValue("DOMAIN_XMIN").toDouble();
domainPara.maxX = globalConfigVars.getConfigValue("DOMAIN_XMAX").toDouble();
domainPara.minY = globalConfigVars.getConfigValue("DOMAIN_YMIN").toDouble();
domainPara.maxY = globalConfigVars.getConfigValue("DOMAIN_YMAX").toDouble();
//domainPara.minZ = globalConfigVars.getConfigValue("DOMAIN_ZMIN").toDouble();
//domainPara.maxZ = globalConfigVars.getConfigValue("DOMAIN_ZMAX").toDouble();
domainPara.gridSpacing = getMaxEffectiveRange();
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
//domainPara.ZBucketSize = (domainPara.maxZ - domainPara.minZ)
// / domainPara.gridSpacing + 1;
}
void SceNodes::readMechPara() {
double U0 =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_U0_DivFactor").toDouble();
double V0 =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_V0_DivFactor").toDouble();
double k1 =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k1_DivFactor").toDouble();
double k2 =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k2_DivFactor").toDouble();
mechPara.sceInterParaCPU[0] = U0;
mechPara.sceInterParaCPU[1] = V0;
mechPara.sceInterParaCPU[2] = k1;
mechPara.sceInterParaCPU[3] = k2;
double interLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
mechPara.sceInterParaCPU[4] = interLinkEffectiveRange;
}
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_U0_DivFactor").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_V0_DivFactor").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k1_DivFactor").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k2_DivFactor").toDouble();
mechPara.sceIntraParaCPU[0] = U0_Intra;
mechPara.sceIntraParaCPU[1] = V0_Intra;
mechPara.sceIntraParaCPU[2] = k1_Intra;
mechPara.sceIntraParaCPU[3] = k2_Intra;
double intraLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
mechPara.sceIntraParaCPU[4] = intraLinkEffectiveRange;
}
if (controlPara.simuType == Disc) {
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_U0_Div_DivFactor").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_V0_Div_DivFactor").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k1_Div_DivFactor").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k2_Div_DivFactor").toDouble();
double growthProgressThreshold = globalConfigVars.getConfigValue(
"GrowthProgressThreshold").toDouble();
mechPara.sceIntraParaDivCPU[0] = U0_Intra_Div;
mechPara.sceIntraParaDivCPU[1] = V0_Intra_Div;
mechPara.sceIntraParaDivCPU[2] = k1_Intra_Div;
mechPara.sceIntraParaDivCPU[3] = k2_Intra_Div;
mechPara.sceIntraParaDivCPU[4] = growthProgressThreshold;
}
}
// This constructor is not active (Ali)
SceNodes::SceNodes(uint totalBdryNodeCount, uint maxProfileNodeCount,
uint maxCartNodeCount, uint maxTotalECMCount, uint maxNodeInECM,
uint maxTotalCellCount, uint maxNodeInCell, bool isStab) {
initControlPara(isStab);
readDomainPara();
uint maxTotalNodeCount;
if (controlPara.simuType != Disc_M) {
initNodeAllocPara(totalBdryNodeCount, maxProfileNodeCount,
maxCartNodeCount, maxTotalECMCount, maxNodeInECM,
maxTotalCellCount, maxNodeInCell);
maxTotalNodeCount = totalBdryNodeCount + maxProfileNodeCount
+ maxCartNodeCount + allocPara.maxTotalECMNodeCount
+ allocPara.maxTotalCellNodeCount;
} else {
uint maxEpiNodeCount = globalConfigVars.getConfigValue(
"MaxEpiNodeCountPerCell").toInt();
uint maxInternalNodeCount = globalConfigVars.getConfigValue(
"MaxAllNodeCountPerCell").toInt() - maxEpiNodeCount;
initNodeAllocPara_M(totalBdryNodeCount, maxTotalCellCount,
maxEpiNodeCount, maxInternalNodeCount);
maxTotalNodeCount = allocPara_M.maxTotalNodeCount;
}
//allocSpaceForNodes(maxTotalNodeCount); // Ali: commented out because it is not active in this simulation; the function was updated in the active constructor
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
thrust::host_vector<int> hostTmpVector3(maxTotalNodeCount);
if (controlPara.simuType != Disc_M) {
for (int i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara.startPosProfile) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCart) {
hostTmpVector[i] = Profile;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosECM) {
hostTmpVector[i] = Cart;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCells) {
hostTmpVector[i] = ECM;
hostTmpVector3[i] = (i - allocPara.startPosECM)
/ allocPara.maxNodePerECM;
} else {
// all initialized as FNM
hostTmpVector[i] = FNM;
hostTmpVector3[i] = (i - allocPara.startPosCells)
/ allocPara.maxNodeOfOneCell;
}
hostTmpVector2[i] = false;
}
} else {
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
uint cellRank = tmp / allocPara_M.maxAllNodePerCell;
uint nodeRank = tmp % allocPara_M.maxAllNodePerCell;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
} else {
hostTmpVector[i] = CellIntnl;
}
hostTmpVector3[i] = cellRank;
}
hostTmpVector2[i] = false;
}
}
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
infoVecs.nodeCellRank = hostTmpVector3;
std::cout << " I am in SceNodes constructor with long input which includes copyParaToGPUConstMem function " << endl ;
copyParaToGPUConstMem();
}
SceNodes::SceNodes(uint maxTotalCellCount, uint maxAllNodePerCell, uint currentActiveCellCount) {
//initControlPara (isStab);
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
readDomainPara();
uint maxTotalNodeCount = maxTotalCellCount * maxAllNodePerCell;
uint maxMembrNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxMembrNodeCountPerCell").toInt();
uint maxIntnlNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxIntnlNodeCountPerCell").toInt();
initNodeAllocPara_M(0, maxTotalCellCount, maxMembrNodeCountPerCell,
maxIntnlNodeCountPerCell);
std::cout << " Number of boundary nodes = " << allocPara_M.bdryNodeCount
<< std::endl;
std::cout << " Max number of cells in domain = "
<< allocPara_M.maxCellCount << std::endl;
std::cout << " Max all nodes per cell = "
<< allocPara_M.maxAllNodePerCell << std::endl;
std::cout << " Max membrane node per cell= "
<< allocPara_M.maxMembrNodePerCell << std::endl;
std::cout << " Max internal node per cell= "
<< allocPara_M.maxIntnlNodePerCell << std::endl;
std::cout << " Max total number of nodes in domain = "
<< allocPara_M.maxTotalNodeCount << std::endl;
allocSpaceForNodes(maxTotalNodeCount, allocPara_M.maxCellCount, currentActiveCellCount);
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
uint nodeRank;
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
nodeRank = tmp % allocPara_M.maxAllNodePerCell;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
//std::cout << "0";
} else {
hostTmpVector[i] = CellIntnl;
//std::cout << "1";
}
}
hostTmpVector2[i] = false;
if (nodeRank == 0) {
//std::cout << std::endl;
}
}
//std::cout << "finished" << std::endl;
//std::cout.flush();
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
thrust::host_vector<int> bondVec(maxTotalNodeCount, -1);
infoVecs.nodeAdhereIndex = bondVec;
infoVecs.membrIntnlIndex = bondVec;
infoVecs.nodeAdhIndxHostCopy = bondVec;
//std::cout << "copy finished!" << std::endl;
//std::cout.flush();
copyParaToGPUConstMem_M();
std::cout << " I am in SceNodes constructor with short input which includes copyParaToGPUConstMem_M function " << endl ;
//std::cout << "at the end" << std::endl;
//std::cout.flush();
adhNotSet=true ; //Ali
adhUpdate=true ; //Ali
cout << "adhesion not set is initialized as " << adhNotSet << endl ;
cout << "adhesion update is initialized as " << adhUpdate << endl ;
}
void SceNodes::copyParaToGPUConstMem() {
readMechPara();
hipMemcpyToSymbol(sceInterPara, mechPara.sceInterParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraPara, mechPara.sceIntraParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraParaDiv, mechPara.sceIntraParaDivCPU,
5 * sizeof(double));
hipMemcpyToSymbol(ProfilebeginPos, &allocPara.startPosProfile,
sizeof(uint));
hipMemcpyToSymbol(ECMbeginPos, &allocPara.startPosECM, sizeof(uint));
hipMemcpyToSymbol(cellNodeBeginPos, &allocPara.startPosCells,
sizeof(uint));
hipMemcpyToSymbol(nodeCountPerECM, &allocPara.maxNodePerECM, sizeof(uint));
hipMemcpyToSymbol(nodeCountPerCell, &allocPara.maxNodeOfOneCell,
sizeof(uint));
hipMemcpyToSymbol(sceCartPara, mechPara.sceCartParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceProfilePara, mechPara.sceProfileParaCPU,
7 * sizeof(double));
hipMemcpyToSymbol(sceInterDiffPara, mechPara.sceInterDiffParaCPU,
5 * sizeof(double));
hipMemcpyToSymbol(sceECMPara, mechPara.sceECMParaCPU, 5 * sizeof(double));
}
void SceNodes::copyParaToGPUConstMem_M() {
readParas_M();
hipMemcpyToSymbol(cellNodeBeginPos_M, &allocPara_M.bdryNodeCount,
sizeof(uint));
hipMemcpyToSymbol(allNodeCountPerCell_M, &allocPara_M.maxAllNodePerCell,
sizeof(uint));
hipMemcpyToSymbol(membrThreshold_M, &allocPara_M.maxMembrNodePerCell,
sizeof(uint));
hipMemcpyToSymbol(bondAdhCriLen_M, &mechPara_M.bondAdhCriLenCPU_M,
sizeof(double));
hipMemcpyToSymbol(bondStiff_M, &mechPara_M.bondStiffCPU_M, sizeof(double));
hipMemcpyToSymbol(bondStiff_Mitotic, &mechPara_M.bondStiffCPU_Mitotic, sizeof(double));//Ali June 16
hipMemcpyToSymbol(growthPrgrCriVal_M, &mechPara_M.growthPrgrCriValCPU_M,
sizeof(double));
hipMemcpyToSymbol(maxAdhBondLen_M, &mechPara_M.maxAdhBondLenCPU_M,
sizeof(double));
hipMemcpyToSymbol(minAdhBondLen_M, &mechPara_M.minAdhBondLenCPU_M,
sizeof(double));
hipMemcpyToSymbol(sceInterBPara_M, mechPara_M.sceInterBParaCPU_M,
5 * sizeof(double));
hipMemcpyToSymbol(sceInterBPara_Jones_On_M, &mechPara_M.sceInterBParaCPU_Jones_On_M,
sizeof(int)); //Ali
hipMemcpyToSymbol(sceInterBPara_Jones_M, mechPara_M.sceInterBParaCPU_Jones_M,
3 * sizeof(double)); //Ali
hipMemcpyToSymbol(sceIntnlBPara_M, mechPara_M.sceIntnlBParaCPU_M,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraPara_M, mechPara_M.sceIntraParaCPU_M,
5 * sizeof(double));
hipMemcpyToSymbol(sceIntraParaDiv_M, mechPara_M.sceIntraParaDivCPU_M,
5 * sizeof(double));
}
void SceNodes::initDimension(double domainMinX, double domainMaxX,
double domainMinY, double domainMaxY, double domainBucketSize) {
domainPara.minX = domainMinX;
domainPara.maxX = domainMaxX;
domainPara.minY = domainMinY;
domainPara.maxY = domainMaxY;
domainPara.gridSpacing = domainBucketSize;
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
domainPara.totalBucketCount = domainPara.XBucketSize
* domainPara.YBucketSize;
auxVecs.keyBegin.resize(domainPara.totalBucketCount);
auxVecs.keyEnd.resize(domainPara.totalBucketCount);
}
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
int size = bucketKeysCPU.size();
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
int node1 = bucketValuesCPU[i];
int node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
iterationCounter++;
}
}
return result;
}
void SceNodes::readParas_M() {
//////////////////////
//// Block 1 /////////
//////////////////////
double U0_InterB =
globalConfigVars.getConfigValue("SceInterB_U0").toDouble();
double V0_InterB =
globalConfigVars.getConfigValue("SceInterB_V0").toDouble();
double k1_InterB =
globalConfigVars.getConfigValue("SceInterB_k1").toDouble();
double k2_InterB =
globalConfigVars.getConfigValue("SceInterB_k2").toDouble();
double interBEffectiveRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
mechPara_M.sceInterBParaCPU_M[0] = U0_InterB;
mechPara_M.sceInterBParaCPU_M[1] = V0_InterB;
mechPara_M.sceInterBParaCPU_M[2] = k1_InterB;
mechPara_M.sceInterBParaCPU_M[3] = k2_InterB;
mechPara_M.sceInterBParaCPU_M[4] = interBEffectiveRange;
//Ali
//////////////////////
//// Block 1.5 /////////
//////////////////////
int On_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_On").toDouble();
double eps_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_eps").toDouble();
double sig_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_sig").toDouble();
double interBEffectiveRange_Jones = globalConfigVars.getConfigValue(
"InterBEffectiveRange_Jones").toDouble();
mechPara_M.sceInterBParaCPU_Jones_On_M = On_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[0] = eps_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[1] = sig_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[2] = interBEffectiveRange_Jones;
//Ali
//////////////////////
//// Block 2 /////////
//////////////////////
double U0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble();
double V0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble();
double k1_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble();
double k2_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble();
double intnlBEffectiveRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
mechPara_M.sceIntnlBParaCPU_M[0] = U0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[1] = V0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[2] = k1_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[3] = k2_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[4] = intnlBEffectiveRange;
//////////////////////
//// Block 3 /////////
//////////////////////
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2").toDouble();
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
mechPara_M.sceIntraParaCPU_M[0] = U0_Intra;
mechPara_M.sceIntraParaCPU_M[1] = V0_Intra;
mechPara_M.sceIntraParaCPU_M[2] = k1_Intra;
mechPara_M.sceIntraParaCPU_M[3] = k2_Intra;
mechPara_M.sceIntraParaCPU_M[4] = intraLinkEffectiveRange;
//////////////////////
//// Block 4 /////////
//////////////////////
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble();
double intraDivEffectiveRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
mechPara_M.sceIntraParaDivCPU_M[0] = U0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[1] = V0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[2] = k1_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[3] = k2_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[4] = intraDivEffectiveRange;
//////////////////////
//// Block 5 /////////
//////////////////////
double bondAdhCriLen =
globalConfigVars.getConfigValue("BondAdhCriLen").toDouble();
mechPara_M.bondAdhCriLenCPU_M = bondAdhCriLen;
double bondStiff = globalConfigVars.getConfigValue("BondStiff").toDouble();
mechPara_M.bondStiffCPU_M = bondStiff;
//Ali June 16
double bondStiff_Mitotic = globalConfigVars.getConfigValue("BondStiff_Mitotic").toDouble();
mechPara_M.bondStiffCPU_Mitotic = bondStiff_Mitotic;
double growthPrgrCriVal = globalConfigVars.getConfigValue(
"GrowthPrgrCriVal").toDouble();
mechPara_M.growthPrgrCriValCPU_M = growthPrgrCriVal;
double maxAdhBondLen =
globalConfigVars.getConfigValue("MaxAdhBondLen").toDouble();
mechPara_M.maxAdhBondLenCPU_M = maxAdhBondLen;
double minAdhBondLen =
globalConfigVars.getConfigValue("MinAdhBondLen").toDouble();
mechPara_M.minAdhBondLenCPU_M = minAdhBondLen;
}
void SceNodes::debugNAN() {
uint totalActiveNodeC = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double res = thrust::reduce(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocX.begin() + totalActiveNodeC);
if (isnan(res)) {
std::cout << "fatal error! NAN found" << std::endl;
std::cout.flush();
exit(0);
}
}
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs_M() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint offSet = allocPara_M.bdryNodeCount;
uint memThreshold = allocPara_M.maxMembrNodePerCell;
int size = bucketKeysCPU.size();
int node1, node2, cellRank1, cellRank2, nodeRank1, nodeRank2;
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
node1 = bucketValuesCPU[i];
node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
cellRank1 = (node1 - offSet) / maxNodePerCell;
nodeRank1 = (node1 - offSet) % maxNodePerCell;
cellRank2 = (node2 - offSet) / maxNodePerCell;
nodeRank2 = (node2 - offSet) % maxNodePerCell;
if (nodeRank1 >= memThreshold && nodeRank2 >= memThreshold
&& cellRank1 == cellRank2) {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
}
iterationCounter++;
}
}
return result;
}
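// Note: unlike obtainPossibleNeighborPairs(), the _M variant above only keeps
// pairs in which both nodes are internal nodes (nodeRank >= memThreshold) that
// belong to the same cell; membrane nodes and inter-cell pairs are filtered out.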
void SceNodes::initValues(std::vector<CVector>& initBdryCellNodePos,
std::vector<CVector>& initProfileNodePos,
std::vector<CVector>& initCartNodePos,
std::vector<CVector>& initECMNodePos,
std::vector<CVector>& initFNMCellNodePos,
std::vector<CVector>& initMXCellNodePos) {
uint FNMNodeCount = initFNMCellNodePos.size();
uint MXNodeCount = initMXCellNodePos.size();
uint beginAddressOfProfile = allocPara.startPosProfile;
uint beginAddressOfCart = allocPara.startPosCart;
// find the beginning position of ECM.
uint beginAddressOfECM = allocPara.startPosECM;
// find the beginning position of FNM cells.
uint beginAddressOfFNM = allocPara.startPosCells;
// find the beginning position of MX cells.
uint beginAddressOfMX = beginAddressOfFNM + FNMNodeCount;
std::vector<double> initBdryCellNodePosX = getArrayXComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosX.begin(), initBdryCellNodePosX.end(),
infoVecs.nodeLocX.begin());
std::vector<double> initBdryCellNodePosY = getArrayYComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosY.begin(), initBdryCellNodePosY.end(),
infoVecs.nodeLocY.begin());
// copy x and y position of nodes of Profile to actual node position.
std::vector<double> initProfileNodePosX = getArrayXComp(initProfileNodePos);
thrust::copy(initProfileNodePosX.begin(), initProfileNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfProfile);
std::vector<double> initProfileNodePosY = getArrayYComp(initProfileNodePos);
thrust::copy(initProfileNodePosY.begin(), initProfileNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfProfile);
// copy x and y position of nodes of Profile to actual node position.
std::vector<double> initCartNodePosX = getArrayXComp(initCartNodePos);
thrust::copy(initCartNodePosX.begin(), initCartNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfCart);
std::vector<double> initCartNodePosY = getArrayYComp(initCartNodePos);
thrust::copy(initCartNodePosY.begin(), initCartNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfCart);
// copy x and y position of nodes of ECM to actual node position.
std::vector<double> initECMNodePosX = getArrayXComp(initECMNodePos);
thrust::copy(initECMNodePosX.begin(), initECMNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfECM);
std::vector<double> initECMNodePosY = getArrayYComp(initECMNodePos);
thrust::copy(initECMNodePosY.begin(), initECMNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfECM);
for (int i = 0; i < initECMNodePosX.size(); i++) {
assert(infoVecs.nodeLocX[i + beginAddressOfECM] == initECMNodePosX[i]);
assert(!isnan(initECMNodePosX[i]));
}
// copy x and y position of nodes of FNM cells to actual node position.
std::vector<double> initFNMCellNodePosX = getArrayXComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosX.begin(), initFNMCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfFNM);
std::vector<double> initFNMCellNodePosY = getArrayYComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosY.begin(), initFNMCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfFNM);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfFNM,
infoVecs.nodeCellType.begin() + beginAddressOfMX, FNM);
// copy x and y position of nodes of MX cells to actual node position.
std::vector<double> initMXCellNodePosX = getArrayXComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosX.begin(), initMXCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfMX);
std::vector<double> initMXCellNodePosY = getArrayYComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosY.begin(), initMXCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfMX);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfMX,
infoVecs.nodeCellType.begin() + beginAddressOfMX + MXNodeCount, MX);
}
// It copies the information of node locations from CPU to GPU
void SceNodes::initValues_M(std::vector<bool>& initIsActive,
std::vector<CVector>& initCellNodePos,
std::vector<SceNodeType>& nodeTypes,
std::vector<double>& mDppV,
std::vector<MembraneType1>& mTypeV) {
std::vector<double> initCellNodePosX = getArrayXComp(initCellNodePos);
std::vector<double> initCellNodePosY = getArrayYComp(initCellNodePos);
thrust::copy(initCellNodePosX.begin(), initCellNodePosX.end(),
infoVecs.nodeLocX.begin() + allocPara_M.bdryNodeCount);
thrust::copy(initCellNodePosY.begin(), initCellNodePosY.end(),
infoVecs.nodeLocY.begin() + allocPara_M.bdryNodeCount);
thrust::copy(nodeTypes.begin(), nodeTypes.end(),
infoVecs.nodeCellType.begin() + allocPara_M.bdryNodeCount);
thrust::copy(mDppV.begin(), mDppV.end(),
infoVecs.dppLevel.begin() ); // Ali
thrust::copy(mTypeV.begin(), mTypeV.end(),
infoVecs.memNodeType1.begin() ); // Ali
thrust::copy(initIsActive.begin(), initIsActive.end(),
infoVecs.nodeIsActive.begin() + allocPara_M.bdryNodeCount);
}
VtkAnimationData SceNodes::obtainAnimationData(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs();
cout << "size of potential pairs = " << pairs.size() << endl;
// unordered_map is more efficient than map, but it is a C++11 feature
// and C++11 seems to be incompatible with Thrust.
IndexMap locIndexToAniIndexMap;
// It is not necessary to copy the entire nodeLocX array;
// copying the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<double> hostTmpVectorForceX;
thrust::host_vector<double> hostTmpVectorForceY;
thrust::host_vector<double> hostTmpVectorForceZ;
thrust::host_vector<double> hostTmpVectorVelVal;
assert(hostTmpVectorLocX.size() == hostTmpVectorLocY.size());
assert(hostTmpVectorLocY.size() == hostTmpVectorLocZ.size());
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<double> hostTmpVectorNodeStress;
if (aniCri.animationType != CellType) {
hostTmpVectorForceX = infoVecs.nodeInterForceX;
hostTmpVectorForceY = infoVecs.nodeInterForceY;
hostTmpVectorForceZ = infoVecs.nodeInterForceZ;
assert(hostTmpVectorForceX.size() == hostTmpVectorLocX.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceY.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceZ.size());
uint vecSize = hostTmpVectorForceX.size();
hostTmpVectorVelVal.resize(vecSize);
for (uint i = 0; i < vecSize; i++) {
hostTmpVectorVelVal[i] = sqrt(
hostTmpVectorForceX[i] * hostTmpVectorForceX[i]
+ hostTmpVectorForceY[i] * hostTmpVectorForceY[i]
+ hostTmpVectorForceZ[i] * hostTmpVectorForceZ[i]);
}
}
if (aniCri.animationType == Force) {
vtkData.isArrowIncluded = true;
} else {
vtkData.isArrowIncluded = false;
}
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node1Z = hostTmpVectorLocZ[node1Index];
SceNodeType node1T = hostTmpVectorNodeType[node1Index];
uint node1R = hostTmpVectorNodeRank[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
double node2Z = hostTmpVectorLocZ[node2Index];
SceNodeType node2T = hostTmpVectorNodeType[node2Index];
uint node2R = hostTmpVectorNodeRank[node2Index];
if (aniCri.isPairQualify(node1Index, node2Index, node1X, node1Y, node1Z,
node1T, node1R, node2X, node2Y, node2Z, node2T, node2R)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
if (hostTmpVectorVelVal[node1Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node1T);
}
ptAniData.pos = CVector(node1X, node1Y, node1Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
if (hostTmpVectorVelVal[node2Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node2T);
}
ptAniData.pos = CVector(node2X, node2Y, node2Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
uint profileStartIndex = allocPara.startPosProfile;
uint profileEndIndex = profileStartIndex
+ allocPara.currentActiveProfileNodeCount;
for (uint i = profileStartIndex; i < profileEndIndex; i++) {
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
if (i != profileEndIndex - 1) {
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
uint cartStartIndex = allocPara.startPosCart;
uint cartEndIndex = cartStartIndex + allocPara.maxCartNodeCount;
for (uint i = cartStartIndex; i < cartEndIndex; i++) {
bool isActive = infoVecs.nodeIsActive[i];
if (!isActive) {
continue;
}
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
bool isNextActive;
if (i == cartEndIndex - 1) {
isNextActive = false;
} else {
isNextActive = infoVecs.nodeIsActive[i + 1];
}
if (isNextActive) {
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
return vtkData;
}
// TODO
VtkAnimationData SceNodes::obtainAnimationData_M(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs_M();
cout << "size of potential pairs = " << pairs.size() << endl;
// unordered_map is more efficient than map, but it is a C++11 feature
// and C++11 seems to be incompatible with Thrust.
IndexMap locIndexToAniIndexMap;
// It is not necessary to copy the entire nodeLocX array;
// copying the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<bool> hostIsActiveVec = infoVecs.nodeIsActive;
thrust::host_vector<int> hostBondVec = infoVecs.nodeAdhereIndex;
thrust::host_vector<double> hostMembrTenMag = infoVecs.membrTensionMag;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
uint activeCellCount = allocPara_M.currentActiveCellCount;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint maxMemNodePerCell = allocPara_M.maxMembrNodePerCell;
uint beginIndx = allocPara_M.bdryNodeCount;
//uint endIndx = beginIndx + activeCellCount * maxNodePerCell;
//uint cellRank1, nodeRank1, cellRank2, nodeRank2;
uint index1;
int index2;
std::vector<BondInfo> bondInfoVec;
for (uint i = 0; i < activeCellCount; i++) {
for (uint j = 0; j < maxMemNodePerCell; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if (hostIsActiveVec[index1] == true) {
index2 = hostBondVec[index1];
if (index2 > index1 && index2 != -1) {
BondInfo bond;
bond.cellRank1 = i;
bond.pos1 = CVector(hostTmpVectorLocX[index1],
hostTmpVectorLocY[index1], 0);
bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
bond.pos2 = CVector(hostTmpVectorLocX[index2],
hostTmpVectorLocY[index2], 0);
bondInfoVec.push_back(bond);
}
}
}
}
vtkData.bondsInfo = bondInfoVec;
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node1X, node1Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node2X, node2Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
return vtkData;
}
void SceNodes::findBucketBounds() {
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
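// After the lower_bound/upper_bound calls above, auxVecs.keyBegin[b] and
// auxVecs.keyEnd[b] delimit, for every bucket b, the half-open range of the
// sorted neighbor-extended keys that fall into bucket b, so neighbor queries
// (e.g. obtainPossibleNeighborPairs) can scan
// auxVecs.bucketValuesIncludingNeighbor over [keyBegin[b], keyEnd[b]).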
void SceNodes::findBucketBounds_M() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::findBucketBounds3D() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::prepareSceForceComputation() {
buildBuckets2D();
extendBuckets2D();
findBucketBounds();
}
void SceNodes::prepareSceForceComputation_M() {
buildBuckets2D_M();
extendBuckets2D_M();
findBucketBounds_M();
}
void SceNodes::prepareSceForceComputation3D() {
buildBuckets3D();
extendBuckets3D();
findBucketBounds3D();
}
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell,
thrust::device_vector<SceNodeType> &nodeCellTypeNewCell) {
// data validation
uint nodesSize = nodeLocXNewCell.size();
assert(nodesSize % allocPara.maxNodeOfOneCell == 0);
uint addCellCount = nodesSize / allocPara.maxNodeOfOneCell;
// position that we will add newly divided cells.
uint shiftStartPosNewCell = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin(),
nodeCellTypeNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end(),
nodeCellTypeNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(),
infoVecs.nodeCellType.begin()))
+ shiftStartPosNewCell);
// total number of cells has increased.
allocPara.currentActiveCellCount = allocPara.currentActiveCellCount
+ addCellCount;
}
void SceNodes::buildBuckets2D() {
int totalActiveNodes;
if (controlPara.simuType != Disc_M) {
totalActiveNodes = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
} else {
totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
}
auxVecs.bucketKeys.resize(totalActiveNodes);
auxVecs.bucketValues.resize(totalActiveNodes);
thrust::counting_iterator<uint> countingIterBegin(0);
thrust::counting_iterator<uint> countingIterEnd(totalActiveNodes);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(), auxVecs.bucketKeys.end(),
auxVecs.bucketValues.begin());
// for nodes that are inactive, a key value of UINT_MAX will be returned.
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.end(), UINT_MAX);
auxVecs.bucketKeys.erase(auxVecs.bucketKeys.end() - numberOfOutOfRange,
auxVecs.bucketKeys.end());
auxVecs.bucketValues.erase(auxVecs.bucketValues.end() - numberOfOutOfRange,
auxVecs.bucketValues.end());
}
void SceNodes::buildBuckets2D_M() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for nodes that are inactive, a key value of UINT_MAX will be returned.
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
void SceNodes::buildBuckets3D() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
BucketIndexer3D(domainPara.minX, domainPara.maxX, domainPara.minY,
domainPara.maxY, domainPara.minZ, domainPara.maxZ,
domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for nodes that are inactive, a key value of UINT_MAX will be returned.
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
__device__
double computeDist(double &xPos, double &yPos, double &zPos, double &xPos2,
double &yPos2, double &zPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)
+ (zPos - zPos2) * (zPos - zPos2));
}
__device__
double computeDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2));
}
__device__
void calculateAndAddECMForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceECMPara[4]) {
forceValue = 0;
} else {
forceValue = -sceECMPara[0] / sceECMPara[2]
* exp(-linkLength / sceECMPara[2])
+ sceECMPara[1] / sceECMPara[3]
* exp(-linkLength / sceECMPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddProfileForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
forceValue = -sceProfilePara[5] * (linkLength - sceProfilePara[6]);
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddIntraForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
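// The intra-cell force above, like the other calculateAndAdd*Force helpers in
// this family, evaluates the two-term exponential SCE interaction
//   F(r) = -U0/k1 * exp(-r/k1) + V0/k2 * exp(-r/k2),  with F(r) = 0 beyond the effective range,
// using (U0, V0, k1, k2, range) from the corresponding __constant__ parameter
// array, and projects the result onto the unit vector pointing from
// (xPos, yPos, zPos) to (xPos2, yPos2, zPos2).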
__device__
void calAndAddIntraForceDiv(double& xPos, double& yPos, double& zPos,
double& xPos2, double& yPos2, double& zPos2, double& growPro,
double& xRes, double& yRes, double& zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
if (growPro > sceIntraParaDiv[4]) {
double intraPara0 = growPro * (sceIntraParaDiv[0])
+ (1.0 - growPro) * sceIntraPara[0];
double intraPara1 = growPro * (sceIntraParaDiv[1])
+ (1.0 - growPro) * sceIntraPara[1];
double intraPara2 = growPro * (sceIntraParaDiv[2])
+ (1.0 - growPro) * sceIntraPara[2];
double intraPara3 = growPro * (sceIntraParaDiv[3])
+ (1.0 - growPro) * sceIntraPara[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddIntraDiv_M(double& xPos, double& yPos, double& xPos2,
double& yPos2, double& growPro, double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (growPro > growthPrgrCriVal_M) {
if (linkLength > sceIntraParaDiv_M[4]) {
forceValue = 0;
} else {
double percent = (growPro - growthPrgrCriVal_M)
/ (1.0 - growthPrgrCriVal_M);
double intraPara0 = percent * (sceIntraParaDiv_M[0])
+ (1.0 - percent) * sceIntraPara_M[0];
double intraPara1 = percent * (sceIntraParaDiv_M[1])
+ (1.0 - percent) * sceIntraPara_M[1];
double intraPara2 = percent * (sceIntraParaDiv_M[2])
+ (1.0 - percent) * sceIntraPara_M[2];
double intraPara3 = percent * (sceIntraParaDiv_M[3])
+ (1.0 - percent) * sceIntraPara_M[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
}
} else {
if (linkLength > sceIntraPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara_M[0] / sceIntraPara_M[2]
* exp(-linkLength / sceIntraPara_M[2])
+ sceIntraPara_M[1] / sceIntraPara_M[3]
* exp(-linkLength / sceIntraPara_M[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
__device__
void calAndAddIntraB_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceIntnlBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntnlBPara_M[0] / sceIntnlBPara_M[2]
* exp(-linkLength / sceIntnlBPara_M[2])
+ sceIntnlBPara_M[1] / sceIntnlBPara_M[3]
* exp(-linkLength / sceIntnlBPara_M[3]);
}
//if (forceValue > 0) {
// forceValue = 0;
//}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
__device__
void calAndAddInter_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceInterBPara_M[0] / sceInterBPara_M[2]
* exp(-linkLength / sceInterBPara_M[2])
+ sceInterBPara_M[1] / sceInterBPara_M[3]
* exp(-linkLength / sceInterBPara_M[3]);
// if (forceValue > 0) { //Ali
// forceValue = 0;
// }
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali
__device__
void calAndAddInter_M2(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_Jones_M[2]) {
forceValue = 0;
} else {
forceValue = 24 * sceInterBPara_Jones_M[0] / linkLength
		* pow(sceInterBPara_Jones_M[1] / linkLength, 6)
		* (1.0 - 2 * pow(sceInterBPara_Jones_M[1] / linkLength, 6));
if (forceValue > 0) {
forceValue = 0;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
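// calAndAddInter_M2 above implements a truncated Lennard-Jones interaction,
//   F(r) = 24 * eps / r * (sig/r)^6 * (1 - 2 * (sig/r)^6),
// with eps = sceInterBPara_Jones_M[0], sig = sceInterBPara_Jones_M[1] and a
// cutoff at sceInterBPara_Jones_M[2]. Positive values (the attractive branch,
// r > 2^(1/6) * sig) are clamped to zero, so only the repulsive core acts here.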
//Ali
__device__
void calculateAndAddInterForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddInterForceDisc(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes, double& interForceX, double& interForceY,
double& interForceZ) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
double fX = forceValue * (xPos2 - xPos) / linkLength;
double fY = forceValue * (yPos2 - yPos) / linkLength;
double fZ = forceValue * (zPos2 - zPos) / linkLength;
xRes = xRes + fX;
yRes = yRes + fY;
zRes = zRes + fZ;
interForceX = interForceX + fX;
interForceY = interForceY + fY;
interForceZ = interForceZ + fZ;
}
__device__
void calculateAndAddCartForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceCartPara[4]) {
forceValue = 0;
} else {
forceValue = -sceCartPara[0] / sceCartPara[2]
* exp(-linkLength / sceCartPara[2])
+ sceCartPara[1] / sceCartPara[3]
* exp(-linkLength / sceCartPara[3]);
if (linkLength > 1.0e-12) {
//double dotProduct = (xPos2 - xPos) / linkLength * cartGrowDirVec[0]
// + (yPos2 - yPos) / linkLength * cartGrowDirVec[1]
// + (zPos2 - zPos) / linkLength * cartGrowDirVec[2];
//forceValue = forceValue * dotProduct;
// this is just a temporary solution; the direction should not be fixed.
xRes = xRes - forceValue * cartGrowDirVec[0];
yRes = yRes - forceValue * cartGrowDirVec[1];
zRes = zRes - forceValue * cartGrowDirVec[2];
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
if (forceValue > 0) {
//forceValue = forceValue * 0.01;
forceValue = 0;
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
}
}
__device__
void calculateAndAddDiffInterCellForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterDiffPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterDiffPara[0] / sceInterDiffPara[2]
* exp(-linkLength / sceInterDiffPara[2])
+ sceInterDiffPara[1] / sceInterDiffPara[3]
* exp(-linkLength / sceInterDiffPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.2;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddInterForceDiffType(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__ bool bothNodesCellNode(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint cellNodesThreshold) {
if (nodeGlobalRank1 < cellNodesThreshold
&& nodeGlobalRank2 < cellNodesThreshold) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos
|| nodeGlobalRank2 < cellNodeBeginPos) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos) / nodeCountPerCell
== (nodeGlobalRank2 - cellNodeBeginPos) / nodeCountPerCell) {
return true;
} else {
return false;
}
}
//Ali
__device__
bool Is_Lennard_Jones() {
if (sceInterBPara_Jones_On_M==1) {
return true ;
}
else {
return false ;
}
}
__device__
bool isSameCell_m(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos_M) / allNodeCountPerCell_M
== (nodeGlobalRank2 - cellNodeBeginPos_M) / allNodeCountPerCell_M) {
return true;
} else {
return false;
}
}
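// Illustrative example of the index arithmetic used in isSameCell_m and the
// helpers below (the numbers are assumptions, not the configured values): with
// cellNodeBeginPos_M = 0, allNodeCountPerCell_M = 280 and membrThreshold_M = 200,
// global node 565 belongs to cell 565/280 = 2 with in-cell rank 565%280 = 5,
// i.e. a membrane node; global node 845 maps to cell 3 with in-cell rank 5,
// so the two nodes are membrane nodes of different cells.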
__device__
bool bothInternal(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 >= membrThreshold_M && nodeRank2 >= membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembr(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembrDiffCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 == cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
//AAMIRI
/*
__device__
bool isNodeOnMembrane(uint nodeGlobalRank) {
uint nodeRank = (nodeGlobalRank - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeGlobalRank >= cellNodeBeginPos_M && nodeRank < membrThreshold_M){
return true;
} else{
return false;
}
}
*/
__device__
bool sameCellMemIntnl(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 != cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if ((nodeRank1 < membrThreshold_M && nodeRank2 >= membrThreshold_M)
|| (nodeRank2 < membrThreshold_M && nodeRank1 >= membrThreshold_M)) {
return true;
} else {
return false;
}
}
__device__ bool isSameECM(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
return true;
} else {
return false;
}
}
__device__ bool isNeighborECMNodes(uint nodeGlobalRank1, uint nodeGlobalRank2) {
// this means that two nodes are from the same ECM
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
		// this means that the two nodes are adjacent to each other;
		// the comparison is split into two branches to avoid unsigned integer underflow.
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
}
return false;
}
__device__ bool isNeighborProfileNodes(uint nodeGlobalRank1,
uint nodeGlobalRank2) {
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
return false;
}
__device__ bool ofSameType(uint cellType1, uint cellType2) {
if (cellType1 == cellType2) {
return true;
} else {
return false;
}
}
__device__ bool bothCellNodes(SceNodeType &type1, SceNodeType &type2) {
if ((type1 == MX || type1 == FNM) && (type2 == MX || type2 == FNM)) {
return true;
} else {
return false;
}
}
__device__
void attemptToAdhere(bool& isSuccess, uint& index, double& dist,
uint& nodeRank2, double& xPos1, double& yPos1, double& xPos2,
double& yPos2) {
double length = computeDist2D(xPos1, yPos1, xPos2, yPos2);
if (length <= bondAdhCriLen_M) {
if (isSuccess) {
if (length < dist) {
dist = length;
index = nodeRank2;
}
} else {
isSuccess = true;
index = nodeRank2;
dist = length;
}
}
}
__device__
void handleAdhesionForce_M(int& adhereIndex, double& xPos, double& yPos,
double& curAdherePosX, double& curAdherePosY, double& xRes,
double& yRes, double& alpha, double & beta) {
double curLen = computeDist2D(xPos, yPos, curAdherePosX, curAdherePosY);
//if (curLen > maxAdhBondLen_M) {
// adhereIndex = -1;
// return;
// } else {
if (curLen > minAdhBondLen_M) {
double forceValue = beta*(curLen - minAdhBondLen_M) * (bondStiff_M * alpha + bondStiff_Mitotic * (1.0-alpha) );
xRes = xRes + forceValue * (curAdherePosX - xPos) / curLen;
yRes = yRes + forceValue * (curAdherePosY - yPos) / curLen;
}
// }
}
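// Note on the force above: the adhesion bond is a linear spring with rest length
// minAdhBondLen_M and an effective stiffness interpolated between bondStiff_M
// (alpha = 1) and bondStiff_Mitotic (alpha = 0),
//   F = beta * (curLen - minAdhBondLen_M) * ( bondStiff_M*alpha + bondStiff_Mitotic*(1-alpha) ),
// applied along the unit vector pointing toward the adhesion partner; alpha is
// presumably the coefficient returned by getMitoticAdhCoef below.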
//Ali for reaction force
__device__
void handleAdhesionForce_M2(double& xPos, double& yPos,
double& curAdherePosX, double& curAdherePosY, double& xRes,
double& yRes, double& alpha) {
double curLen = computeDist2D(xPos, yPos, curAdherePosX, curAdherePosY);
if (curLen > minAdhBondLen_M ) {
double forceValue = (curLen - minAdhBondLen_M) * (bondStiff_M * alpha + bondStiff_Mitotic * (1.0-alpha) );
xRes = forceValue * (curAdherePosX - xPos) / curLen;
yRes = forceValue * (curAdherePosY - yPos) / curLen;
}
else {
xRes=0 ;
yRes=0 ;
}
}
//Ali June 16
__device__
double getMitoticAdhCoef(double& growProg, double& growProgNeigh){
double alpha = 1.0;
if (growProg > growthPrgrCriVal_M && growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - ( 0.5*(growProg+growProgNeigh)-growthPrgrCriVal_M )/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProg > growthPrgrCriVal_M){
alpha = 1.0 - (growProg-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - (growProgNeigh-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
return alpha;
}
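// Worked example (the numbers are assumptions for illustration only): with
// growthPrgrCriVal_M = 0.9, growProg = 0.95 and growProgNeigh = 0.92 both exceed
// the threshold, so
//   alpha = 1 - ( 0.5*(0.95+0.92) - 0.9 ) / (1 - 0.9) = 1 - 0.35 = 0.65,
// i.e. the effective adhesion stiffness is weighted 65% toward bondStiff_M and
// 35% toward bondStiff_Mitotic.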
__device__
void calculateForceBetweenLinkNodes(double &xLoc, double &yLoc, double &zLoc,
double &xLocLeft, double &yLocLeft, double &zLocLeft, double &xLocRight,
double &yLocRight, double &zLocRight, double &xVel, double &yVel,
double &zVel) {
double linkLengthLeft = computeDist(xLoc, yLoc, zLoc, xLocLeft, yLocLeft,
zLocLeft);
double forceValueLeft = sceProfilePara[5]
* (linkLengthLeft - sceProfilePara[6]);
xVel = xVel + forceValueLeft * (xLocLeft - xLoc) / linkLengthLeft;
yVel = yVel + forceValueLeft * (yLocLeft - yLoc) / linkLengthLeft;
zVel = zVel + forceValueLeft * (zLocLeft - zLoc) / linkLengthLeft;
double linkLengthRight = computeDist(xLoc, yLoc, zLoc, xLocRight, yLocRight,
zLocRight);
double forceValueRight = sceProfilePara[5]
* (linkLengthRight - sceProfilePara[6]);
xVel = xVel + forceValueRight * (xLocRight - xLoc) / linkLengthRight;
yVel = yVel + forceValueRight * (yLocRight - yLoc) / linkLengthRight;
zVel = zVel + forceValueRight * (zLocRight - zLoc) / linkLengthRight;
}
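// Note: both contributions above are simple linear springs,
//   F = k * (L - L0), with k = sceProfilePara[5] and rest length L0 = sceProfilePara[6],
// applied toward the left and right neighboring link nodes respectively.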
__device__
void handleSceForceNodesBasic(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress) {
if (isSameCell(nodeRank1, nodeRank2)) {
calculateAndAddIntraForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
} else {
calculateAndAddInterForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
}
}
__device__
void handleSceForceNodesDisc(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double& interForceX,
double& interForceY, double& interForceZ, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress,
double* _nodeGrowProAddr) {
if (isSameCell(nodeRank1, nodeRank2)) {
calAndAddIntraForceDiv(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2],
_nodeGrowProAddr[nodeRank2], xRes, yRes, zRes);
} else {
calAndAddInterForceDisc(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes, interForceX, interForceY, interForceZ);
}
}
__device__
void handleSceForceNodesDisc_M(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes,
double* _nodeLocXAddress, double* _nodeLocYAddress,
double* _nodeGrowProAddr) {
if (isSameCell_m(nodeRank1, nodeRank2)) {
if (bothInternal(nodeRank1, nodeRank2)) {
// both nodes are internal type.
calAndAddIntraDiv_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeGrowProAddr[nodeRank2],
xRes, yRes);
} else if (bothMembr(nodeRank1, nodeRank2)) {
			// both nodes are epithelium (membrane) type; no SCE force applied.
			// nothing to do here.
		} else {
			// one node is epithelium type and the other is internal type.
calAndAddIntraB_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
} else {
if (bothMembr(nodeRank1, nodeRank2)) {
calAndAddInter_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
}
}
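// Summary of the dispatch above (as read from the branches, not a specification):
//   same cell,  both internal        -> calAndAddIntraDiv_M (growth-dependent intra force)
//   same cell,  both membrane        -> no SCE force applied here
//   same cell,  membrane + internal  -> calAndAddIntraB_M
//   diff cells, both membrane        -> calAndAddInter_M
//   diff cells, any internal node    -> no force from this routine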
void SceNodes::extendBuckets2D() {
static const uint extensionFactor2D = 9;
uint valuesCount = auxVecs.bucketValues.size();
auxVecs.bucketKeysExpanded.resize(valuesCount * extensionFactor2D);
auxVecs.bucketValuesIncludingNeighbor.resize(
valuesCount * extensionFactor2D);
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(extensionFactor2D);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not value.
	 * e.g. if the movement is 5 and the iterator is initialized with 9,
	 * the resulting array is [9,9,9,9,9];
*/
thrust::constant_iterator<uint> last = first + valuesCount;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd = countingBegin
+ valuesCount * extensionFactor2D;
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.end(), countingEnd)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), UINT_MAX);
int sizeBeforeShrink = auxVecs.bucketKeysExpanded.size();
int numberInsideRange = sizeBeforeShrink - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(),
auxVecs.bucketValuesIncludingNeighbor.begin());
auxVecs.bucketKeysExpanded.erase(
auxVecs.bucketKeysExpanded.begin() + numberInsideRange,
auxVecs.bucketKeysExpanded.end());
auxVecs.bucketValuesIncludingNeighbor.erase(
auxVecs.bucketValuesIncludingNeighbor.begin() + numberInsideRange,
auxVecs.bucketValuesIncludingNeighbor.end());
}
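// Sketch of what the expansion above achieves (descriptive only): every
// (bucketKey, nodeIndex) pair is replicated 9 times, and NeighborFunctor2D
// presumably rewrites each replica's key to one of the 3x3 neighboring buckets,
// so a node in bucket (bx, by) ends up listed under (bx-1..bx+1, by-1..by+1).
// Replicas falling outside the grid are marked UINT_MAX, sorted to the end, and
// erased.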
void SceNodes::extendBuckets2D_M() {
endIndxExt_M = endIndx_M * 9;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(9);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not value.
	 * e.g. if the movement is 5 and the iterator is initialized with 9,
	 * the resulting array is [9,9,9,9,9];
*/
thrust::constant_iterator<uint> last = first + endIndx_M;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
void SceNodes::extendBuckets3D() {
endIndxExt_M = endIndx_M * 27;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(27);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not value.
	 * e.g. if the movement is 5 and the iterator is initialized with 27,
	 * the resulting array is [27,27,27,27,27];
*/
thrust::constant_iterator<uint> last = first + endIndx_M; // this is NOT numerical addition!
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NgbrFunc3D(domainPara.XBucketSize, domainPara.YBucketSize,
domainPara.ZBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
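// The function above is the 3D counterpart of the expansion: 27 = 3*3*3 replicas
// per node, so that each node is listed under every bucket of its 3x3x3
// neighborhood, with out-of-range keys again marked UINT_MAX and skipped.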
void SceNodes::applySceForcesBasic() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceBasic(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress));
}
void SceNodes::applySceForcesDisc() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceDisc(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress, nodeGrowProAddr));
}
void SceNodes::applySceForcesDisc_M() {
ECellType eCellTypeTmp ;
thrust::host_vector <ECellType> eCellTypeVHost ;
eCellTypeVHost.resize(allocPara_M.currentActiveCellCount, notActive) ;
//for (int i= 0 ; i<allocPara_M.currentActiveCellCount; i++) {
// eCellTypeTmp=cellsSceNodes->getCellInfoVecs().eCellTypeV2[i];
// cout << "Epithelial cell type is="<<eCellTypeTmp <<endl ;
//eCellTypeVHost.push_back(eCellTypeTmp) ;
// }
if (adhUpdate) {
adhUpdate=false ;
int maxNumAdh=180 ;
//vector <ECellType> eCellTypeV2Host ;
thrust :: copy (infoVecs.nodeLocX.begin(),infoVecs.nodeLocX.end(),infoVecs.nodeLocXHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeLocY.begin(),infoVecs.nodeLocY.end(),infoVecs.nodeLocYHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeIsActive.begin(),infoVecs.nodeIsActive.end(),infoVecs.nodeIsActiveHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeCellRankFront.begin() ,infoVecs.nodeCellRankFront.end() ,infoVecs.nodeCellRankFrontHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeCellRankBehind.begin(),infoVecs.nodeCellRankBehind.end(),infoVecs.nodeCellRankBehindHost.begin()) ; // Ali
thrust :: copy (infoVecs.memNodeType1.begin(),infoVecs.memNodeType1.end(),infoVecs.memNodeType1Host.begin()) ; // Ali
cout << " I am right before cell type vector" << endl ;
thrust :: copy (cellsSceNodes->getCellInfoVecs().eCellTypeV2.begin(),cellsSceNodes->getCellInfoVecs().eCellTypeV2.begin()+allocPara_M.currentActiveCellCount, eCellTypeVHost.begin()) ;
cout << " I am right after cell type vector" << endl ;
thrust::fill(infoVecs.nodeAdhereIndexHost.begin(),infoVecs.nodeAdhereIndexHost.end(), -1) ; //Ali it is important to reset the values
thrust::fill(infoVecs.nodeMemMirrorIndexHost.begin(),infoVecs.nodeMemMirrorIndexHost.end(), -1) ; //Ali it is important to reset the values
//thrust::fill(infoVecs.nodeIsLateralMemHost.begin(),infoVecs.nodeIsLateralMemHost.end(), false) ; //Ali
thrust::fill(infoVecs.nodeAdhMinDist.begin(),infoVecs.nodeAdhMinDist.end(), 10000) ; //Ali
int totalActiveNodes = allocPara_M.currentActiveCellCount* allocPara_M.maxAllNodePerCell; // Ali
int maxMembNode= allocPara_M.maxMembrNodePerCell ;
int maxNodePerCell= allocPara_M.maxAllNodePerCell ;
double distMinP2,distP2 ;
int indexAdhNode ;
bool findAnyNode ;
double maxAdhLen= mechPara_M.bondAdhCriLenCPU_M;
int cellRankTmp1, cellRankTmp2 ;
int deactiveIdMyPair, deactiveIdAdhPair ;
int activeMemCount [ allocPara_M.currentActiveCellCount] ;
int firstApiLat [ allocPara_M.currentActiveCellCount] ;
int secondApiLat [ allocPara_M.currentActiveCellCount] ;
int cellRank, iNext, jJunction ;
std::vector <SubApicalInfoEachCell> subApicalInfo ;
cout << "I am inside the function for finding adhesion pair" << endl ;
//setup required basic parameters
for (int i=0 ; i< allocPara_M.currentActiveCellCount ; i++ ){
activeMemCount[i] = 0 ;
}
for (int i=0 ; i<totalActiveNodes ; i++) {
infoVecs.isSubApicalJunctionHost[i]=false ;
}
for (int i=0 ; i<totalActiveNodes ; i++) {
if (infoVecs.nodeIsActiveHost[i]==true && (i%maxNodePerCell)<maxMembNode){
cellRank=i/maxNodePerCell ;
activeMemCount [cellRank]=activeMemCount [cellRank]+1 ;
}
}
subApicalInfo.clear() ;
//Find the subapical nodes in front of the cell
int cellRankOld=-1 ;
for (int i=0 ; i<totalActiveNodes ; i++) {
if (infoVecs.nodeIsActiveHost[i]==true && (i%maxNodePerCell)<maxMembNode){ // check active and membrane
cellRank=i/maxNodePerCell ;
eCellTypeTmp=eCellTypeVHost[cellRank];
iNext=i+1 ;
if ( (i%maxNodePerCell)==(activeMemCount[cellRank]-1)) { // if the node is the last node of cell's membrane
iNext=iNext-activeMemCount [cellRank] ;
}
if ( infoVecs.memNodeType1Host[i]==lateralA && infoVecs.memNodeType1Host[iNext]==apical1 ) { // find the apical junction
firstApiLat[cellRank]=i ; // lateral node
for (int j=0 ; j<NumAdhAfter(cellRank,eCellTypeTmp) ; j++) { //find junction nodes //
jJunction=firstApiLat[cellRank]-j ;
if (jJunction <(cellRank*maxNodePerCell)) {
jJunction=jJunction + activeMemCount [cellRank] ;
//cout << " The subApicalNodes of cell rank " << cellRank << " passed the first node ID" << endl ;
}
infoVecs.isSubApicalJunctionHost[jJunction]=true ;
if (cellRank !=cellRankOld) {
cout << " for cell rank= " << cellRank << " subapicalInfo has been created." << endl ;
SubApicalInfoEachCell subApicalInfoEachCell(maxNumAdh);
subApicalInfo.push_back(subApicalInfoEachCell);
cellRankOld=cellRank ;
}
subApicalInfo[cellRank].nodeIdFront[j]=jJunction ;
}
}
}
}
cout << "first set of adhesion joints are found" << endl ;
//Find the subapical nodes supposingly behind (Before) the cell
for (int i=0 ; i<totalActiveNodes ; i++) {
if (infoVecs.nodeIsActiveHost[i]==true && (i%maxNodePerCell)<maxMembNode){
cellRank=i/maxNodePerCell ;
//eCellType= eCellTypeV2Host[cellRank];
eCellTypeTmp= eCellTypeVHost [cellRank];
iNext=i+1 ;
if ( (i%maxNodePerCell)==(activeMemCount [cellRank]-1)) {
iNext=iNext-activeMemCount [cellRank] ;
}
if (infoVecs.memNodeType1Host[i]==apical1 && ( infoVecs.memNodeType1Host[iNext]==lateralB ) ) {
secondApiLat[cellRank]=iNext ;
for (int j=0 ; j<NumAdhBefore(cellRank,eCellTypeTmp) ; j++) { //find junction nodes
jJunction=secondApiLat[cellRank]+j ;
if (jJunction>=(cellRank*maxNodePerCell+activeMemCount [cellRank]) ) {
jJunction=jJunction - activeMemCount [cellRank];
//cout << " The subApicalNodes of cell rank " << cellRank << " passed the last node ID" << endl ;
}
infoVecs.isSubApicalJunctionHost[jJunction]=true ;
subApicalInfo[cellRank].nodeIdBehind[j]=jJunction ; // the vector of structures for active cells has already been generated.
}
}
}
}
cout << "Second set of adhesion joints are found" << endl ;
//for (int i=0 ; i<totalActiveNodes ; i++) {
// if (infoVecs.isSubApicalJunctionHost[i]) {
// cout << "for cell with rank " <<int(i/maxNodePerCell) << "node rank of subApical junction is " << i << endl ;
// }
// }
cout << " size of vector storing information of apical junctions is " << subApicalInfo.size() << endl ;
if (subApicalInfo.size() != 0 ) { // to pass the first time step in which the membrane node type is not defined.
for ( int i= 0 ; i<allocPara_M.currentActiveCellCount ; i++) {
for ( int j=0 ; j<maxNumAdh ; j++) {
int idFront=subApicalInfo[i].nodeIdFront[j] ;
int idBehind=subApicalInfo[i].nodeIdBehind[j] ;
int cellRankFront=infoVecs.nodeCellRankFrontHost[i] ;
int cellRankBehind=infoVecs.nodeCellRankBehindHost[i] ;
if (idFront != -1) {
infoVecs.nodeAdhereIndexHost[idFront]=subApicalInfo[cellRankFront].nodeIdBehind[j] ;
}
if (idBehind !=-1) {
infoVecs.nodeAdhereIndexHost[idBehind]=subApicalInfo[cellRankBehind].nodeIdFront[j] ;
}
if ( eCellTypeVHost[i]==pouch && NumAdhBefore(i,pouch)==NumAdhAfter(i,pouch) ) {
infoVecs.nodeMemMirrorIndexHost[idFront]=idBehind ;
infoVecs.nodeMemMirrorIndexHost[idBehind]=idFront ;
};
}
}
/////////////////////////////////// start adhesion for apical nodes of pouch cells with apical nodes of peripodial cells ///////////////////////
for (int i=0 ; i<totalActiveNodes ; i++) {
cellRankTmp1=i/maxNodePerCell ;
distMinP2=10000 ; // large number
findAnyNode=false ;
if (eCellTypeVHost[cellRankTmp1]==pouch && infoVecs.memNodeType1Host[i]==apical1) {
for (int j=0 ; j<totalActiveNodes ; j++) {
cellRankTmp2=j/maxNodePerCell ;
// if ( cellRankTmp2>=74 && cellRankTmp2<=76 && infoVecs.memNodeType1Host[i]==apical1) {
if (eCellTypeVHost[cellRankTmp2]==peri && infoVecs.memNodeType1Host[j]==apical1 ) {
distP2=pow( infoVecs.nodeLocXHost[i]-infoVecs.nodeLocXHost[j],2)+
pow( infoVecs.nodeLocYHost[i]-infoVecs.nodeLocYHost[j],2) ;
if (distP2<distMinP2 && distP2<maxAdhLen*maxAdhLen) {
cout << " I am inside a function where there is one apical pouch and one apical perip node and it is min" << endl ;
distMinP2=distP2 ;
indexAdhNode=j ;
findAnyNode=true ;
}
}
}
}
if ( findAnyNode && sqrt(distMinP2)<infoVecs.nodeAdhMinDist[indexAdhNode]){
cout << " I am inside apical adhesion" << endl ;
deactiveIdAdhPair=infoVecs.nodeAdhereIndexHost[indexAdhNode] ;
if (deactiveIdAdhPair != -1){
cout << " I am inside deactiving one perip adhesion" << endl ;
infoVecs.nodeAdhereIndexHost[deactiveIdAdhPair]=-1 ;
infoVecs.nodeAdhMinDist[deactiveIdAdhPair]=10000 ;
}
infoVecs.nodeAdhereIndexHost[i]=indexAdhNode ;
infoVecs.nodeAdhereIndexHost[indexAdhNode]=i ;
infoVecs.nodeAdhMinDist[indexAdhNode]=sqrt(distMinP2) ;
infoVecs.nodeAdhMinDist[i]=sqrt(distMinP2) ;
}
}
cout << " I am ready to copy the data in adhesion function to the GPU " << endl ;
/////////////////////////////////// start adhesion for apical nodes of pouch cells with apical nodes of peripodial cells ///////////////////////
} // finish if of bypassing the first time
// copy back to GPU
thrust::copy(infoVecs.nodeAdhereIndexHost.begin(),infoVecs.nodeAdhereIndexHost.end(), infoVecs.nodeAdhereIndex.begin()) ; //Ali
thrust::copy(infoVecs.nodeMemMirrorIndexHost.begin(),infoVecs.nodeMemMirrorIndexHost.end(), infoVecs.nodeMemMirrorIndex.begin()) ; //Ali
thrust::copy(infoVecs.isSubApicalJunctionHost.begin(),infoVecs.isSubApicalJunctionHost.end(), infoVecs.isSubApicalJunction.begin()) ; //Ali
} // finish the if condition for finding the pair node
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
int* membrIntnlAddress = thrust::raw_pointer_cast(
&infoVecs.membrIntnlIndex[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin() + endIndx_M),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin() + endIndx_M))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()))),
AddForceDisc_M(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeAdhIdxAddress, membrIntnlAddress, nodeGrowProAddr,adhNotSet));
}
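// Recap of the host-side adhesion pairing performed above (descriptive only):
// (1) count the active membrane nodes of each cell; (2) walk each membrane to
// locate the lateral/apical junctions and tag up to maxNumAdh sub-apical nodes on
// the front and behind sides; (3) pair the tagged nodes of neighboring cells via
// nodeCellRankFront/Behind; (4) for apical pouch nodes, run a brute-force O(N^2)
// nearest-neighbor search over apical peripodial nodes within bondAdhCriLen and
// keep (roughly) mutually closest pairs; finally the index arrays are copied back
// to the GPU and the SCE force transform is launched.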
const SceDomainPara& SceNodes::getDomainPara() const {
return domainPara;
}
void SceNodes::setDomainPara(const SceDomainPara& domainPara) {
this->domainPara = domainPara;
}
const NodeAllocPara& SceNodes::getAllocPara() const {
return allocPara;
}
void SceNodes::setAllocPara(const NodeAllocPara& allocPara) {
this->allocPara = allocPara;
}
const NodeAuxVecs& SceNodes::getAuxVecs() const {
return auxVecs;
}
void SceNodes::setAuxVecs(const NodeAuxVecs& auxVecs) {
this->auxVecs = auxVecs;
}
NodeInfoVecs& SceNodes::getInfoVecs() {
return infoVecs;
}
std::vector<std::vector<int> > SceNodes::obtainLabelMatrix(
PixelizePara& pixelPara) {
std::vector<std::vector<int> > result;
std::vector<NodeWithLabel> nodeLabels;
ResAnalysisHelper resHelper;
resHelper.setPixelPara(pixelPara);
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<uint> hostTmpVectorIsActive = infoVecs.nodeIsActive;
uint startIndex = allocPara.startPosCells;
uint endIndex = startIndex
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
for (uint i = startIndex; i < endIndex; i++) {
if (hostTmpVectorIsActive[i] == true) {
NodeWithLabel nodeLabel;
nodeLabel.cellRank = hostTmpVectorNodeRank[i];
nodeLabel.position = CVector(hostTmpVectorLocX[i],
hostTmpVectorLocY[i], hostTmpVectorLocZ[i]);
nodeLabels.push_back(nodeLabel);
}
}
result = resHelper.outputLabelMatrix(nodeLabels);
return result;
}
void SceNodes::initControlPara(bool isStab) {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
controlPara.controlSwitchs.outputBmpImg = globalConfigVars.getSwitchState(
"Switch_OutputBMP");
controlPara.controlSwitchs.outputLabelMatrix =
globalConfigVars.getSwitchState("Switch_OutputLabelMatrix");
controlPara.controlSwitchs.outputStat = globalConfigVars.getSwitchState(
"Switch_OutputStat");
controlPara.controlSwitchs.outputVtkFile = globalConfigVars.getSwitchState(
"Switch_OutputVtk");
if (isStab) {
controlPara.controlSwitchs.stab = ON;
} else {
controlPara.controlSwitchs.stab = OFF;
}
}
void SceNodes::sceForcesPerfTesting() {
prepareSceForceComputation();
applySceForcesBasic();
}
void SceNodes::sceForcesPerfTesting_M() {
prepareSceForceComputation_M();
applySceForcesBasic_M();
}
void SceNodes::applySceForcesBasic_M() {
}
void SceNodes::sceForcesDisc() {
prepareSceForceComputation();
applySceForcesDisc();
}
void SceNodes::sceForcesDisc_M() {
#ifdef DebugMode
hipEvent_t start1, start2, start3, stop;
float elapsedTime1, elapsedTime2, elapsedTime3;
hipEventCreate(&start1);
hipEventCreate(&start2);
hipEventCreate(&start3);
hipEventCreate(&stop);
hipEventRecord(start1, 0);
#endif
cout << " confirm --- 1 ---" << endl;
cout.flush();
	prepareSceForceComputation_M(); // build the buckets used to speed up the neighbor search
#ifdef DebugMode
hipEventRecord(start2, 0);
hipEventSynchronize(start2);
hipEventElapsedTime(&elapsedTime1, start1, start2);
#endif
cout << " --- 2 ---" << endl;
cout.flush();
	applySceForcesDisc_M(); // compute the MMD forces and also find the nearest neighbor for applying the adhesion
#ifdef DebugMode
hipEventRecord(start3, 0);
hipEventSynchronize(start3);
hipEventElapsedTime(&elapsedTime2, start2, start3);
#endif
cout << " --- 3 ---" << endl;
cout.flush();
processMembrAdh_M(); //applying the adhesion force
cout << " --- 4 ---" << endl;
cout.flush();
copyExtForces_M();//AAMIRI
#ifdef DebugMode
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime3, start3, stop);
std::cout << "time spent in Node logic: " << elapsedTime1 << " "
<< elapsedTime2 << " " << elapsedTime3 << std::endl;
#endif
}
double SceNodes::getMaxEffectiveRange() {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
SimulationType type = parseTypeFromConfig(simuTypeConfigValue);
if (type != Disc_M) {
double interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
double maxEffectiveRange = interLinkEffectiveRange;
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
if (intraLinkEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = intraLinkEffectiveRange;
}
double cartEffectiveRange = 0;
		// the cartilage effective range does not apply to other simulation types, so the config value may be missing (hence the try/catch below).
try {
cartEffectiveRange = globalConfigVars.getConfigValue(
"CartForceEffectiveRange").toDouble();
} catch (SceException &exce) {
}
if (cartEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = cartEffectiveRange;
}
return maxEffectiveRange;
} else {
double membrMembrEffRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
double membrIntnlEffRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
double intnlIntnlEffRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
double intnlDivEffRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
double maxEffRange = 0;
std::vector<double> ranges;
ranges.push_back(membrMembrEffRange);
		// all of these are currently commented out and excluded from the maximum:
//ranges.push_back(membrIntnlEffRange);
//ranges.push_back(intnlIntnlEffRange);
//ranges.push_back(intnlDivEffRange);
maxEffRange = *std::max_element(ranges.begin(), ranges.end());
return maxEffRange;
}
}
void SceNodes::setInfoVecs(const NodeInfoVecs& infoVecs) {
this->infoVecs = infoVecs;
}
void SceNodes::allocSpaceForNodes(uint maxTotalNodeCount,uint maxNumCells, uint currentActiveCellCount) {
cout << " inside function allocSpaceForNodes current active cells are " << currentActiveCellCount << endl ;
cout << " inside function allocSpaceForNodes max number of cells is " << maxNumCells << endl ;
infoVecs.nodeLocX.resize(maxTotalNodeCount);
infoVecs.nodeLocXHost.resize(maxTotalNodeCount); //Ali
infoVecs.nodeLocY.resize(maxTotalNodeCount);
infoVecs.nodeLocYHost.resize(maxTotalNodeCount); // Ali
infoVecs.nodeLocZ.resize(maxTotalNodeCount);
infoVecs.nodeVelX.resize(maxTotalNodeCount);
infoVecs.nodeVelY.resize(maxTotalNodeCount);
infoVecs.nodeVelZ.resize(maxTotalNodeCount);
cout << " I am here 0 " << maxNumCells << endl ;
//infoVecs.nodeContractLevel.resize(maxTotalNodeCount,0.0);// Ali
infoVecs.nodeF_MM_C_X.resize(maxTotalNodeCount,0.0);// Ali
infoVecs.nodeF_MM_C_Y.resize(maxTotalNodeCount,0.0);// Ali
infoVecs.nodeContractEnergyT.resize(maxTotalNodeCount,0.0);// Ali
cout << " I am here 1 " << maxNumCells << endl ;
infoVecs.nodeF_MI_M_x.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_y.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_T.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_N.resize(maxTotalNodeCount,0.0); //Ali
infoVecs.nodeVelTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeVelNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeCurvature.resize(maxTotalNodeCount, 0.0);//AAMIRI
infoVecs.nodeActinLevel.resize(maxTotalNodeCount, 0.0);//Ali
infoVecs.nodeExtForceX.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceY.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeMaxForce.resize(maxTotalNodeCount);
//infoVecs.nodeIsBasalMem.resize(maxTotalNodeCount,false); //Ali
//infoVecs.nodeIsLateralMem.resize(maxTotalNodeCount,false); //Ali
infoVecs.nodeIsApicalMem.resize(maxTotalNodeCount,0); //Ali
infoVecs.nodeIsBasalMem.resize(maxTotalNodeCount,0); //Ali
//infoVecs.nodeIsLateralMemHost.resize(maxTotalNodeCount,false); //Ali
infoVecs.nodeCellType.resize(maxTotalNodeCount);
infoVecs.nodeCellRank.resize(maxTotalNodeCount);
infoVecs.nodeIsActive.resize(maxTotalNodeCount);
infoVecs.nodeIsActiveHost.resize(maxTotalNodeCount); // Ali
infoVecs.nodeAdhMinDist.resize(maxTotalNodeCount); // Ali
infoVecs.nodeCellRankFront.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankBehind.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankFrontOld.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankBehindOld.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankFrontHost.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankBehindHost.resize(maxNumCells,-1); // Ali
if (controlPara.simuType == Disc
|| controlPara.simuType == SingleCellTest) {
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.nodeInterForceX.resize(maxTotalNodeCount);
infoVecs.nodeInterForceY.resize(maxTotalNodeCount);
infoVecs.nodeInterForceZ.resize(maxTotalNodeCount);
}
if (controlPara.simuType == Disc_M) {
infoVecs.nodeAdhereIndex.resize(maxTotalNodeCount);
infoVecs.nodeMemMirrorIndex.resize(maxTotalNodeCount); //Ali
infoVecs.nodeAdhIndxHostCopy.resize(maxTotalNodeCount);
infoVecs.nodeAdhereIndexHost.resize(maxTotalNodeCount); //Ali
infoVecs.nodeMemMirrorIndexHost.resize(maxTotalNodeCount); //Ali
infoVecs.membrIntnlIndex.resize(maxTotalNodeCount);
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.membrTensionMag.resize(maxTotalNodeCount, 0);
infoVecs.membrTenMagRi.resize(maxTotalNodeCount, 0);
infoVecs.membrDistToRi.resize(maxTotalNodeCount, 0);//AAMIRI
infoVecs.membrLinkRiMidX.resize(maxTotalNodeCount, 0);
infoVecs.membrLinkRiMidY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendLeftX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendSpringEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.membrLinSpringEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.nodeIIEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.nodeIMEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.lagrangeFX.resize(maxTotalNodeCount, 0.0);
infoVecs.lagrangeFY.resize(maxTotalNodeCount, 0.0);
infoVecs.lagrangeFN.resize(maxTotalNodeCount, 0.0);
infoVecs.membrBendLeftY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightY.resize(maxTotalNodeCount, 0);
infoVecs.dppLevel.resize(maxTotalNodeCount, 0.0); //Ali
infoVecs.memNodeType1.resize(maxTotalNodeCount, notAssigned1); //Ali
infoVecs.memNodeType1Host.resize(maxTotalNodeCount, notAssigned1); //Ali
infoVecs.isSubApicalJunction.resize(maxTotalNodeCount, false); //Ali
infoVecs.isSubApicalJunctionHost.resize(maxTotalNodeCount, false); //Ali
cout << " I am here2 " << maxNumCells << endl ;
auxVecs.bucketKeys.resize(maxTotalNodeCount);
auxVecs.bucketValues.resize(maxTotalNodeCount);
auxVecs.bucketKeysExpanded.resize(maxTotalNodeCount * 9);
auxVecs.bucketValuesIncludingNeighbor.resize(maxTotalNodeCount * 9);
}
thrust:: sequence (infoVecs.nodeCellRankFront.begin() ,infoVecs.nodeCellRankFront.begin() +currentActiveCellCount) ; //Ali
thrust:: sequence (infoVecs.nodeCellRankBehind.begin(),infoVecs.nodeCellRankBehind.begin()+currentActiveCellCount) ; //Ali
thrust:: device_vector<int> tmp1 ;
thrust:: device_vector<int> tmp2 ;
tmp1.resize(currentActiveCellCount,1) ;
tmp2.resize(currentActiveCellCount,-1) ;
thrust:: transform(tmp1.begin(),tmp1.begin()+currentActiveCellCount,
infoVecs.nodeCellRankFront.begin(),infoVecs.nodeCellRankFront.begin(), thrust::plus<int>()) ; //Ali
thrust:: transform(tmp2.begin(),tmp2.begin()+currentActiveCellCount,
infoVecs.nodeCellRankBehind.begin(),infoVecs.nodeCellRankBehind.begin(),thrust::plus<int>()) ; //Ali
infoVecs.nodeCellRankBehind[0]=currentActiveCellCount-1 ;
infoVecs.nodeCellRankFront[currentActiveCellCount-1]=0 ;
cout << " I am here 3 " << maxNumCells << endl ;
}
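// Note on the front/behind initialization above: the active cells are linked into
// a ring, so for currentActiveCellCount = N the result is front[i] = (i+1) % N and
// behind[i] = (i-1+N) % N; the two thrust::transform calls add +1/-1 to the
// sequential ids, and the last two assignments close the ring at the ends.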
void SceNodes::initNodeAllocPara(uint totalBdryNodeCount,
uint maxProfileNodeCount, uint maxCartNodeCount, uint maxTotalECMCount,
uint maxNodeInECM, uint maxTotalCellCount, uint maxNodeInCell) {
allocPara.maxCellCount = maxTotalCellCount;
allocPara.maxNodeOfOneCell = maxNodeInCell;
allocPara.maxNodePerECM = maxNodeInECM;
allocPara.maxECMCount = maxTotalECMCount;
allocPara.maxProfileNodeCount = maxProfileNodeCount;
allocPara.maxCartNodeCount = maxCartNodeCount;
allocPara.currentActiveProfileNodeCount = 0;
allocPara.currentActiveCartNodeCount = 0;
allocPara.BdryNodeCount = totalBdryNodeCount;
allocPara.currentActiveCellCount = 0;
allocPara.maxTotalECMNodeCount = allocPara.maxECMCount
* allocPara.maxNodePerECM;
allocPara.currentActiveECM = 0;
allocPara.maxTotalCellNodeCount = maxTotalCellCount
* allocPara.maxNodeOfOneCell;
allocPara.startPosProfile = totalBdryNodeCount;
allocPara.startPosCart = allocPara.startPosProfile
+ allocPara.maxProfileNodeCount;
allocPara.startPosECM = allocPara.startPosCart + allocPara.maxCartNodeCount;
allocPara.startPosCells = allocPara.startPosECM
+ allocPara.maxTotalECMNodeCount;
}
void SceNodes::initNodeAllocPara_M(uint totalBdryNodeCount,
uint maxTotalCellCount, uint maxEpiNodePerCell,
uint maxInternalNodePerCell) {
allocPara_M.bdryNodeCount = totalBdryNodeCount;
allocPara_M.currentActiveCellCount = 0;
allocPara_M.maxCellCount = maxTotalCellCount;
allocPara_M.maxAllNodePerCell = maxEpiNodePerCell + maxInternalNodePerCell;
allocPara_M.maxMembrNodePerCell = maxEpiNodePerCell;
allocPara_M.maxIntnlNodePerCell = maxInternalNodePerCell;
allocPara_M.maxTotalNodeCount = allocPara_M.maxAllNodePerCell
* allocPara_M.maxCellCount;
}
void SceNodes::removeNodes(int cellRank, vector<uint> &removeSeq) {
uint cellBeginIndex = allocPara.startPosCells
+ cellRank * allocPara.maxNodeOfOneCell;
uint cellEndIndex = cellBeginIndex + allocPara.maxNodeOfOneCell;
thrust::host_vector<double> cellXCoords(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYCoords(allocPara.maxNodeOfOneCell);
thrust::copy(infoVecs.nodeLocX.begin() + cellBeginIndex,
infoVecs.nodeLocX.begin() + cellEndIndex, cellXCoords.begin());
thrust::copy(infoVecs.nodeLocY.begin() + cellBeginIndex,
infoVecs.nodeLocY.begin() + cellEndIndex, cellYCoords.begin());
vector<bool> isRemove(allocPara.maxNodeOfOneCell, false);
/*
std::cout << "before, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXCoords[i] << " ";
}
std::cout << "]" << endl;
std::cout << "before, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYCoords[i] << " ";
}
std::cout << "]" << endl;
*/
for (uint i = 0; i < removeSeq.size(); i++) {
isRemove[removeSeq[i]] = true;
}
thrust::host_vector<double> cellXRemoved(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYRemoved(allocPara.maxNodeOfOneCell);
uint curIndex = 0;
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
if (isRemove[i] == false) {
cellXRemoved[curIndex] = cellXCoords[i];
cellYRemoved[curIndex] = cellYCoords[i];
curIndex++;
}
}
/*
std::cout << "after, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXRemoved[i] << " ";
}
std::cout << "]" << endl;
std::cout << "after, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYRemoved[i] << " ";
}
std::cout << "]" << endl;
*/
thrust::copy(cellXRemoved.begin(), cellXRemoved.end(),
infoVecs.nodeLocX.begin() + cellBeginIndex);
thrust::copy(cellYRemoved.begin(), cellYRemoved.end(),
infoVecs.nodeLocY.begin() + cellBeginIndex);
}
void SceNodes::processMembrAdh_M() {
keepAdhIndxCopyInHost_M();
applyMembrAdh_M();
//removeInvalidPairs_M(); //Ali changed position
}
void SceNodes::keepAdhIndxCopyInHost_M() {
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::copy(infoVecs.nodeAdhereIndex.begin(),
infoVecs.nodeAdhereIndex.begin() + maxTotalNode,
infoVecs.nodeAdhIndxHostCopy.begin());
}
void SceNodes::removeInvalidPairs_M() {
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
uint curActiveNodeCt = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<int> iBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin()))
+ curActiveNodeCt, infoVecs.nodeAdhereIndex.begin(),
AdjustAdh(nodeAdhIdxAddress));
}
void SceNodes::applyMembrAdh_M() {
thrust::counting_iterator<uint> iBegin(0);
thrust::counting_iterator<uint> iBegin2(0);
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(&infoVecs.nodeGrowPro[0]);
int* nodeAdhAddr = thrust::raw_pointer_cast(&infoVecs.nodeAdhereIndex[0]);
double* nodedppLevelAddr = thrust::raw_pointer_cast(&infoVecs.dppLevel[0]);
//thrust::counting_iterator<uint> iBegin_node(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin(),
infoVecs.memNodeType1.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin(),
infoVecs.memNodeType1.begin())) + maxTotalNode,
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
ApplyAdh(nodeLocXAddress, nodeLocYAddress, nodeGrowProAddr,nodeAdhAddr,nodedppLevelAddr));
//for (int i=0 ; i<140 ; i++){
// cout <<"adhesion index for "<<i << " is "<<infoVecs.nodeAdhereIndex[i]<< endl ;
// }
/* thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
iBegin2,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
iBegin2,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())) + maxTotalNode,
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
ApplyAdhReaction(nodeLocXAddress, nodeLocYAddress, nodeGrowProAddr,nodeAdhAddr,maxTotalNode));
*/
}
//AAMIRI
void SceNodes::copyExtForces_M(){
thrust::copy(infoVecs.nodeVelX.begin(), infoVecs.nodeVelX.end(),
infoVecs.nodeExtForceX.begin());
thrust::copy(infoVecs.nodeVelY.begin(), infoVecs.nodeVelY.end(),
infoVecs.nodeExtForceY.begin());
}
| d9bc2b2e05ae5014cb09648b47b318aa45763abf.cu | //Notes: 1- infoVecs.nodeCellRankBehind and infoVecs.nodeCellRankFront are assigned sequential values corresponding to their actual values in the function SceNodes::allocSpaceForNodes
// 2- the adhesion algorithm won't work if there is no apical node.
// 3- maxNumAdh is hard-coded as a parameter in the .cu file. It should become an input, or a function should be written to detect it automatically.
// 4- In SceNodes::NumAdhAfter and SceNodes::NumAdhBefore the number of lateral nodes is given manually inside the code; it should be calculated automatically from the input parameters.
#include "SceNodes.h"
#include "SceCells.h"
__constant__ double sceInterPara[5];
__constant__ double sceIntraPara[5];
// parameter set for cells that are going to divide
__constant__ double sceIntraParaDiv[5];
__constant__ double sceDivProPara;
__constant__ double sceCartPara[5];
__constant__ double sceInterDiffPara[5];
__constant__ double sceProfilePara[7];
__constant__ double sceECMPara[5];
__constant__ double sceDiffPara[5];
__constant__ double cartGrowDirVec[3];
__constant__ uint ProfilebeginPos;
__constant__ uint ECMbeginPos;
__constant__ uint cellNodeBeginPos;
__constant__ uint nodeCountPerECM;
__constant__ uint nodeCountPerCell;
//
__constant__ uint cellNodeBeginPos_M;
__constant__ uint allNodeCountPerCell_M;
__constant__ uint membrThreshold_M;
__constant__ double sceInterBPara_M[5];
__constant__ int sceInterBPara_Jones_On_M ; //Ali
__constant__ double sceInterBPara_Jones_M[3] ; //Ali
__constant__ double sceIntnlBPara_M[5];
__constant__ double sceIntraPara_M[5];
__constant__ double sceIntraParaDiv_M[5];
__constant__ double growthPrgrCriVal_M;
__constant__ double maxAdhBondLen_M;
__constant__ double minAdhBondLen_M;
__constant__ double bondStiff_M;
__constant__ double bondStiff_Mitotic;
__constant__ double bondAdhCriLen_M;
// #define DebugMode
// This template method expands an input sequence by
// replicating each element a variable number of times. For example,
//
// expand([2,2,2],[A,B,C]) -> [A,A,B,B,C,C]
// expand([3,0,1],[A,B,C]) -> [A,A,A,C]
// expand([1,3,2],[A,B,C]) -> [A,B,B,B,C,C]
//
// The element counts are assumed to be non-negative integers
template<typename InputIterator1, typename InputIterator2,
typename OutputIterator>
OutputIterator expand(InputIterator1 first1, InputIterator1 last1,
InputIterator2 first2, OutputIterator output) {
typedef typename thrust::iterator_difference<InputIterator1>::type difference_type;
difference_type input_size = thrust::distance(first1, last1);
difference_type output_size = thrust::reduce(first1, last1);
// scan the counts to obtain output offsets for each input element
thrust::device_vector<difference_type> output_offsets(input_size, 0);
thrust::exclusive_scan(first1, last1, output_offsets.begin());
// scatter the nonzero counts into their corresponding output positions
thrust::device_vector<difference_type> output_indices(output_size, 0);
thrust::scatter_if(thrust::counting_iterator<difference_type>(0),
thrust::counting_iterator<difference_type>(input_size),
output_offsets.begin(), first1, output_indices.begin());
// compute max-scan over the output indices, filling in the holes
thrust::inclusive_scan(output_indices.begin(), output_indices.end(),
output_indices.begin(), thrust::maximum<difference_type>());
// gather input values according to index array (output = first2[output_indices])
OutputIterator output_end = output;
thrust::advance(output_end, output_size);
thrust::gather(output_indices.begin(), output_indices.end(), first2,
output);
// return output + output_size
thrust::advance(output, output_size);
return output;
}
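// Hypothetical usage sketch (mirrors the examples in the comment above; the
// variable names are made up for illustration):
//   thrust::device_vector<int>  counts(3);  // counts = [2, 0, 3]
//   thrust::device_vector<char> values(3);  // values = [A, B, C]
//   thrust::device_vector<char> out(5);     // 5 = thrust::reduce(counts)
//   expand(counts.begin(), counts.end(), values.begin(), out.begin());
//   // out now holds [A, A, C, C, C]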
SceNodes::SceNodes() {
readDomainPara();
}
int SceNodes::NumAdhBefore(int cellRank,ECellType eCellType) {
if (eCellType==peri) {
return 28 ;
}
if (eCellType==bc) {
return 30 ;
}
if (eCellType==pouch) {
if (cellRank==0){
return 36 ;
}
else if ( cellRank==1) {
return 108 ;
}
else if ( cellRank==64) {
return 108 ;
}
else {
return 180 ;
}
	}
	// fallback for cell types not handled above; avoids falling off the end of a
	// non-void function (callers are expected to pass peri, bc, or pouch).
	return 0;
}
int SceNodes::NumAdhAfter(int cellRank,ECellType eCellType) {
if ( eCellType==peri) {
return 28 ;
}
if (eCellType==bc) {
return 30 ;
}
if (eCellType==pouch) {
if (cellRank==64){
return 36 ;
}
else if ( cellRank==63) {
return 108 ;
}
else if ( cellRank==0) {
return 108 ;
}
else {
return 180 ;
}
	}
	// fallback for cell types not handled above; avoids falling off the end of a
	// non-void function (callers are expected to pass peri, bc, or pouch).
	return 0;
}
void SceNodes::readDomainPara() {
domainPara.minX = globalConfigVars.getConfigValue("DOMAIN_XMIN").toDouble();
domainPara.maxX = globalConfigVars.getConfigValue("DOMAIN_XMAX").toDouble();
domainPara.minY = globalConfigVars.getConfigValue("DOMAIN_YMIN").toDouble();
domainPara.maxY = globalConfigVars.getConfigValue("DOMAIN_YMAX").toDouble();
//domainPara.minZ = globalConfigVars.getConfigValue("DOMAIN_ZMIN").toDouble();
//domainPara.maxZ = globalConfigVars.getConfigValue("DOMAIN_ZMAX").toDouble();
domainPara.gridSpacing = getMaxEffectiveRange();
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
//domainPara.ZBucketSize = (domainPara.maxZ - domainPara.minZ)
// / domainPara.gridSpacing + 1;
}
void SceNodes::readMechPara() {
double U0 =
globalConfigVars.getConfigValue("InterCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_U0_DivFactor").toDouble();
double V0 =
globalConfigVars.getConfigValue("InterCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_V0_DivFactor").toDouble();
double k1 =
globalConfigVars.getConfigValue("InterCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k1_DivFactor").toDouble();
double k2 =
globalConfigVars.getConfigValue("InterCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("InterCell_k2_DivFactor").toDouble();
mechPara.sceInterParaCPU[0] = U0;
mechPara.sceInterParaCPU[1] = V0;
mechPara.sceInterParaCPU[2] = k1;
mechPara.sceInterParaCPU[3] = k2;
double interLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
mechPara.sceInterParaCPU[4] = interLinkEffectiveRange;
}
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_U0_DivFactor").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_V0_DivFactor").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k1_DivFactor").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue("IntraCell_k2_DivFactor").toDouble();
mechPara.sceIntraParaCPU[0] = U0_Intra;
mechPara.sceIntraParaCPU[1] = V0_Intra;
mechPara.sceIntraParaCPU[2] = k1_Intra;
mechPara.sceIntraParaCPU[3] = k2_Intra;
double intraLinkEffectiveRange;
if (controlPara.simuType != Disc_M) {
intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
mechPara.sceIntraParaCPU[4] = intraLinkEffectiveRange;
}
if (controlPara.simuType == Disc) {
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_U0_Div_DivFactor").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_V0_Div_DivFactor").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k1_Div_DivFactor").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Original").toDouble()
/ globalConfigVars.getConfigValue(
"IntraCell_k2_Div_DivFactor").toDouble();
double growthProgressThreshold = globalConfigVars.getConfigValue(
"GrowthProgressThreshold").toDouble();
mechPara.sceIntraParaDivCPU[0] = U0_Intra_Div;
mechPara.sceIntraParaDivCPU[1] = V0_Intra_Div;
mechPara.sceIntraParaDivCPU[2] = k1_Intra_Div;
mechPara.sceIntraParaDivCPU[3] = k2_Intra_Div;
mechPara.sceIntraParaDivCPU[4] = growthProgressThreshold;
}
}
// This constructor is not active Ali
SceNodes::SceNodes(uint totalBdryNodeCount, uint maxProfileNodeCount,
uint maxCartNodeCount, uint maxTotalECMCount, uint maxNodeInECM,
uint maxTotalCellCount, uint maxNodeInCell, bool isStab) {
initControlPara(isStab);
readDomainPara();
uint maxTotalNodeCount;
if (controlPara.simuType != Disc_M) {
initNodeAllocPara(totalBdryNodeCount, maxProfileNodeCount,
maxCartNodeCount, maxTotalECMCount, maxNodeInECM,
maxTotalCellCount, maxNodeInCell);
maxTotalNodeCount = totalBdryNodeCount + maxProfileNodeCount
+ maxCartNodeCount + allocPara.maxTotalECMNodeCount
+ allocPara.maxTotalCellNodeCount;
} else {
uint maxEpiNodeCount = globalConfigVars.getConfigValue(
"MaxEpiNodeCountPerCell").toInt();
uint maxInternalNodeCount = globalConfigVars.getConfigValue(
"MaxAllNodeCountPerCell").toInt() - maxEpiNodeCount;
initNodeAllocPara_M(totalBdryNodeCount, maxTotalCellCount,
maxEpiNodeCount, maxInternalNodeCount);
maxTotalNodeCount = allocPara_M.maxTotalNodeCount;
}
//allocSpaceForNodes(maxTotalNodeCount); Ali commented this out because it is not active in this simulation and the function was updated in the active constructor
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
thrust::host_vector<int> hostTmpVector3(maxTotalNodeCount);
if (controlPara.simuType != Disc_M) {
for (int i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara.startPosProfile) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCart) {
hostTmpVector[i] = Profile;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosECM) {
hostTmpVector[i] = Cart;
hostTmpVector3[i] = 0;
} else if (i < allocPara.startPosCells) {
hostTmpVector[i] = ECM;
hostTmpVector3[i] = (i - allocPara.startPosECM)
/ allocPara.maxNodePerECM;
} else {
// all initialized as FNM
hostTmpVector[i] = FNM;
hostTmpVector3[i] = (i - allocPara.startPosCells)
/ allocPara.maxNodeOfOneCell;
}
hostTmpVector2[i] = false;
}
} else {
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
hostTmpVector3[i] = 0;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
uint cellRank = tmp / allocPara_M.maxAllNodePerCell; // was divided by bdryNodeCount, which does not match the per-cell node layout used elsewhere
uint nodeRank = tmp % allocPara_M.maxAllNodePerCell;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
} else {
hostTmpVector[i] = CellIntnl;
}
hostTmpVector3[i] = cellRank;
}
hostTmpVector2[i] = false;
}
}
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
infoVecs.nodeCellRank = hostTmpVector3;
std::cout << " I am in SceNodes constructor with long input which includes copyParaToGPUConstMem function " << endl ;
copyParaToGPUConstMem();
}
SceNodes::SceNodes(uint maxTotalCellCount, uint maxAllNodePerCell, uint currentActiveCellCount) {
//initControlPara (isStab);
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
readDomainPara();
uint maxTotalNodeCount = maxTotalCellCount * maxAllNodePerCell;
uint maxMembrNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxMembrNodeCountPerCell").toInt();
uint maxIntnlNodeCountPerCell = globalConfigVars.getConfigValue(
"MaxIntnlNodeCountPerCell").toInt();
initNodeAllocPara_M(0, maxTotalCellCount, maxMembrNodeCountPerCell,
maxIntnlNodeCountPerCell);
std::cout << " Number of boundary nodes = " << allocPara_M.bdryNodeCount
<< std::endl;
std::cout << " Max number of cells in domain = "
<< allocPara_M.maxCellCount << std::endl;
std::cout << " Max all nodes per cell = "
<< allocPara_M.maxAllNodePerCell << std::endl;
std::cout << " Max membrane node per cell= "
<< allocPara_M.maxMembrNodePerCell << std::endl;
std::cout << " Max internal node per cell= "
<< allocPara_M.maxIntnlNodePerCell << std::endl;
std::cout << " Max total number of nodes in domain = "
<< allocPara_M.maxTotalNodeCount << std::endl;
allocSpaceForNodes(maxTotalNodeCount, allocPara_M.maxCellCount, currentActiveCellCount);
thrust::host_vector<SceNodeType> hostTmpVector(maxTotalNodeCount);
thrust::host_vector<bool> hostTmpVector2(maxTotalNodeCount);
uint nodeRank = 0 ; // initialized so the check below never reads an indeterminate value
for (uint i = 0; i < maxTotalNodeCount; i++) {
if (i < allocPara_M.bdryNodeCount) {
hostTmpVector[i] = Boundary;
} else {
uint tmp = i - allocPara_M.bdryNodeCount;
nodeRank = tmp % allocPara_M.maxAllNodePerCell;
if (nodeRank < allocPara_M.maxMembrNodePerCell) {
hostTmpVector[i] = CellMembr;
//std::cout << "0";
} else {
hostTmpVector[i] = CellIntnl;
//std::cout << "1";
}
}
hostTmpVector2[i] = false;
if (nodeRank == 0) {
//std::cout << std::endl;
}
}
//std::cout << "finished" << std::endl;
//std::cout.flush();
infoVecs.nodeCellType = hostTmpVector;
infoVecs.nodeIsActive = hostTmpVector2;
thrust::host_vector<int> bondVec(maxTotalNodeCount, -1);
infoVecs.nodeAdhereIndex = bondVec;
infoVecs.membrIntnlIndex = bondVec;
infoVecs.nodeAdhIndxHostCopy = bondVec;
//std::cout << "copy finished!" << std::endl;
//std::cout.flush();
copyParaToGPUConstMem_M();
std::cout << " I am in SceNodes constructor with short input which includes copyParaToGPUConstMem_M function " << endl ;
//std::cout << "at the end" << std::endl;
//std::cout.flush();
adhNotSet=true ; //Ali
adhUpdate=true ; //Ali
cout << "adhesion not set is initialized as " << adhNotSet << endl ;
cout << "adhesion update is initialized as " << adhUpdate << endl ;
}
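// Copies the host-side mechanical parameters and node-layout offsets to the device symbols
// (presumably __constant__ memory) that the force kernels read.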
void SceNodes::copyParaToGPUConstMem() {
readMechPara();
cudaMemcpyToSymbol(sceInterPara, mechPara.sceInterParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraPara, mechPara.sceIntraParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraParaDiv, mechPara.sceIntraParaDivCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(ProfilebeginPos, &allocPara.startPosProfile,
sizeof(uint));
cudaMemcpyToSymbol(ECMbeginPos, &allocPara.startPosECM, sizeof(uint));
cudaMemcpyToSymbol(cellNodeBeginPos, &allocPara.startPosCells,
sizeof(uint));
cudaMemcpyToSymbol(nodeCountPerECM, &allocPara.maxNodePerECM, sizeof(uint));
cudaMemcpyToSymbol(nodeCountPerCell, &allocPara.maxNodeOfOneCell,
sizeof(uint));
cudaMemcpyToSymbol(sceCartPara, mechPara.sceCartParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceProfilePara, mechPara.sceProfileParaCPU,
7 * sizeof(double));
cudaMemcpyToSymbol(sceInterDiffPara, mechPara.sceInterDiffParaCPU,
5 * sizeof(double));
cudaMemcpyToSymbol(sceECMPara, mechPara.sceECMParaCPU, 5 * sizeof(double));
}
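// Same idea for the membrane/internal-node (_M) model: read its parameters, then push them
// to the corresponding device symbols.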
void SceNodes::copyParaToGPUConstMem_M() {
readParas_M();
cudaMemcpyToSymbol(cellNodeBeginPos_M, &allocPara_M.bdryNodeCount,
sizeof(uint));
cudaMemcpyToSymbol(allNodeCountPerCell_M, &allocPara_M.maxAllNodePerCell,
sizeof(uint));
cudaMemcpyToSymbol(membrThreshold_M, &allocPara_M.maxMembrNodePerCell,
sizeof(uint));
cudaMemcpyToSymbol(bondAdhCriLen_M, &mechPara_M.bondAdhCriLenCPU_M,
sizeof(double));
cudaMemcpyToSymbol(bondStiff_M, &mechPara_M.bondStiffCPU_M, sizeof(double));
cudaMemcpyToSymbol(bondStiff_Mitotic, &mechPara_M.bondStiffCPU_Mitotic, sizeof(double));//Ali June 16
cudaMemcpyToSymbol(growthPrgrCriVal_M, &mechPara_M.growthPrgrCriValCPU_M,
sizeof(double));
cudaMemcpyToSymbol(maxAdhBondLen_M, &mechPara_M.maxAdhBondLenCPU_M,
sizeof(double));
cudaMemcpyToSymbol(minAdhBondLen_M, &mechPara_M.minAdhBondLenCPU_M,
sizeof(double));
cudaMemcpyToSymbol(sceInterBPara_M, mechPara_M.sceInterBParaCPU_M,
5 * sizeof(double));
cudaMemcpyToSymbol(sceInterBPara_Jones_On_M, &mechPara_M.sceInterBParaCPU_Jones_On_M,
sizeof(int)); //Ali
cudaMemcpyToSymbol(sceInterBPara_Jones_M, mechPara_M.sceInterBParaCPU_Jones_M,
3 * sizeof(double)); //Ali
cudaMemcpyToSymbol(sceIntnlBPara_M, mechPara_M.sceIntnlBParaCPU_M,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraPara_M, mechPara_M.sceIntraParaCPU_M,
5 * sizeof(double));
cudaMemcpyToSymbol(sceIntraParaDiv_M, mechPara_M.sceIntraParaDivCPU_M,
5 * sizeof(double));
}
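// Sets the domain bounds and bucket spacing directly (instead of reading the config) and
// sizes the per-bucket keyBegin/keyEnd arrays.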
void SceNodes::initDimension(double domainMinX, double domainMaxX,
double domainMinY, double domainMaxY, double domainBucketSize) {
domainPara.minX = domainMinX;
domainPara.maxX = domainMaxX;
domainPara.minY = domainMinY;
domainPara.maxY = domainMaxY;
domainPara.gridSpacing = domainBucketSize;
domainPara.XBucketSize = (domainPara.maxX - domainPara.minX)
/ domainPara.gridSpacing + 1;
domainPara.YBucketSize = (domainPara.maxY - domainPara.minY)
/ domainPara.gridSpacing + 1;
domainPara.totalBucketCount = domainPara.XBucketSize
* domainPara.YBucketSize;
auxVecs.keyBegin.resize(domainPara.totalBucketCount);
auxVecs.keyEnd.resize(domainPara.totalBucketCount);
}
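// Copies the bucket data to the host and enumerates candidate neighbor pairs (node1 < node2)
// by scanning each node's extended bucket neighborhood.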
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
int size = bucketKeysCPU.size();
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
int node1 = bucketValuesCPU[i];
int node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
iterationCounter++;
}
}
return result;
}
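// Reads every parameter block of the _M model from the config: inter-membrane, optional
// Lennard-Jones, internal-node, intra-cell, division intra-cell, and adhesion-bond constants.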
void SceNodes::readParas_M() {
//////////////////////
//// Block 1 /////////
//////////////////////
double U0_InterB =
globalConfigVars.getConfigValue("SceInterB_U0").toDouble();
double V0_InterB =
globalConfigVars.getConfigValue("SceInterB_V0").toDouble();
double k1_InterB =
globalConfigVars.getConfigValue("SceInterB_k1").toDouble();
double k2_InterB =
globalConfigVars.getConfigValue("SceInterB_k2").toDouble();
double interBEffectiveRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
mechPara_M.sceInterBParaCPU_M[0] = U0_InterB;
mechPara_M.sceInterBParaCPU_M[1] = V0_InterB;
mechPara_M.sceInterBParaCPU_M[2] = k1_InterB;
mechPara_M.sceInterBParaCPU_M[3] = k2_InterB;
mechPara_M.sceInterBParaCPU_M[4] = interBEffectiveRange;
//Ali
//////////////////////
//// Block 1.5 /////////
//////////////////////
int On_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_On").toDouble();
double eps_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_eps").toDouble();
double sig_InterB_Jones =
globalConfigVars.getConfigValue("SceInterB_Jones_sig").toDouble();
double interBEffectiveRange_Jones = globalConfigVars.getConfigValue(
"InterBEffectiveRange_Jones").toDouble();
mechPara_M.sceInterBParaCPU_Jones_On_M = On_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[0] = eps_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[1] = sig_InterB_Jones;
mechPara_M.sceInterBParaCPU_Jones_M[2] = interBEffectiveRange_Jones;
//Ali
//////////////////////
//// Block 2 /////////
//////////////////////
double U0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_U0").toDouble();
double V0_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_V0").toDouble();
double k1_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k1").toDouble();
double k2_IntnlB =
globalConfigVars.getConfigValue("SceIntnlB_k2").toDouble();
double intnlBEffectiveRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
mechPara_M.sceIntnlBParaCPU_M[0] = U0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[1] = V0_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[2] = k1_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[3] = k2_IntnlB;
mechPara_M.sceIntnlBParaCPU_M[4] = intnlBEffectiveRange;
//////////////////////
//// Block 3 /////////
//////////////////////
double U0_Intra =
globalConfigVars.getConfigValue("IntraCell_U0").toDouble();
double V0_Intra =
globalConfigVars.getConfigValue("IntraCell_V0").toDouble();
double k1_Intra =
globalConfigVars.getConfigValue("IntraCell_k1").toDouble();
double k2_Intra =
globalConfigVars.getConfigValue("IntraCell_k2").toDouble();
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
mechPara_M.sceIntraParaCPU_M[0] = U0_Intra;
mechPara_M.sceIntraParaCPU_M[1] = V0_Intra;
mechPara_M.sceIntraParaCPU_M[2] = k1_Intra;
mechPara_M.sceIntraParaCPU_M[3] = k2_Intra;
mechPara_M.sceIntraParaCPU_M[4] = intraLinkEffectiveRange;
//////////////////////
//// Block 4 /////////
//////////////////////
double U0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_U0_Div").toDouble();
double V0_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_V0_Div").toDouble();
double k1_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k1_Div").toDouble();
double k2_Intra_Div =
globalConfigVars.getConfigValue("IntraCell_k2_Div").toDouble();
double intraDivEffectiveRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
mechPara_M.sceIntraParaDivCPU_M[0] = U0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[1] = V0_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[2] = k1_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[3] = k2_Intra_Div;
mechPara_M.sceIntraParaDivCPU_M[4] = intraDivEffectiveRange;
//////////////////////
//// Block 5 /////////
//////////////////////
double bondAdhCriLen =
globalConfigVars.getConfigValue("BondAdhCriLen").toDouble();
mechPara_M.bondAdhCriLenCPU_M = bondAdhCriLen;
double bondStiff = globalConfigVars.getConfigValue("BondStiff").toDouble();
mechPara_M.bondStiffCPU_M = bondStiff;
//Ali June 16
double bondStiff_Mitotic = globalConfigVars.getConfigValue("BondStiff_Mitotic").toDouble();
mechPara_M.bondStiffCPU_Mitotic = bondStiff_Mitotic;
double growthPrgrCriVal = globalConfigVars.getConfigValue(
"GrowthPrgrCriVal").toDouble();
mechPara_M.growthPrgrCriValCPU_M = growthPrgrCriVal;
double maxAdhBondLen =
globalConfigVars.getConfigValue("MaxAdhBondLen").toDouble();
mechPara_M.maxAdhBondLenCPU_M = maxAdhBondLen;
double minAdhBondLen =
globalConfigVars.getConfigValue("MinAdhBondLen").toDouble();
mechPara_M.minAdhBondLenCPU_M = minAdhBondLen;
}
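// Cheap divergence check: reduce the x-coordinates of all active nodes and abort if the sum is NaN.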
void SceNodes::debugNAN() {
uint totalActiveNodeC = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double res = thrust::reduce(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocX.begin() + totalActiveNodeC);
if (isnan(res)) {
std::cout << "fatal error! NAN found" << std::endl;
std::cout.flush();
exit(0);
}
}
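// Like obtainPossibleNeighborPairs, but keeps only pairs of internal nodes that belong to the same cell.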
std::vector<std::pair<uint, uint> > SceNodes::obtainPossibleNeighborPairs_M() {
std::vector<std::pair<uint, uint> > result;
thrust::host_vector<uint> keyBeginCPU = auxVecs.keyBegin;
thrust::host_vector<uint> keyEndCPU = auxVecs.keyEnd;
thrust::host_vector<uint> bucketKeysCPU = auxVecs.bucketKeys;
thrust::host_vector<uint> bucketValuesCPU = auxVecs.bucketValues;
thrust::host_vector<uint> bucketValuesExtendedCPU =
auxVecs.bucketValuesIncludingNeighbor;
uint iterationCounter = 0;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint offSet = allocPara_M.bdryNodeCount;
uint memThreshold = allocPara_M.maxMembrNodePerCell;
int size = bucketKeysCPU.size();
int node1, node2, cellRank1, cellRank2, nodeRank1, nodeRank2;
for (int i = 0; i < size; i++) {
for (int j = keyBeginCPU[bucketKeysCPU[i]];
j < keyEndCPU[bucketKeysCPU[i]]; j++) {
node1 = bucketValuesCPU[i];
node2 = bucketValuesExtendedCPU[j];
if (node1 >= node2) {
continue;
} else {
cellRank1 = (node1 - offSet) / maxNodePerCell;
nodeRank1 = (node1 - offSet) % maxNodePerCell;
cellRank2 = (node2 - offSet) / maxNodePerCell;
nodeRank2 = (node2 - offSet) % maxNodePerCell;
if (nodeRank1 >= memThreshold && nodeRank2 >= memThreshold
&& cellRank1 == cellRank2) {
result.push_back(std::make_pair<uint, uint>(node1, node2));
}
}
iterationCounter++;
}
}
return result;
}
void SceNodes::initValues(std::vector<CVector>& initBdryCellNodePos,
std::vector<CVector>& initProfileNodePos,
std::vector<CVector>& initCartNodePos,
std::vector<CVector>& initECMNodePos,
std::vector<CVector>& initFNMCellNodePos,
std::vector<CVector>& initMXCellNodePos) {
uint FNMNodeCount = initFNMCellNodePos.size();
uint MXNodeCount = initMXCellNodePos.size();
uint beginAddressOfProfile = allocPara.startPosProfile;
uint beginAddressOfCart = allocPara.startPosCart;
// find the beginning position of ECM.
uint beginAddressOfECM = allocPara.startPosECM;
// find the beginning position of FNM cells.
uint beginAddressOfFNM = allocPara.startPosCells;
// find the beginning position of MX cells.
uint beginAddressOfMX = beginAddressOfFNM + FNMNodeCount;
std::vector<double> initBdryCellNodePosX = getArrayXComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosX.begin(), initBdryCellNodePosX.end(),
infoVecs.nodeLocX.begin());
std::vector<double> initBdryCellNodePosY = getArrayYComp(
initBdryCellNodePos);
thrust::copy(initBdryCellNodePosY.begin(), initBdryCellNodePosY.end(),
infoVecs.nodeLocY.begin());
// copy x and y position of nodes of Profile to actual node position.
std::vector<double> initProfileNodePosX = getArrayXComp(initProfileNodePos);
thrust::copy(initProfileNodePosX.begin(), initProfileNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfProfile);
std::vector<double> initProfileNodePosY = getArrayYComp(initProfileNodePos);
thrust::copy(initProfileNodePosY.begin(), initProfileNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfProfile);
// copy x and y position of nodes of Cart to actual node position.
std::vector<double> initCartNodePosX = getArrayXComp(initCartNodePos);
thrust::copy(initCartNodePosX.begin(), initCartNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfCart);
std::vector<double> initCartNodePosY = getArrayYComp(initCartNodePos);
thrust::copy(initCartNodePosY.begin(), initCartNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfCart);
// copy x and y position of nodes of ECM to actual node position.
std::vector<double> initECMNodePosX = getArrayXComp(initECMNodePos);
thrust::copy(initECMNodePosX.begin(), initECMNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfECM);
std::vector<double> initECMNodePosY = getArrayYComp(initECMNodePos);
thrust::copy(initECMNodePosY.begin(), initECMNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfECM);
for (int i = 0; i < initECMNodePosX.size(); i++) {
assert(infoVecs.nodeLocX[i + beginAddressOfECM] == initECMNodePosX[i]);
assert(!isnan(initECMNodePosX[i]));
}
// copy x and y position of nodes of FNM cells to actual node position.
std::vector<double> initFNMCellNodePosX = getArrayXComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosX.begin(), initFNMCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfFNM);
std::vector<double> initFNMCellNodePosY = getArrayYComp(initFNMCellNodePos);
thrust::copy(initFNMCellNodePosY.begin(), initFNMCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfFNM);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfFNM,
infoVecs.nodeCellType.begin() + beginAddressOfMX, FNM);
// copy x and y position of nodes of MX cells to actual node position.
std::vector<double> initMXCellNodePosX = getArrayXComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosX.begin(), initMXCellNodePosX.end(),
infoVecs.nodeLocX.begin() + beginAddressOfMX);
std::vector<double> initMXCellNodePosY = getArrayYComp(initMXCellNodePos);
thrust::copy(initMXCellNodePosY.begin(), initMXCellNodePosY.end(),
infoVecs.nodeLocY.begin() + beginAddressOfMX);
thrust::fill(infoVecs.nodeCellType.begin() + beginAddressOfMX,
infoVecs.nodeCellType.begin() + beginAddressOfMX + MXNodeCount, MX);
}
// It copies the information of node locations from CPU to GPU
void SceNodes::initValues_M(std::vector<bool>& initIsActive,
std::vector<CVector>& initCellNodePos,
std::vector<SceNodeType>& nodeTypes,
std::vector<double>& mDppV,
std::vector<MembraneType1>& mTypeV) {
std::vector<double> initCellNodePosX = getArrayXComp(initCellNodePos);
std::vector<double> initCellNodePosY = getArrayYComp(initCellNodePos);
thrust::copy(initCellNodePosX.begin(), initCellNodePosX.end(),
infoVecs.nodeLocX.begin() + allocPara_M.bdryNodeCount);
thrust::copy(initCellNodePosY.begin(), initCellNodePosY.end(),
infoVecs.nodeLocY.begin() + allocPara_M.bdryNodeCount);
thrust::copy(nodeTypes.begin(), nodeTypes.end(),
infoVecs.nodeCellType.begin() + allocPara_M.bdryNodeCount);
thrust::copy(mDppV.begin(), mDppV.end(),
infoVecs.dppLevel.begin() ); // Ali
thrust::copy(mTypeV.begin(), mTypeV.end(),
infoVecs.memNodeType1.begin() ); // Ali
thrust::copy(initIsActive.begin(), initIsActive.end(),
infoVecs.nodeIsActive.begin() + allocPara_M.bdryNodeCount);
}
VtkAnimationData SceNodes::obtainAnimationData(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs();
cout << "size of potential pairs = " << pairs.size() << endl;
// unordered_map is more efficient than map, but it is a c++ 11 feature
// and c++ 11 seems to be incompatible with Thrust.
IndexMap locIndexToAniIndexMap;
// We don't have to copy the entire nodeLocX array;
// copying only the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<double> hostTmpVectorForceX;
thrust::host_vector<double> hostTmpVectorForceY;
thrust::host_vector<double> hostTmpVectorForceZ;
thrust::host_vector<double> hostTmpVectorVelVal;
assert(hostTmpVectorLocX.size() == hostTmpVectorLocY.size());
assert(hostTmpVectorLocY.size() == hostTmpVectorLocZ.size());
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<double> hostTmpVectorNodeStress;
if (aniCri.animationType != CellType) {
hostTmpVectorForceX = infoVecs.nodeInterForceX;
hostTmpVectorForceY = infoVecs.nodeInterForceY;
hostTmpVectorForceZ = infoVecs.nodeInterForceZ;
assert(hostTmpVectorForceX.size() == hostTmpVectorLocX.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceY.size());
assert(hostTmpVectorForceX.size() == hostTmpVectorForceZ.size());
uint vecSize = hostTmpVectorForceX.size();
hostTmpVectorVelVal.resize(vecSize);
for (uint i = 0; i < vecSize; i++) {
hostTmpVectorVelVal[i] = sqrt(
hostTmpVectorForceX[i] * hostTmpVectorForceX[i]
+ hostTmpVectorForceY[i] * hostTmpVectorForceY[i]
+ hostTmpVectorForceZ[i] * hostTmpVectorForceZ[i]);
}
}
if (aniCri.animationType == Force) {
vtkData.isArrowIncluded = true;
} else {
vtkData.isArrowIncluded = false;
}
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node1Z = hostTmpVectorLocZ[node1Index];
SceNodeType node1T = hostTmpVectorNodeType[node1Index];
uint node1R = hostTmpVectorNodeRank[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
double node2Z = hostTmpVectorLocZ[node2Index];
SceNodeType node2T = hostTmpVectorNodeType[node2Index];
uint node2R = hostTmpVectorNodeRank[node2Index];
if (aniCri.isPairQualify(node1Index, node2Index, node1X, node1Y, node1Z,
node1T, node1R, node2X, node2Y, node2Z, node2T, node2R)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node1Index];
if (hostTmpVectorVelVal[node1Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node1Index]
/ hostTmpVectorVelVal[node1Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node1T);
}
ptAniData.pos = CVector(node1X, node1Y, node1Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[node2Index];
if (hostTmpVectorVelVal[node2Index] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[node2Index]
/ hostTmpVectorVelVal[node2Index]
* aniCri.arrowLength;
} else {
ptAniData.dir.x = 0;
ptAniData.dir.y = 0;
ptAniData.dir.z = 0;
}
} else {
ptAniData.colorScale = nodeTypeToScale(node2T);
}
ptAniData.pos = CVector(node2X, node2Y, node2Z);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
uint profileStartIndex = allocPara.startPosProfile;
uint profileEndIndex = profileStartIndex
+ allocPara.currentActiveProfileNodeCount;
for (uint i = profileStartIndex; i < profileEndIndex; i++) {
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
if (i != profileEndIndex - 1) {
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
uint cartStartIndex = allocPara.startPosCart;
uint cartEndIndex = cartStartIndex + allocPara.maxCartNodeCount;
for (uint i = cartStartIndex; i < cartEndIndex; i++) {
bool isActive = infoVecs.nodeIsActive[i];
if (!isActive) {
continue;
}
PointAniData ptAniData;
ptAniData.pos = CVector(hostTmpVectorLocX[i], hostTmpVectorLocY[i],
hostTmpVectorLocZ[i]);
if (aniCri.animationType == ForceAbsVal) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
} else if (aniCri.animationType == Force) {
ptAniData.colorScale = hostTmpVectorVelVal[i];
if (hostTmpVectorVelVal[i] > aniCri.threshold) {
ptAniData.dir.x = hostTmpVectorForceX[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.y = hostTmpVectorForceY[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
ptAniData.dir.z = hostTmpVectorForceZ[i]
/ hostTmpVectorVelVal[i] * aniCri.arrowLength;
}
} else {
ptAniData.colorScale = nodeTypeToScale(hostTmpVectorNodeType[i]);
}
vtkData.pointsAniData.push_back(ptAniData);
bool isNextActive;
if (i == cartEndIndex - 1) {
isNextActive = false;
} else {
isNextActive = infoVecs.nodeIsActive[i + 1];
}
if (isNextActive) {
LinkAniData linkData;
linkData.node1Index = curIndex;
linkData.node2Index = curIndex + 1;
vtkData.linksAniData.push_back(linkData);
}
curIndex++;
}
return vtkData;
}
// TODO
VtkAnimationData SceNodes::obtainAnimationData_M(AnimationCriteria aniCri) {
VtkAnimationData vtkData;
std::vector<std::pair<uint, uint> > pairs = obtainPossibleNeighborPairs_M();
cout << "size of potential pairs = " << pairs.size() << endl;
// unordered_map is more efficient than map, but it is a c++ 11 feature
// and c++ 11 seems to be incompatible with Thrust.
IndexMap locIndexToAniIndexMap;
// We don't have to copy the entire nodeLocX array;
// copying only the first half would be sufficient.
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<bool> hostIsActiveVec = infoVecs.nodeIsActive;
thrust::host_vector<int> hostBondVec = infoVecs.nodeAdhereIndex;
thrust::host_vector<double> hostMembrTenMag = infoVecs.membrTensionMag;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
uint activeCellCount = allocPara_M.currentActiveCellCount;
uint maxNodePerCell = allocPara_M.maxAllNodePerCell;
uint maxMemNodePerCell = allocPara_M.maxMembrNodePerCell;
uint beginIndx = allocPara_M.bdryNodeCount;
//uint endIndx = beginIndx + activeCellCount * maxNodePerCell;
//uint cellRank1, nodeRank1, cellRank2, nodeRank2;
uint index1;
int index2;
std::vector<BondInfo> bondInfoVec;
for (uint i = 0; i < activeCellCount; i++) {
for (uint j = 0; j < maxMemNodePerCell; j++) {
index1 = beginIndx + i * maxNodePerCell + j;
if (hostIsActiveVec[index1] == true) {
index2 = hostBondVec[index1];
// check for "no bond" (-1) first so the signed index is never compared against an unsigned one
if (index2 != -1 && (uint) index2 > index1) {
BondInfo bond;
bond.cellRank1 = i;
bond.pos1 = CVector(hostTmpVectorLocX[index1],
hostTmpVectorLocY[index1], 0);
bond.cellRank2 = (index2 - beginIndx) / maxNodePerCell;
bond.pos2 = CVector(hostTmpVectorLocX[index2],
hostTmpVectorLocY[index2], 0);
bondInfoVec.push_back(bond);
}
}
}
}
vtkData.bondsInfo = bondInfoVec;
uint curIndex = 0;
for (uint i = 0; i < pairs.size(); i++) {
uint node1Index = pairs[i].first;
uint node2Index = pairs[i].second;
double node1X = hostTmpVectorLocX[node1Index];
double node1Y = hostTmpVectorLocY[node1Index];
double node2X = hostTmpVectorLocX[node2Index];
double node2Y = hostTmpVectorLocY[node2Index];
if (aniCri.isPairQualify_M(node1X, node1Y, node2X, node2Y)) {
IndexMap::iterator it = locIndexToAniIndexMap.find(pairs[i].first);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].first, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node1X, node1Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].second);
if (it == locIndexToAniIndexMap.end()) {
locIndexToAniIndexMap.insert(
std::pair<uint, uint>(pairs[i].second, curIndex));
curIndex++;
PointAniData ptAniData;
//ptAniData.colorScale = nodeTypeToScale(
// hostTmpVectorNodeType[node1Index]);
ptAniData.colorScale = -1;
ptAniData.colorScale2 = -1;//AAMIRI
ptAniData.pos = CVector(node2X, node2Y, 0);
vtkData.pointsAniData.push_back(ptAniData);
}
it = locIndexToAniIndexMap.find(pairs[i].first);
uint aniIndex1 = it->second;
it = locIndexToAniIndexMap.find(pairs[i].second);
uint aniIndex2 = it->second;
LinkAniData linkData;
linkData.node1Index = aniIndex1;
linkData.node2Index = aniIndex2;
vtkData.linksAniData.push_back(linkData);
}
}
return vtkData;
}
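// Uses lower_bound/upper_bound on the sorted, expanded bucket keys to record where each
// bucket's entries begin and end.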
void SceNodes::findBucketBounds() {
thrust::counting_iterator<unsigned int> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::findBucketBounds_M() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::findBucketBounds3D() {
thrust::counting_iterator<uint> search_begin(0);
thrust::lower_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount,
auxVecs.keyBegin.begin());
thrust::upper_bound(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExtProc_M, search_begin,
search_begin + domainPara.totalBucketCount, auxVecs.keyEnd.begin());
}
void SceNodes::prepareSceForceComputation() {
buildBuckets2D();
extendBuckets2D();
findBucketBounds();
}
void SceNodes::prepareSceForceComputation_M() {
buildBuckets2D_M();
extendBuckets2D_M();
findBucketBounds_M();
}
void SceNodes::prepareSceForceComputation3D() {
buildBuckets3D();
extendBuckets3D();
findBucketBounds3D();
}
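// Appends the node data of newly created cells right after the currently active cells and
// increases the active cell count accordingly.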
void SceNodes::addNewlyDividedCells(
thrust::device_vector<double> &nodeLocXNewCell,
thrust::device_vector<double> &nodeLocYNewCell,
thrust::device_vector<double> &nodeLocZNewCell,
thrust::device_vector<bool> &nodeIsActiveNewCell,
thrust::device_vector<SceNodeType> &nodeCellTypeNewCell) {
// data validation
uint nodesSize = nodeLocXNewCell.size();
assert(nodesSize % allocPara.maxNodeOfOneCell == 0);
uint addCellCount = nodesSize / allocPara.maxNodeOfOneCell;
// position that we will add newly divided cells.
uint shiftStartPosNewCell = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
thrust::copy(
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.begin(),
nodeLocYNewCell.begin(), nodeLocZNewCell.begin(),
nodeIsActiveNewCell.begin(),
nodeCellTypeNewCell.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(nodeLocXNewCell.end(),
nodeLocYNewCell.end(), nodeLocZNewCell.end(),
nodeIsActiveNewCell.end(),
nodeCellTypeNewCell.end())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(),
infoVecs.nodeCellType.begin()))
+ shiftStartPosNewCell);
// total number of cells has increased.
allocPara.currentActiveCellCount = allocPara.currentActiveCellCount
+ addCellCount;
}
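// Maps every active node to a 2D bucket key, sorts nodes by key, and drops entries whose key
// is UINT_MAX (inactive or out-of-range nodes).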
void SceNodes::buildBuckets2D() {
int totalActiveNodes;
if (controlPara.simuType != Disc_M) {
totalActiveNodes = allocPara.startPosCells
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
} else {
totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
}
auxVecs.bucketKeys.resize(totalActiveNodes);
auxVecs.bucketValues.resize(totalActiveNodes);
thrust::counting_iterator<uint> countingIterBegin(0);
thrust::counting_iterator<uint> countingIterEnd(totalActiveNodes);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), countingIterBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(), auxVecs.bucketKeys.end(),
auxVecs.bucketValues.begin());
// for nodes that are inactive, a key value of UINT_MAX is returned;
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.end(), UINT_MAX);
auxVecs.bucketKeys.erase(auxVecs.bucketKeys.end() - numberOfOutOfRange,
auxVecs.bucketKeys.end());
auxVecs.bucketValues.erase(auxVecs.bucketValues.end() - numberOfOutOfRange,
auxVecs.bucketValues.end());
}
void SceNodes::buildBuckets2D_M() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
pointToBucketIndex2D(domainPara.minX, domainPara.maxX,
domainPara.minY, domainPara.maxY, domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for nodes that are inactive, a key value of UINT_MAX is returned;
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
void SceNodes::buildBuckets3D() {
int totalActiveNodes = allocPara_M.bdryNodeCount
+ allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<uint> iBegin(0);
// takes counting iterator and coordinates
// return tuple of keys and values
// transform the points to their bucket indices
thrust::transform(
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin)),
make_zip_iterator(
make_tuple(infoVecs.nodeLocX.begin(),
infoVecs.nodeLocY.begin(),
infoVecs.nodeLocZ.begin(),
infoVecs.nodeIsActive.begin(), iBegin))
+ totalActiveNodes,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
BucketIndexer3D(domainPara.minX, domainPara.maxX, domainPara.minY,
domainPara.maxY, domainPara.minZ, domainPara.maxZ,
domainPara.gridSpacing));
// sort the points by their bucket index
thrust::sort_by_key(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes,
auxVecs.bucketValues.begin());
// for nodes that are inactive, a key value of UINT_MAX is returned;
// we need to remove those keys along with their values.
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeys.begin(),
auxVecs.bucketKeys.begin() + totalActiveNodes, UINT_MAX);
endIndx_M = totalActiveNodes - numberOfOutOfRange;
}
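// Device helpers below: pairwise distances and the individual force contributions (ECM,
// profile, intra-cell, inter-cell, ...) that the SCE force kernels accumulate.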
__device__
double computeDist(double &xPos, double &yPos, double &zPos, double &xPos2,
double &yPos2, double &zPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2)
+ (zPos - zPos2) * (zPos - zPos2));
}
__device__
double computeDist2D(double &xPos, double &yPos, double &xPos2, double &yPos2) {
return sqrt(
(xPos - xPos2) * (xPos - xPos2) + (yPos - yPos2) * (yPos - yPos2));
}
__device__
void calculateAndAddECMForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceECMPara[4]) {
forceValue = 0;
} else {
forceValue = -sceECMPara[0] / sceECMPara[2]
* exp(-linkLength / sceECMPara[2])
+ sceECMPara[1] / sceECMPara[3]
* exp(-linkLength / sceECMPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddProfileForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
forceValue = -sceProfilePara[5] * (linkLength - sceProfilePara[6]);
if (linkLength > 1.0e-12) {
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
}
__device__
void calculateAndAddIntraForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddIntraForceDiv(double& xPos, double& yPos, double& zPos,
double& xPos2, double& yPos2, double& zPos2, double& growPro,
double& xRes, double& yRes, double& zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue;
if (linkLength > sceIntraPara[4]) {
forceValue = 0;
} else {
if (growPro > sceIntraParaDiv[4]) {
double intraPara0 = growPro * (sceIntraParaDiv[0])
+ (1.0 - growPro) * sceIntraPara[0];
double intraPara1 = growPro * (sceIntraParaDiv[1])
+ (1.0 - growPro) * sceIntraPara[1];
double intraPara2 = growPro * (sceIntraParaDiv[2])
+ (1.0 - growPro) * sceIntraPara[2];
double intraPara3 = growPro * (sceIntraParaDiv[3])
+ (1.0 - growPro) * sceIntraPara[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
} else {
forceValue = -sceIntraPara[0] / sceIntraPara[2]
* exp(-linkLength / sceIntraPara[2])
+ sceIntraPara[1] / sceIntraPara[3]
* exp(-linkLength / sceIntraPara[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
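// Intra-cell force for the _M model; past the growth-progress criterion the parameters are
// linearly blended between the normal and the division parameter sets.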
__device__
void calAndAddIntraDiv_M(double& xPos, double& yPos, double& xPos2,
double& yPos2, double& growPro, double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (growPro > growthPrgrCriVal_M) {
if (linkLength > sceIntraParaDiv_M[4]) {
forceValue = 0;
} else {
double percent = (growPro - growthPrgrCriVal_M)
/ (1.0 - growthPrgrCriVal_M);
double intraPara0 = percent * (sceIntraParaDiv_M[0])
+ (1.0 - percent) * sceIntraPara_M[0];
double intraPara1 = percent * (sceIntraParaDiv_M[1])
+ (1.0 - percent) * sceIntraPara_M[1];
double intraPara2 = percent * (sceIntraParaDiv_M[2])
+ (1.0 - percent) * sceIntraPara_M[2];
double intraPara3 = percent * (sceIntraParaDiv_M[3])
+ (1.0 - percent) * sceIntraPara_M[3];
forceValue = -intraPara0 / intraPara2
* exp(-linkLength / intraPara2)
+ intraPara1 / intraPara3 * exp(-linkLength / intraPara3);
}
} else {
if (linkLength > sceIntraPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntraPara_M[0] / sceIntraPara_M[2]
* exp(-linkLength / sceIntraPara_M[2])
+ sceIntraPara_M[1] / sceIntraPara_M[3]
* exp(-linkLength / sceIntraPara_M[3]);
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
__device__
void calAndAddIntraB_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceIntnlBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceIntnlBPara_M[0] / sceIntnlBPara_M[2]
* exp(-linkLength / sceIntnlBPara_M[2])
+ sceIntnlBPara_M[1] / sceIntnlBPara_M[3]
* exp(-linkLength / sceIntnlBPara_M[3]);
}
//if (forceValue > 0) {
// forceValue = 0;
//}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
__device__
void calAndAddInter_M(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_M[4]) {
forceValue = 0;
} else {
forceValue = -sceInterBPara_M[0] / sceInterBPara_M[2]
* exp(-linkLength / sceInterBPara_M[2])
+ sceInterBPara_M[1] / sceInterBPara_M[3]
* exp(-linkLength / sceInterBPara_M[3]);
// if (forceValue > 0) { //Ali
// forceValue = 0;
// }
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali
__device__
void calAndAddInter_M2(double& xPos, double& yPos, double& xPos2, double& yPos2,
double& xRes, double& yRes) {
double linkLength = computeDist2D(xPos, yPos, xPos2, yPos2);
double forceValue;
if (linkLength > sceInterBPara_Jones_M[2]) {
forceValue = 0;
} else {
forceValue = 24 * sceInterBPara_Jones_M[0] / linkLength * pow(sceInterBPara_Jones_M[1] / linkLength, 6)
* (1.0 - 2.0 * pow(sceInterBPara_Jones_M[1] / linkLength, 6));
if (forceValue > 0) {
forceValue = 0;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
}
//Ali
__device__
void calculateAndAddInterForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calAndAddInterForceDisc(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes, double& interForceX, double& interForceY,
double& interForceZ) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
}
double fX = forceValue * (xPos2 - xPos) / linkLength;
double fY = forceValue * (yPos2 - yPos) / linkLength;
double fZ = forceValue * (zPos2 - zPos) / linkLength;
xRes = xRes + fX;
yRes = yRes + fY;
zRes = zRes + fZ;
interForceX = interForceX + fX;
interForceY = interForceY + fY;
interForceZ = interForceZ + fZ;
}
__device__
void calculateAndAddCartForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceCartPara[4]) {
forceValue = 0;
} else {
forceValue = -sceCartPara[0] / sceCartPara[2]
* exp(-linkLength / sceCartPara[2])
+ sceCartPara[1] / sceCartPara[3]
* exp(-linkLength / sceCartPara[3]);
if (linkLength > 1.0e-12) {
//double dotProduct = (xPos2 - xPos) / linkLength * cartGrowDirVec[0]
// + (yPos2 - yPos) / linkLength * cartGrowDirVec[1]
// + (zPos2 - zPos) / linkLength * cartGrowDirVec[2];
//forceValue = forceValue * dotProduct;
// this is just a temporary solution -- the direction should not be fixed.
xRes = xRes - forceValue * cartGrowDirVec[0];
yRes = yRes - forceValue * cartGrowDirVec[1];
zRes = zRes - forceValue * cartGrowDirVec[2];
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
if (forceValue > 0) {
//forceValue = forceValue * 0.01;
forceValue = 0;
//xRes = xRes + forceValue * (xPos2 - xPos);
//yRes = yRes + forceValue * (yPos2 - yPos);
//zRes = zRes + forceValue * (zPos2 - zPos);
}
}
}
__device__
void calculateAndAddDiffInterCellForce(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterDiffPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterDiffPara[0] / sceInterDiffPara[2]
* exp(-linkLength / sceInterDiffPara[2])
+ sceInterDiffPara[1] / sceInterDiffPara[3]
* exp(-linkLength / sceInterDiffPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.2;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
__device__
void calculateAndAddInterForceDiffType(double &xPos, double &yPos, double &zPos,
double &xPos2, double &yPos2, double &zPos2, double &xRes, double &yRes,
double &zRes) {
double linkLength = computeDist(xPos, yPos, zPos, xPos2, yPos2, zPos2);
double forceValue = 0;
if (linkLength > sceInterPara[4]) {
forceValue = 0;
} else {
forceValue = -sceInterPara[0] / sceInterPara[2]
* exp(-linkLength / sceInterPara[2])
+ sceInterPara[1] / sceInterPara[3]
* exp(-linkLength / sceInterPara[3]);
if (forceValue > 0) {
//forceValue = 0;
forceValue = forceValue * 0.3;
}
}
xRes = xRes + forceValue * (xPos2 - xPos) / linkLength;
yRes = yRes + forceValue * (yPos2 - yPos) / linkLength;
zRes = zRes + forceValue * (zPos2 - zPos) / linkLength;
}
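// Predicates used by the force kernels to classify a node pair: same cell, membrane vs.
// internal node, ECM neighbors, profile neighbors, etc.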
__device__ bool bothNodesCellNode(uint nodeGlobalRank1, uint nodeGlobalRank2,
uint cellNodesThreshold) {
if (nodeGlobalRank1 < cellNodesThreshold
&& nodeGlobalRank2 < cellNodesThreshold) {
return true;
} else {
return false;
}
}
__device__ bool isSameCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos
|| nodeGlobalRank2 < cellNodeBeginPos) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos) / nodeCountPerCell
== (nodeGlobalRank2 - cellNodeBeginPos) / nodeCountPerCell) {
return true;
} else {
return false;
}
}
//Ali
__device__
bool Is_Lennard_Jones() {
if (sceInterBPara_Jones_On_M==1) {
return true ;
}
else {
return false ;
}
}
__device__
bool isSameCell_m(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
if ((nodeGlobalRank1 - cellNodeBeginPos_M) / allNodeCountPerCell_M
== (nodeGlobalRank2 - cellNodeBeginPos_M) / allNodeCountPerCell_M) {
return true;
} else {
return false;
}
}
__device__
bool bothInternal(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 >= membrThreshold_M && nodeRank2 >= membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembr(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
__device__
bool bothMembrDiffCell(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 == cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeRank1 < membrThreshold_M && nodeRank2 < membrThreshold_M) {
return true;
} else {
return false;
}
}
//AAMIRI
/*
__device__
bool isNodeOnMembrane(uint nodeGlobalRank) {
uint nodeRank = (nodeGlobalRank - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if (nodeGlobalRank >= cellNodeBeginPos_M && nodeRank < membrThreshold_M){
return true;
} else{
return false;
}
}
*/
__device__
bool sameCellMemIntnl(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if (nodeGlobalRank1 < cellNodeBeginPos_M
|| nodeGlobalRank2 < cellNodeBeginPos_M) {
return false;
}
uint cellRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
uint cellRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
/ allNodeCountPerCell_M;
if (cellRank1 != cellRank2) {
return false;
}
uint nodeRank1 = (nodeGlobalRank1 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
uint nodeRank2 = (nodeGlobalRank2 - cellNodeBeginPos_M)
% allNodeCountPerCell_M;
if ((nodeRank1 < membrThreshold_M && nodeRank2 >= membrThreshold_M)
|| (nodeRank2 < membrThreshold_M && nodeRank1 >= membrThreshold_M)) {
return true;
} else {
return false;
}
}
__device__ bool isSameECM(uint nodeGlobalRank1, uint nodeGlobalRank2) {
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
return true;
} else {
return false;
}
}
__device__ bool isNeighborECMNodes(uint nodeGlobalRank1, uint nodeGlobalRank2) {
// this means that two nodes are from the same ECM
if ((nodeGlobalRank1 - ECMbeginPos) / nodeCountPerECM
== (nodeGlobalRank2 - ECMbeginPos) / nodeCountPerECM) {
// this means that two nodes are actually close to each other
// written this way (checking both orders) because the ranks are unsigned and cannot be subtracted directly.
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
}
return false;
}
__device__ bool isNeighborProfileNodes(uint nodeGlobalRank1,
uint nodeGlobalRank2) {
if ((nodeGlobalRank1 > nodeGlobalRank2
&& nodeGlobalRank1 - nodeGlobalRank2 == 1)
|| (nodeGlobalRank2 > nodeGlobalRank1
&& nodeGlobalRank2 - nodeGlobalRank1 == 1)) {
return true;
}
return false;
}
__device__ bool ofSameType(uint cellType1, uint cellType2) {
if (cellType1 == cellType2) {
return true;
} else {
return false;
}
}
__device__ bool bothCellNodes(SceNodeType &type1, SceNodeType &type2) {
if ((type1 == MX || type1 == FNM) && (type2 == MX || type2 == FNM)) {
return true;
} else {
return false;
}
}
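// Tracks the closest candidate adhesion partner within bondAdhCriLen_M while scanning
// neighbors; isSuccess/index/dist carry the running best match.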
__device__
void attemptToAdhere(bool& isSuccess, uint& index, double& dist,
uint& nodeRank2, double& xPos1, double& yPos1, double& xPos2,
double& yPos2) {
double length = computeDist2D(xPos1, yPos1, xPos2, yPos2);
if (length <= bondAdhCriLen_M) {
if (isSuccess) {
if (length < dist) {
dist = length;
index = nodeRank2;
}
} else {
isSuccess = true;
index = nodeRank2;
dist = length;
}
}
}
__device__
void handleAdhesionForce_M(int& adhereIndex, double& xPos, double& yPos,
double& curAdherePosX, double& curAdherePosY, double& xRes,
double& yRes, double& alpha, double & beta) {
double curLen = computeDist2D(xPos, yPos, curAdherePosX, curAdherePosY);
//if (curLen > maxAdhBondLen_M) {
// adhereIndex = -1;
// return;
// } else {
if (curLen > minAdhBondLen_M) {
double forceValue = beta*(curLen - minAdhBondLen_M) * (bondStiff_M * alpha + bondStiff_Mitotic * (1.0-alpha) );
xRes = xRes + forceValue * (curAdherePosX - xPos) / curLen;
yRes = yRes + forceValue * (curAdherePosY - yPos) / curLen;
}
// }
}
//Ali for reaction force
__device__
void handleAdhesionForce_M2(double& xPos, double& yPos,
double& curAdherePosX, double& curAdherePosY, double& xRes,
double& yRes, double& alpha) {
double curLen = computeDist2D(xPos, yPos, curAdherePosX, curAdherePosY);
if (curLen > minAdhBondLen_M ) {
double forceValue = (curLen - minAdhBondLen_M) * (bondStiff_M * alpha + bondStiff_Mitotic * (1.0-alpha) );
xRes = forceValue * (curAdherePosX - xPos) / curLen;
yRes = forceValue * (curAdherePosY - yPos) / curLen;
}
else {
xRes=0 ;
yRes=0 ;
}
}
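//Illustrative host-side sketch (not part of the original device code): the adhesion
//spring force computed by handleAdhesionForce_M2 above, written out for a single pair of
//points (handleAdhesionForce_M additionally scales the same force by beta). The numeric
//constants are made-up example values standing in for the device constants bondStiff_M,
//bondStiff_Mitotic and minAdhBondLen_M.
static void adhesionForceSketch(double xPos, double yPos, double xAdh, double yAdh,
		double alpha, double& fx, double& fy) {
	const double exampleBondStiff = 30.0;        // assumed stand-in for bondStiff_M
	const double exampleBondStiffMitotic = 15.0; // assumed stand-in for bondStiff_Mitotic
	const double exampleMinAdhBondLen = 0.05;    // assumed stand-in for minAdhBondLen_M
	double dx = xAdh - xPos;
	double dy = yAdh - yPos;
	double curLen = sqrt(dx * dx + dy * dy);
	fx = 0.0;
	fy = 0.0;
	if (curLen > exampleMinAdhBondLen) {
		// linear spring beyond the rest length, with the stiffness blended by alpha
		double forceValue = (curLen - exampleMinAdhBondLen)
				* (exampleBondStiff * alpha + exampleBondStiffMitotic * (1.0 - alpha));
		fx = forceValue * dx / curLen;
		fy = forceValue * dy / curLen;
	}
}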
//Ali June 16
__device__
double getMitoticAdhCoef(double& growProg, double& growProgNeigh){
double alpha = 1.0;
if (growProg > growthPrgrCriVal_M && growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - ( 0.5*(growProg+growProgNeigh)-growthPrgrCriVal_M )/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProg > growthPrgrCriVal_M){
alpha = 1.0 - (growProg-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
else if (growProgNeigh > growthPrgrCriVal_M){
alpha = 1.0 - (growProgNeigh-growthPrgrCriVal_M)/(1.0 - growthPrgrCriVal_M);
// adhSkipped = true;
}
return alpha;
}
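//Illustrative sketch (not part of the original device code): a host-side version of
//getMitoticAdhCoef() above with a made-up critical value, showing how the adhesion
//coefficient ramps down linearly once growth progress passes the threshold. The value
//0.9 is only an assumed example; the real threshold is the device constant
//growthPrgrCriVal_M.
static double mitoticAdhCoefSketch(double growProg, double growProgNeigh) {
	const double criVal = 0.9; // assumed example threshold
	double alpha = 1.0;
	if (growProg > criVal && growProgNeigh > criVal) {
		alpha = 1.0 - (0.5 * (growProg + growProgNeigh) - criVal) / (1.0 - criVal);
	} else if (growProg > criVal) {
		alpha = 1.0 - (growProg - criVal) / (1.0 - criVal);
	} else if (growProgNeigh > criVal) {
		alpha = 1.0 - (growProgNeigh - criVal) / (1.0 - criVal);
	}
	// e.g. growProg = 0.95 and growProgNeigh = 0.0 give alpha = 0.5 with this threshold
	return alpha;
}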
__device__
void calculateForceBetweenLinkNodes(double &xLoc, double &yLoc, double &zLoc,
double &xLocLeft, double &yLocLeft, double &zLocLeft, double &xLocRight,
double &yLocRight, double &zLocRight, double &xVel, double &yVel,
double &zVel) {
double linkLengthLeft = computeDist(xLoc, yLoc, zLoc, xLocLeft, yLocLeft,
zLocLeft);
double forceValueLeft = sceProfilePara[5]
* (linkLengthLeft - sceProfilePara[6]);
xVel = xVel + forceValueLeft * (xLocLeft - xLoc) / linkLengthLeft;
yVel = yVel + forceValueLeft * (yLocLeft - yLoc) / linkLengthLeft;
zVel = zVel + forceValueLeft * (zLocLeft - zLoc) / linkLengthLeft;
double linkLengthRight = computeDist(xLoc, yLoc, zLoc, xLocRight, yLocRight,
zLocRight);
double forceValueRight = sceProfilePara[5]
* (linkLengthRight - sceProfilePara[6]);
xVel = xVel + forceValueRight * (xLocRight - xLoc) / linkLengthRight;
yVel = yVel + forceValueRight * (yLocRight - yLoc) / linkLengthRight;
zVel = zVel + forceValueRight * (zLocRight - zLoc) / linkLengthRight;
}
__device__
void handleSceForceNodesBasic(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress) {
if (isSameCell(nodeRank1, nodeRank2)) {
calculateAndAddIntraForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
} else {
calculateAndAddInterForce(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes);
}
}
__device__
void handleSceForceNodesDisc(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& zPos, double& xPos2, double& yPos2, double& zPos2,
double& xRes, double& yRes, double& zRes, double& interForceX,
double& interForceY, double& interForceZ, double* _nodeLocXAddress,
double* _nodeLocYAddress, double* _nodeLocZAddress,
double* _nodeGrowProAddr) {
if (isSameCell(nodeRank1, nodeRank2)) {
calAndAddIntraForceDiv(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2],
_nodeGrowProAddr[nodeRank2], xRes, yRes, zRes);
} else {
calAndAddInterForceDisc(xPos, yPos, zPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeLocZAddress[nodeRank2], xRes,
yRes, zRes, interForceX, interForceY, interForceZ);
}
}
__device__
void handleSceForceNodesDisc_M(uint& nodeRank1, uint& nodeRank2, double& xPos,
double& yPos, double& xPos2, double& yPos2, double& xRes, double& yRes,
double* _nodeLocXAddress, double* _nodeLocYAddress,
double* _nodeGrowProAddr) {
if (isSameCell_m(nodeRank1, nodeRank2)) {
if (bothInternal(nodeRank1, nodeRank2)) {
// both nodes are internal type.
calAndAddIntraDiv_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], _nodeGrowProAddr[nodeRank2],
xRes, yRes);
} else if (bothMembr(nodeRank1, nodeRank2)) {
			// both nodes are epithelium (membrane) type; no SCE force is applied,
			// so there is nothing to do here.
} else {
			// one node is epithelium (membrane) type and the other is internal type.
calAndAddIntraB_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
} else {
if (bothMembr(nodeRank1, nodeRank2)) {
calAndAddInter_M(xPos, yPos, _nodeLocXAddress[nodeRank2],
_nodeLocYAddress[nodeRank2], xRes, yRes);
}
}
}
void SceNodes::extendBuckets2D() {
static const uint extensionFactor2D = 9;
uint valuesCount = auxVecs.bucketValues.size();
auxVecs.bucketKeysExpanded.resize(valuesCount * extensionFactor2D);
auxVecs.bucketValuesIncludingNeighbor.resize(
valuesCount * extensionFactor2D);
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(extensionFactor2D);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not a change of value.
	 * e.g. if the movement is 5 and the iterator was initialized with 9,
	 * the resulting range is [9,9,9,9,9].
*/
thrust::constant_iterator<uint> last = first + valuesCount;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::counting_iterator<uint> countingEnd = countingBegin
+ valuesCount * extensionFactor2D;
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.end(), countingEnd)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(), UINT_MAX);
int sizeBeforeShrink = auxVecs.bucketKeysExpanded.size();
int numberInsideRange = sizeBeforeShrink - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.end(),
auxVecs.bucketValuesIncludingNeighbor.begin());
auxVecs.bucketKeysExpanded.erase(
auxVecs.bucketKeysExpanded.begin() + numberInsideRange,
auxVecs.bucketKeysExpanded.end());
auxVecs.bucketValuesIncludingNeighbor.erase(
auxVecs.bucketValuesIncludingNeighbor.begin() + numberInsideRange,
auxVecs.bucketValuesIncludingNeighbor.end());
}
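//Illustrative host-side sketch (not part of the original code) of what the
//constant_iterator + expand step in extendBuckets2D() above achieves: every
//(bucketKey, bucketValue) pair is replicated extensionFactor2D (= 9) times so that the
//follow-up transform with NeighborFunctor2D can shift each copy into one of the nine
//neighboring buckets. Plain std::vector is used here purely for illustration.
static void expandBucketsSketch(const std::vector<uint>& keys,
		const std::vector<uint>& values, std::vector<uint>& keysExpanded,
		std::vector<uint>& valuesExpanded) {
	const uint extensionFactor2D = 9;
	keysExpanded.clear();
	valuesExpanded.clear();
	for (uint i = 0; i < keys.size(); i++) {
		for (uint copy = 0; copy < extensionFactor2D; copy++) {
			keysExpanded.push_back(keys[i]);     // the same key repeated 9 times ...
			valuesExpanded.push_back(values[i]); // ... each copy paired with the same node index
		}
	}
}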
void SceNodes::extendBuckets2D_M() {
endIndxExt_M = endIndx_M * 9;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(9);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not a change of value.
	 * e.g. if the movement is 5 and the iterator was initialized with 9,
	 * the resulting range is [9,9,9,9,9].
*/
thrust::constant_iterator<uint> last = first + endIndx_M;
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NeighborFunctor2D(domainPara.XBucketSize, domainPara.YBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
void SceNodes::extendBuckets3D() {
endIndxExt_M = endIndx_M * 27;
/**
* beginning of constant iterator
*/
thrust::constant_iterator<uint> first(27);
/**
* end of constant iterator.
	 * the plus sign only indicates movement of position, not a change of value.
	 * e.g. if the movement is 5 and the iterator was initialized with 27,
	 * the resulting range is [27,27,27,27,27].
*/
thrust::constant_iterator<uint> last = first + endIndx_M; // this is NOT numerical addition!
expand(first, last,
make_zip_iterator(
make_tuple(auxVecs.bucketKeys.begin(),
auxVecs.bucketValues.begin())),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketValuesIncludingNeighbor.begin())));
thrust::counting_iterator<uint> countingBegin(0);
thrust::transform(
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)) + endIndxExt_M,
make_zip_iterator(
make_tuple(auxVecs.bucketKeysExpanded.begin(),
countingBegin)),
NgbrFunc3D(domainPara.XBucketSize, domainPara.YBucketSize,
domainPara.ZBucketSize));
int numberOfOutOfRange = thrust::count(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M, UINT_MAX);
endIndxExtProc_M = endIndxExt_M - numberOfOutOfRange;
thrust::sort_by_key(auxVecs.bucketKeysExpanded.begin(),
auxVecs.bucketKeysExpanded.begin() + endIndxExt_M,
auxVecs.bucketValuesIncludingNeighbor.begin());
}
void SceNodes::applySceForcesBasic() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceBasic(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress));
}
void SceNodes::applySceForcesDisc() {
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeLocZAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocZ[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.end()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.end()),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.end()),
make_permutation_iterator(infoVecs.nodeLocZ.begin(),
auxVecs.bucketValues.end()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelZ.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceY.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(
infoVecs.nodeInterForceZ.begin(),
auxVecs.bucketValues.begin()))),
AddSceForceDisc(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeLocZAddress, nodeGrowProAddr));
}
void SceNodes::applySceForcesDisc_M() {
ECellType eCellTypeTmp ;
thrust::host_vector <ECellType> eCellTypeVHost ;
eCellTypeVHost.resize(allocPara_M.currentActiveCellCount, notActive) ;
//for (int i= 0 ; i<allocPara_M.currentActiveCellCount; i++) {
// eCellTypeTmp=cellsSceNodes->getCellInfoVecs().eCellTypeV2[i];
// cout << "Epithelial cell type is="<<eCellTypeTmp <<endl ;
//eCellTypeVHost.push_back(eCellTypeTmp) ;
// }
if (adhUpdate) {
adhUpdate=false ;
int maxNumAdh=180 ;
//vector <ECellType> eCellTypeV2Host ;
thrust :: copy (infoVecs.nodeLocX.begin(),infoVecs.nodeLocX.end(),infoVecs.nodeLocXHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeLocY.begin(),infoVecs.nodeLocY.end(),infoVecs.nodeLocYHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeIsActive.begin(),infoVecs.nodeIsActive.end(),infoVecs.nodeIsActiveHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeCellRankFront.begin() ,infoVecs.nodeCellRankFront.end() ,infoVecs.nodeCellRankFrontHost.begin()) ; // Ali
thrust :: copy (infoVecs.nodeCellRankBehind.begin(),infoVecs.nodeCellRankBehind.end(),infoVecs.nodeCellRankBehindHost.begin()) ; // Ali
thrust :: copy (infoVecs.memNodeType1.begin(),infoVecs.memNodeType1.end(),infoVecs.memNodeType1Host.begin()) ; // Ali
cout << " I am right before cell type vector" << endl ;
thrust :: copy (cellsSceNodes->getCellInfoVecs().eCellTypeV2.begin(),cellsSceNodes->getCellInfoVecs().eCellTypeV2.begin()+allocPara_M.currentActiveCellCount, eCellTypeVHost.begin()) ;
cout << " I am right after cell type vector" << endl ;
thrust::fill(infoVecs.nodeAdhereIndexHost.begin(),infoVecs.nodeAdhereIndexHost.end(), -1) ; //Ali it is important to reset the values
thrust::fill(infoVecs.nodeMemMirrorIndexHost.begin(),infoVecs.nodeMemMirrorIndexHost.end(), -1) ; //Ali it is important to reset the values
//thrust::fill(infoVecs.nodeIsLateralMemHost.begin(),infoVecs.nodeIsLateralMemHost.end(), false) ; //Ali
thrust::fill(infoVecs.nodeAdhMinDist.begin(),infoVecs.nodeAdhMinDist.end(), 10000) ; //Ali
int totalActiveNodes = allocPara_M.currentActiveCellCount* allocPara_M.maxAllNodePerCell; // Ali
int maxMembNode= allocPara_M.maxMembrNodePerCell ;
int maxNodePerCell= allocPara_M.maxAllNodePerCell ;
double distMinP2,distP2 ;
int indexAdhNode ;
bool findAnyNode ;
double maxAdhLen= mechPara_M.bondAdhCriLenCPU_M;
int cellRankTmp1, cellRankTmp2 ;
int deactiveIdMyPair, deactiveIdAdhPair ;
	// use std::vector instead of runtime-sized C arrays (variable-length arrays are not standard C++)
	std::vector<int> activeMemCount(allocPara_M.currentActiveCellCount);
	std::vector<int> firstApiLat(allocPara_M.currentActiveCellCount);
	std::vector<int> secondApiLat(allocPara_M.currentActiveCellCount);
int cellRank, iNext, jJunction ;
std::vector <SubApicalInfoEachCell> subApicalInfo ;
cout << "I am inside the function for finding adhesion pair" << endl ;
//setup required basic parameters
for (int i=0 ; i< allocPara_M.currentActiveCellCount ; i++ ){
activeMemCount[i] = 0 ;
}
for (int i=0 ; i<totalActiveNodes ; i++) {
infoVecs.isSubApicalJunctionHost[i]=false ;
}
for (int i=0 ; i<totalActiveNodes ; i++) {
if (infoVecs.nodeIsActiveHost[i]==true && (i%maxNodePerCell)<maxMembNode){
cellRank=i/maxNodePerCell ;
activeMemCount [cellRank]=activeMemCount [cellRank]+1 ;
}
}
subApicalInfo.clear() ;
//Find the subapical nodes in front of the cell
int cellRankOld=-1 ;
for (int i=0 ; i<totalActiveNodes ; i++) {
if (infoVecs.nodeIsActiveHost[i]==true && (i%maxNodePerCell)<maxMembNode){ // check active and membrane
cellRank=i/maxNodePerCell ;
eCellTypeTmp=eCellTypeVHost[cellRank];
iNext=i+1 ;
if ( (i%maxNodePerCell)==(activeMemCount[cellRank]-1)) { // if the node is the last node of cell's membrane
iNext=iNext-activeMemCount [cellRank] ;
}
if ( infoVecs.memNodeType1Host[i]==lateralA && infoVecs.memNodeType1Host[iNext]==apical1 ) { // find the apical junction
firstApiLat[cellRank]=i ; // lateral node
for (int j=0 ; j<NumAdhAfter(cellRank,eCellTypeTmp) ; j++) { //find junction nodes //
jJunction=firstApiLat[cellRank]-j ;
if (jJunction <(cellRank*maxNodePerCell)) {
jJunction=jJunction + activeMemCount [cellRank] ;
//cout << " The subApicalNodes of cell rank " << cellRank << " passed the first node ID" << endl ;
}
infoVecs.isSubApicalJunctionHost[jJunction]=true ;
if (cellRank !=cellRankOld) {
cout << " for cell rank= " << cellRank << " subapicalInfo has been created." << endl ;
SubApicalInfoEachCell subApicalInfoEachCell(maxNumAdh);
subApicalInfo.push_back(subApicalInfoEachCell);
cellRankOld=cellRank ;
}
subApicalInfo[cellRank].nodeIdFront[j]=jJunction ;
}
}
}
}
cout << "first set of adhesion joints are found" << endl ;
	//Find the subapical nodes supposedly behind (before) the cell
for (int i=0 ; i<totalActiveNodes ; i++) {
if (infoVecs.nodeIsActiveHost[i]==true && (i%maxNodePerCell)<maxMembNode){
cellRank=i/maxNodePerCell ;
//eCellType= eCellTypeV2Host[cellRank];
eCellTypeTmp= eCellTypeVHost [cellRank];
iNext=i+1 ;
if ( (i%maxNodePerCell)==(activeMemCount [cellRank]-1)) {
iNext=iNext-activeMemCount [cellRank] ;
}
if (infoVecs.memNodeType1Host[i]==apical1 && ( infoVecs.memNodeType1Host[iNext]==lateralB ) ) {
secondApiLat[cellRank]=iNext ;
for (int j=0 ; j<NumAdhBefore(cellRank,eCellTypeTmp) ; j++) { //find junction nodes
jJunction=secondApiLat[cellRank]+j ;
if (jJunction>=(cellRank*maxNodePerCell+activeMemCount [cellRank]) ) {
jJunction=jJunction - activeMemCount [cellRank];
//cout << " The subApicalNodes of cell rank " << cellRank << " passed the last node ID" << endl ;
}
infoVecs.isSubApicalJunctionHost[jJunction]=true ;
subApicalInfo[cellRank].nodeIdBehind[j]=jJunction ; // the vector of structures for active cells has already been generated.
}
}
}
}
cout << "Second set of adhesion joints are found" << endl ;
//for (int i=0 ; i<totalActiveNodes ; i++) {
// if (infoVecs.isSubApicalJunctionHost[i]) {
// cout << "for cell with rank " <<int(i/maxNodePerCell) << "node rank of subApical junction is " << i << endl ;
// }
// }
cout << " size of vector storing information of apical junctions is " << subApicalInfo.size() << endl ;
if (subApicalInfo.size() != 0 ) { // to pass the first time step in which the membrane node type is not defined.
for ( int i= 0 ; i<allocPara_M.currentActiveCellCount ; i++) {
for ( int j=0 ; j<maxNumAdh ; j++) {
int idFront=subApicalInfo[i].nodeIdFront[j] ;
int idBehind=subApicalInfo[i].nodeIdBehind[j] ;
int cellRankFront=infoVecs.nodeCellRankFrontHost[i] ;
int cellRankBehind=infoVecs.nodeCellRankBehindHost[i] ;
if (idFront != -1) {
infoVecs.nodeAdhereIndexHost[idFront]=subApicalInfo[cellRankFront].nodeIdBehind[j] ;
}
if (idBehind !=-1) {
infoVecs.nodeAdhereIndexHost[idBehind]=subApicalInfo[cellRankBehind].nodeIdFront[j] ;
}
				// also require valid (non -1) ids before using them as indices below
				if ( eCellTypeVHost[i]==pouch && idFront != -1 && idBehind != -1
						&& NumAdhBefore(i,pouch)==NumAdhAfter(i,pouch) ) {
					infoVecs.nodeMemMirrorIndexHost[idFront]=idBehind ;
					infoVecs.nodeMemMirrorIndexHost[idBehind]=idFront ;
				}
}
}
/////////////////////////////////// start adhesion for apical nodes of pouch cells with apical nodes of peripodial cells ///////////////////////
for (int i=0 ; i<totalActiveNodes ; i++) {
cellRankTmp1=i/maxNodePerCell ;
distMinP2=10000 ; // large number
findAnyNode=false ;
if (eCellTypeVHost[cellRankTmp1]==pouch && infoVecs.memNodeType1Host[i]==apical1) {
for (int j=0 ; j<totalActiveNodes ; j++) {
cellRankTmp2=j/maxNodePerCell ;
// if ( cellRankTmp2>=74 && cellRankTmp2<=76 && infoVecs.memNodeType1Host[i]==apical1) {
if (eCellTypeVHost[cellRankTmp2]==peri && infoVecs.memNodeType1Host[j]==apical1 ) {
distP2=pow( infoVecs.nodeLocXHost[i]-infoVecs.nodeLocXHost[j],2)+
pow( infoVecs.nodeLocYHost[i]-infoVecs.nodeLocYHost[j],2) ;
if (distP2<distMinP2 && distP2<maxAdhLen*maxAdhLen) {
cout << " I am inside a function where there is one apical pouch and one apical perip node and it is min" << endl ;
distMinP2=distP2 ;
indexAdhNode=j ;
findAnyNode=true ;
}
}
}
}
if ( findAnyNode && sqrt(distMinP2)<infoVecs.nodeAdhMinDist[indexAdhNode]){
cout << " I am inside apical adhesion" << endl ;
deactiveIdAdhPair=infoVecs.nodeAdhereIndexHost[indexAdhNode] ;
if (deactiveIdAdhPair != -1){
cout << " I am inside deactiving one perip adhesion" << endl ;
infoVecs.nodeAdhereIndexHost[deactiveIdAdhPair]=-1 ;
infoVecs.nodeAdhMinDist[deactiveIdAdhPair]=10000 ;
}
infoVecs.nodeAdhereIndexHost[i]=indexAdhNode ;
infoVecs.nodeAdhereIndexHost[indexAdhNode]=i ;
infoVecs.nodeAdhMinDist[indexAdhNode]=sqrt(distMinP2) ;
infoVecs.nodeAdhMinDist[i]=sqrt(distMinP2) ;
}
}
cout << " I am ready to copy the data in adhesion function to the GPU " << endl ;
/////////////////////////////////// start adhesion for apical nodes of pouch cells with apical nodes of peripodial cells ///////////////////////
} // finish if of bypassing the first time
// copy back to GPU
thrust::copy(infoVecs.nodeAdhereIndexHost.begin(),infoVecs.nodeAdhereIndexHost.end(), infoVecs.nodeAdhereIndex.begin()) ; //Ali
thrust::copy(infoVecs.nodeMemMirrorIndexHost.begin(),infoVecs.nodeMemMirrorIndexHost.end(), infoVecs.nodeMemMirrorIndex.begin()) ; //Ali
thrust::copy(infoVecs.isSubApicalJunctionHost.begin(),infoVecs.isSubApicalJunctionHost.end(), infoVecs.isSubApicalJunction.begin()) ; //Ali
} // finish the if condition for finding the pair node
uint* valueAddress = thrust::raw_pointer_cast(
&auxVecs.bucketValuesIncludingNeighbor[0]);
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
int* membrIntnlAddress = thrust::raw_pointer_cast(
&infoVecs.membrIntnlIndex[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(
&infoVecs.nodeGrowPro[0]);
thrust::transform(
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin()),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin()),
auxVecs.bucketValues.begin(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin()))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(auxVecs.keyBegin.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
make_permutation_iterator(auxVecs.keyEnd.begin(),
auxVecs.bucketKeys.begin() + endIndx_M),
auxVecs.bucketValues.end(),
make_permutation_iterator(infoVecs.nodeLocX.begin(),
auxVecs.bucketValues.begin() + endIndx_M),
make_permutation_iterator(infoVecs.nodeLocY.begin(),
auxVecs.bucketValues.begin() + endIndx_M))),
make_zip_iterator(
make_tuple(
make_permutation_iterator(infoVecs.nodeVelX.begin(),
auxVecs.bucketValues.begin()),
make_permutation_iterator(infoVecs.nodeVelY.begin(),
auxVecs.bucketValues.begin()))),
AddForceDisc_M(valueAddress, nodeLocXAddress, nodeLocYAddress,
nodeAdhIdxAddress, membrIntnlAddress, nodeGrowProAddr,adhNotSet));
}
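//Illustrative host-side sketch (not part of the original code) of the nearest-neighbor
//search used above when pairing pouch apical nodes with peripodial apical nodes: squared
//distances are compared against a squared cutoff so that sqrt() is only needed for the
//accepted winner. All names below are local to this sketch.
static int nearestWithinCutoffSketch(double x, double y, const std::vector<double>& xs,
		const std::vector<double>& ys, double cutoff) {
	double bestDist2 = cutoff * cutoff;
	int bestIndex = -1;
	for (uint j = 0; j < xs.size(); j++) {
		double dx = xs[j] - x;
		double dy = ys[j] - y;
		double dist2 = dx * dx + dy * dy;
		if (dist2 < bestDist2) { // strictly closer than the current best and inside the cutoff
			bestDist2 = dist2;
			bestIndex = (int) j;
		}
	}
	return bestIndex; // -1 means no candidate was found within the cutoff
}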
const SceDomainPara& SceNodes::getDomainPara() const {
return domainPara;
}
void SceNodes::setDomainPara(const SceDomainPara& domainPara) {
this->domainPara = domainPara;
}
const NodeAllocPara& SceNodes::getAllocPara() const {
return allocPara;
}
void SceNodes::setAllocPara(const NodeAllocPara& allocPara) {
this->allocPara = allocPara;
}
const NodeAuxVecs& SceNodes::getAuxVecs() const {
return auxVecs;
}
void SceNodes::setAuxVecs(const NodeAuxVecs& auxVecs) {
this->auxVecs = auxVecs;
}
NodeInfoVecs& SceNodes::getInfoVecs() {
return infoVecs;
}
std::vector<std::vector<int> > SceNodes::obtainLabelMatrix(
PixelizePara& pixelPara) {
std::vector<std::vector<int> > result;
std::vector<NodeWithLabel> nodeLabels;
ResAnalysisHelper resHelper;
resHelper.setPixelPara(pixelPara);
thrust::host_vector<double> hostTmpVectorLocX = infoVecs.nodeLocX;
thrust::host_vector<double> hostTmpVectorLocY = infoVecs.nodeLocY;
thrust::host_vector<double> hostTmpVectorLocZ = infoVecs.nodeLocZ;
thrust::host_vector<SceNodeType> hostTmpVectorNodeType =
infoVecs.nodeCellType;
thrust::host_vector<uint> hostTmpVectorNodeRank = infoVecs.nodeCellRank;
thrust::host_vector<uint> hostTmpVectorIsActive = infoVecs.nodeIsActive;
uint startIndex = allocPara.startPosCells;
uint endIndex = startIndex
+ allocPara.currentActiveCellCount * allocPara.maxNodeOfOneCell;
for (uint i = startIndex; i < endIndex; i++) {
if (hostTmpVectorIsActive[i] == true) {
NodeWithLabel nodeLabel;
nodeLabel.cellRank = hostTmpVectorNodeRank[i];
nodeLabel.position = CVector(hostTmpVectorLocX[i],
hostTmpVectorLocY[i], hostTmpVectorLocZ[i]);
nodeLabels.push_back(nodeLabel);
}
}
result = resHelper.outputLabelMatrix(nodeLabels);
return result;
}
void SceNodes::initControlPara(bool isStab) {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
controlPara.simuType = parseTypeFromConfig(simuTypeConfigValue);
controlPara.controlSwitchs.outputBmpImg = globalConfigVars.getSwitchState(
"Switch_OutputBMP");
controlPara.controlSwitchs.outputLabelMatrix =
globalConfigVars.getSwitchState("Switch_OutputLabelMatrix");
controlPara.controlSwitchs.outputStat = globalConfigVars.getSwitchState(
"Switch_OutputStat");
controlPara.controlSwitchs.outputVtkFile = globalConfigVars.getSwitchState(
"Switch_OutputVtk");
if (isStab) {
controlPara.controlSwitchs.stab = ON;
} else {
controlPara.controlSwitchs.stab = OFF;
}
}
void SceNodes::sceForcesPerfTesting() {
prepareSceForceComputation();
applySceForcesBasic();
}
void SceNodes::sceForcesPerfTesting_M() {
prepareSceForceComputation_M();
applySceForcesBasic_M();
}
void SceNodes::applySceForcesBasic_M() {
}
void SceNodes::sceForcesDisc() {
prepareSceForceComputation();
applySceForcesDisc();
}
void SceNodes::sceForcesDisc_M() {
#ifdef DebugMode
cudaEvent_t start1, start2, start3, stop;
float elapsedTime1, elapsedTime2, elapsedTime3;
cudaEventCreate(&start1);
cudaEventCreate(&start2);
cudaEventCreate(&start3);
cudaEventCreate(&stop);
cudaEventRecord(start1, 0);
#endif
cout << " confirm --- 1 ---" << endl;
cout.flush();
prepareSceForceComputation_M(); //buckets for optimization of searching algorithm
#ifdef DebugMode
cudaEventRecord(start2, 0);
cudaEventSynchronize(start2);
cudaEventElapsedTime(&elapsedTime1, start1, start2);
#endif
cout << " --- 2 ---" << endl;
cout.flush();
	applySceForcesDisc_M(); // compute the MMD forces and also find the nearest neighbor for applying the adhesion
#ifdef DebugMode
cudaEventRecord(start3, 0);
cudaEventSynchronize(start3);
cudaEventElapsedTime(&elapsedTime2, start2, start3);
#endif
cout << " --- 3 ---" << endl;
cout.flush();
processMembrAdh_M(); //applying the adhesion force
cout << " --- 4 ---" << endl;
cout.flush();
copyExtForces_M();//AAMIRI
#ifdef DebugMode
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime3, start3, stop);
std::cout << "time spent in Node logic: " << elapsedTime1 << " "
<< elapsedTime2 << " " << elapsedTime3 << std::endl;
#endif
}
double SceNodes::getMaxEffectiveRange() {
int simuTypeConfigValue =
globalConfigVars.getConfigValue("SimulationType").toInt();
SimulationType type = parseTypeFromConfig(simuTypeConfigValue);
if (type != Disc_M) {
double interLinkEffectiveRange = globalConfigVars.getConfigValue(
"InterCellLinkEffectRange").toDouble();
double maxEffectiveRange = interLinkEffectiveRange;
double intraLinkEffectiveRange = globalConfigVars.getConfigValue(
"IntraCellLinkEffectRange").toDouble();
if (intraLinkEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = intraLinkEffectiveRange;
}
double cartEffectiveRange = 0;
// cartilage effective range does not apply for other types of simulation.
try {
cartEffectiveRange = globalConfigVars.getConfigValue(
"CartForceEffectiveRange").toDouble();
} catch (SceException &exce) {
}
if (cartEffectiveRange > maxEffectiveRange) {
maxEffectiveRange = cartEffectiveRange;
}
return maxEffectiveRange;
} else {
double membrMembrEffRange = globalConfigVars.getConfigValue(
"InterBEffectiveRange").toDouble();
double membrIntnlEffRange = globalConfigVars.getConfigValue(
"IntnlBEffectRange").toDouble();
double intnlIntnlEffRange = globalConfigVars.getConfigValue(
"IntraEffectRange").toDouble();
double intnlDivEffRange = globalConfigVars.getConfigValue(
"IntraDivEffectRange").toDouble();
double maxEffRange = 0;
std::vector<double> ranges;
ranges.push_back(membrMembrEffRange);
		// the following ranges are currently excluded from the max:
//ranges.push_back(membrIntnlEffRange);
//ranges.push_back(intnlIntnlEffRange);
//ranges.push_back(intnlDivEffRange);
maxEffRange = *std::max_element(ranges.begin(), ranges.end());
return maxEffRange;
}
}
void SceNodes::setInfoVecs(const NodeInfoVecs& infoVecs) {
this->infoVecs = infoVecs;
}
void SceNodes::allocSpaceForNodes(uint maxTotalNodeCount,uint maxNumCells, uint currentActiveCellCount) {
cout << " inside function allocSpaceForNodes current active cells are " << currentActiveCellCount << endl ;
cout << " inside function allocSpaceForNodes max number of cells is " << maxNumCells << endl ;
infoVecs.nodeLocX.resize(maxTotalNodeCount);
infoVecs.nodeLocXHost.resize(maxTotalNodeCount); //Ali
infoVecs.nodeLocY.resize(maxTotalNodeCount);
infoVecs.nodeLocYHost.resize(maxTotalNodeCount); // Ali
infoVecs.nodeLocZ.resize(maxTotalNodeCount);
infoVecs.nodeVelX.resize(maxTotalNodeCount);
infoVecs.nodeVelY.resize(maxTotalNodeCount);
infoVecs.nodeVelZ.resize(maxTotalNodeCount);
cout << " I am here 0 " << maxNumCells << endl ;
//infoVecs.nodeContractLevel.resize(maxTotalNodeCount,0.0);// Ali
infoVecs.nodeF_MM_C_X.resize(maxTotalNodeCount,0.0);// Ali
infoVecs.nodeF_MM_C_Y.resize(maxTotalNodeCount,0.0);// Ali
infoVecs.nodeContractEnergyT.resize(maxTotalNodeCount,0.0);// Ali
cout << " I am here 1 " << maxNumCells << endl ;
infoVecs.nodeF_MI_M_x.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_y.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_T.resize(maxTotalNodeCount); //Ali
infoVecs.nodeF_MI_M_N.resize(maxTotalNodeCount,0.0); //Ali
infoVecs.nodeVelTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeVelNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeCurvature.resize(maxTotalNodeCount, 0.0);//AAMIRI
infoVecs.nodeActinLevel.resize(maxTotalNodeCount, 0.0);//Ali
infoVecs.nodeExtForceX.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceY.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceTangent.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeExtForceNormal.resize(maxTotalNodeCount);//AAMIRI
infoVecs.nodeMaxForce.resize(maxTotalNodeCount);
//infoVecs.nodeIsBasalMem.resize(maxTotalNodeCount,false); //Ali
//infoVecs.nodeIsLateralMem.resize(maxTotalNodeCount,false); //Ali
infoVecs.nodeIsApicalMem.resize(maxTotalNodeCount,0); //Ali
infoVecs.nodeIsBasalMem.resize(maxTotalNodeCount,0); //Ali
//infoVecs.nodeIsLateralMemHost.resize(maxTotalNodeCount,false); //Ali
infoVecs.nodeCellType.resize(maxTotalNodeCount);
infoVecs.nodeCellRank.resize(maxTotalNodeCount);
infoVecs.nodeIsActive.resize(maxTotalNodeCount);
infoVecs.nodeIsActiveHost.resize(maxTotalNodeCount); // Ali
infoVecs.nodeAdhMinDist.resize(maxTotalNodeCount); // Ali
infoVecs.nodeCellRankFront.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankBehind.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankFrontOld.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankBehindOld.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankFrontHost.resize(maxNumCells,-1); // Ali
infoVecs.nodeCellRankBehindHost.resize(maxNumCells,-1); // Ali
if (controlPara.simuType == Disc
|| controlPara.simuType == SingleCellTest) {
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.nodeInterForceX.resize(maxTotalNodeCount);
infoVecs.nodeInterForceY.resize(maxTotalNodeCount);
infoVecs.nodeInterForceZ.resize(maxTotalNodeCount);
}
if (controlPara.simuType == Disc_M) {
infoVecs.nodeAdhereIndex.resize(maxTotalNodeCount);
infoVecs.nodeMemMirrorIndex.resize(maxTotalNodeCount); //Ali
infoVecs.nodeAdhIndxHostCopy.resize(maxTotalNodeCount);
infoVecs.nodeAdhereIndexHost.resize(maxTotalNodeCount); //Ali
infoVecs.nodeMemMirrorIndexHost.resize(maxTotalNodeCount); //Ali
infoVecs.membrIntnlIndex.resize(maxTotalNodeCount);
infoVecs.nodeGrowPro.resize(maxTotalNodeCount);
infoVecs.membrTensionMag.resize(maxTotalNodeCount, 0);
infoVecs.membrTenMagRi.resize(maxTotalNodeCount, 0);
infoVecs.membrDistToRi.resize(maxTotalNodeCount, 0);//AAMIRI
infoVecs.membrLinkRiMidX.resize(maxTotalNodeCount, 0);
infoVecs.membrLinkRiMidY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendLeftX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendSpringEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.membrLinSpringEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.nodeIIEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.nodeIMEnergy.resize(maxTotalNodeCount, 0.0);
infoVecs.lagrangeFX.resize(maxTotalNodeCount, 0.0);
infoVecs.lagrangeFY.resize(maxTotalNodeCount, 0.0);
infoVecs.lagrangeFN.resize(maxTotalNodeCount, 0.0);
infoVecs.membrBendLeftY.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightX.resize(maxTotalNodeCount, 0);
infoVecs.membrBendRightY.resize(maxTotalNodeCount, 0);
infoVecs.dppLevel.resize(maxTotalNodeCount, 0.0); //Ali
infoVecs.memNodeType1.resize(maxTotalNodeCount, notAssigned1); //Ali
infoVecs.memNodeType1Host.resize(maxTotalNodeCount, notAssigned1); //Ali
infoVecs.isSubApicalJunction.resize(maxTotalNodeCount, false); //Ali
infoVecs.isSubApicalJunctionHost.resize(maxTotalNodeCount, false); //Ali
cout << " I am here2 " << maxNumCells << endl ;
auxVecs.bucketKeys.resize(maxTotalNodeCount);
auxVecs.bucketValues.resize(maxTotalNodeCount);
auxVecs.bucketKeysExpanded.resize(maxTotalNodeCount * 9);
auxVecs.bucketValuesIncludingNeighbor.resize(maxTotalNodeCount * 9);
}
thrust:: sequence (infoVecs.nodeCellRankFront.begin() ,infoVecs.nodeCellRankFront.begin() +currentActiveCellCount) ; //Ali
thrust:: sequence (infoVecs.nodeCellRankBehind.begin(),infoVecs.nodeCellRankBehind.begin()+currentActiveCellCount) ; //Ali
thrust:: device_vector<int> tmp1 ;
thrust:: device_vector<int> tmp2 ;
tmp1.resize(currentActiveCellCount,1) ;
tmp2.resize(currentActiveCellCount,-1) ;
thrust:: transform(tmp1.begin(),tmp1.begin()+currentActiveCellCount,
infoVecs.nodeCellRankFront.begin(),infoVecs.nodeCellRankFront.begin(), thrust::plus<int>()) ; //Ali
thrust:: transform(tmp2.begin(),tmp2.begin()+currentActiveCellCount,
infoVecs.nodeCellRankBehind.begin(),infoVecs.nodeCellRankBehind.begin(),thrust::plus<int>()) ; //Ali
infoVecs.nodeCellRankBehind[0]=currentActiveCellCount-1 ;
infoVecs.nodeCellRankFront[currentActiveCellCount-1]=0 ;
cout << " I am here 3 " << maxNumCells << endl ;
}
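//Illustrative sketch (not part of the original code) of the ring of neighboring cells set
//up at the end of allocSpaceForNodes() above with thrust::sequence and thrust::transform:
//cell i records cell i+1 as its "front" neighbor and cell i-1 as its "behind" neighbor,
//and both indices wrap around so the active cells form a closed loop.
static void buildCellRingSketch(int activeCellCount, std::vector<int>& front,
		std::vector<int>& behind) {
	front.resize(activeCellCount);
	behind.resize(activeCellCount);
	for (int i = 0; i < activeCellCount; i++) {
		front[i] = (i + 1) % activeCellCount;                    // next cell in the ring
		behind[i] = (i - 1 + activeCellCount) % activeCellCount; // previous cell in the ring
	}
}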
void SceNodes::initNodeAllocPara(uint totalBdryNodeCount,
uint maxProfileNodeCount, uint maxCartNodeCount, uint maxTotalECMCount,
uint maxNodeInECM, uint maxTotalCellCount, uint maxNodeInCell) {
allocPara.maxCellCount = maxTotalCellCount;
allocPara.maxNodeOfOneCell = maxNodeInCell;
allocPara.maxNodePerECM = maxNodeInECM;
allocPara.maxECMCount = maxTotalECMCount;
allocPara.maxProfileNodeCount = maxProfileNodeCount;
allocPara.maxCartNodeCount = maxCartNodeCount;
allocPara.currentActiveProfileNodeCount = 0;
allocPara.currentActiveCartNodeCount = 0;
allocPara.BdryNodeCount = totalBdryNodeCount;
allocPara.currentActiveCellCount = 0;
allocPara.maxTotalECMNodeCount = allocPara.maxECMCount
* allocPara.maxNodePerECM;
allocPara.currentActiveECM = 0;
allocPara.maxTotalCellNodeCount = maxTotalCellCount
* allocPara.maxNodeOfOneCell;
allocPara.startPosProfile = totalBdryNodeCount;
allocPara.startPosCart = allocPara.startPosProfile
+ allocPara.maxProfileNodeCount;
allocPara.startPosECM = allocPara.startPosCart + allocPara.maxCartNodeCount;
allocPara.startPosCells = allocPara.startPosECM
+ allocPara.maxTotalECMNodeCount;
}
void SceNodes::initNodeAllocPara_M(uint totalBdryNodeCount,
uint maxTotalCellCount, uint maxEpiNodePerCell,
uint maxInternalNodePerCell) {
allocPara_M.bdryNodeCount = totalBdryNodeCount;
allocPara_M.currentActiveCellCount = 0;
allocPara_M.maxCellCount = maxTotalCellCount;
allocPara_M.maxAllNodePerCell = maxEpiNodePerCell + maxInternalNodePerCell;
allocPara_M.maxMembrNodePerCell = maxEpiNodePerCell;
allocPara_M.maxIntnlNodePerCell = maxInternalNodePerCell;
allocPara_M.maxTotalNodeCount = allocPara_M.maxAllNodePerCell
* allocPara_M.maxCellCount;
}
void SceNodes::removeNodes(int cellRank, vector<uint> &removeSeq) {
uint cellBeginIndex = allocPara.startPosCells
+ cellRank * allocPara.maxNodeOfOneCell;
uint cellEndIndex = cellBeginIndex + allocPara.maxNodeOfOneCell;
thrust::host_vector<double> cellXCoords(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYCoords(allocPara.maxNodeOfOneCell);
thrust::copy(infoVecs.nodeLocX.begin() + cellBeginIndex,
infoVecs.nodeLocX.begin() + cellEndIndex, cellXCoords.begin());
thrust::copy(infoVecs.nodeLocY.begin() + cellBeginIndex,
infoVecs.nodeLocY.begin() + cellEndIndex, cellYCoords.begin());
vector<bool> isRemove(allocPara.maxNodeOfOneCell, false);
/*
std::cout << "before, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXCoords[i] << " ";
}
std::cout << "]" << endl;
std::cout << "before, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYCoords[i] << " ";
}
std::cout << "]" << endl;
*/
for (uint i = 0; i < removeSeq.size(); i++) {
isRemove[removeSeq[i]] = true;
}
thrust::host_vector<double> cellXRemoved(allocPara.maxNodeOfOneCell);
thrust::host_vector<double> cellYRemoved(allocPara.maxNodeOfOneCell);
uint curIndex = 0;
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
if (isRemove[i] == false) {
cellXRemoved[curIndex] = cellXCoords[i];
cellYRemoved[curIndex] = cellYCoords[i];
curIndex++;
}
}
/*
std::cout << "after, X: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellXRemoved[i] << " ";
}
std::cout << "]" << endl;
std::cout << "after, Y: [";
for (uint i = 0; i < allocPara.maxNodeOfOneCell; i++) {
std::cout << cellYRemoved[i] << " ";
}
std::cout << "]" << endl;
*/
thrust::copy(cellXRemoved.begin(), cellXRemoved.end(),
infoVecs.nodeLocX.begin() + cellBeginIndex);
thrust::copy(cellYRemoved.begin(), cellYRemoved.end(),
infoVecs.nodeLocY.begin() + cellBeginIndex);
}
void SceNodes::processMembrAdh_M() {
keepAdhIndxCopyInHost_M();
applyMembrAdh_M();
//removeInvalidPairs_M(); //Ali changed position
}
void SceNodes::keepAdhIndxCopyInHost_M() {
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::copy(infoVecs.nodeAdhereIndex.begin(),
infoVecs.nodeAdhereIndex.begin() + maxTotalNode,
infoVecs.nodeAdhIndxHostCopy.begin());
}
void SceNodes::removeInvalidPairs_M() {
int* nodeAdhIdxAddress = thrust::raw_pointer_cast(
&infoVecs.nodeAdhereIndex[0]);
uint curActiveNodeCt = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
thrust::counting_iterator<int> iBegin(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(iBegin,
infoVecs.nodeAdhereIndex.begin()))
+ curActiveNodeCt, infoVecs.nodeAdhereIndex.begin(),
AdjustAdh(nodeAdhIdxAddress));
}
void SceNodes::applyMembrAdh_M() {
thrust::counting_iterator<uint> iBegin(0);
thrust::counting_iterator<uint> iBegin2(0);
uint maxTotalNode = allocPara_M.currentActiveCellCount
* allocPara_M.maxAllNodePerCell;
double* nodeLocXAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocX[0]);
double* nodeLocYAddress = thrust::raw_pointer_cast(&infoVecs.nodeLocY[0]);
double* nodeGrowProAddr = thrust::raw_pointer_cast(&infoVecs.nodeGrowPro[0]);
int* nodeAdhAddr = thrust::raw_pointer_cast(&infoVecs.nodeAdhereIndex[0]);
double* nodedppLevelAddr = thrust::raw_pointer_cast(&infoVecs.dppLevel[0]);
//thrust::counting_iterator<uint> iBegin_node(0);
thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin(),
infoVecs.memNodeType1.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
infoVecs.nodeAdhereIndex.begin(), iBegin,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin(),
infoVecs.memNodeType1.begin())) + maxTotalNode,
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
ApplyAdh(nodeLocXAddress, nodeLocYAddress, nodeGrowProAddr,nodeAdhAddr,nodedppLevelAddr));
//for (int i=0 ; i<140 ; i++){
// cout <<"adhesion index for "<<i << " is "<<infoVecs.nodeAdhereIndex[i]<< endl ;
// }
/* thrust::transform(
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
iBegin2,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeIsActive.begin(),
iBegin2,
infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())) + maxTotalNode,
thrust::make_zip_iterator(
thrust::make_tuple(infoVecs.nodeVelX.begin(),
infoVecs.nodeVelY.begin())),
ApplyAdhReaction(nodeLocXAddress, nodeLocYAddress, nodeGrowProAddr,nodeAdhAddr,maxTotalNode));
*/
}
//AAMIRI
void SceNodes::copyExtForces_M(){
thrust::copy(infoVecs.nodeVelX.begin(), infoVecs.nodeVelX.end(),
infoVecs.nodeExtForceX.begin());
thrust::copy(infoVecs.nodeVelY.begin(), infoVecs.nodeVelY.end(),
infoVecs.nodeExtForceY.begin());
}
|
70f6b750688a5adecc4555ccb845b54e92b1bb7a.hip | // !!! This is a file automatically generated by hipify!!!
/**
Author: Dimitriadis Vasileios 8404
Faculty of Electrical and Computer Engineering AUTH
3rd assignment at Parallel and Distributed Systems (7th semester)
This is a parallel implementation of mean shift algorithm using the
Gaussian probability density function.
**/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define N 60000
#define DIMENSIONS 5
#define EPSILON 0.001
#define VAR 0.001 // =σ^2 variance
#define N_Threads 1024
struct timeval startwtime, endwtime;
double seq_time;
void getinput(double *x, char *filename);
__global__ void meanshift(double *dev_x, double *dev_y, int dim, double eps, double var);
__device__ double find_distance(double *y, int i, double *x, int j, int dim);
void show_results(double *y_new);
int main(int argc, char **argv)
{
if (argc != 2)
{
printf("Need as input a dataset to process\n");
exit (1);
}
double *x = (double *)malloc(N * DIMENSIONS * sizeof(double));
if (x == NULL)
{
printf("Failed to allocate data at x...\n");
exit(1);
}
getinput(x, argv[1]);
double *y = (double *)malloc(N * DIMENSIONS * sizeof(double));
if (y == NULL)
{
printf("Failed to allocate data at y...\n");
exit(1);
}
double *dev_x;
hipMalloc(&dev_x, N * DIMENSIONS * sizeof(double));
double *dev_y;
hipMalloc(&dev_y, N * DIMENSIONS * sizeof(double));
hipMemcpy(dev_x, x, N * DIMENSIONS * sizeof(double), hipMemcpyHostToDevice);
//Initialize y as x in gpu.
hipMemcpy(dev_y, x, N * DIMENSIONS * sizeof(double), hipMemcpyHostToDevice);
hipError_t error;
size_t shared_size = N_Threads * DIMENSIONS + N_Threads;
gettimeofday (&startwtime, NULL);
  hipLaunchKernelGGL(( meanshift), dim3(N), dim3(N_Threads), sizeof(double) * shared_size, 0, dev_x, dev_y, DIMENSIONS, EPSILON, VAR);
  hipDeviceSynchronize(); // the launch is asynchronous; wait for the kernel before stopping the timer
  gettimeofday (&endwtime, NULL);
seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
hipMemcpy(y, dev_y, N * DIMENSIONS * sizeof(double), hipMemcpyDeviceToHost);
error = hipGetLastError();
if (error != hipSuccess)
{
printf("Error at copying back: %s\n", hipGetErrorString(error));
exit(1);
}
hipDeviceSynchronize();
error = hipGetLastError();
if (error != hipSuccess)
{
printf("Error at Sync: %s\n", hipGetErrorString(error));
exit(1);
}
printf("Time needed for mean shift is %f sec\n", seq_time);
show_results(y);
free(x);
free(y);
hipFree(dev_x);
hipFree(dev_y);
return (0);
}
void getinput(double *x, char *filename)
{
FILE *fin;
int i = 0, j;
char *str = (char *)malloc(2 * DIMENSIONS * sizeof(double));
  char *token = NULL; // strtok returns pointers into str, so token needs no allocation of its own
fin = fopen(filename, "r");
if (fin == NULL)
{
printf("Error opening the file...");
exit(1);
}
str = fgets(str, 2 * DIMENSIONS * sizeof(double), fin); //Take one point.
while (str != NULL && i < N)
{
    token = strtok(str, "\t"); //get one dimension per iteration.
j = 0;
while (token != NULL && j < DIMENSIONS)
{
x[i*DIMENSIONS + j] = atof(token);
token = strtok(NULL, "\t");
j++;
}
str = fgets(str, 2 * DIMENSIONS * sizeof(double), fin);
i++;
}
fclose(fin);
  free(str); // token pointed into str (or was NULL), so only str itself needs freeing
}
__global__
void meanshift(double *dev_x, double *dev_y, int dim, double eps, double var)
{
int start, end;
// Every block is finding the new y until convergence.
int i = blockIdx.x;
int j = threadIdx.x;
int n = gridDim.x;
int n_th = blockDim.x;
  /** Every thread processes a chunk of the data in order
      to find the distances between y_i and all x faster. If the
      number of elements is divided evenly by the number of
      threads then every chunk is N/(# of threads). If it is not, then
      the first N%(# of threads) threads have one more element to process.
      (A host-side sketch of this split appears right after this kernel.)
  **/
int chunk = n / n_th;
if ((n % n_th) != 0)
{
if (j < (n % n_th))
{
chunk = chunk + 1;
start = chunk * j;
end = start + chunk;
}
else
{
start = chunk * j + (n % n_th);
end = start + chunk;
}
}
else
{
start = chunk * j;
end = start + chunk;
}
/** Each block has its own shared memory and the
size of it is number of threads multiplied by
(dimensions + 1) to store the values of nominators
and denominator that each thread finds.
**/
extern __shared__ double s[];
double *nominator = &s[0];
double *denominator = &s[n_th * dim];
__shared__ int converge;
converge = 0;
double distance = 0, k;
int l, r;
while (!converge)
{
//Initialize nominators and denominators as 0.
for (r=0; r<dim; r++)
{
nominator[j*dim + r] = 0;
}
denominator[j] = 0;
// Every thread is responsible of finding the new nominators
// and denominator in it's chunk.
for (l=start; l<end; l++)
{
distance = find_distance(dev_y, i, dev_x, l, dim);
if (sqrt(distance) <= var)
{
        k = exp(-distance / (2 * var)); //Gaussian probability density function.
}
else
{
k = 0;
}
for (r=0; r<dim; r++)
{
nominator[j*dim + r] += k * dev_x[l*dim + r];
}
denominator[j] += k;
}
__syncthreads();
// Reduction
for (l=n_th/2; l>0; l>>=1)
{
if (j < l)
{
for (r=0; r<dim; r++)
{
nominator[j*dim + r] += nominator[(j+l) * dim + r];
}
denominator[j] += denominator[j+l];
}
__syncthreads();
}
// Threads from 0 to dim-1 store in the first column
// of nominator the values of new y
if (j < dim)
{
nominator[j] = nominator[j] / denominator[0];
}
__syncthreads();
// Only first thread checking the converge.
if (j == 0)
{
distance = 0;
for (r=0; r<dim; r++)
{
distance += pow(dev_y[i*dim + r] - nominator[r], 2);
}
if (sqrt(distance) < eps)
{
converge = 1;
}
}
__syncthreads();
// New y is stored in place of the previous y.
if (j < dim)
{
dev_y[i*dim + j] = nominator[j];
}
__syncthreads();
}
}
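/* Illustrative host-side sketch (not part of the original kernel): the same chunk split
   used inside meanshift() above, written as a plain function. n points are divided over
   n_th threads; the first n % n_th threads take one extra point each. */
static void chunk_range_sketch(int n, int n_th, int thread_id, int *start, int *end)
{
  int chunk = n / n_th;
  int remainder = n % n_th;
  if (thread_id < remainder)
  {
    *start = (chunk + 1) * thread_id;       /* these threads own chunk + 1 points */
    *end = *start + chunk + 1;
  }
  else
  {
    *start = chunk * thread_id + remainder; /* the remaining threads own chunk points */
    *end = *start + chunk;
  }
}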
__device__
double find_distance(double *y, int i, double *x, int j, int dim)
{
double distance = 0;
for (int l=0; l<dim; l++)
{
distance = distance + pow(y[i*dim + l]-x[j*dim + l], 2);
}
return distance;
}
void show_results(double *y_new)
{
int i,j;
for(i=0; i<20; i++)
{
for (j=0; j<DIMENSIONS; j++)
{
printf("%f ", y_new[i*DIMENSIONS + j]);
}
printf("\n");
}
}
| 70f6b750688a5adecc4555ccb845b54e92b1bb7a.cu | /**
Author: Dimitriadis Vasileios 8404
Faculty of Electrical and Computer Engineering AUTH
3rd assignment at Parallel and Distributed Systems (7th semester)
This is a parallel implementation of mean shift algorithm using the
Gaussian probability density function.
**/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 60000
#define DIMENSIONS 5
#define EPSILON 0.001
#define VAR 0.001 // =σ^2 variance
#define N_Threads 1024
struct timeval startwtime, endwtime;
double seq_time;
void getinput(double *x, char *filename);
__global__ void meanshift(double *dev_x, double *dev_y, int dim, double eps, double var);
__device__ double find_distance(double *y, int i, double *x, int j, int dim);
void show_results(double *y_new);
int main(int argc, char **argv)
{
if (argc != 2)
{
printf("Need as input a dataset to process\n");
exit (1);
}
double *x = (double *)malloc(N * DIMENSIONS * sizeof(double));
if (x == NULL)
{
printf("Failed to allocate data at x...\n");
exit(1);
}
getinput(x, argv[1]);
double *y = (double *)malloc(N * DIMENSIONS * sizeof(double));
if (y == NULL)
{
printf("Failed to allocate data at y...\n");
exit(1);
}
double *dev_x;
cudaMalloc(&dev_x, N * DIMENSIONS * sizeof(double));
double *dev_y;
cudaMalloc(&dev_y, N * DIMENSIONS * sizeof(double));
cudaMemcpy(dev_x, x, N * DIMENSIONS * sizeof(double), cudaMemcpyHostToDevice);
//Initialize y as x in gpu.
cudaMemcpy(dev_y, x, N * DIMENSIONS * sizeof(double), cudaMemcpyHostToDevice);
cudaError_t error;
size_t shared_size = N_Threads * DIMENSIONS + N_Threads;
gettimeofday (&startwtime, NULL);
  meanshift<<<N, N_Threads, sizeof(double) * shared_size>>>(dev_x, dev_y, DIMENSIONS, EPSILON, VAR);
  cudaDeviceSynchronize(); // the launch is asynchronous; wait for the kernel before stopping the timer
  gettimeofday (&endwtime, NULL);
seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
+ endwtime.tv_sec - startwtime.tv_sec);
cudaMemcpy(y, dev_y, N * DIMENSIONS * sizeof(double), cudaMemcpyDeviceToHost);
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("Error at copying back: %s\n", cudaGetErrorString(error));
exit(1);
}
cudaDeviceSynchronize();
error = cudaGetLastError();
if (error != cudaSuccess)
{
printf("Error at Sync: %s\n", cudaGetErrorString(error));
exit(1);
}
printf("Time needed for mean shift is %f sec\n", seq_time);
show_results(y);
free(x);
free(y);
cudaFree(dev_x);
cudaFree(dev_y);
return (0);
}
void getinput(double *x, char *filename)
{
FILE *fin;
int i = 0, j;
char *str = (char *)malloc(2 * DIMENSIONS * sizeof(double));
  char *token = NULL; // strtok returns pointers into str, so token needs no allocation of its own
fin = fopen(filename, "r");
if (fin == NULL)
{
printf("Error opening the file...");
exit(1);
}
str = fgets(str, 2 * DIMENSIONS * sizeof(double), fin); //Take one point.
while (str != NULL && i < N)
{
    token = strtok(str, "\t"); //get one dimension per iteration.
j = 0;
while (token != NULL && j < DIMENSIONS)
{
x[i*DIMENSIONS + j] = atof(token);
token = strtok(NULL, "\t");
j++;
}
str = fgets(str, 2 * DIMENSIONS * sizeof(double), fin);
i++;
}
fclose(fin);
  free(str); // token pointed into str (or was NULL), so only str itself needs freeing
}
__global__
void meanshift(double *dev_x, double *dev_y, int dim, double eps, double var)
{
int start, end;
// Every block is finding the new y until convergence.
int i = blockIdx.x;
int j = threadIdx.x;
int n = gridDim.x;
int n_th = blockDim.x;
  /** Every thread processes a chunk of the data in order
      to find the distances between y_i and all x faster. If the
      number of elements is divided evenly by the number of
      threads then every chunk is N/(# of threads). If it is not, then
      the first N%(# of threads) threads have one more element to process.
  **/
int chunk = n / n_th;
if ((n % n_th) != 0)
{
if (j < (n % n_th))
{
chunk = chunk + 1;
start = chunk * j;
end = start + chunk;
}
else
{
start = chunk * j + (n % n_th);
end = start + chunk;
}
}
else
{
start = chunk * j;
end = start + chunk;
}
/** Each block has its own shared memory and the
size of it is number of threads multiplied by
(dimensions + 1) to store the values of nominators
and denominator that each thread finds.
**/
extern __shared__ double s[];
double *nominator = &s[0];
double *denominator = &s[n_th * dim];
__shared__ int converge;
converge = 0;
double distance = 0, k;
int l, r;
while (!converge)
{
//Initialize nominators and denominators as 0.
for (r=0; r<dim; r++)
{
nominator[j*dim + r] = 0;
}
denominator[j] = 0;
// Every thread is responsible of finding the new nominators
// and denominator in it's chunk.
for (l=start; l<end; l++)
{
distance = find_distance(dev_y, i, dev_x, l, dim);
if (sqrt(distance) <= var)
{
        k = exp(-distance / (2 * var)); //Gaussian probability density function.
}
else
{
k = 0;
}
for (r=0; r<dim; r++)
{
nominator[j*dim + r] += k * dev_x[l*dim + r];
}
denominator[j] += k;
}
__syncthreads();
// Reduction
for (l=n_th/2; l>0; l>>=1)
{
if (j < l)
{
for (r=0; r<dim; r++)
{
nominator[j*dim + r] += nominator[(j+l) * dim + r];
}
denominator[j] += denominator[j+l];
}
__syncthreads();
}
// Threads from 0 to dim-1 store in the first column
// of nominator the values of new y
if (j < dim)
{
nominator[j] = nominator[j] / denominator[0];
}
__syncthreads();
// Only first thread checking the converge.
if (j == 0)
{
distance = 0;
for (r=0; r<dim; r++)
{
distance += pow(dev_y[i*dim + r] - nominator[r], 2);
}
if (sqrt(distance) < eps)
{
converge = 1;
}
}
__syncthreads();
// New y is stored in place of the previous y.
if (j < dim)
{
dev_y[i*dim + j] = nominator[j];
}
__syncthreads();
}
}
__device__
double find_distance(double *y, int i, double *x, int j, int dim)
{
double distance = 0;
for (int l=0; l<dim; l++)
{
distance = distance + pow(y[i*dim + l]-x[j*dim + l], 2);
}
return distance;
}
void show_results(double *y_new)
{
int i,j;
for(i=0; i<20; i++)
{
for (j=0; j<DIMENSIONS; j++)
{
printf("%f ", y_new[i*DIMENSIONS + j]);
}
printf("\n");
}
}
|
ad85d090e5ebc76ce78ff27d73466b0d5e3f4d0b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define MAX_ERR 1e-6
//__global__ void vector_add(float *out, float *a, float *b, int n) {
// int stride = 1;
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
// 0 * 256 + 1 = 1 | BLOCK0 |
// 0 * 256 + 2 = 2
// 1 * 256 + 1 = 257 | BLOCK1 |
// 1 * 256 + 2 = 258
// out[tid] = a[tid] + b[tid];
//}
int calculate_no_threads(int array_size){
if(array_size/256 < 1){
return 1;
} else {
return array_size/256;
}
}
void print_results(float *ARRAY, int array_size){
printf("[");
for(int i = 0; i < array_size; i++){
printf("{");
for(int j = 0; j < array_size; j++){
printf("%1.1f,",ARRAY[(i * array_size) +j]);
}
printf("}\n");
}
printf("]");
printf("\n");
}
__global__ void vector_dot_product(float *CUDA_A, float *CUDA_B, float *CUDA_C,float *CUDA_T,int array_size,int no_threads) {
int tid = threadIdx.x;
int bid = blockIdx.x;
    // size the scratch buffers from array_size: mul is indexed up to array_size^3 - 1
    // and sum up to array_size^2 - 1, so a fixed size of 431441 would be overrun here
    long long int mul_count = (long long int)array_size * array_size * array_size;
    long long int sum_count = (long long int)array_size * array_size;
    int row_no = array_size;
    int col_no = array_size;
    double *mul = (double *)malloc(sizeof(double) * mul_count);
    double *sum = (double *)malloc(sizeof(double) * sum_count);
//Make multiplications
for (int p = 0; p < array_size; p++){
for(int i = 0; i < array_size; i++){
for (int j = 0; j < array_size; j++){
mul[((i*array_size)+j) + p*array_size*array_size] = CUDA_A[p * col_no + j] * CUDA_B[ j * row_no + i];
}
}
}
float res=0.0;
    //sum the products along the shared dimension for each output entry (a row of A dotted with a column of B)
for(int r=0;r<array_size;r++){
for(int j=0;j<array_size;j++){
for(int k=0;k<array_size;k++){
res += mul[k+(j*array_size)+(r*(array_size*array_size))];
}
sum[j+(r*array_size)] = res;
res = 0;
}
}
for(int j = 0;j<(array_size*array_size);j++){
//CUDA_C[(i*array_size) + j] = mul[(i*row_no)+j];
//place all the results back to the array
CUDA_T[j] = sum[j];
}
__syncthreads();
}
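/* Illustrative host-side reference (not part of the original kernel): the kernel above
   ultimately stores the row-major matrix product CUDA_T[r*array_size + j] =
   sum_k A[r*array_size + k] * B[k*array_size + j]. This plain triple loop computes the
   same values and could be used to sanity-check the GPU output. */
void matmul_reference_sketch(float *A, float *B, float *T, int array_size){
    for(int r = 0; r < array_size; r++){
        for(int j = 0; j < array_size; j++){
            float res = 0.0;
            for(int k = 0; k < array_size; k++){
                res += A[r * array_size + k] * B[k * array_size + j];
            }
            T[r * array_size + j] = res; // same layout as the sum[] buffer in the kernel
        }
    }
}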
int main(){
int array_size = 83;
float *C, *A, *B, *T;
float *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_T;
A = (float *)malloc(array_size * array_size * sizeof(float));
B = (float *)malloc(array_size * array_size * sizeof(float));
T = (float *)malloc((array_size*array_size*array_size) * sizeof(float));
float a = 4.0;
for(int i = 0; i<(array_size * array_size); i++){
A[i] = ((float)rand()/(float)(RAND_MAX)) * a;
B[i] = ((float)rand()/(float)(RAND_MAX)) * a;
}
//Fill remaining bytes in array with 1s
//for(int i = 0; i<(array_size*array_size*array_size);i++){
// T[i] = 1;
// }
C = (float *)malloc(array_size * array_size * sizeof(float) );
// Allocate device memory
hipMalloc((void**)&CUDA_A, sizeof(float) * array_size * array_size);
hipMalloc((void**)&CUDA_B, sizeof(float) * array_size * array_size);
hipMalloc((void**)&CUDA_C, sizeof(float) * array_size * array_size);
hipMalloc((void**)&CUDA_T, sizeof(float) * (array_size*array_size*array_size));
// Transfer data from host to device memory
hipMemcpy(CUDA_A, A, sizeof(float) * array_size * array_size, hipMemcpyHostToDevice);
hipMemcpy(CUDA_B, B, sizeof(float) * array_size * array_size, hipMemcpyHostToDevice);
hipMemcpy(CUDA_T, T, sizeof(float) * array_size * array_size, hipMemcpyHostToDevice);
printf("calculate_no_threads %d\n",calculate_no_threads(array_size));
hipLaunchKernelGGL(( vector_dot_product), dim3(1),dim3(calculate_no_threads(array_size)), 0, 0, CUDA_A, CUDA_B, CUDA_C, CUDA_T,array_size,calculate_no_threads(array_size));
hipMemcpy(C, CUDA_C, sizeof(float) * array_size * array_size, hipMemcpyDeviceToHost);
hipMemcpy(T, CUDA_T, sizeof(float) * (array_size*array_size*array_size), hipMemcpyDeviceToHost);
puts("DOT_PRODUCT");
print_results(A,array_size);
print_results(B,array_size);
puts("MATRIX MULTI");
print_results(T,array_size);
// Deallocate device memory
    hipFree(CUDA_A);
    hipFree(CUDA_B);
    hipFree(CUDA_C);
    hipFree(CUDA_T);
    // Deallocate host memory
    free(A);
    free(B);
    free(C);
    free(T);
}
| ad85d090e5ebc76ce78ff27d73466b0d5e3f4d0b.cu | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define MAX_ERR 1e-6
//__global__ void vector_add(float *out, float *a, float *b, int n) {
// int stride = 1;
// int tid = blockIdx.x * blockDim.x + threadIdx.x;
// 0 * 256 + 1 = 1 | BLOCK0 |
// 0 * 256 + 2 = 2
// 1 * 256 + 1 = 257 | BLOCK1 |
// 1 * 256 + 2 = 258
// out[tid] = a[tid] + b[tid];
//}
int calculate_no_threads(int array_size){
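    // Roughly one thread per 256 elements, with a minimum of one thread.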
if(array_size/256 < 1){
return 1;
} else {
return array_size/256;
}
}
void print_results(float *ARRAY, int array_size){
printf("[");
for(int i = 0; i < array_size; i++){
printf("{");
for(int j = 0; j < array_size; j++){
printf("%1.1f,",ARRAY[(i * array_size) +j]);
}
printf("}\n");
}
printf("]");
printf("\n");
}
__global__ void vector_dot_product(float *CUDA_A, float *CUDA_B, float *CUDA_C,float *CUDA_T,int array_size,int no_threads) {
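    // Computes the entire matrix product T = A x B; every launched thread redundantly performs the
    // full serial computation (tid, bid and no_threads are currently unused).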
int tid = threadIdx.x;
int bid = blockIdx.x;
    int row_no = array_size;
    int col_no = array_size;
    // Scratch buffers: mul holds array_size^3 partial products, sum holds array_size^2 row sums.
    double *mul = (double *)malloc(sizeof(double) * (size_t)array_size * array_size * array_size);
    double *sum = (double *)malloc(sizeof(double) * (size_t)array_size * array_size);
//Make multiplications
for (int p = 0; p < array_size; p++){
for(int i = 0; i < array_size; i++){
for (int j = 0; j < array_size; j++){
mul[((i*array_size)+j) + p*array_size*array_size] = CUDA_A[p * col_no + j] * CUDA_B[ j * row_no + i];
}
}
}
float res=0.0;
    //Sum the partial products for each output element: T[r][j] = sum over k of A[r][k] * B[k][j]
for(int r=0;r<array_size;r++){
for(int j=0;j<array_size;j++){
for(int k=0;k<array_size;k++){
res += mul[k+(j*array_size)+(r*(array_size*array_size))];
}
sum[j+(r*array_size)] = res;
res = 0;
}
}
for(int j = 0;j<(array_size*array_size);j++){
//CUDA_C[(i*array_size) + j] = mul[(i*row_no)+j];
//place all the results back to the array
CUDA_T[j] = sum[j];
}
__syncthreads();
}
int main(){
int array_size = 83;
float *C, *A, *B, *T;
float *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_T;
A = (float *)malloc(array_size * array_size * sizeof(float));
B = (float *)malloc(array_size * array_size * sizeof(float));
T = (float *)malloc((array_size*array_size*array_size) * sizeof(float));
float a = 4.0;
for(int i = 0; i<(array_size * array_size); i++){
A[i] = ((float)rand()/(float)(RAND_MAX)) * a;
B[i] = ((float)rand()/(float)(RAND_MAX)) * a;
}
//Fill remaining bytes in array with 1s
//for(int i = 0; i<(array_size*array_size*array_size);i++){
// T[i] = 1;
// }
C = (float *)malloc(array_size * array_size * sizeof(float) );
// Allocate device memory
cudaMalloc((void**)&CUDA_A, sizeof(float) * array_size * array_size);
cudaMalloc((void**)&CUDA_B, sizeof(float) * array_size * array_size);
cudaMalloc((void**)&CUDA_C, sizeof(float) * array_size * array_size);
cudaMalloc((void**)&CUDA_T, sizeof(float) * (array_size*array_size*array_size));
// Transfer data from host to device memory
cudaMemcpy(CUDA_A, A, sizeof(float) * array_size * array_size, cudaMemcpyHostToDevice);
cudaMemcpy(CUDA_B, B, sizeof(float) * array_size * array_size, cudaMemcpyHostToDevice);
cudaMemcpy(CUDA_T, T, sizeof(float) * array_size * array_size, cudaMemcpyHostToDevice);
printf("calculate_no_threads %d\n",calculate_no_threads(array_size));
vector_dot_product<<<1,calculate_no_threads(array_size)>>>(CUDA_A, CUDA_B, CUDA_C, CUDA_T,array_size,calculate_no_threads(array_size));
cudaMemcpy(C, CUDA_C, sizeof(float) * array_size * array_size, cudaMemcpyDeviceToHost);
cudaMemcpy(T, CUDA_T, sizeof(float) * (array_size*array_size*array_size), cudaMemcpyDeviceToHost);
puts("DOT_PRODUCT");
print_results(A,array_size);
print_results(B,array_size);
puts("MATRIX MULTI");
print_results(T,array_size);
// Deallocate device memory
    cudaFree(CUDA_A);
    cudaFree(CUDA_B);
    cudaFree(CUDA_C);
    cudaFree(CUDA_T);
    // Deallocate host memory
    free(A);
    free(B);
    free(C);
    free(T);
}
|
5261252f357bdd9f1823efae08d7aee56add242f.hip | // !!! This is a file automatically generated by hipify!!!
#include <optix.h>
#include <optix_math.h>
#include "common_hip.cuh"
rtDeclareVariable(float3, eye, , );
//rtDeclareVariable(float3, roll_pitch_yaw, , );
//rtDeclareVariable(float3, fovy_cx_cy, , );
rtDeclareVariable(float3, target, , );
rtDeclareVariable(float, roll_angle, , );
rtDeclareVariable(float3, intrisics, , ); // [fovy, cx, cy]
RT_PROGRAM void pinhole_camera() {
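    // One launch index per pixel: map it to a normalized screen coordinate, trace a single radiance
    // ray from the eye, and write the shaded result into the output buffer.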
optix::size_t2 screen = output_buffer.size();
float2 d = make_float2(launch_index) / make_float2(screen);
float3 dir = make_float3(d-0.5, 1);
optix::Ray ray(eye, dir, 0, 0.01);
PerRayData_radiance prd;
prd.result = make_float3(1, 0, 0);
rtTrace(top_object, ray, prd);
output_buffer[launch_index] = make_float4(prd.result, 1);
//rtPrintf("hello");
}
| 5261252f357bdd9f1823efae08d7aee56add242f.cu |
#include <optix.h>
#include <optix_math.h>
#include "common.cuh"
rtDeclareVariable(float3, eye, , );
//rtDeclareVariable(float3, roll_pitch_yaw, , );
//rtDeclareVariable(float3, fovy_cx_cy, , );
rtDeclareVariable(float3, target, , );
rtDeclareVariable(float, roll_angle, , );
rtDeclareVariable(float3, intrisics, , ); // [fovy, cx, cy]
RT_PROGRAM void pinhole_camera() {
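    // One launch index per pixel: map it to a normalized screen coordinate, trace a single radiance
    // ray from the eye, and write the shaded result into the output buffer.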
optix::size_t2 screen = output_buffer.size();
float2 d = make_float2(launch_index) / make_float2(screen);
float3 dir = make_float3(d-0.5, 1);
optix::Ray ray(eye, dir, 0, 0.01);
PerRayData_radiance prd;
prd.result = make_float3(1, 0, 0);
rtTrace(top_object, ray, prd);
output_buffer[launch_index] = make_float4(prd.result, 1);
//rtPrintf("hello");
}
|
f52f440090b6a98cf4d1baf30ecac13fc61521b2.hip | // !!! This is a file automatically generated by hipify!!!
void bridge::construct_empirical_ci(){
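    // Build empirical confidence intervals for each cluster mean from the per-experiment estimates
    // (the outlier filter below is currently disabled via if(0)).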
hipMemcpy(host_empirical_mu, device_empirical_mu,
sizeof(double) * N_CLUSTERS * n_experiments, hipMemcpyDeviceToHost);
// hipMemcpy(is_outlier, device_is_outlier,
// sizeof(int) * n_experiments, hipMemcpyDeviceToHost);
// estimate standard deviation
double avg_m[N_CLUSTERS];
double sd_m_est[N_CLUSTERS];
for(int k = 0; k < N_CLUSTERS; k++){
avg_m[k] = 0;
sd_m_est[k] = 0;
for(int n = 0; n < n_experiments; n++){
avg_m[k] += host_empirical_mu[k*n_experiments + n];
}
avg_m[k] /= double(n_experiments);
for(int n = 0; n < n_experiments; n++){
sd_m_est[k] +=
(host_empirical_mu[k*n_experiments + n] - avg_m[k])
*
(host_empirical_mu[k*n_experiments + n] - avg_m[k]);
}
sd_m_est[k] /= double(n_experiments - 1);
sd_m_est[k] = sqrt(sd_m_est[k]);
}
// detect outlier
for(int n = 0; n < n_experiments; n++){
is_outlier[n] = 0;
for(int k = 0; k < N_CLUSTERS; k++){
// if(fabs(host_empirical_mu[k*n_experiments + n] - avg_m[k]) > sd_m_est[k] * 3 ){
if(0){
is_outlier[n] = 1;
n_outliers ++;
break;
}
}
}
for(int k = 0; k < N_CLUSTERS; k++){
avg_m[k] = 0;
sd_m_est[k] = 0;
for(int n = 0; n < n_experiments; n++){
if(is_outlier[n]) continue;
avg_m[k] += host_empirical_mu[k*n_experiments + n];
}
avg_m[k] /= double(n_experiments - n_outliers);
for(int n = 0; n < n_experiments; n++){
if(is_outlier[n]) continue;
sd_m_est[k] +=
(host_empirical_mu[k*n_experiments + n] - avg_m[k])
*
(host_empirical_mu[k*n_experiments + n] - avg_m[k]);
}
sd_m_est[k] /= double(n_experiments - n_outliers - 1);
sd_m_est[k] = sqrt(sd_m_est[k]);
empirical_ci[k][0] = host_dev_settings.l_vars.mu[k] -
(*host_dev_settings.ci_quantile) * sd_m_est[k];
empirical_ci[k][1] = host_dev_settings.l_vars.mu[k] +
(*host_dev_settings.ci_quantile) * sd_m_est[k];
}
}
| f52f440090b6a98cf4d1baf30ecac13fc61521b2.cu | void bridge::construct_empirical_ci(){
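    // Build empirical confidence intervals for each cluster mean from the per-experiment estimates
    // (the outlier filter below is currently disabled via if(0)).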
cudaMemcpy(host_empirical_mu, device_empirical_mu,
sizeof(double) * N_CLUSTERS * n_experiments, cudaMemcpyDeviceToHost);
// cudaMemcpy(is_outlier, device_is_outlier,
// sizeof(int) * n_experiments, cudaMemcpyDeviceToHost);
// estimate standard deviation
double avg_m[N_CLUSTERS];
double sd_m_est[N_CLUSTERS];
for(int k = 0; k < N_CLUSTERS; k++){
avg_m[k] = 0;
sd_m_est[k] = 0;
for(int n = 0; n < n_experiments; n++){
avg_m[k] += host_empirical_mu[k*n_experiments + n];
}
avg_m[k] /= double(n_experiments);
for(int n = 0; n < n_experiments; n++){
sd_m_est[k] +=
(host_empirical_mu[k*n_experiments + n] - avg_m[k])
*
(host_empirical_mu[k*n_experiments + n] - avg_m[k]);
}
sd_m_est[k] /= double(n_experiments - 1);
sd_m_est[k] = sqrt(sd_m_est[k]);
}
// detect outlier
for(int n = 0; n < n_experiments; n++){
is_outlier[n] = 0;
for(int k = 0; k < N_CLUSTERS; k++){
// if(fabs(host_empirical_mu[k*n_experiments + n] - avg_m[k]) > sd_m_est[k] * 3 ){
if(0){
is_outlier[n] = 1;
n_outliers ++;
break;
}
}
}
for(int k = 0; k < N_CLUSTERS; k++){
avg_m[k] = 0;
sd_m_est[k] = 0;
for(int n = 0; n < n_experiments; n++){
if(is_outlier[n]) continue;
avg_m[k] += host_empirical_mu[k*n_experiments + n];
}
avg_m[k] /= double(n_experiments - n_outliers);
for(int n = 0; n < n_experiments; n++){
if(is_outlier[n]) continue;
sd_m_est[k] +=
(host_empirical_mu[k*n_experiments + n] - avg_m[k])
*
(host_empirical_mu[k*n_experiments + n] - avg_m[k]);
}
sd_m_est[k] /= double(n_experiments - n_outliers - 1);
sd_m_est[k] = sqrt(sd_m_est[k]);
empirical_ci[k][0] = host_dev_settings.l_vars.mu[k] -
(*host_dev_settings.ci_quantile) * sd_m_est[k];
empirical_ci[k][1] = host_dev_settings.l_vars.mu[k] +
(*host_dev_settings.ci_quantile) * sd_m_est[k];
}
}
|
487843fbf892b209ad4aa6a4e520cc7ebc6abb1c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2016 by Contributors
* \file multibox_prior.cu
* \brief generate multibox prior boxes cuda kernels
* \author Joshua Zhang
*/
#include "./multibox_prior-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOXPRIOR_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template<typename DType>
__global__ void AssignPriors(DType *out, const float size,
const float sqrt_ratio, const int in_width,
const int in_height, const float step_x,
const float step_y, const float center_offy,
const float center_offx, const int stride,
const int offset) {
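    // One thread per feature-map cell: write the (xmin, ymin, xmax, ymax) corners of one prior box
    // centered on that cell into the output tensor.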
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= in_width * in_height) return;
int r = index / in_width;
int c = index % in_width;
float center_x = (c + center_offx) * step_x;
float center_y = (r + center_offy) * step_y;
float w = size / in_width * in_height * sqrt_ratio / 2; // half width
float h = size / sqrt_ratio / 2; // half height
DType *ptr = out + index * stride + 4 * offset;
*(ptr++) = center_x - w; // xmin
*(ptr++) = center_y - h; // ymin
*(ptr++) = center_x + w; // xmax
*(ptr++) = center_y + h; // ymax
}
} // namespace cuda
template<typename DType>
inline void MultiBoxPriorForward(const Tensor<gpu, 2, DType> &out,
const std::vector<float> &sizes,
const std::vector<float> &ratios,
const int in_width, const int in_height,
const std::vector<float> &steps,
const std::vector<float> &offsets) {
CHECK_EQ(out.CheckContiguous(), true);
hipStream_t stream = Stream<gpu>::GetStream(out.stream_);
DType *out_ptr = out.dptr_;
const float step_x = steps[1];
const float step_y = steps[0];
const float offset_x = offsets[1];
const float offset_y = offsets[0];
const int num_sizes = static_cast<int>(sizes.size());
const int num_ratios = static_cast<int>(ratios.size());
const int num_thread = cuda::kMaxThreadsPerBlock;
dim3 dimBlock(num_thread);
dim3 dimGrid((in_width * in_height - 1) / num_thread + 1);
cuda::CheckLaunchParam(dimGrid, dimBlock, "MultiBoxPrior Forward");
const int stride = 4 * (num_sizes + num_ratios - 1);
int offset = 0;
// ratio = 1, various sizes
for (int i = 0; i < num_sizes; ++i) {
hipLaunchKernelGGL(( cuda::AssignPriors<DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, out_ptr,
sizes[i], 1.f, in_width, in_height, step_x, step_y, offset_y, offset_x, stride, offset);
++offset;
}
MULTIBOXPRIOR_CUDA_CHECK(hipPeekAtLastError());
// size = sizes[0], various ratios
for (int j = 1; j < num_ratios; ++j) {
hipLaunchKernelGGL(( cuda::AssignPriors<DType>), dim3(dimGrid), dim3(dimBlock), 0, stream, out_ptr,
sizes[0], sqrtf(ratios[j]), in_width, in_height, step_x, step_y,
offset_y, offset_x, stride, offset);
++offset;
}
MULTIBOXPRIOR_CUDA_CHECK(hipPeekAtLastError());
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(MultiBoxPriorParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiBoxPriorOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
| 487843fbf892b209ad4aa6a4e520cc7ebc6abb1c.cu | /*!
* Copyright (c) 2016 by Contributors
* \file multibox_prior.cu
* \brief generate multibox prior boxes cuda kernels
* \author Joshua Zhang
*/
#include "./multibox_prior-inl.h"
#include <mshadow/cuda/tensor_gpu-inl.cuh>
#define MULTIBOXPRIOR_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow {
namespace cuda {
template<typename DType>
__global__ void AssignPriors(DType *out, const float size,
const float sqrt_ratio, const int in_width,
const int in_height, const float step_x,
const float step_y, const float center_offy,
const float center_offx, const int stride,
const int offset) {
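    // One thread per feature-map cell: write the (xmin, ymin, xmax, ymax) corners of one prior box
    // centered on that cell into the output tensor.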
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= in_width * in_height) return;
int r = index / in_width;
int c = index % in_width;
float center_x = (c + center_offx) * step_x;
float center_y = (r + center_offy) * step_y;
float w = size / in_width * in_height * sqrt_ratio / 2; // half width
float h = size / sqrt_ratio / 2; // half height
DType *ptr = out + index * stride + 4 * offset;
*(ptr++) = center_x - w; // xmin
*(ptr++) = center_y - h; // ymin
*(ptr++) = center_x + w; // xmax
*(ptr++) = center_y + h; // ymax
}
} // namespace cuda
template<typename DType>
inline void MultiBoxPriorForward(const Tensor<gpu, 2, DType> &out,
const std::vector<float> &sizes,
const std::vector<float> &ratios,
const int in_width, const int in_height,
const std::vector<float> &steps,
const std::vector<float> &offsets) {
CHECK_EQ(out.CheckContiguous(), true);
cudaStream_t stream = Stream<gpu>::GetStream(out.stream_);
DType *out_ptr = out.dptr_;
const float step_x = steps[1];
const float step_y = steps[0];
const float offset_x = offsets[1];
const float offset_y = offsets[0];
const int num_sizes = static_cast<int>(sizes.size());
const int num_ratios = static_cast<int>(ratios.size());
const int num_thread = cuda::kMaxThreadsPerBlock;
dim3 dimBlock(num_thread);
dim3 dimGrid((in_width * in_height - 1) / num_thread + 1);
cuda::CheckLaunchParam(dimGrid, dimBlock, "MultiBoxPrior Forward");
const int stride = 4 * (num_sizes + num_ratios - 1);
int offset = 0;
// ratio = 1, various sizes
for (int i = 0; i < num_sizes; ++i) {
cuda::AssignPriors<DType><<<dimGrid, dimBlock, 0, stream>>>(out_ptr,
sizes[i], 1.f, in_width, in_height, step_x, step_y, offset_y, offset_x, stride, offset);
++offset;
}
MULTIBOXPRIOR_CUDA_CHECK(cudaPeekAtLastError());
// size = sizes[0], various ratios
for (int j = 1; j < num_ratios; ++j) {
cuda::AssignPriors<DType><<<dimGrid, dimBlock, 0, stream>>>(out_ptr,
sizes[0], sqrtf(ratios[j]), in_width, in_height, step_x, step_y,
offset_y, offset_x, stride, offset);
++offset;
}
MULTIBOXPRIOR_CUDA_CHECK(cudaPeekAtLastError());
}
} // namespace mshadow
namespace mxnet {
namespace op {
template<>
Operator* CreateOp<gpu>(MultiBoxPriorParam param, int dtype) {
Operator *op = NULL;
MSHADOW_REAL_TYPE_SWITCH(dtype, DType, {
op = new MultiBoxPriorOp<gpu, DType>(param);
});
return op;
}
} // namespace op
} // namespace mxnet
|
3917ae30156c5d51d78f1ffef32b2ff6b55cf37e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include "common/book.h"
#include "common/cpu_bitmap.h"
#include <fstream>
#define N 10000
#define DIM 1024
__global__ void add(int *a, int *b, int *c){
int tid = blockIdx.x;
if(tid < N)
c[tid] = a[tid] + b[tid];
}
struct hipComplex {
float r;
float i;
__device__ hipComplex(float a,float b):r(a),i(b){}
__device__ float magnitude2(void){return r*r + i*i;}
__device__ hipComplex operator*(const hipComplex&a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator+(const hipComplex & a){
return hipComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y){
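    // Returns 1 if the point stays bounded after 200 iterations of z = z*z + c, 0 otherwise.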
const float scale = 1.5;
float jx = scale * (float) (DIM/2-x)/(DIM/2);
float jy = scale * (float) (DIM/2-y)/(DIM/2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
for(int i=0;i<200;i++){
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia(x,y);
ptr[offset*4 + 0 ] = 255 * juliaValue;
ptr[offset*4 + 1 ] = 0;
ptr[offset*4 + 2 ] = 0;
ptr[offset*4 + 3 ] = 255;
}
int main(void){
CPUBitmap bitmap(DIM,DIM);
unsigned char *dev_bitmap;
hipMalloc((void**)&dev_bitmap,bitmap.image_size());
dim3 grid(DIM,DIM);
hipLaunchKernelGGL(( kernel), dim3(grid),dim3(1), 0, 0, dev_bitmap);
hipMemcpy(bitmap.get_ptr(),dev_bitmap,bitmap.image_size(),hipMemcpyDeviceToHost);
std::ofstream output;
output.open("julia.bmp");
for(int i=0;i<bitmap.image_size();i++){
        output << *(bitmap.get_ptr() + i);
}
output.close();
bitmap.display_and_exit();
hipFree(dev_bitmap);
return 0;
}
| 3917ae30156c5d51d78f1ffef32b2ff6b55cf37e.cu | #include <iostream>
#include "common/book.h"
#include "common/cpu_bitmap.h"
#include <fstream>
#define N 10000
#define DIM 1024
__global__ void add(int *a, int *b, int *c){
int tid = blockIdx.x;
if(tid < N)
c[tid] = a[tid] + b[tid];
}
struct cuComplex {
float r;
float i;
__device__ cuComplex(float a,float b):r(a),i(b){}
__device__ float magnitude2(void){return r*r + i*i;}
__device__ cuComplex operator*(const cuComplex&a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator+(const cuComplex & a){
return cuComplex(r + a.r, i + a.i);
}
};
__device__ int julia(int x, int y){
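    // Returns 1 if the point stays bounded after 200 iterations of z = z*z + c, 0 otherwise.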
const float scale = 1.5;
float jx = scale * (float) (DIM/2-x)/(DIM/2);
float jy = scale * (float) (DIM/2-y)/(DIM/2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
for(int i=0;i<200;i++){
a = a * a + c;
if (a.magnitude2() > 1000)
return 0;
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int juliaValue = julia(x,y);
ptr[offset*4 + 0 ] = 255 * juliaValue;
ptr[offset*4 + 1 ] = 0;
ptr[offset*4 + 2 ] = 0;
ptr[offset*4 + 3 ] = 255;
}
int main(void){
CPUBitmap bitmap(DIM,DIM);
unsigned char *dev_bitmap;
cudaMalloc((void**)&dev_bitmap,bitmap.image_size());
dim3 grid(DIM,DIM);
kernel<<<grid,1>>>(dev_bitmap);
cudaMemcpy(bitmap.get_ptr(),dev_bitmap,bitmap.image_size(),cudaMemcpyDeviceToHost);
std::ofstream output;
output.open("julia.bmp");
for(int i=0;i<bitmap.image_size();i++){
        output << *(bitmap.get_ptr() + i);
}
output.close();
bitmap.display_and_exit();
cudaFree(dev_bitmap);
return 0;
}
|
967f2f844affca7a64e4435be2105e6c1a677ce7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Created by gautam on 28/04/20.
//
#include <float.h>
#include "myExpr.cuh"
#define NUM_THREADS 512
myExpr newExpr(myExprType type, long intVal) {
myExpr expr;
expr.type = type;
expr.iVal = (int)intVal;
expr.fVal = 0.0f;
expr.sVal[0] = 0;
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
myExpr newExpr(myExprType type, float fVal){
myExpr expr;
expr.type = type;
expr.iVal = 0;
expr.fVal = fVal;
expr.sVal[0] = 0;
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
myExpr newExpr(myExprType type, char *sVal){
myExpr expr;
expr.type = type;
expr.iVal = 0;
expr.fVal = 0.0f;
// expr->sVal = new char[strlen(sVal) + 1];
stpcpy(expr.sVal, sVal);
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
myExpr newExpr(myExprType type){
myExpr expr;
expr.type = type;
expr.iVal = 0;
expr.fVal = 0.0f;
expr.sVal[0] = 0;
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
void freeExpr(myExpr *expr){
free(expr);
}
__device__ float atomicMax(float* address, float val)
{
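    // Emulates an atomic max on floats with a compare-and-swap loop over the value's bit pattern.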
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__global__ void minKernel(void *data, const int colPos, const int rowSize, const int numRows, int *min) {
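    // Each thread scans its contiguous share of rows, skipping nulls, and folds its local minimum
    // into *min with atomicMin.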
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadMin = *min;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMin = threadMin < *currVal ? threadMin : *currVal;
}
atomicMin(min, threadMin);
}
__global__ void minKernel(void *data, const int colPos, const int rowSize, const int numRows, float *min) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMin = *min;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMin = threadMin < *currVal ? threadMin : *currVal;
}
atomicMin(min, threadMin);
}
__global__ void maxKernel(void *data, const int colPos, const int rowSize, const int numRows, int *max) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadMax = *max;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMax = threadMax > *currVal ? threadMax : *currVal;
}
atomicMax(max, threadMax);
}
__global__ void maxKernel(void *data, const int colPos, const int rowSize, const int numRows, float *max) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMax = *max;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMax = threadMax > *currVal ? threadMax : *currVal;
}
atomicMax(max, threadMax);
}
__global__ void sumKernel(void *data, const int colPos, const int rowSize, const int numRows, int *sum) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadSum = *sum;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadSum += *currVal;
}
atomicAdd(sum, threadSum);
}
__global__ void sumKernel(void *data, const int colPos, const int rowSize, const int numRows, float *sum) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMax = *sum;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMax += *currVal;
}
atomicAdd(sum, threadMax);
}
__global__ void avgKernel(void *data, const int colPos, const int rowSize, const int numRows, int *sum, long *count) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadSum = *sum;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal)){
threadSum += *currVal;
atomicInc(reinterpret_cast<unsigned int *>(count), INT_MAX);
}
}
atomicAdd(sum, threadSum);
}
__global__ void avgKernel(void *data, const int colPos, const int rowSize, const int numRows, float *sum, long *count) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMax = *sum;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal)){
threadMax += *currVal;
atomicInc(reinterpret_cast<unsigned int *>(count), INT_MAX);
}
}
atomicAdd(sum, threadMax);
}
__global__ void countKernel(void *data, const int colPos, const int rowSize, const int numRows, long *count) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
atomicInc(reinterpret_cast<unsigned int *>(count), INT_MAX);
}
}
void exprToVec(hsql::Expr *expr, std::vector<myExpr> &vector, const std::vector<std::string>& colNames, Data &d) {
switch (expr->type) {
case hsql::kExprLiteralFloat:
vector.push_back(newExpr(CONSTANT_FLT, expr->fval));
break;
case hsql::kExprLiteralString:
vector.push_back(newExpr(CONSTANT_STR, expr->name));
break;
case hsql::kExprLiteralInt:
vector.push_back(newExpr(CONSTANT_INT, expr->ival));
break;
case hsql::kExprStar:
printf("Why is there a `*` here?");
break;
case hsql::kExprPlaceholder:
printf("What is this?");
break;
case hsql::kExprColumnRef:{
int i;
for (i = 0; i < colNames.size(); i++) {
if (colNames[i] == expr->name) break;
}
vector.push_back(newExpr(COL_NAME, (long)i));
break;
}
case hsql::kExprFunctionRef: {
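        // Aggregate call (min/max/sum/avg/count): stream the table through the GPU in chunks,
        // reduce on the device, and replace the call with a constant expression node.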
// printf("%s\n", expr->name);
int oldChunkSize = d.chunkSize;
d.chunkSize *= 10;
void *data = malloc(d.chunkSize * d.mdata.rowSize);
void *data_d;
hipMalloc(&data_d, d.chunkSize * d.mdata.rowSize);
int rowsRead = d.read(data);
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
std::string colName = expr->exprList->at(0)->name;
// printf("Col for agg function is %s:%d\n", colName.c_str(), d.mdata.colMap[colName]);
fflush(stdout);
int colPos = 0;
int resType = TYPE_INT;
for (int i = 0; i < colNames.size(); ++i) {
if (colNames[i] == colName) {
resType = d.mdata.datatypes[i].type;
break;
}
colPos += d.mdata.datatypes[i].size;
}
if (strcmp(expr->name, "min") == 0) {
if (resType == TYPE_INT) {
int min_h = INT_MAX;
int *min;
hipMalloc(&min, sizeof(int));
hipMemcpy(min, &min_h, sizeof(int), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( minKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, min);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&min_h, min, sizeof(int), hipMemcpyDeviceToHost);
hipFree(min);
printf("Min value is: %d\n", min_h);
vector.push_back(newExpr(CONSTANT_INT, (long) min_h));
} else if (resType == TYPE_FLOAT) {
float min_h = FLT_MAX;
float *min;
hipMalloc(&min, sizeof(float));
hipMemcpy(min, &min_h, sizeof(float), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( minKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, min);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&min_h, min, sizeof(float), hipMemcpyDeviceToHost);
hipFree(min);
vector.push_back(newExpr(CONSTANT_FLT, min_h));
}
} else if (strcmp(expr->name, "max") == 0) {
if (resType == TYPE_INT) {
int max_h = INT_MIN;
int *max;
hipMalloc(&max, sizeof(int));
hipMemcpy(max, &max_h, sizeof(int), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( maxKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, max);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&max_h, max, sizeof(int), hipMemcpyDeviceToHost);
hipFree(max);
printf("Max value is: %d\n", max_h);
vector.push_back(newExpr(CONSTANT_INT, (long) max_h));
} else if (resType == TYPE_FLOAT) {
float max_h = FLT_MIN;
float *max;
hipMalloc(&max, sizeof(float));
hipMemcpy(max, &max_h, sizeof(float), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( maxKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, max);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&max_h, max, sizeof(float), hipMemcpyDeviceToHost);
hipFree(max);
vector.push_back(newExpr(CONSTANT_FLT, max_h));
}
} else if (strcmp(expr->name, "sum") == 0) {
if (resType == TYPE_INT) {
int sum_h = 0;
int *sum;
hipMalloc(&sum, sizeof(int));
hipMemcpy(sum, &sum_h, sizeof(int), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( sumKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, sum);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&sum_h, sum, sizeof(int), hipMemcpyDeviceToHost);
hipFree(sum);
printf("Sum value is: %d\n", sum_h);
vector.push_back(newExpr(CONSTANT_INT, (long) sum_h));
} else if (resType == TYPE_FLOAT) {
float sum_h = 0;
float *sum;
hipMalloc(&sum, sizeof(float));
hipMemcpy(sum, &sum_h, sizeof(float), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( sumKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, sum);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&sum_h, sum, sizeof(float), hipMemcpyDeviceToHost);
hipFree(sum);
vector.push_back(newExpr(CONSTANT_FLT, sum_h));
}
} else if (strcmp(expr->name, "avg") == 0) {
if (resType == TYPE_INT) {
int sum_h = 0;
int *sum;
long count_h = 0;
long *count;
hipMalloc(&sum, sizeof(int));
hipMalloc(&count, sizeof(long));
hipMemcpy(sum, &sum_h, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(count, &count_h, sizeof(long), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( avgKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, sum, count);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&sum_h, sum, sizeof(int), hipMemcpyDeviceToHost);
hipMemcpy(&count_h, count, sizeof(long), hipMemcpyDeviceToHost);
hipFree(sum);
hipFree(count);
printf("sum , count : %d, %ld \n", sum_h, count_h);
printf("avg value is: %f\n", (float)sum_h/count_h);
vector.push_back(newExpr(CONSTANT_FLT, (float)sum_h/count_h));
} else if (resType == TYPE_FLOAT) {
                float sum_h = 0;
float *sum;
long count_h = 0;
long *count;
hipMalloc(&count, sizeof(long));
hipMalloc(&sum, sizeof(float));
hipMemcpy(count, &count_h, sizeof(long), hipMemcpyHostToDevice);
hipMemcpy(sum, &sum_h, sizeof(float), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( avgKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, sum, count);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
hipMemcpy(&sum_h, sum, sizeof(float), hipMemcpyDeviceToHost);
hipMemcpy(&count_h, count, sizeof(long), hipMemcpyDeviceToHost);
hipFree(count);
hipFree(sum);
vector.push_back(newExpr(CONSTANT_FLT, sum_h/count_h));
}
} else if (strcmp(expr->name, "count") == 0) {
long count = 0;
long *count_h;
hipMalloc(&count_h, sizeof(long));
hipMemcpy(count_h, &count, sizeof(long), hipMemcpyHostToDevice);
while (rowsRead > 0) {
hipLaunchKernelGGL(( countKernel), dim3(1), dim3(NUM_THREADS), 0, 0, data_d, colPos, d.mdata.rowSize, rowsRead, count_h);
rowsRead = d.read(data);
hipDeviceSynchronize();
hipMemcpy(data_d, data, rowsRead * d.mdata.rowSize, hipMemcpyHostToDevice);
}
            hipMemcpy(&count, count_h, sizeof(long), hipMemcpyDeviceToHost);
hipFree(count_h);
printf("count is: %ld\n", count);
vector.push_back(newExpr(CONSTANT_INT, count));
}
d.chunkSize = oldChunkSize;
d.restartRead();
free(data);
hipFree(data_d);
break;
}
case hsql::kExprOperator: {
myExpr temp = newExpr(getOpType(expr->opType, expr->opChar));
vector.push_back(temp);
int curr = (int)vector.size() - 1;
vector[curr].childLeft = vector.size();
exprToVec(expr->expr, vector, colNames, d);
if (expr->expr2 != nullptr) {
vector[curr].childRight = vector.size();
exprToVec(expr->expr2, vector, colNames, d);
}
break;
}
case hsql::kExprSelect:
printf("Not yet implemented");
break;
}
}
myExprType getOpType(hsql::Expr::OperatorType type, char opChar) {
// TODO: Change Error to correct Constants
switch (type) {
case hsql::Expr::NONE:
return CONSTANT_ERR;
case hsql::Expr::BETWEEN:
return CONSTANT_ERR;
case hsql::Expr::CASE:
return CONSTANT_ERR;
case hsql::Expr::SIMPLE_OP:
switch (opChar) {
case '+':
return OPERATOR_PL;
case '-':
return OPERATOR_MI;
case '*':
return OPERATOR_MU;
case '/':
return OPERATOR_DI;
case '%':
return OPERATOR_MO;
case '=':
return OPERATOR_EQ;
case '<':
return OPERATOR_LT;
case '>':
return OPERATOR_GT;
default:
return CONSTANT_ERR;
}
case hsql::Expr::NOT_EQUALS:
return OPERATOR_NE;
case hsql::Expr::LESS_EQ:
return OPERATOR_LE;
case hsql::Expr::GREATER_EQ:
return OPERATOR_GE;
case hsql::Expr::LIKE:
return CONSTANT_ERR;
case hsql::Expr::NOT_LIKE:
return CONSTANT_ERR;
case hsql::Expr::AND:
return OPERATOR_AND;
case hsql::Expr::OR:
return OPERATOR_OR;
case hsql::Expr::IN:
return CONSTANT_ERR;
case hsql::Expr::NOT:
return OPERATOR_NOT;
case hsql::Expr::UMINUS:
return OPERATOR_UMI;
case hsql::Expr::ISNULL:
return CONSTANT_ERR;
case hsql::Expr::EXISTS:
return CONSTANT_ERR;
}
return CONSTANT_ERR;
}
| 967f2f844affca7a64e4435be2105e6c1a677ce7.cu | //
// Created by gautam on 28/04/20.
//
#include <float.h>
#include "myExpr.cuh"
#define NUM_THREADS 512
myExpr newExpr(myExprType type, long intVal) {
myExpr expr;
expr.type = type;
expr.iVal = (int)intVal;
expr.fVal = 0.0f;
expr.sVal[0] = 0;
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
myExpr newExpr(myExprType type, float fVal){
myExpr expr;
expr.type = type;
expr.iVal = 0;
expr.fVal = fVal;
expr.sVal[0] = 0;
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
myExpr newExpr(myExprType type, char *sVal){
myExpr expr;
expr.type = type;
expr.iVal = 0;
expr.fVal = 0.0f;
// expr->sVal = new char[strlen(sVal) + 1];
stpcpy(expr.sVal, sVal);
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
myExpr newExpr(myExprType type){
myExpr expr;
expr.type = type;
expr.iVal = 0;
expr.fVal = 0.0f;
expr.sVal[0] = 0;
expr.childLeft = -1;
expr.childRight = -1;
return expr;
}
void freeExpr(myExpr *expr){
free(expr);
}
__device__ float atomicMax(float* address, float val)
{
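    // Emulates an atomic max on floats with a compare-and-swap loop over the value's bit pattern.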
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fmaxf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__global__ void minKernel(void *data, const int colPos, const int rowSize, const int numRows, int *min) {
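    // Each thread scans its contiguous share of rows, skipping nulls, and folds its local minimum
    // into *min with atomicMin.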
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadMin = *min;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMin = threadMin < *currVal ? threadMin : *currVal;
}
atomicMin(min, threadMin);
}
__global__ void minKernel(void *data, const int colPos, const int rowSize, const int numRows, float *min) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMin = *min;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMin = threadMin < *currVal ? threadMin : *currVal;
}
atomicMin(min, threadMin);
}
__global__ void maxKernel(void *data, const int colPos, const int rowSize, const int numRows, int *max) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadMax = *max;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMax = threadMax > *currVal ? threadMax : *currVal;
}
atomicMax(max, threadMax);
}
__global__ void maxKernel(void *data, const int colPos, const int rowSize, const int numRows, float *max) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMax = *max;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMax = threadMax > *currVal ? threadMax : *currVal;
}
atomicMax(max, threadMax);
}
__global__ void sumKernel(void *data, const int colPos, const int rowSize, const int numRows, int *sum) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadSum = *sum;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadSum += *currVal;
}
atomicAdd(sum, threadSum);
}
__global__ void sumKernel(void *data, const int colPos, const int rowSize, const int numRows, float *sum) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMax = *sum;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
threadMax += *currVal;
}
atomicAdd(sum, threadMax);
}
__global__ void avgKernel(void *data, const int colPos, const int rowSize, const int numRows, int *sum, long *count) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int threadSum = *sum;
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal)){
threadSum += *currVal;
atomicInc(reinterpret_cast<unsigned int *>(count), INT_MAX);
}
}
atomicAdd(sum, threadSum);
}
__global__ void avgKernel(void *data, const int colPos, const int rowSize, const int numRows, float *sum, long *count) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
float threadMax = *sum;
float *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (float *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal)){
threadMax += *currVal;
atomicInc(reinterpret_cast<unsigned int *>(count), INT_MAX);
}
}
atomicAdd(sum, threadMax);
}
__global__ void countKernel(void *data, const int colPos, const int rowSize, const int numRows, long *count) {
int rowsPerBlock = (numRows + NUM_THREADS - 1) / NUM_THREADS;
unsigned int start = rowsPerBlock * threadIdx.x;
unsigned int end = rowsPerBlock * (threadIdx.x + 1);
int *currVal;
for (unsigned int i = start; i < end; ++i) {
if (i >= numRows) break;
currVal = (int *)((char *)data + i * rowSize + colPos);
if(!isNull(currVal))
atomicInc(reinterpret_cast<unsigned int *>(count), INT_MAX);
}
}
void exprToVec(hsql::Expr *expr, std::vector<myExpr> &vector, const std::vector<std::string>& colNames, Data &d) {
switch (expr->type) {
case hsql::kExprLiteralFloat:
vector.push_back(newExpr(CONSTANT_FLT, expr->fval));
break;
case hsql::kExprLiteralString:
vector.push_back(newExpr(CONSTANT_STR, expr->name));
break;
case hsql::kExprLiteralInt:
vector.push_back(newExpr(CONSTANT_INT, expr->ival));
break;
case hsql::kExprStar:
printf("Why is there a `*` here?");
break;
case hsql::kExprPlaceholder:
printf("What is this?");
break;
case hsql::kExprColumnRef:{
int i;
for (i = 0; i < colNames.size(); i++) {
if (colNames[i] == expr->name) break;
}
vector.push_back(newExpr(COL_NAME, (long)i));
break;
}
case hsql::kExprFunctionRef: {
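        // Aggregate call (min/max/sum/avg/count): stream the table through the GPU in chunks,
        // reduce on the device, and replace the call with a constant expression node.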
// printf("%s\n", expr->name);
int oldChunkSize = d.chunkSize;
d.chunkSize *= 10;
void *data = malloc(d.chunkSize * d.mdata.rowSize);
void *data_d;
cudaMalloc(&data_d, d.chunkSize * d.mdata.rowSize);
int rowsRead = d.read(data);
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
std::string colName = expr->exprList->at(0)->name;
// printf("Col for agg function is %s:%d\n", colName.c_str(), d.mdata.colMap[colName]);
fflush(stdout);
int colPos = 0;
int resType = TYPE_INT;
for (int i = 0; i < colNames.size(); ++i) {
if (colNames[i] == colName) {
resType = d.mdata.datatypes[i].type;
break;
}
colPos += d.mdata.datatypes[i].size;
}
if (strcmp(expr->name, "min") == 0) {
if (resType == TYPE_INT) {
int min_h = INT_MAX;
int *min;
cudaMalloc(&min, sizeof(int));
cudaMemcpy(min, &min_h, sizeof(int), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
minKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, min);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&min_h, min, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(min);
printf("Min value is: %d\n", min_h);
vector.push_back(newExpr(CONSTANT_INT, (long) min_h));
} else if (resType == TYPE_FLOAT) {
float min_h = FLT_MAX;
float *min;
cudaMalloc(&min, sizeof(float));
cudaMemcpy(min, &min_h, sizeof(float), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
minKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, min);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&min_h, min, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(min);
vector.push_back(newExpr(CONSTANT_FLT, min_h));
}
} else if (strcmp(expr->name, "max") == 0) {
if (resType == TYPE_INT) {
int max_h = INT_MIN;
int *max;
cudaMalloc(&max, sizeof(int));
cudaMemcpy(max, &max_h, sizeof(int), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
maxKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, max);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&max_h, max, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(max);
printf("Max value is: %d\n", max_h);
vector.push_back(newExpr(CONSTANT_INT, (long) max_h));
} else if (resType == TYPE_FLOAT) {
float max_h = FLT_MIN;
float *max;
cudaMalloc(&max, sizeof(float));
cudaMemcpy(max, &max_h, sizeof(float), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
maxKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, max);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&max_h, max, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(max);
vector.push_back(newExpr(CONSTANT_FLT, max_h));
}
} else if (strcmp(expr->name, "sum") == 0) {
if (resType == TYPE_INT) {
int sum_h = 0;
int *sum;
cudaMalloc(&sum, sizeof(int));
cudaMemcpy(sum, &sum_h, sizeof(int), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
sumKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, sum);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&sum_h, sum, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(sum);
printf("Sum value is: %d\n", sum_h);
vector.push_back(newExpr(CONSTANT_INT, (long) sum_h));
} else if (resType == TYPE_FLOAT) {
float sum_h = 0;
float *sum;
cudaMalloc(&sum, sizeof(float));
cudaMemcpy(sum, &sum_h, sizeof(float), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
sumKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, sum);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&sum_h, sum, sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(sum);
vector.push_back(newExpr(CONSTANT_FLT, sum_h));
}
} else if (strcmp(expr->name, "avg") == 0) {
if (resType == TYPE_INT) {
int sum_h = 0;
int *sum;
long count_h = 0;
long *count;
cudaMalloc(&sum, sizeof(int));
cudaMalloc(&count, sizeof(long));
cudaMemcpy(sum, &sum_h, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(count, &count_h, sizeof(long), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
avgKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, sum, count);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&sum_h, sum, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&count_h, count, sizeof(long), cudaMemcpyDeviceToHost);
cudaFree(sum);
cudaFree(count);
printf("sum , count : %d, %ld \n", sum_h, count_h);
printf("avg value is: %f\n", (float)sum_h/count_h);
vector.push_back(newExpr(CONSTANT_FLT, (float)sum_h/count_h));
} else if (resType == TYPE_FLOAT) {
                float sum_h = 0;
float *sum;
long count_h = 0;
long *count;
cudaMalloc(&count, sizeof(long));
cudaMalloc(&sum, sizeof(float));
cudaMemcpy(count, &count_h, sizeof(long), cudaMemcpyHostToDevice);
cudaMemcpy(sum, &sum_h, sizeof(float), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
avgKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, sum, count);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
cudaMemcpy(&sum_h, sum, sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(&count_h, count, sizeof(long), cudaMemcpyDeviceToHost);
cudaFree(count);
cudaFree(sum);
vector.push_back(newExpr(CONSTANT_FLT, sum_h/count_h));
}
} else if (strcmp(expr->name, "count") == 0) {
long count = 0;
long *count_h;
cudaMalloc(&count_h, sizeof(long));
cudaMemcpy(count_h, &count, sizeof(long), cudaMemcpyHostToDevice);
while (rowsRead > 0) {
countKernel<<<1, NUM_THREADS>>>(data_d, colPos, d.mdata.rowSize, rowsRead, count_h);
rowsRead = d.read(data);
cudaDeviceSynchronize();
cudaMemcpy(data_d, data, rowsRead * d.mdata.rowSize, cudaMemcpyHostToDevice);
}
            cudaMemcpy(&count, count_h, sizeof(long), cudaMemcpyDeviceToHost);
cudaFree(count_h);
printf("count is: %ld\n", count);
vector.push_back(newExpr(CONSTANT_INT, count));
}
d.chunkSize = oldChunkSize;
d.restartRead();
free(data);
cudaFree(data_d);
break;
}
case hsql::kExprOperator: {
myExpr temp = newExpr(getOpType(expr->opType, expr->opChar));
vector.push_back(temp);
int curr = (int)vector.size() - 1;
vector[curr].childLeft = vector.size();
exprToVec(expr->expr, vector, colNames, d);
if (expr->expr2 != nullptr) {
vector[curr].childRight = vector.size();
exprToVec(expr->expr2, vector, colNames, d);
}
break;
}
case hsql::kExprSelect:
printf("Not yet implemented");
break;
}
}
myExprType getOpType(hsql::Expr::OperatorType type, char opChar) {
// TODO: Change Error to correct Constants
switch (type) {
case hsql::Expr::NONE:
return CONSTANT_ERR;
case hsql::Expr::BETWEEN:
return CONSTANT_ERR;
case hsql::Expr::CASE:
return CONSTANT_ERR;
case hsql::Expr::SIMPLE_OP:
switch (opChar) {
case '+':
return OPERATOR_PL;
case '-':
return OPERATOR_MI;
case '*':
return OPERATOR_MU;
case '/':
return OPERATOR_DI;
case '%':
return OPERATOR_MO;
case '=':
return OPERATOR_EQ;
case '<':
return OPERATOR_LT;
case '>':
return OPERATOR_GT;
default:
return CONSTANT_ERR;
}
case hsql::Expr::NOT_EQUALS:
return OPERATOR_NE;
case hsql::Expr::LESS_EQ:
return OPERATOR_LE;
case hsql::Expr::GREATER_EQ:
return OPERATOR_GE;
case hsql::Expr::LIKE:
return CONSTANT_ERR;
case hsql::Expr::NOT_LIKE:
return CONSTANT_ERR;
case hsql::Expr::AND:
return OPERATOR_AND;
case hsql::Expr::OR:
return OPERATOR_OR;
case hsql::Expr::IN:
return CONSTANT_ERR;
case hsql::Expr::NOT:
return OPERATOR_NOT;
case hsql::Expr::UMINUS:
return OPERATOR_UMI;
case hsql::Expr::ISNULL:
return CONSTANT_ERR;
case hsql::Expr::EXISTS:
return CONSTANT_ERR;
}
return CONSTANT_ERR;
}
|
ab0e59e48a356cf69f723b7807b4a62847f68416.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* An example program utilizing most/all calls from the CUDA
* Runtime API module:
*
* Stream Management
*
*/
#include "../common.hpp"
using std::printf;
template <typename T, size_t N>
struct poor_mans_array {
T data[N];
__host__ __device__ operator T*() { return data; }
__host__ __device__ operator const T*() const { return data; }
__host__ __device__ T& operator [](off_t offset) { return data[offset]; }
__host__ __device__ const T& operator [](off_t offset) const { return data[offset]; }
};
template <size_t N>
poor_mans_array<char, N> message(const char* message_str)
{
poor_mans_array<char, N> a;
assert(std::strlen(message_str) < N);
std::strcpy(a.data, message_str);
return a;
}
template <size_t N>
poor_mans_array<char, N> message(const std::string& message_str)
{
return message<N>(message_str.c_str());
}
template <size_t N, unsigned Index>
__global__ void print_message(poor_mans_array<char, N> message)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
printf("Kernel no. %u says: %s\n", Index, (const char*) message);
}
}
__host__ __device__ void print_first_char(const char* __restrict__ data)
{
printf("data[0] = '%c' (0x%02x)\n", data[0], (unsigned) data[0]);
}
__global__ void print_first_char_kernel(const char* __restrict__ data)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
print_first_char(data);
}
}
__global__ void increment(char* data, size_t length)
{
size_t global_index = threadIdx.x + blockIdx.x * blockDim.x;
if (global_index < length)
data[global_index]++;
}
#if TORCH_HIP_VERSION >= 11000
const char* get_policy_name(cuda::stream::synchronization_policy_t policy)
{
switch(policy) {
case cuda::stream::automatic: return "automatic";
case cuda::stream::spin: return "spin";
case cuda::stream::yield: return "yield";
case cuda::stream::block: return "block";
default:
return "unknown policy";
}
}
#endif // TORCH_HIP_VERSION >= 11000
int main(int argc, char **argv)
{
constexpr const size_t N = 50;
cuda::launch_configuration_t single_thread_config { 1, 1 };
// Being very cavalier about our command-line arguments here...
cuda::device::id_t device_id = (argc > 1) ?
std::stoi(argv[1]) : cuda::device::default_device_id;
if (cuda::device::count() == 0) {
die_("No CUDA devices on this system");
}
auto device = cuda::device::get(device_id).make_current();
std::cout << "Using CUDA device " << device.name() << " (having device ID " << device.id() << ")\n";
// Stream creation and destruction, stream flags
//------------------------------------------------
{
auto stream = cuda::device::current::get().create_stream(cuda::stream::sync);
std::cout
<< "A new CUDA stream with no priority specified defaults to having priority "
<< stream.priority() << ".\n";
stream.enqueue.kernel_launch(print_message<N,1>, single_thread_config, message<N>("I can see my house!"));
stream.synchronize();
}
// Use of the default stream as an rvalue
// ---------------------------------------------
cuda::enqueue_launch(
print_message<N,2>, device.default_stream(), single_thread_config,
message<N>("I was launched on the default stream.")
);
// Everything else - Enqueueing kernel or host-function launches, events
// and memory attachments, recording and waiting on events
//--------------------------------------------------------------
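    // Two streams sharing the same explicit priority, created so that they do not synchronize
    // implicitly with the default stream.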
auto stream_1 = cuda::device::current::get().create_stream(
cuda::stream::default_priority + 1,
cuda::stream::no_implicit_synchronization_with_default_stream);
auto stream_2 = cuda::device::current::get().create_stream(
cuda::stream::default_priority + 1,
cuda::stream::no_implicit_synchronization_with_default_stream);
#if TORCH_HIP_VERSION >= 11000
// Stream synchronization policy and attribute copying
auto initial_policy = stream_1.synchronization_policy();
std::cout
<< "Initial stream synchronization policy is "
<< get_policy_name(initial_policy) << " (numeric value: " << (int) initial_policy << ")\n";
if (initial_policy != stream_2.synchronization_policy()) {
throw std::logic_error("Different synchronization policies for streams created the same way");
}
cuda::stream::synchronization_policy_t alt_policy =
(initial_policy == cuda::stream::yield) ? cuda::stream::block : cuda::stream::yield;
stream_2.set_synchronization_policy(alt_policy);
auto new_s2_policy = stream_2.synchronization_policy();
if (alt_policy != new_s2_policy) {
std::stringstream ss;
ss
<< "Got a different synchronization policy (" << get_policy_name(new_s2_policy) << ")"
<< " than the one we set the stream to (" << get_policy_name(alt_policy) << ")\n";
throw std::logic_error(ss.str());
}
std::cout << "Overwriting all attributes of stream 1 with those of stream 2.\n";
cuda::copy_attributes(stream_1, stream_2);
auto s1_policy_after_copy = stream_1.synchronization_policy();
if (alt_policy != s1_policy_after_copy) {
std::stringstream ss;
ss
<< "Got a different synchronization policy (" << get_policy_name(s1_policy_after_copy) << ")"
<< " than the one we expected after attribute-copying (" << get_policy_name(alt_policy) << ")\n";
throw std::logic_error(ss.str());
}
#endif
constexpr auto buffer_size = 12345678;
auto buffer = cuda::memory::managed::make_unique<char[]>(
buffer_size,
device.supports_concurrent_managed_access() ?
cuda::memory::managed::initial_visibility_t::to_supporters_of_concurrent_managed_access:
cuda::memory::managed::initial_visibility_t::to_all_devices);
print_first_char(buffer.get());
std::fill(buffer.get(), buffer.get() + buffer_size, 'a');
print_first_char(buffer.get());
auto event_1 = cuda::event::create(device, cuda::event::sync_by_blocking);
stream_1.enqueue.kernel_launch(print_message<N,3>, single_thread_config, message<N>("I'm on stream 1"));
stream_1.enqueue.memset(buffer.get(), 'b', buffer_size);
auto callback = [&]() {
std::cout << "Callback from stream 1!... \n";
print_first_char(buffer.get());
};
stream_1.enqueue.host_invokable(callback);
auto threads_per_block = cuda::kernel::get(device, increment).get_attribute(hipFuncAttributeMaxThreadsPerBlocks);
auto num_blocks = div_rounding_up(buffer_size, threads_per_block);
auto launch_config = cuda::make_launch_config(num_blocks, threads_per_block);
// TODO: The following doesn't have much of a meaningful effect; we should modify this example
// so that the attachment has some observable effect
stream_1.enqueue.attach_managed_region(buffer.get());
stream_1.enqueue.kernel_launch(increment, launch_config, buffer.get(), buffer_size);
event_1.record(stream_1);
stream_1.enqueue.kernel_launch(print_message<N,4>, single_thread_config, message<N>("I'm on stream 1"));
stream_2.enqueue.wait(event_1);
stream_2.enqueue.kernel_launch(print_first_char_kernel, launch_config , buffer.get());
stream_2.enqueue.kernel_launch(print_message<N,5>, single_thread_config, message<N>("I'm on stream 2"));
bool idleness_1 = stream_2.has_work_remaining();
device.synchronize();
print_first_char(buffer.get());
// cuda::memory::managed::free(buffer);
bool idleness_2 = stream_2.has_work_remaining();
std::cout << std::boolalpha
<< "Did stream 2 have work before device-level synchronization? " << (idleness_1 ? "yes" : "no") << "\n"
<< "Did stream 2 have work after device-level synchronization? " << (idleness_2 ? "yes" : "no") << "\n";
std::cout << "\nSUCCESS\n";
return EXIT_SUCCESS;
}
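// --------------------------------------------------------------------------
// Illustrative sketch, not part of the original example: the same cross-stream
// ordering that stream_2.enqueue.wait(event_1) expresses above, written with
// raw HIP runtime calls. The function name and buffer size are made up for
// illustration, the function is never called, and error checking is omitted;
// it assumes <hip/hip_runtime.h> is already included by this translation unit.
static void raw_hip_event_ordering_sketch()
{
	char* buf = nullptr;
	hipMalloc(&buf, 1024);
	hipStream_t s1, s2;
	hipStreamCreateWithFlags(&s1, hipStreamNonBlocking);
	hipStreamCreateWithFlags(&s2, hipStreamNonBlocking);
	hipEvent_t ev;
	hipEventCreateWithFlags(&ev, hipEventBlockingSync);
	hipMemsetAsync(buf, 'b', 1024, s1);   // work enqueued on stream 1
	hipEventRecord(ev, s1);               // mark the point stream 2 must wait for
	hipStreamWaitEvent(s2, ev, 0);        // stream 2 does not proceed past here until ev fires
	hipMemsetAsync(buf, 'c', 1024, s2);   // therefore runs after the first memset
	hipStreamSynchronize(s2);
	hipEventDestroy(ev);
	hipStreamDestroy(s1);
	hipStreamDestroy(s2);
	hipFree(buf);
}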
| ab0e59e48a356cf69f723b7807b4a62847f68416.cu | /**
* An example program utilizing most/all calls from the CUDA
* Runtime API module:
*
* Stream Management
*
*/
#include "../common.hpp"
using std::printf;
template <typename T, size_t N>
struct poor_mans_array {
T data[N];
__host__ __device__ operator T*() { return data; }
__host__ __device__ operator const T*() const { return data; }
__host__ __device__ T& operator [](off_t offset) { return data[offset]; }
__host__ __device__ const T& operator [](off_t offset) const { return data[offset]; }
};
template <size_t N>
poor_mans_array<char, N> message(const char* message_str)
{
poor_mans_array<char, N> a;
assert(std::strlen(message_str) < N);
std::strcpy(a.data, message_str);
return a;
}
template <size_t N>
poor_mans_array<char, N> message(const std::string& message_str)
{
return message<N>(message_str.c_str());
}
template <size_t N, unsigned Index>
__global__ void print_message(poor_mans_array<char, N> message)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
printf("Kernel no. %u says: %s\n", Index, (const char*) message);
}
}
__host__ __device__ void print_first_char(const char* __restrict__ data)
{
printf("data[0] = '%c' (0x%02x)\n", data[0], (unsigned) data[0]);
}
__global__ void print_first_char_kernel(const char* __restrict__ data)
{
if (threadIdx.x == 0 && blockIdx.x == 0) {
print_first_char(data);
}
}
__global__ void increment(char* data, size_t length)
{
size_t global_index = threadIdx.x + blockIdx.x * blockDim.x;
if (global_index < length)
data[global_index]++;
}
#if CUDA_VERSION >= 11000
const char* get_policy_name(cuda::stream::synchronization_policy_t policy)
{
switch(policy) {
case cuda::stream::automatic: return "automatic";
case cuda::stream::spin: return "spin";
case cuda::stream::yield: return "yield";
case cuda::stream::block: return "block";
default:
return "unknown policy";
}
}
#endif // CUDA_VERSION >= 11000
int main(int argc, char **argv)
{
constexpr const size_t N = 50;
cuda::launch_configuration_t single_thread_config { 1, 1 };
// Being very cavalier about our command-line arguments here...
cuda::device::id_t device_id = (argc > 1) ?
std::stoi(argv[1]) : cuda::device::default_device_id;
if (cuda::device::count() == 0) {
die_("No CUDA devices on this system");
}
auto device = cuda::device::get(device_id).make_current();
std::cout << "Using CUDA device " << device.name() << " (having device ID " << device.id() << ")\n";
// Stream creation and destruction, stream flags
//------------------------------------------------
{
auto stream = cuda::device::current::get().create_stream(cuda::stream::sync);
std::cout
<< "A new CUDA stream with no priority specified defaults to having priority "
<< stream.priority() << ".\n";
stream.enqueue.kernel_launch(print_message<N,1>, single_thread_config, message<N>("I can see my house!"));
stream.synchronize();
}
// Use of the default stream as an rvalue
// ---------------------------------------------
cuda::enqueue_launch(
print_message<N,2>, device.default_stream(), single_thread_config,
message<N>("I was launched on the default stream.")
);
// Everything else - Enqueueing kernel or host-function launches, events
// and memory attachments, recording and waiting on events
//--------------------------------------------------------------
auto stream_1 = cuda::device::current::get().create_stream(
cuda::stream::default_priority + 1,
cuda::stream::no_implicit_synchronization_with_default_stream);
auto stream_2 = cuda::device::current::get().create_stream(
cuda::stream::default_priority + 1,
cuda::stream::no_implicit_synchronization_with_default_stream);
#if CUDA_VERSION >= 11000
// Stream synchronization policy and attribute copying
auto initial_policy = stream_1.synchronization_policy();
std::cout
<< "Initial stream synchronization policy is "
<< get_policy_name(initial_policy) << " (numeric value: " << (int) initial_policy << ")\n";
if (initial_policy != stream_2.synchronization_policy()) {
throw std::logic_error("Different synchronization policies for streams created the same way");
}
cuda::stream::synchronization_policy_t alt_policy =
(initial_policy == cuda::stream::yield) ? cuda::stream::block : cuda::stream::yield;
stream_2.set_synchronization_policy(alt_policy);
auto new_s2_policy = stream_2.synchronization_policy();
if (alt_policy != new_s2_policy) {
std::stringstream ss;
ss
<< "Got a different synchronization policy (" << get_policy_name(new_s2_policy) << ")"
<< " than the one we set the stream to (" << get_policy_name(alt_policy) << ")\n";
throw std::logic_error(ss.str());
}
std::cout << "Overwriting all attributes of stream 1 with those of stream 2.\n";
cuda::copy_attributes(stream_1, stream_2);
auto s1_policy_after_copy = stream_1.synchronization_policy();
if (alt_policy != s1_policy_after_copy) {
std::stringstream ss;
ss
<< "Got a different synchronization policy (" << get_policy_name(s1_policy_after_copy) << ")"
<< " than the one we expected after attribute-copying (" << get_policy_name(alt_policy) << ")\n";
throw std::logic_error(ss.str());
}
#endif
constexpr auto buffer_size = 12345678;
auto buffer = cuda::memory::managed::make_unique<char[]>(
buffer_size,
device.supports_concurrent_managed_access() ?
cuda::memory::managed::initial_visibility_t::to_supporters_of_concurrent_managed_access:
cuda::memory::managed::initial_visibility_t::to_all_devices);
print_first_char(buffer.get());
std::fill(buffer.get(), buffer.get() + buffer_size, 'a');
print_first_char(buffer.get());
auto event_1 = cuda::event::create(device, cuda::event::sync_by_blocking);
stream_1.enqueue.kernel_launch(print_message<N,3>, single_thread_config, message<N>("I'm on stream 1"));
stream_1.enqueue.memset(buffer.get(), 'b', buffer_size);
auto callback = [&]() {
std::cout << "Callback from stream 1!... \n";
print_first_char(buffer.get());
};
stream_1.enqueue.host_invokable(callback);
auto threads_per_block = cuda::kernel::get(device, increment).get_attribute(CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK);
auto num_blocks = div_rounding_up(buffer_size, threads_per_block);
auto launch_config = cuda::make_launch_config(num_blocks, threads_per_block);
// TODO: The following doesn't have much of a meaningful effect; we should modify this example
// so that the attachment has some observable effect
stream_1.enqueue.attach_managed_region(buffer.get());
stream_1.enqueue.kernel_launch(increment, launch_config, buffer.get(), buffer_size);
event_1.record(stream_1);
stream_1.enqueue.kernel_launch(print_message<N,4>, single_thread_config, message<N>("I'm on stream 1"));
stream_2.enqueue.wait(event_1);
stream_2.enqueue.kernel_launch(print_first_char_kernel, launch_config , buffer.get());
stream_2.enqueue.kernel_launch(print_message<N,5>, single_thread_config, message<N>("I'm on stream 2"));
bool idleness_1 = stream_2.has_work_remaining();
device.synchronize();
print_first_char(buffer.get());
// cuda::memory::managed::free(buffer);
bool idleness_2 = stream_2.has_work_remaining();
std::cout << std::boolalpha
<< "Did stream 2 have work before device-level synchronization? " << (idleness_1 ? "yes" : "no") << "\n"
<< "Did stream 2 have work after device-level synchronization? " << (idleness_2 ? "yes" : "no") << "\n";
std::cout << "\nSUCCESS\n";
return EXIT_SUCCESS;
}
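// --------------------------------------------------------------------------
// Illustrative sketch, not part of the original example: the synchronization-
// policy handling performed above through the cuda-api-wrappers calls,
// expressed with the raw CUDA 11 runtime stream-attribute API. The function
// name is made up, the function is never called, and error checking is
// omitted for brevity.
#if CUDA_VERSION >= 11000
static void raw_stream_sync_policy_sketch()
{
	cudaStream_t s1, s2;
	cudaStreamCreateWithFlags(&s1, cudaStreamNonBlocking);
	cudaStreamCreateWithFlags(&s2, cudaStreamNonBlocking);
	cudaStreamAttrValue attr {};
	cudaStreamGetAttribute(s2, cudaStreamAttributeSynchronizationPolicy, &attr);
	// flip the policy, mirroring the alt_policy selection in main()
	attr.syncPolicy = (attr.syncPolicy == cudaSyncPolicyYield)
		? cudaSyncPolicyBlockingSync : cudaSyncPolicyYield;
	cudaStreamSetAttribute(s2, cudaStreamAttributeSynchronizationPolicy, &attr);
	cudaStreamCopyAttributes(s1, s2);    // s1 now carries s2's attributes
	cudaStreamDestroy(s1);
	cudaStreamDestroy(s2);
}
#endif // CUDA_VERSION >= 11000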
|
5fc6a620f4314c35ab2ba6830ac6874f9ceb48d0.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuml/neighbors/knn_mg.hpp>
#include <memory>
#include <random/make_blobs.cuh>
#include "../prims/test_utils.h"
#include "test_opg_utils.h"
#include <common/device_buffer.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <common/cumlHandle.hpp>
#include <cuda_utils.cuh>
namespace ML {
namespace KNN {
namespace opg {
struct KNNParams {
int k;
size_t min_rows;
size_t n_cols;
int n_query_parts;
int n_index_parts;
size_t batch_size;
};
class BruteForceKNNTest : public ::testing::TestWithParam<KNNParams> {
public:
void generate_partition(Matrix::floatData_t *part, size_t n_rows, int n_cols,
int n_clusters, int part_num,
std::shared_ptr<deviceAllocator> allocator,
hipStream_t stream) {
device_buffer<int> labels(allocator, stream, n_rows);
Random::make_blobs<float, int>(part->ptr, labels.data(), (int)n_rows,
(int)n_cols, 5, allocator, stream);
}
bool runTest(const KNNParams &params) {
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
const auto &comm = handle.get_comms();
const auto allocator = handle.get_device_allocator();
hipStream_t stream = handle.get_stream();
int my_rank = comm.get_rank();
int size = comm.get_size();
int index_parts_per_rank = raft::ceildiv(params.n_index_parts, size);
int query_parts_per_rank = raft::ceildiv(params.n_query_parts, size);
std::vector<Matrix::RankSizePair *> idxPartsToRanks;
std::vector<Matrix::RankSizePair *> queryPartsToRanks;
for (int cur_rank = 0; cur_rank < size; cur_rank++) {
int ippr = index_parts_per_rank;
int qppr = query_parts_per_rank;
if (cur_rank == size - 1) {
ippr = params.n_index_parts - (cur_rank * index_parts_per_rank);
qppr = params.n_query_parts - (cur_rank * query_parts_per_rank);
}
std::cout << "Generating " << ippr << " partitions for rank " << cur_rank
<< std::endl;
std::cout << "min_rows: " << params.min_rows << std::endl;
for (int part_n = 0; part_n < ippr; part_n++) {
Matrix::RankSizePair *rsp =
new Matrix::RankSizePair(cur_rank, params.min_rows);
idxPartsToRanks.push_back(rsp);
}
for (int part_n = 0; part_n < qppr; part_n++) {
Matrix::RankSizePair *rsp =
new Matrix::RankSizePair(cur_rank, params.min_rows);
queryPartsToRanks.push_back(rsp);
}
}
std::cout << idxPartsToRanks.size() << std::endl;
if (my_rank == size - 1) {
index_parts_per_rank =
params.n_index_parts - ((size - 1) * index_parts_per_rank);
query_parts_per_rank =
params.n_query_parts - ((size - 1) * query_parts_per_rank);
}
std::cout << "Generating " << index_parts_per_rank
<< " partitions for rank " << my_rank << std::endl;
std::vector<Matrix::floatData_t *> query_parts;
std::vector<Matrix::floatData_t *> out_d_parts;
std::vector<Matrix::Data<int64_t> *> out_i_parts;
for (int i = 0; i < query_parts_per_rank; i++) {
float *q = (float *)allocator.get()->allocate(
params.min_rows * params.n_cols * sizeof(float), stream);
float *o = (float *)allocator.get()->allocate(
params.min_rows * params.k * sizeof(float), stream);
int64_t *ind = (int64_t *)allocator.get()->allocate(
params.min_rows * params.k * sizeof(int64_t), stream);
Matrix::Data<float> *query_d =
new Matrix::Data<float>(q, params.min_rows * params.n_cols);
Matrix::floatData_t *out_d =
new Matrix::floatData_t(o, params.min_rows * params.k);
Matrix::Data<int64_t> *out_i =
new Matrix::Data<int64_t>(ind, params.min_rows * params.k);
query_parts.push_back(query_d);
out_d_parts.push_back(out_d);
out_i_parts.push_back(out_i);
generate_partition(query_d, params.min_rows, params.n_cols, 5, i,
allocator, stream);
}
std::vector<Matrix::floatData_t *> index_parts;
for (int i = 0; i < index_parts_per_rank; i++) {
float *ind = (float *)allocator.get()->allocate(
params.min_rows * params.n_cols * sizeof(float), stream);
Matrix::Data<float> *i_d =
new Matrix::Data<float>(ind, params.min_rows * params.n_cols);
index_parts.push_back(i_d);
generate_partition(i_d, params.min_rows, params.n_cols, 5, i, allocator,
stream);
}
Matrix::PartDescriptor idx_desc(params.min_rows * params.n_index_parts,
params.n_cols, idxPartsToRanks,
comm.get_rank());
Matrix::PartDescriptor query_desc(params.min_rows * params.n_query_parts,
params.n_cols, queryPartsToRanks,
comm.get_rank());
CUDA_CHECK(hipStreamSynchronize(stream));
/**
* Execute brute_force_knn()
*/
brute_force_knn(handle, out_i_parts, out_d_parts, index_parts, idx_desc,
query_parts, query_desc, params.k, params.batch_size, true);
CUDA_CHECK(hipStreamSynchronize(stream));
std::cout << raft::arr2Str(out_i_parts[0]->ptr, 10, "final_out_I", stream)
<< std::endl;
std::cout << raft::arr2Str(out_d_parts[0]->ptr, 10, "final_out_D", stream)
<< std::endl;
/**
* Verify expected results
*/
for (Matrix::floatData_t *fd : query_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float),
stream);
delete fd;
}
for (Matrix::floatData_t *fd : index_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float),
stream);
delete fd;
}
for (Matrix::Data<int64_t> *fd : out_i_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(int64_t),
stream);
delete fd;
}
for (Matrix::floatData_t *fd : out_d_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float),
stream);
delete fd;
}
for (Matrix::RankSizePair *rsp : queryPartsToRanks) {
delete rsp;
}
for (Matrix::RankSizePair *rsp : idxPartsToRanks) {
delete rsp;
}
int actual = 1;
int expected = 1;
return raft::CompareApprox<int>(1)(actual, expected);
}
private:
raft::handle_t handle;
};
const std::vector<KNNParams> inputs = {
{5, 50, 3, 5, 5, 12}, {10, 50, 3, 5, 5, 50}, {5, 50, 3, 5, 5, 50},
{5, 500, 5, 5, 5, 50}, {10, 500, 50, 5, 5, 50}, {15, 500, 5, 5, 5, 50},
{5, 500, 10, 5, 5, 50}, {10, 500, 10, 5, 5, 50}, {15, 500, 10, 5, 5, 50}};
typedef BruteForceKNNTest KNNTest;
TEST_P(KNNTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(BruteForceKNNTest, KNNTest,
::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
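// --------------------------------------------------------------------------
// Illustrative sketch, not part of the original test: the rank-partitioning
// arithmetic used in runTest() above, pulled out into a small helper so the
// intent is easier to see. The helper name is made up and it is never called;
// like the test itself, it assumes total_parts is large enough that the last
// rank's share stays non-negative.
inline int parts_for_rank(int total_parts, int n_ranks, int rank)
{
  // same rounding as raft::ceildiv(total_parts, n_ranks)
  int per_rank = (total_parts + n_ranks - 1) / n_ranks;
  // every rank takes per_rank partitions; the last rank takes whatever is left
  return (rank == n_ranks - 1) ? total_parts - (n_ranks - 1) * per_rank
                               : per_rank;
}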
| 5fc6a620f4314c35ab2ba6830ac6874f9ceb48d0.cu | /*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <cuml/neighbors/knn_mg.hpp>
#include <memory>
#include <random/make_blobs.cuh>
#include "../prims/test_utils.h"
#include "test_opg_utils.h"
#include <common/device_buffer.hpp>
#include <cuml/common/cuml_allocator.hpp>
#include <raft/comms/mpi_comms.hpp>
#include <common/cumlHandle.hpp>
#include <cuda_utils.cuh>
namespace ML {
namespace KNN {
namespace opg {
struct KNNParams {
int k;
size_t min_rows;
size_t n_cols;
int n_query_parts;
int n_index_parts;
size_t batch_size;
};
class BruteForceKNNTest : public ::testing::TestWithParam<KNNParams> {
public:
void generate_partition(Matrix::floatData_t *part, size_t n_rows, int n_cols,
int n_clusters, int part_num,
std::shared_ptr<deviceAllocator> allocator,
cudaStream_t stream) {
device_buffer<int> labels(allocator, stream, n_rows);
Random::make_blobs<float, int>(part->ptr, labels.data(), (int)n_rows,
(int)n_cols, 5, allocator, stream);
}
bool runTest(const KNNParams &params) {
raft::comms::initialize_mpi_comms(&handle, MPI_COMM_WORLD);
const auto &comm = handle.get_comms();
const auto allocator = handle.get_device_allocator();
cudaStream_t stream = handle.get_stream();
int my_rank = comm.get_rank();
int size = comm.get_size();
int index_parts_per_rank = raft::ceildiv(params.n_index_parts, size);
int query_parts_per_rank = raft::ceildiv(params.n_query_parts, size);
std::vector<Matrix::RankSizePair *> idxPartsToRanks;
std::vector<Matrix::RankSizePair *> queryPartsToRanks;
for (int cur_rank = 0; cur_rank < size; cur_rank++) {
int ippr = index_parts_per_rank;
int qppr = query_parts_per_rank;
if (cur_rank == size - 1) {
ippr = params.n_index_parts - (cur_rank * index_parts_per_rank);
qppr = params.n_query_parts - (cur_rank * query_parts_per_rank);
}
std::cout << "Generating " << ippr << " partitions for rank " << cur_rank
<< std::endl;
std::cout << "min_rows: " << params.min_rows << std::endl;
for (int part_n = 0; part_n < ippr; part_n++) {
Matrix::RankSizePair *rsp =
new Matrix::RankSizePair(cur_rank, params.min_rows);
idxPartsToRanks.push_back(rsp);
}
for (int part_n = 0; part_n < qppr; part_n++) {
Matrix::RankSizePair *rsp =
new Matrix::RankSizePair(cur_rank, params.min_rows);
queryPartsToRanks.push_back(rsp);
}
}
std::cout << idxPartsToRanks.size() << std::endl;
if (my_rank == size - 1) {
index_parts_per_rank =
params.n_index_parts - ((size - 1) * index_parts_per_rank);
query_parts_per_rank =
params.n_query_parts - ((size - 1) * query_parts_per_rank);
}
std::cout << "Generating " << index_parts_per_rank
<< " partitions for rank " << my_rank << std::endl;
std::vector<Matrix::floatData_t *> query_parts;
std::vector<Matrix::floatData_t *> out_d_parts;
std::vector<Matrix::Data<int64_t> *> out_i_parts;
for (int i = 0; i < query_parts_per_rank; i++) {
float *q = (float *)allocator.get()->allocate(
params.min_rows * params.n_cols * sizeof(float), stream);
float *o = (float *)allocator.get()->allocate(
params.min_rows * params.k * sizeof(float), stream);
int64_t *ind = (int64_t *)allocator.get()->allocate(
params.min_rows * params.k * sizeof(int64_t), stream);
Matrix::Data<float> *query_d =
new Matrix::Data<float>(q, params.min_rows * params.n_cols);
Matrix::floatData_t *out_d =
new Matrix::floatData_t(o, params.min_rows * params.k);
Matrix::Data<int64_t> *out_i =
new Matrix::Data<int64_t>(ind, params.min_rows * params.k);
query_parts.push_back(query_d);
out_d_parts.push_back(out_d);
out_i_parts.push_back(out_i);
generate_partition(query_d, params.min_rows, params.n_cols, 5, i,
allocator, stream);
}
std::vector<Matrix::floatData_t *> index_parts;
for (int i = 0; i < index_parts_per_rank; i++) {
float *ind = (float *)allocator.get()->allocate(
params.min_rows * params.n_cols * sizeof(float), stream);
Matrix::Data<float> *i_d =
new Matrix::Data<float>(ind, params.min_rows * params.n_cols);
index_parts.push_back(i_d);
generate_partition(i_d, params.min_rows, params.n_cols, 5, i, allocator,
stream);
}
Matrix::PartDescriptor idx_desc(params.min_rows * params.n_index_parts,
params.n_cols, idxPartsToRanks,
comm.get_rank());
Matrix::PartDescriptor query_desc(params.min_rows * params.n_query_parts,
params.n_cols, queryPartsToRanks,
comm.get_rank());
CUDA_CHECK(cudaStreamSynchronize(stream));
/**
* Execute brute_force_knn()
*/
brute_force_knn(handle, out_i_parts, out_d_parts, index_parts, idx_desc,
query_parts, query_desc, params.k, params.batch_size, true);
CUDA_CHECK(cudaStreamSynchronize(stream));
std::cout << raft::arr2Str(out_i_parts[0]->ptr, 10, "final_out_I", stream)
<< std::endl;
std::cout << raft::arr2Str(out_d_parts[0]->ptr, 10, "final_out_D", stream)
<< std::endl;
/**
* Verify expected results
*/
for (Matrix::floatData_t *fd : query_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float),
stream);
delete fd;
}
for (Matrix::floatData_t *fd : index_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float),
stream);
delete fd;
}
for (Matrix::Data<int64_t> *fd : out_i_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(int64_t),
stream);
delete fd;
}
for (Matrix::floatData_t *fd : out_d_parts) {
allocator.get()->deallocate(fd->ptr, fd->totalSize * sizeof(float),
stream);
delete fd;
}
for (Matrix::RankSizePair *rsp : queryPartsToRanks) {
delete rsp;
}
for (Matrix::RankSizePair *rsp : idxPartsToRanks) {
delete rsp;
}
int actual = 1;
int expected = 1;
return raft::CompareApprox<int>(1)(actual, expected);
}
private:
raft::handle_t handle;
};
const std::vector<KNNParams> inputs = {
{5, 50, 3, 5, 5, 12}, {10, 50, 3, 5, 5, 50}, {5, 50, 3, 5, 5, 50},
{5, 500, 5, 5, 5, 50}, {10, 500, 50, 5, 5, 50}, {15, 500, 5, 5, 5, 50},
{5, 500, 10, 5, 5, 50}, {10, 500, 10, 5, 5, 50}, {15, 500, 10, 5, 5, 50}};
typedef BruteForceKNNTest KNNTest;
TEST_P(KNNTest, Result) { ASSERT_TRUE(runTest(GetParam())); }
INSTANTIATE_TEST_CASE_P(BruteForceKNNTest, KNNTest,
::testing::ValuesIn(inputs));
} // namespace opg
} // namespace KNN
} // namespace ML
|
c17e6f2438f6ec38e00d11cb2e267e12f63616f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<math.h>
// #include<omp.h>
#define SIZE 1024
__global__ void min(int * A, int * C)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
A[2*i]<A[2*i+1]?C[i]=A[2*i]:C[i]=A[2*i+1];
}
int main()
{
int A[SIZE];
int *devA,*devC;
//double start,end;
for(int j=0;j<SIZE;j++)
{
A[j]=SIZE-j;
}
hipMalloc((void **)&devA,SIZE*sizeof(int));
hipMalloc((void **)&devC,SIZE*sizeof(int));
//start=omp_get_wtime();
//printf("\nStart time:%f",start);
for(int j=1;j<log2((double)SIZE);j++)
{
hipMemcpy(devA,A,SIZE*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( min), dim3(1),dim3(SIZE/pow(2,j)), 0, 0, devA,devC);
hipMemcpy(&A,devC,SIZE*sizeof(int),hipMemcpyDeviceToHost);
}
//end=omp_get_wtime();
//printf("\nEnd time:%f",end);
//printf("\nTotal time:%f\n",end-start);
A[0]<A[1]?printf("\nMin is:%d\n",A[0]):printf("\nMin is:%d\n",A[1]);
hipFree(devA);
hipFree(devC);
return 0;
}
| c17e6f2438f6ec38e00d11cb2e267e12f63616f1.cu | #include<stdio.h>
#include<math.h>
// #include<omp.h>
#define SIZE 1024
__global__ void min(int * A, int * C)
{
int i=blockIdx.x*blockDim.x+threadIdx.x;
A[2*i]<A[2*i+1]?C[i]=A[2*i]:C[i]=A[2*i+1];
}
int main()
{
int A[SIZE];
int *devA,*devC;
//double start,end;
for(int j=0;j<SIZE;j++)
{
A[j]=SIZE-j;
}
cudaMalloc((void **)&devA,SIZE*sizeof(int));
cudaMalloc((void **)&devC,SIZE*sizeof(int));
//start=omp_get_wtime();
//printf("\nStart time:%f",start);
for(int j=1;j<log2((double)SIZE);j++)
{
cudaMemcpy(devA,A,SIZE*sizeof(int),cudaMemcpyHostToDevice);
min<<<1,SIZE/pow(2,j)>>>(devA,devC);
cudaMemcpy(&A,devC,SIZE*sizeof(int),cudaMemcpyDeviceToHost);
}
//end=omp_get_wtime();
//printf("\nEnd time:%f",end);
//printf("\nTotal time:%f\n",end-start);
A[0]<A[1]?printf("\nMin is:%d\n",A[0]):printf("\nMin is:%d\n",A[1]);
cudaFree(devA);
cudaFree(devC);
return 0;
}
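// --------------------------------------------------------------------------
// Illustrative sketch, not part of the original program: a host-side reference
// minimum that could be computed before the device reduction loop (which
// overwrites A) to cross-check the GPU result. The helper name is made up and
// it is never called here.
static int cpu_min(const int *a, int n)
{
	int m = a[0];
	for (int i = 1; i < n; i++)
		if (a[i] < m)
			m = a[i];
	return m;
}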
|
d1e746652aab78dc11aa0a67935c87f4dffd8dab.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
using namespace std;
int main(int argc, const char *argv[]) {
string N;
if (argc > 1) {
N = string(argv[1]);
}
unsigned int n = atoi(N.c_str());
thrust::host_vector<float> H(n);
for (unsigned int i = 0; i < n; i++) {
H[i] = 1;
}
thrust::device_vector<float> D = H;
thrust::device_vector<float> res(n);
hipEvent_t start;
hipEvent_t stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
thrust::exclusive_scan(D.begin(), D.end(), res.begin());
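  // With every input element equal to 1, an exclusive scan yields 0,1,2,...,n-1
  // (e.g. n = 4: {1,1,1,1} -> {0,1,2,3}), so the res[n-1] printed below is n-1.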
hipEventRecord(stop);
hipEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
hipEventElapsedTime(&ms, start, stop);
cout << res[n - 1] << endl;
cout << ms << endl;
return 0;
} | d1e746652aab78dc11aa0a67935c87f4dffd8dab.cu | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
using namespace std;
int main(int argc, const char *argv[]) {
string N;
if (argc > 1) {
N = string(argv[1]);
}
unsigned int n = atoi(N.c_str());
thrust::host_vector<float> H(n);
for (unsigned int i = 0; i < n; i++) {
H[i] = 1;
}
thrust::device_vector<float> D = H;
thrust::device_vector<float> res(n);
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
thrust::exclusive_scan(D.begin(), D.end(), res.begin());
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// Get the elapsed time in milliseconds
float ms;
cudaEventElapsedTime(&ms, start, stop);
cout << res[n - 1] << endl;
cout << ms << endl;
return 0;
} |
9c37d427062e3e8073a9e553aea0c2a421b0a790.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_s
__global__
void magma_slarfg_gpu_kernel( int n, float* dx0, float* dx,
float *dtau, float *dxnorm, float* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ float scale;
float xnorm;
float dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_S_ZERO;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
float alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_S_REAL(alpha), alphai = MAGMA_S_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_S_MAKE(beta, 0.);
alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha));
scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_S_MUL(dxi, scale);
} else
*dtau = MAGMA_S_ZERO;
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's slarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
extern "C" magma_int_t
magma_slarfg_gpu( magma_int_t n, float *dx0, float *dx,
float *dtau, float *dxnorm, float *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_snrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_snrm2_cols(n-1, 1, dx0+1, n, dxnorm);
hipLaunchKernelGGL(( magma_slarfg_gpu_kernel), dim3(blocks), dim3(threads),
0, magma_stream , n, dx0, dx, dtau, dxnorm, dAkk);
return MAGMA_SUCCESS;
}
| 9c37d427062e3e8073a9e553aea0c2a421b0a790.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
#define PRECISION_s
__global__
void magma_slarfg_gpu_kernel( int n, float* dx0, float* dx,
float *dtau, float *dxnorm, float* dAkk)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
__shared__ float scale;
float xnorm;
float dxi;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if( n <= 1 ) {
#else
if( n <= 0 ) {
#endif
*dtau = MAGMA_S_ZERO;
return;
}
if ( j < n-1)
dxi = dx[j];
xnorm = *dxnorm;
float alpha = *dx0;
#if (defined(PRECISION_s) || defined(PRECISION_d))
if ( xnorm != 0 ) {
if (i == 0) {
float beta = sqrt( alpha*alpha + xnorm*xnorm );
beta = -copysign( beta, alpha );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = (beta - alpha) / beta;
*dAkk = beta;
scale = 1. / (alpha - beta);
}
#else
float alphar = MAGMA_S_REAL(alpha), alphai = MAGMA_S_IMAG(alpha);
if ( xnorm != 0 || alphai != 0) {
if (i == 0) {
float beta = sqrt( alphar*alphar + alphai*alphai + xnorm*xnorm );
beta = -copysign( beta, alphar );
// todo: deal with badly scaled vectors (see lapack's larfg)
*dtau = MAGMA_S_MAKE((beta - alphar)/beta, -alphai/beta);
*dAkk = MAGMA_S_MAKE(beta, 0.);
alpha = MAGMA_S_MAKE( MAGMA_S_REAL(alpha) - beta, MAGMA_S_IMAG(alpha));
scale = MAGMA_S_DIV( MAGMA_S_ONE, alpha);
}
#endif
// scale x
__syncthreads();
if ( xnorm != 0 && j < n-1)
dx[j] = MAGMA_S_MUL(dxi, scale);
} else
*dtau = MAGMA_S_ZERO;
}
/*
Generates Householder elementary reflector H = I - tau v v^T to reduce
H [ dx0 ] = [ beta ]
[ dx ] [ 0 ]
with beta = ±norm( [dx0, dx] ) = ±dxnorm[0].
Stores v over dx; first element of v is 1 and is not stored.
Stores beta over dx0.
Stores tau.
The difference with LAPACK's slarfg is that the norm of dx, and hence beta,
are computed outside the routine and passed to it in dxnorm (array on the GPU).
*/
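/*
   Illustrative sketch, not part of MAGMA: the scalar arithmetic performed by
   the kernel above for the real single-precision path, collected in one host
   helper. The helper name is made up and it is never called; it assumes
   <math.h> is available through common_magma.h.
*/
static inline void slarfg_scalars_sketch(float alpha, float xnorm,
                                         float *beta, float *tau, float *scale)
{
    float b = sqrtf( alpha*alpha + xnorm*xnorm );
    b = -copysignf( b, alpha );     // beta takes the opposite sign of alpha
    *beta  = b;
    *tau   = (b - alpha) / b;
    *scale = 1.f / (alpha - b);     // remaining entries of x are scaled by this
}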
extern "C" magma_int_t
magma_slarfg_gpu( magma_int_t n, float *dx0, float *dx,
float *dtau, float *dxnorm, float *dAkk)
{
dim3 blocks((n+BLOCK_SIZE-1) / BLOCK_SIZE);
dim3 threads( BLOCK_SIZE );
/* recomputing the norm */
//magmablas_snrm2_cols(n, 1, dx0, n, dxnorm);
magmablas_snrm2_cols(n-1, 1, dx0+1, n, dxnorm);
magma_slarfg_gpu_kernel<<< blocks, threads,
0, magma_stream >>>(n, dx0, dx, dtau, dxnorm, dAkk);
return MAGMA_SUCCESS;
}
|
8c6524f4655228d6ae1ae2da51207bf8cd9159ba.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
/*****************************************************************************
GPU main computation kernels
*****************************************************************************/
__global__ void gpu_datatest_kernel(double *data,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
double *in;
for (int iter=0; iter<num_iter; iter++)
{
in = data + (iter%dataSize)*inSize;
for (int i = 0; i < inSize; i++) {
printf("val: %f", in[i]);
}
printf("\n");
}
}
__global__ void gpu_naive_kernel(double *data,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
double *in = data;
int i, k, iter;
for (iter=0; iter<num_iter; iter++)
{
in = data + (iter%dataSize)*inSize;
int idx = threadIdx.x + blockDim.x * blockIdx.x;
float sum;
// update output values for each neuron
// assign content to input layer
if (idx < lsize[0])
{
// output_from_neuron(i,j) Jth neuron in Ith Layer
out[idx]=in[idx];
}
__syncthreads();
// assign output(activation) value
// to each neuron using sigmoid func
for (i=1;i<numl;i++)
{
if (idx < lsize[i])
{
sum=0.0;
for (k=0;k<lsize[i-1];k++)
{
sum += out[rowptr_od[i-1]+k]* weight[rowptr_w[i]+(idx*(lsize[i-1]+1))+k];
}
sum += weight[rowptr_w[i]+(lsize[i-1]+1)*idx + lsize[i-1]];
out[rowptr_od[i]+idx]=(double)(1/(1+exp(-sum)));
}
}
__syncthreads();
// find delta for output layer
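// output-layer delta: out*(1-out)*(target-out); the out*(1-out) factor is the
// sigmoid derivative s'(x) = s(x)*(1-s(x)), and the target is stored just past
// the inputs, at in[lsize[0]]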
if (idx == 0)
{
for (i=0;i<lsize[numl-1];i++)
{
delta[rowptr_od[numl-1]+i]=out[rowptr_od[numl-1]+i]*
(1-out[rowptr_od[numl-1]+i])*(in[lsize[0]]-out[rowptr_od[numl-1]+i]);
}
}
__syncthreads();
// find delta for hidden layers
for (i=numl-2;i>0;i--)
{
if (idx<lsize[i])
{
sum=0.0;
for (k=0;k<lsize[i+1];k++)
{
sum += delta[rowptr_od[i+1]+k]*weight[rowptr_w[i+1]+k*(lsize[i]+1)+idx];
}
delta[rowptr_od[i]+idx]=out[rowptr_od[i]+idx]*(1-out[rowptr_od[i]+idx])*sum;
__syncthreads();
}
}
__syncthreads();
// apply momentum ( does nothing if alpha=0 )
for (i=1;i<numl;i++)
{
if (idx<lsize[i])
{
for (k=0;k<lsize[i-1];k++)
{
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+k]+=alpha*prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+k];
}
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]]+=alpha*prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]];
}
__syncthreads();
}
__syncthreads();
// adjust weights using steepest descent
for (i=1;i<numl;i++)
{
if (idx<lsize[i])
{
for (k=0;k<lsize[i-1];k++)
{
prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+k]=beta*delta[rowptr_od[i]+idx]*out[rowptr_od[i-1]+k];
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+k]+=prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+k];
}
prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]]=beta*delta[rowptr_od[i]+idx];
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]]+=prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]];
}
}
}
}
/*****************************************************************************
Main computation functions
*****************************************************************************/
void gpu_datatest(double *in,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
printf("c'mon");
const unsigned int numThreadsPerBlock = 1;
const unsigned int numBlocks = 1;
hipLaunchKernelGGL(( gpu_datatest_kernel) , dim3(numBlocks) , dim3(numThreadsPerBlock) , 0, 0,
in,out,delta,rowptr_od,weight,numl,lsize,beta,alpha,
prevDwt,rowptr_w,num_iter, inSize, dataSize);
}
void gpu_naive_bpgt(double *in,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
const unsigned int numThreadsPerBlock = 512;
const unsigned int numBlocks = (128 - 1)/numThreadsPerBlock + 1;
hipLaunchKernelGGL(( gpu_naive_kernel) , dim3(numBlocks) , dim3(numThreadsPerBlock) , 0, 0,
in,out,delta,rowptr_od,weight,numl,lsize,beta,alpha,
prevDwt,rowptr_w,num_iter,inSize, dataSize);
}
void cpu_bpgt(double *in,double *tgt,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w)
{
double sum;
int i,j,k;
for (i=0;i<lsize[0];i++)
{
out[rowptr_od[0]+i]=in[i];
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
sum=0.0;
for (k=0;k<lsize[i-1];k++)
{
sum+= out[rowptr_od[i-1]+k]*weight[rowptr_w[i] + (lsize[i-1]+1)*j+k];
}
sum+=weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
out[rowptr_od[i]+j]=(double)(1/(1+exp(-sum)));
}
}
for (i=0;i<lsize[(numl)-1];i++)
{
delta[rowptr_od[(numl)-1]+i]=out[rowptr_od[(numl)-1]+i]*
(1-out[rowptr_od[(numl)-1]+i])*(tgt[i]-out[rowptr_od[(numl)-1]+i]);
}
for (i=numl-2;i>0;i--)
{
for (j=0;j<lsize[i];j++)
{
sum=0.0;
for (k=0;k<lsize[i+1];k++)
{
sum+=delta[rowptr_od[i+1]+k]*weight[rowptr_w[i+1]+(lsize[i]+1)*k+j];
}
delta[rowptr_od[i]+j]=out[rowptr_od[i]+j]*(1-out[rowptr_od[i]+j])*sum;
}
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
for (k=0;k<lsize[i-1];k++)
{
weight[rowptr_w[i] + (lsize[i-1]+1)*j+k]+=(alpha)*prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+k];
}
weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]]+=(alpha)*prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
}
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
for (k=0;k<lsize[i-1];k++)
{
prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+k]=(beta)*delta[rowptr_od[i]+j]*out[rowptr_od[i-1]+k];
weight[rowptr_w[i] + (lsize[i-1]+1)*j+k]+=prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+k];
}
prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]]=(beta)*delta[rowptr_od[i]+j];
weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]]+=prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
}
}
}
void ffwd(double *in,
double *out,
double *weight,
int numl,
int *lsize,
int *rowptr_od,
int *rowptr_w)
{
double sum;
int i,j,k;
for (i=0;i<lsize[0];i++)
{
out[rowptr_od[0]+i]=in[i];
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
sum=0.0;
for (k=0;k<lsize[i-1];k++)
{
sum+= out[rowptr_od[i-1]+k]*weight[rowptr_w[i]
+ (lsize[i-1]+1)*j+k];
}
sum+=weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
out[rowptr_od[i]+j]=(double)(1/(1+exp(-sum)));
}
}
}
| 8c6524f4655228d6ae1ae2da51207bf8cd9159ba.cu | #include <math.h>
/*****************************************************************************
GPU main computation kernels
*****************************************************************************/
__global__ void gpu_datatest_kernel(double *data,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
double *in;
for (int iter=0; iter<num_iter; iter++)
{
in = data + (iter%dataSize)*inSize;
for (int i = 0; i < inSize; i++) {
printf("val: %f", in[i]);
}
printf("\n");
}
}
__global__ void gpu_naive_kernel(double *data,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
double *in = data;
int i, k, iter;
for (iter=0; iter<num_iter; iter++)
{
in = data + (iter%dataSize)*inSize;
int idx = threadIdx.x + blockDim.x * blockIdx.x;
float sum;
// update output values for each neuron
// assign content to input layer
if (idx < lsize[0])
{
// output_from_neuron(i,j) Jth neuron in Ith Layer
out[idx]=in[idx];
}
__syncthreads();
// assign output(activation) value
// to each neuron using sigmoid func
for (i=1;i<numl;i++)
{
if (idx < lsize[i])
{
sum=0.0;
for (k=0;k<lsize[i-1];k++)
{
sum += out[rowptr_od[i-1]+k]* weight[rowptr_w[i]+(idx*(lsize[i-1]+1))+k];
}
sum += weight[rowptr_w[i]+(lsize[i-1]+1)*idx + lsize[i-1]];
out[rowptr_od[i]+idx]=(double)(1/(1+exp(-sum)));
}
}
__syncthreads();
// find delta for output layer
if (idx == 0)
{
for (i=0;i<lsize[numl-1];i++)
{
delta[rowptr_od[numl-1]+i]=out[rowptr_od[numl-1]+i]*
(1-out[rowptr_od[numl-1]+i])*(in[lsize[0]]-out[rowptr_od[numl-1]+i]);
}
}
__syncthreads();
// find delta for hidden layers
for (i=numl-2;i>0;i--)
{
if (idx<lsize[i])
{
sum=0.0;
for (k=0;k<lsize[i+1];k++)
{
sum += delta[rowptr_od[i+1]+k]*weight[rowptr_w[i+1]+k*(lsize[i]+1)+idx];
}
delta[rowptr_od[i]+idx]=out[rowptr_od[i]+idx]*(1-out[rowptr_od[i]+idx])*sum;
__syncthreads();
}
}
__syncthreads();
// apply momentum ( does nothing if alpha=0 )
for (i=1;i<numl;i++)
{
if (idx<lsize[i])
{
for (k=0;k<lsize[i-1];k++)
{
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+k]+=alpha*prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+k];
}
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]]+=alpha*prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]];
}
__syncthreads();
}
__syncthreads();
// adjust weights using steepest descent
for (i=1;i<numl;i++)
{
if (idx<lsize[i])
{
for (k=0;k<lsize[i-1];k++)
{
prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+k]=beta*delta[rowptr_od[i]+idx]*out[rowptr_od[i-1]+k];
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+k]+=prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+k];
}
prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]]=beta*delta[rowptr_od[i]+idx];
weight[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]]+=prevDwt[rowptr_w[i]+idx*(lsize[i-1]+1)+lsize[i-1]];
}
}
}
}
/*****************************************************************************
Main computation functions
*****************************************************************************/
void gpu_datatest(double *in,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
printf("c'mon");
const unsigned int numThreadsPerBlock = 1;
const unsigned int numBlocks = 1;
gpu_datatest_kernel <<< numBlocks , numThreadsPerBlock >>>
(in,out,delta,rowptr_od,weight,numl,lsize,beta,alpha,
prevDwt,rowptr_w,num_iter, inSize, dataSize);
}
void gpu_naive_bpgt(double *in,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w,
int num_iter,
int inSize,
int dataSize) {
const unsigned int numThreadsPerBlock = 512;
const unsigned int numBlocks = (128 - 1)/numThreadsPerBlock + 1;
gpu_naive_kernel <<< numBlocks , numThreadsPerBlock >>>
(in,out,delta,rowptr_od,weight,numl,lsize,beta,alpha,
prevDwt,rowptr_w,num_iter,inSize, dataSize);
}
void cpu_bpgt(double *in,double *tgt,
double *out,
double *delta,
int *rowptr_od,
double *weight,
int numl,
int *lsize,
double beta,
double alpha,
double *prevDwt,
int *rowptr_w)
{
double sum;
int i,j,k;
for (i=0;i<lsize[0];i++)
{
out[rowptr_od[0]+i]=in[i];
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
sum=0.0;
for (k=0;k<lsize[i-1];k++)
{
sum+= out[rowptr_od[i-1]+k]*weight[rowptr_w[i] + (lsize[i-1]+1)*j+k];
}
sum+=weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
out[rowptr_od[i]+j]=(double)(1/(1+exp(-sum)));
}
}
for (i=0;i<lsize[(numl)-1];i++)
{
delta[rowptr_od[(numl)-1]+i]=out[rowptr_od[(numl)-1]+i]*
(1-out[rowptr_od[(numl)-1]+i])*(tgt[i]-out[rowptr_od[(numl)-1]+i]);
}
for (i=numl-2;i>0;i--)
{
for (j=0;j<lsize[i];j++)
{
sum=0.0;
for (k=0;k<lsize[i+1];k++)
{
sum+=delta[rowptr_od[i+1]+k]*weight[rowptr_w[i+1]+(lsize[i]+1)*k+j];
}
delta[rowptr_od[i]+j]=out[rowptr_od[i]+j]*(1-out[rowptr_od[i]+j])*sum;
}
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
for (k=0;k<lsize[i-1];k++)
{
weight[rowptr_w[i] + (lsize[i-1]+1)*j+k]+=(alpha)*prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+k];
}
weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]]+=(alpha)*prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
}
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
for (k=0;k<lsize[i-1];k++)
{
prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+k]=(beta)*delta[rowptr_od[i]+j]*out[rowptr_od[i-1]+k];
weight[rowptr_w[i] + (lsize[i-1]+1)*j+k]+=prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+k];
}
prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]]=(beta)*delta[rowptr_od[i]+j];
weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]]+=prevDwt[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
}
}
}
void ffwd(double *in,
double *out,
double *weight,
int numl,
int *lsize,
int *rowptr_od,
int *rowptr_w)
{
double sum;
int i,j,k;
for (i=0;i<lsize[0];i++)
{
out[rowptr_od[0]+i]=in[i];
}
for (i=1;i<numl;i++)
{
for (j=0;j<lsize[i];j++)
{
sum=0.0;
for (k=0;k<lsize[i-1];k++)
{
sum+= out[rowptr_od[i-1]+k]*weight[rowptr_w[i]
+ (lsize[i-1]+1)*j+k];
}
sum+=weight[rowptr_w[i] + (lsize[i-1]+1)*j+lsize[i-1]];
out[rowptr_od[i]+j]=(double)(1/(1+exp(-sum)));
}
}
}
|
5e0f56f4af6710723b336f6fc526ab65467b1aa2.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) // We can use defines provided in this project
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
int* dev_buf1;
int* dev_buf2;
int* dev_bufLoader;
#define blockSize 512
__global__ void performScan(int d, int* buf1, int* buf2, int N)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index > N - 1)
{
return;
}
//int pow2_dminus1 = std::round(pow(2, d - 1));
int pow2_dminus1 = 1 <<(d - 1);
if (index >= pow2_dminus1)
{
buf2[index] = buf1[index - pow2_dminus1] + buf1[index];
}
else
{
buf2[index] = buf1[index];
}
}
__global__ void ShiftRight(int* buf1, int* buf2, int N, int difference)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index > N - 1)
{
return;
}
if (index == 0)
{
buf2[index] = 0;
return;
}
buf2[index] = buf1[index - 1];
}
void FreeMemory() {
hipFree(dev_buf1);
hipFree(dev_buf2);
hipFree(dev_bufLoader);
}
void AllocateMemory(int n)
{
hipMalloc((void**)&dev_buf1, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_buf1 failed!");
hipMalloc((void**)&dev_buf2, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_buf2 failed!");
hipMalloc((void**)&dev_bufLoader, n * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_bufLoader failed!");
hipDeviceSynchronize();
}
__global__ void RightShiftAddZeros(int* buf, int* buf_loader, int N, int difference)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index > N - 1)
{
return;
}
if (index > (N-1) - difference)
{
buf[index] = 0;
return;
}
buf[index] = buf_loader[index];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int* odata, const int* idata) {
timer().startGpuTimer();
// TODO
int power2 = 1;
int nearesttwo = 1 << ilog2ceil(n);
int difference = nearesttwo - n;
int finalMemSize = nearesttwo;
AllocateMemory(finalMemSize);
dim3 fullBlocksPerGrid((finalMemSize + blockSize - 1) / blockSize);
dim3 threadsperblockSize(blockSize);
hipMemcpy(dev_bufLoader, idata, sizeof(int) * n, hipMemcpyHostToDevice);
RightShiftAddZeros << < fullBlocksPerGrid, threadsperblockSize >> > (dev_buf1, dev_bufLoader, finalMemSize, difference);
int d = ilog2(finalMemSize);
for (int i = 1; i <= d; i++)
{
performScan << < fullBlocksPerGrid, threadsperblockSize >> > (i, dev_buf1, dev_buf2, finalMemSize);
//hipDeviceSynchronize();
std::swap(dev_buf1, dev_buf2);
}
ShiftRight << < fullBlocksPerGrid, blockSize >> > (dev_buf1, dev_buf2, finalMemSize, difference);
hipMemcpy(odata, dev_buf2, sizeof(int) * n, hipMemcpyDeviceToHost);
/*printf(" \n Array After:");*/
/*for (int i = 0; i < finalMemSize; i++)
{
printf("%3d ", arr_z[i]);
}*/
/* printf("]\n");
for (int i = 0; i < n; i++)
{
printf("%3d ", odata[i]);
}*/
timer().endGpuTimer();
hipDeviceSynchronize();
FreeMemory();
}
}
}
| 5e0f56f4af6710723b336f6fc526ab65467b1aa2.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
namespace StreamCompaction {
namespace Naive {
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__) // We can use defines provided in this project
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
int* dev_buf1;
int* dev_buf2;
int* dev_bufLoader;
#define blockSize 512
__global__ void performScan(int d, int* buf1, int* buf2, int N)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index > N - 1)
{
return;
}
//int pow2_dminus1 = std::round(pow(2, d - 1));
int pow2_dminus1 = 1 <<(d - 1);
if (index >= pow2_dminus1)
{
buf2[index] = buf1[index - pow2_dminus1] + buf1[index];
}
else
{
buf2[index] = buf1[index];
}
}
__global__ void ShiftRight(int* buf1, int* buf2, int N, int difference)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index > N - 1)
{
return;
}
if (index == 0)
{
buf2[index] = 0;
return;
}
buf2[index] = buf1[index - 1];
}
void FreeMemory() {
cudaFree(dev_buf1);
cudaFree(dev_buf2);
cudaFree(dev_bufLoader);
}
void AllocateMemory(int n)
{
cudaMalloc((void**)&dev_buf1, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_buf1 failed!");
cudaMalloc((void**)&dev_buf2, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_buf2 failed!");
cudaMalloc((void**)&dev_bufLoader, n * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_bufLoader failed!");
cudaDeviceSynchronize();
}
__global__ void RightShiftAddZeros(int* buf, int* buf_loader, int N, int difference)
{
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index > N - 1)
{
return;
}
if (index > (N-1) - difference)
{
buf[index] = 0;
return;
}
buf[index] = buf_loader[index];
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int* odata, const int* idata) {
timer().startGpuTimer();
// TODO
int power2 = 1;
int nearesttwo = 1 << ilog2ceil(n);
int difference = nearesttwo - n;
int finalMemSize = nearesttwo;
AllocateMemory(finalMemSize);
dim3 fullBlocksPerGrid((finalMemSize + blockSize - 1) / blockSize);
dim3 threadsperblockSize(blockSize);
cudaMemcpy(dev_bufLoader, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
RightShiftAddZeros << < fullBlocksPerGrid, threadsperblockSize >> > (dev_buf1, dev_bufLoader, finalMemSize, difference);
int d = ilog2(finalMemSize);
for (int i = 1; i <= d; i++)
{
performScan << < fullBlocksPerGrid, threadsperblockSize >> > (i, dev_buf1, dev_buf2, finalMemSize);
//cudaDeviceSynchronize();
std::swap(dev_buf1, dev_buf2);
}
ShiftRight << < fullBlocksPerGrid, blockSize >> > (dev_buf1, dev_buf2, finalMemSize, difference);
cudaMemcpy(odata, dev_buf2, sizeof(int) * n, cudaMemcpyDeviceToHost);
/*printf(" \n Array After:");*/
/*for (int i = 0; i < finalMemSize; i++)
{
printf("%3d ", arr_z[i]);
}*/
/* printf("]\n");
for (int i = 0; i < n; i++)
{
printf("%3d ", odata[i]);
}*/
timer().endGpuTimer();
cudaDeviceSynchronize();
FreeMemory();
}
}
}
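// --------------------------------------------------------------------------
// Illustrative sketch, not part of the original: a serial exclusive scan that
// states the contract scan() above implements -- odata[i] is the sum of
// idata[0..i-1], with odata[0] == 0. The helper name is made up and it is
// never called here.
inline void cpu_exclusive_scan(int n, int *odata, const int *idata)
{
    int running = 0;
    for (int i = 0; i < n; i++) {
        odata[i] = running;   // sum of everything strictly before element i
        running += idata[i];
    }
}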
|
4b09cd0027395f8b56de257a526429de5b8f5c1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_data_structure.hpp"
const int max_cells_per_cuda_block = 1024;
//GPU supports 2^n*65535*65535 blocks of 1024 threads each.
//it is assumed that one yz plane of each block cannot exceed 1024 cells;
//that is, mesh_sizes cannot be > 32.
void set_GPU_dimensions(physics_mesh * host_struct, dim3 &blocks, dim3 &threads, int depth){
//number of refined blocks actually present in mesh on this level
int num_physical_blocks = (*host_struct).blocks_on_level(depth);
//physical blocks on this level are all the same size,
int physical_block_width = (*host_struct).mesh_sizes[depth]-2; //ghosts!
int sub_blocks = ceil(((float)cube(physical_block_width))/((float)max_cells_per_cuda_block));
while(physical_block_width % sub_blocks != 0){
//if the block can't be divided evenly in Z
sub_blocks++;
//will get to physical_block_width at most.
}
blocks.PHYSICAL_BLOCKS = num_physical_blocks;
blocks.SUB_BLOCKS = sub_blocks;
threads.x = physical_block_width;
threads.y = physical_block_width;
threads.z = physical_block_width/sub_blocks;
}
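// Worked example with illustrative numbers (not taken from a real mesh):
// mesh_sizes[depth] = 18 gives physical_block_width = 16, 16^3 = 4096 interior
// cells, ceil(4096/1024) = 4 sub-blocks, and 16 % 4 == 0 already, so each GPU
// block is (16,16,4) = 1024 threads with gridDim.SUB_BLOCKS = 4.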
//+1 to ignore ghosts
// a simple if(ghost_linkages[cell]) over all cells might end up being faster.
__global__ void device_copy_ghost_values_kernel(physics_mesh &device_struct, float ** values, int depth) {
int direction = threadIdx.x;
int this_block = device_struct.block_indices[device_struct.block_depth_lookup[depth]+blockIdx.PHYSICAL_BLOCKS];
int x = device_struct.mesh_sizes[depth]-1;
int y = (threadIdx.y)+1;
int z = ((((device_struct.mesh_sizes[depth]-2)/gridDim.SUB_BLOCKS)*blockIdx.SUB_BLOCKS)+threadIdx.z)+1;
int this_cell = this_block + transform_idx(x,y,z, device_struct.mesh_sizes[depth], direction);
if(device_struct.ghost_linkages[this_cell]){
int value_from_indice = device_struct.ghost_linkages[this_cell];
(*values)[this_cell] = (*values)[value_from_indice];
}
}
// template <class T>
void physics_mesh::device_copy_ghost_values(physics_mesh * host_struct, physics_mesh * device_struct, float ** values, int depth){
if(depth == 0) return;
//here we're using the third dimension as the 'cube facet count'
//since iterating over facets only requires two dimensions:
//X and Y.
//level must be >0.
//assumes that parameters of the mesh are consistent from host to device.
//This may not be a safe assumption.
dim3 threads;
dim3 blocks;
set_GPU_dimensions(host_struct,blocks,threads,depth);
threads.x = 6; //override
hipLaunchKernelGGL(( device_copy_ghost_values_kernel), dim3(blocks), dim3(threads), 0, 0, *device_struct, values, depth);
// gpu_error_check( hipPeekAtLastError() );
// gpu_error_check( hipDeviceSynchronize() );
//error checking now handled externally.
}
//+1 to ignore ghosts
__global__ void device_jacobi_relax_kernel(physics_mesh &device_struct, float ** input, float * output, int depth) {
//see https://people.eecs.berkeley.edu/~demmel/cs267/lecture24/lecture24.html
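//one Jacobi sweep: every interior cell whose boundary_conditions entry is 0
//becomes (sum of its six face neighbours + space_charge/EPSILON_ZERO) / 6;
//cells carrying a boundary condition keep their previous value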
int this_block = device_struct.block_indices[device_struct.block_depth_lookup[depth]+blockIdx.x];
int x = threadIdx.x+1;
int y = threadIdx.y+1; //if there's a bug here, make sure gridDim and blockDim are correct.
int z = ((((device_struct.mesh_sizes[depth]-2)/gridDim.SUB_BLOCKS)*blockIdx.SUB_BLOCKS)+threadIdx.z)+1;
int this_cell = this_block + idx(x, y, z, device_struct.mesh_sizes[depth]);
bool boundary_condition_check = (device_struct.boundary_conditions[this_cell] == 0); //1 if cell should be updated
float sum = 0;
sum += (*input)[this_block + idx(x+1, y, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x-1, y, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y+1, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y-1, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y, z+1, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y, z-1, device_struct.mesh_sizes[depth])];
sum += (device_struct.space_charge[this_cell]/EPSILON_ZERO);
(output)[this_cell] = ((1-boundary_condition_check)*(*input)[this_cell]) //leave cell unchanged if a boundary is present - saves a jump
+ (boundary_condition_check * (sum)/(6.0f));
}
__global__ void device_copy_kernel(physics_mesh &device_struct, float * input, float ** output, int depth) {
int this_block = device_struct.block_indices[device_struct.block_depth_lookup[depth]+blockIdx.x];
int x = threadIdx.x+1;
int y = threadIdx.y+1; //if there's a bug here, make sure gridDim and blockDim are correct.
int z = ((((device_struct.mesh_sizes[depth]-2)/gridDim.SUB_BLOCKS)*blockIdx.SUB_BLOCKS)+threadIdx.z)+1;
int this_cell = this_block + idx(x, y, z, device_struct.mesh_sizes[depth]);
(*output)[this_cell] = (input)[this_cell];
}
void physics_mesh::device_jacobi_relax(physics_mesh * host_struct, physics_mesh * device_struct, float ** values, int iterations, int depth){
dim3 threads;
dim3 blocks;
set_GPU_dimensions(host_struct,blocks,threads,depth);
float * device_temp;
//this is basically free since nothing needs to be zeroed
gpu_error_check(hipMalloc(&device_temp, sizeof(float)*(*host_struct).buffer_end_pointer));
physics_mesh::device_copy_ghost_values(host_struct, device_struct, values, depth);
for(int i = 0; i < iterations; i++){
hipLaunchKernelGGL(( device_jacobi_relax_kernel), dim3(blocks), dim3(threads), 0, 0, *device_struct, values, device_temp, depth);
hipLaunchKernelGGL(( device_copy_kernel), dim3(blocks), dim3(threads), 0, 0, *device_struct, device_temp, values, depth); //double-buffering adds quite a bit of complexity.
device_copy_ghost_values(host_struct, device_struct, values, depth);
}
//also basically free
hipFree(device_temp);
gpu_error_check( hipPeekAtLastError() );
gpu_error_check( hipDeviceSynchronize() );
}
| 4b09cd0027395f8b56de257a526429de5b8f5c1e.cu | #include "device_data_structure.hpp"
const int max_cells_per_cuda_block = 1024;
//GPU supports 2^n*65535*65535 blocks of 1024 threads each.
//it is assumed that one yz plane of each block cannot exceed 1024 cells;
//that is, mesh_sizes cannot be > 32.
void set_GPU_dimensions(physics_mesh * host_struct, dim3 &blocks, dim3 &threads, int depth){
//number of refined blocks actually present in mesh on this level
int num_physical_blocks = (*host_struct).blocks_on_level(depth);
//physical blocks on this level are all the same size,
int physical_block_width = (*host_struct).mesh_sizes[depth]-2; //ghosts!
int sub_blocks = ceil(((float)cube(physical_block_width))/((float)max_cells_per_cuda_block));
while(physical_block_width % sub_blocks != 0){
//if the block can't be divided evenly in Z
sub_blocks++;
//will get to physical_block_width at most.
}
blocks.PHYSICAL_BLOCKS = num_physical_blocks;
blocks.SUB_BLOCKS = sub_blocks;
threads.x = physical_block_width;
threads.y = physical_block_width;
threads.z = physical_block_width/sub_blocks;
}
//+1 to ignore ghosts
// a simple if(ghost_linkages[cell]) over all cells might end up being faster.
__global__ void device_copy_ghost_values_kernel(physics_mesh &device_struct, float ** values, int depth) {
int direction = threadIdx.x;
int this_block = device_struct.block_indices[device_struct.block_depth_lookup[depth]+blockIdx.PHYSICAL_BLOCKS];
int x = device_struct.mesh_sizes[depth]-1;
int y = (threadIdx.y)+1;
int z = ((((device_struct.mesh_sizes[depth]-2)/gridDim.SUB_BLOCKS)*blockIdx.SUB_BLOCKS)+threadIdx.z)+1;
int this_cell = this_block + transform_idx(x,y,z, device_struct.mesh_sizes[depth], direction);
if(device_struct.ghost_linkages[this_cell]){
int value_from_indice = device_struct.ghost_linkages[this_cell];
(*values)[this_cell] = (*values)[value_from_indice];
}
}
// template <class T>
void physics_mesh::device_copy_ghost_values(physics_mesh * host_struct, physics_mesh * device_struct, float ** values, int depth){
if(depth == 0) return;
//here we're using the third dimension as the 'cube facet count'
//since iterating over facets only requires two dimensions:
//X and Y.
//level must be >0.
//assumes that parameters of the mesh are consistent from host to device.
//This may not be a safe assumption.
dim3 threads;
dim3 blocks;
set_GPU_dimensions(host_struct,blocks,threads,depth);
threads.x = 6; //override
device_copy_ghost_values_kernel<<<blocks, threads>>>(*device_struct, values, depth);
// gpu_error_check( cudaPeekAtLastError() );
// gpu_error_check( cudaDeviceSynchronize() );
//error checking now handled externally.
}
//+1 to ignore ghosts
__global__ void device_jacobi_relax_kernel(physics_mesh &device_struct, float ** input, float * output, int depth) {
//see https://people.eecs.berkeley.edu/~demmel/cs267/lecture24/lecture24.html
int this_block = device_struct.block_indices[device_struct.block_depth_lookup[depth]+blockIdx.x];
int x = threadIdx.x+1;
int y = threadIdx.y+1; //if there's a bug here, make sure gridDim and blockDim are correct.
int z = ((((device_struct.mesh_sizes[depth]-2)/gridDim.SUB_BLOCKS)*blockIdx.SUB_BLOCKS)+threadIdx.z)+1;
int this_cell = this_block + idx(x, y, z, device_struct.mesh_sizes[depth]);
bool boundary_condition_check = (device_struct.boundary_conditions[this_cell] == 0); //1 if cell should be updated
float sum = 0;
sum += (*input)[this_block + idx(x+1, y, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x-1, y, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y+1, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y-1, z, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y, z+1, device_struct.mesh_sizes[depth])];
sum += (*input)[this_block + idx(x, y, z-1, device_struct.mesh_sizes[depth])];
sum += (device_struct.space_charge[this_cell]/EPSILON_ZERO);
(output)[this_cell] = ((1-boundary_condition_check)*(*input)[this_cell]) //leave cell unchanged if a boundary is present - saves a jump
+ (boundary_condition_check * (sum)/(6.0f));
}
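// The update above is a plain Jacobi sweep: non-boundary cells take
// (sum of the six face neighbours + space_charge/EPSILON_ZERO) / 6, while cells with a
// boundary condition keep their previous value (selected arithmetically to avoid a branch).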
__global__ void device_copy_kernel(physics_mesh &device_struct, float * input, float ** output, int depth) {
int this_block = device_struct.block_indices[device_struct.block_depth_lookup[depth]+blockIdx.x];
int x = threadIdx.x+1;
int y = threadIdx.y+1; //if there's a bug here, make sure gridDim and blockDim are correct.
int z = ((((device_struct.mesh_sizes[depth]-2)/gridDim.SUB_BLOCKS)*blockIdx.SUB_BLOCKS)+threadIdx.z)+1;
int this_cell = this_block + idx(x, y, z, device_struct.mesh_sizes[depth]);
(*output)[this_cell] = (input)[this_cell];
}
void physics_mesh::device_jacobi_relax(physics_mesh * host_struct, physics_mesh * device_struct, float ** values, int iterations, int depth){
dim3 threads;
dim3 blocks;
set_GPU_dimensions(host_struct,blocks,threads,depth);
float * device_temp;
//this is basically free since nothing needs to be zeroed
gpu_error_check(cudaMalloc(&device_temp, sizeof(float)*(*host_struct).buffer_end_pointer));
physics_mesh::device_copy_ghost_values(host_struct, device_struct, values, depth);
for(int i = 0; i < iterations; i++){
device_jacobi_relax_kernel<<<blocks, threads>>>(*device_struct, values, device_temp, depth);
device_copy_kernel<<<blocks, threads>>>(*device_struct, device_temp, values, depth); //double-buffering adds quite a bit of complexity.
device_copy_ghost_values(host_struct, device_struct, values, depth);
}
//also basically free
cudaFree(device_temp);
gpu_error_check( cudaPeekAtLastError() );
gpu_error_check( cudaDeviceSynchronize() );
}
|
b1d9da010e0ef1c8ebc3cd5480f2de77ecef4b51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zbajac_csr.cu, normal z -> s, Sun Nov 20 20:20:39 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_s
#define BLOCKSIZE 256
__global__ void
magma_sbajac_csr_ls_kernel(int localiters, int n,
float * valD,
magma_index_t * rowD,
magma_index_t * colD,
float * valR,
magma_index_t * rowR,
magma_index_t * colR,
const float * __restrict__ b,
float * x )
{
int inddiag = blockIdx.x*blockDim.x;
int index = blockIdx.x*blockDim.x+threadIdx.x;
int i, j, start, end;
if (index < n) {
start = rowR[index];
end = rowR[index+1];
float zero = MAGMA_S_MAKE(0.0, 0.0);
float bl, tmp = zero, v = zero;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
/* add more local iterations */
__shared__ float local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
x[index] = local_x[threadIdx.x];
}
}
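// Note: only the first sweep reads the global iterate x; the remaining localiters-1 sweeps
// work entirely on the cached local_x[] block. valD[start] is used as the divisor, i.e. the
// diagonal entry is assumed to be the first stored element of each row of D.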
__global__ void
magma_sbajac_csr_kernel(
int n,
float * valD,
magma_index_t * rowD,
magma_index_t * colD,
float * valR,
magma_index_t * rowR,
magma_index_t * colR,
float * b,
float * x )
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int i, start, end;
if (index < n) {
float zero = MAGMA_S_MAKE(0.0, 0.0);
float bl, tmp = zero, v = zero;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
start = rowR[index];
end = rowR[index+1];
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
v = bl - v;
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
x[index] = x[index] + ( v - tmp ) / (valD[start]);
}
}
/**
Purpose
-------
This routine is a block-asynchronous Jacobi iteration performing s
local Jacobi-updates within the block. Input format is two CSR matrices,
one containing the diagonal blocks, one containing the rest.
Arguments
---------
@param[in]
localiters magma_int_t
number of local Jacobi-like updates
@param[in]
D magma_s_matrix
input matrix with diagonal blocks
@param[in]
R magma_s_matrix
input matrix with non-diagonal parts
@param[in]
b magma_s_matrix
RHS
@param[in]
x magma_s_matrix*
iterate/solution
@param[in]
queue magma_queue_t
Queue to execute in.
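Example (illustrative sketch; assumes D, R, b, x and queue are set up as described above):
magma_sbajac_csr( 4, D, R, b, &x, queue ); // four Jacobi-like updates per block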
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbajac_csr(
magma_int_t localiters,
magma_s_matrix D,
magma_s_matrix R,
magma_s_matrix b,
magma_s_matrix *x,
magma_queue_t queue )
{
int blocksize1 = BLOCKSIZE;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( D.num_rows, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
if ( R.nnz > 0 ) {
if ( localiters == 1 )
hipLaunchKernelGGL(( magma_sbajac_csr_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
else
hipLaunchKernelGGL(( magma_sbajac_csr_ls_kernel), dim3(grid), dim3(block), 0, queue->cuda_stream() ,
localiters, D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
}
else {
printf("error: all elements in diagonal block.\n");
}
return MAGMA_SUCCESS;
}
| b1d9da010e0ef1c8ebc3cd5480f2de77ecef4b51.cu | /*
-- MAGMA (version 2.2.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2016
@generated from sparse/blas/zbajac_csr.cu, normal z -> s, Sun Nov 20 20:20:39 2016
*/
#include "magmasparse_internal.h"
#define PRECISION_s
#define BLOCKSIZE 256
__global__ void
magma_sbajac_csr_ls_kernel(int localiters, int n,
float * valD,
magma_index_t * rowD,
magma_index_t * colD,
float * valR,
magma_index_t * rowR,
magma_index_t * colR,
const float * __restrict__ b,
float * x )
{
int inddiag = blockIdx.x*blockDim.x;
int index = blockIdx.x*blockDim.x+threadIdx.x;
int i, j, start, end;
if (index < n) {
start = rowR[index];
end = rowR[index+1];
float zero = MAGMA_S_MAKE(0.0, 0.0);
float bl, tmp = zero, v = zero;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
v = bl - v;
/* add more local iterations */
__shared__ float local_x[ BLOCKSIZE ];
local_x[threadIdx.x] = x[index] + ( v - tmp) / (valD[start]);
__syncthreads();
#pragma unroll
for( j=0; j<localiters-1; j++ )
{
tmp = zero;
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * local_x[ colD[i] - inddiag];
local_x[threadIdx.x] += ( v - tmp) / (valD[start]);
}
x[index] = local_x[threadIdx.x];
}
}
__global__ void
magma_sbajac_csr_kernel(
int n,
float * valD,
magma_index_t * rowD,
magma_index_t * colD,
float * valR,
magma_index_t * rowR,
magma_index_t * colR,
float * b,
float * x )
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int i, start, end;
if (index < n) {
float zero = MAGMA_S_MAKE(0.0, 0.0);
float bl, tmp = zero, v = zero;
#if (__CUDA_ARCH__ >= 350) && (defined(PRECISION_d) || defined(PRECISION_s))
bl = __ldg( b+index );
#else
bl = b[index];
#endif
start = rowR[index];
end = rowR[index+1];
#pragma unroll
for( i=start; i<end; i++ )
v += valR[i] * x[ colR[i] ];
v = bl - v;
start = rowD[index];
end = rowD[index+1];
#pragma unroll
for( i=start; i<end; i++ )
tmp += valD[i] * x[ colD[i] ];
x[index] = x[index] + ( v - tmp ) / (valD[start]);
}
}
/**
Purpose
-------
This routine is a block-asynchronous Jacobi iteration performing s
local Jacobi-updates within the block. Input format is two CSR matrices,
one containing the diagonal blocks, one containing the rest.
Arguments
---------
@param[in]
localiters magma_int_t
number of local Jacobi-like updates
@param[in]
D magma_s_matrix
input matrix with diagonal blocks
@param[in]
R magma_s_matrix
input matrix with non-diagonal parts
@param[in]
b magma_s_matrix
RHS
@param[in]
x magma_s_matrix*
iterate/solution
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_sgegpuk
********************************************************************/
extern "C" magma_int_t
magma_sbajac_csr(
magma_int_t localiters,
magma_s_matrix D,
magma_s_matrix R,
magma_s_matrix b,
magma_s_matrix *x,
magma_queue_t queue )
{
int blocksize1 = BLOCKSIZE;
int blocksize2 = 1;
int dimgrid1 = magma_ceildiv( D.num_rows, blocksize1 );
int dimgrid2 = 1;
int dimgrid3 = 1;
dim3 grid( dimgrid1, dimgrid2, dimgrid3 );
dim3 block( blocksize1, blocksize2, 1 );
if ( R.nnz > 0 ) {
if ( localiters == 1 )
magma_sbajac_csr_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
else
magma_sbajac_csr_ls_kernel<<< grid, block, 0, queue->cuda_stream() >>>
( localiters, D.num_rows, D.dval, D.drow, D.dcol,
R.dval, R.drow, R.dcol, b.dval, x->dval );
}
else {
printf("error: all elements in diagonal block.\n");
}
return MAGMA_SUCCESS;
}
|
5b502a10833ec4fb8c2c3bdc07e9a0c736d09a51.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <map>
#include "parse.h"
#include "sequential.h"
#include "util.cuh"
#include "kernels_hip.cuh"
#include "main.cuh"
std::map<int, double> compute_bc(std::vector<std::pair<int, int> > edges, int k, bool approx, int device ){
int max_threads_per_block, number_of_SMs;
choose_device(max_threads_per_block,number_of_SMs, device);
graph g = parse_edgelist(edges);
std::cout << "Number of nodes: " << g.n << std::endl;
std::cout << "Number of edges: " << g.m << std::endl;
//If we're approximating, choose source vertices at random
std::set<int> source_vertices;
if(approx)
{
if(k > g.n || k < 1)
{
k = g.n;
}
while(source_vertices.size() < k)
{
int temp_source = rand() % g.n;
source_vertices.insert(temp_source);
}
}
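// With approx enabled, the BC scores are estimated from k randomly chosen distinct source
// vertices (the std::set enforces distinctness) instead of running from all n sources.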
hipEvent_t start,end;
float CPU_time;
std::vector<float> bc;
if(false) //Only run CPU code if verifying
{
start_clock(start,end);
bc = bc_cpu(g,source_vertices);
CPU_time = end_clock(start,end);
}
float GPU_time;
std::vector<double> bc_g;
start_clock(start,end);
bc_g = bc_gpu(g,max_threads_per_block,number_of_SMs,approx, k,source_vertices);
GPU_time = end_clock(start,end);
if(false)
{
//verify(g,bc,bc_g);
}
if(false)
{
//g.print_BC_scores(bc_g,"summa.txt");
}
std::cout << std::setprecision(9);
if(false)
{
std::cout << "Time for CPU Algorithm: " << CPU_time << " s" << std::endl;
}
std::cout << "Time for GPU Algorithm: " << GPU_time << " s" << std::endl;
std::map<int, double> bc_scores = g.get_BC_scores(bc_g);
delete[] g.R;
delete[] g.C;
delete[] g.F;
return bc_scores;
} | 5b502a10833ec4fb8c2c3bdc07e9a0c736d09a51.cu | #include <iostream>
#include <iomanip>
#include <cstdlib>
#include <map>
#include "parse.h"
#include "sequential.h"
#include "util.cuh"
#include "kernels.cuh"
#include "main.cuh"
std::map<int, double> compute_bc(std::vector<std::pair<int, int> > edges, int k, bool approx, int device ){
int max_threads_per_block, number_of_SMs;
choose_device(max_threads_per_block,number_of_SMs, device);
graph g = parse_edgelist(edges);
std::cout << "Number of nodes: " << g.n << std::endl;
std::cout << "Number of edges: " << g.m << std::endl;
//If we're approximating, choose source vertices at random
std::set<int> source_vertices;
if(approx)
{
if(k > g.n || k < 1)
{
k = g.n;
}
while(source_vertices.size() < k)
{
int temp_source = rand() % g.n;
source_vertices.insert(temp_source);
}
}
cudaEvent_t start,end;
float CPU_time;
std::vector<float> bc;
if(false) //Only run CPU code if verifying
{
start_clock(start,end);
bc = bc_cpu(g,source_vertices);
CPU_time = end_clock(start,end);
}
float GPU_time;
std::vector<double> bc_g;
start_clock(start,end);
bc_g = bc_gpu(g,max_threads_per_block,number_of_SMs,approx, k,source_vertices);
GPU_time = end_clock(start,end);
if(false)
{
//verify(g,bc,bc_g);
}
if(false)
{
//g.print_BC_scores(bc_g,"summa.txt");
}
std::cout << std::setprecision(9);
if(false)
{
std::cout << "Time for CPU Algorithm: " << CPU_time << " s" << std::endl;
}
std::cout << "Time for GPU Algorithm: " << GPU_time << " s" << std::endl;
std::map<int, double> bc_scores = g.get_BC_scores(bc_g);
delete[] g.R;
delete[] g.C;
delete[] g.F;
return bc_scores;
} |
076c55e1a44e2438f407d041e922444724121a1c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "_mat_sum_row.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *m = NULL;
hipMalloc(&m, XSIZE*YSIZE);
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
int nrow = 1;
int ncol = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(_mat_sum_row, dim3(gridBlock), dim3(threadBlock), 0, 0, m, target, nrow, ncol);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(_mat_sum_row, dim3(gridBlock), dim3(threadBlock), 0, 0, m, target, nrow, ncol);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(_mat_sum_row, dim3(gridBlock), dim3(threadBlock), 0, 0, m, target, nrow, ncol);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 076c55e1a44e2438f407d041e922444724121a1c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "_mat_sum_row.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *m = NULL;
cudaMalloc(&m, XSIZE*YSIZE);
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
int nrow = 1;
int ncol = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
_mat_sum_row<<<gridBlock,threadBlock>>>(m,target,nrow,ncol);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
_mat_sum_row<<<gridBlock,threadBlock>>>(m,target,nrow,ncol);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
_mat_sum_row<<<gridBlock,threadBlock>>>(m,target,nrow,ncol);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
6e5162bfae91ce40ce2fe6be274a1f417809b1f8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <iostream>
__global__ void addArray(int *ary1, int *ary2)
{
int indx = threadIdx.x;
ary1[indx] = ary2[indx];
}
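// Note: despite its name, this kernel copies ary2 into ary1 element-wise (one thread per
// element); it does not add the two arrays.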
int main(int argc,char **argv)
{
int ary[32]{0};
int res[32]{0};
for(int i = 0; i < 32; i++){
ary[i] = 2*i;
}
int *d_ary, *d_res;
hipMalloc((void**)&d_ary, 32 * sizeof(int));
hipMalloc((void**)&d_res, 32 * sizeof(int));
hipMemcpy((void*)d_ary, (void*)ary, 32 * sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( addArray), dim3(1),dim3(32), 0, 0, d_res, d_ary);
hipMemcpy((void*)res, (void*)d_res, 32 * sizeof(int), hipMemcpyDeviceToHost);
for(int i = 0; i < 32; i++){
std::cout << "result:" << res[i] << std::endl;
}
hipFree(d_ary);
hipFree(d_res);
return 0;
}
| 6e5162bfae91ce40ce2fe6be274a1f417809b1f8.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
__global__ void addArray(int *ary1, int *ary2)
{
int indx = threadIdx.x;
ary1[indx] = ary2[indx];
}
int main(int argc,char **argv)
{
int ary[32]{0};
int res[32]{0};
for(int i = 0; i < 32; i++){
ary[i] = 2*i;
}
int *d_ary, *d_res;
cudaMalloc((void**)&d_ary, 32 * sizeof(int));
cudaMalloc((void**)&d_res, 32 * sizeof(int));
cudaMemcpy((void*)d_ary, (void*)ary, 32 * sizeof(int), cudaMemcpyHostToDevice);
addArray<<<1,32>>>(d_res, d_ary);
cudaMemcpy((void*)res, (void*)d_res, 32 * sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0; i < 32; i++){
std::cout << "result:" << res[i] << std::endl;
}
cudaFree(d_ary);
cudaFree(d_res);
return 0;
}
|
5906b4da8f7e30c740b63cd55281b1d759db9247.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
__device__ int get_global_index(){
return blockIdx.x * blockDim.x + threadIdx.x;
}
__device__ int get_constant(){
return 7;
}
__global__ void kernel1(int *array){
int index = get_global_index();
array[index] = get_constant();
}
__global__ void kernel2(int *array){
int index = get_global_index();
array[index] = get_global_index();
}
int main(){
int num_elements = 256;
int num_bytes = num_elements*sizeof(int);
int *device_array = 0;
int *host_array = 0;
host_array = (int *) malloc(num_bytes);
hipMalloc((void**)&device_array, num_bytes);
int block_size = 128;
int grid_size = num_elements/block_size;
hipLaunchKernelGGL(kernel1, dim3(grid_size), dim3(block_size), 0, 0, device_array);
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
printf("kernel 1 results: \n");
int i;
for(i=0;i<num_elements;i++){
printf("%d ", host_array[i]);
}
printf("\n");
hipLaunchKernelGGL(kernel2, dim3(grid_size), dim3(block_size), 0, 0, device_array);
hipMemcpy(host_array, device_array, num_bytes, hipMemcpyDeviceToHost);
printf("kernel 2 results: \n");
for(i= 0; i< num_elements; i++){
printf("%d ", host_array[i]);
}
printf("\n");
return 0;
} | 5906b4da8f7e30c740b63cd55281b1d759db9247.cu | #include <stdlib.h>
#include <stdio.h>
__device__ int get_global_index(){
return blockIdx.x * blockDim.x + threadIdx.x;
}
__device__ int get_constant(){
return 7;
}
__global__ void kernel1(int *array){
int index = get_global_index();
array[index] = get_constant();
}
__global__ void kernel2(int *array){
int index = get_global_index();
array[index] = get_global_index();
}
int main(){
int num_elements = 256;
int num_bytes = num_elements*sizeof(int);
int *device_array = 0;
int *host_array = 0;
host_array = (int *) malloc(num_bytes);
cudaMalloc((void**)&device_array, num_bytes);
int block_size = 128;
int grid_size = num_elements/block_size;
kernel1<<<grid_size, block_size>>>(device_array);
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
printf("kernel 1 results: \n");
int i;
for(i=0;i<num_elements;i++){
printf("%d ", host_array[i]);
}
printf("\n");
kernel2<<<grid_size, block_size>>>(device_array);
cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
printf("kernel 2 results: \n");
for(i= 0; i< num_elements; i++){
printf("%d ", host_array[i]);
}
printf("\n");
return 0;
} |
ca5f3fc17dcb103a78517d82396d35fb613552b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/dice_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ArgMax(const int n, const Dtype* data, Dtype* prediction) {
CUDA_KERNEL_LOOP(i, n) {
prediction[i] = data[i] > data[i + n] ? 0 : 1;
}
}
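// ArgMax collapses the two-channel score map into a per-voxel binary prediction:
// channel 0 wins only when its score is strictly greater, otherwise the label is 1.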
template <typename Dtype>
void DiceLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int labelCount = bottom[1]->count();
const int batchSize = bottom[1]->shape(0);
const int dimSize = labelCount / batchSize;
// printf("labelCount: %d, batchSize: %d, dimSize: %d, num(): %d\n", labelCount, batchSize, dimSize, bottom[1]->shape(0));
// call cuda method to compute prediction
for (int i = 0; i < batchSize; i++) {
// NOLINT_NEXT_LINE(whitespace/operators)
ArgMax<Dtype> << <CAFFE_GET_BLOCKS(dimSize), CAFFE_CUDA_NUM_THREADS >> >(
dimSize,
data + i * 2 * dimSize,
bottom[1]->mutable_gpu_diff() + i * dimSize
);
}
const Dtype* prediction = bottom[1]->gpu_diff();
caffe_gpu_set(batchSize, Dtype(0), predictionSum.mutable_gpu_data());
caffe_gpu_set(batchSize, Dtype(0), labelSum.mutable_gpu_data());
for (int i = 0; i < batchSize; i++) {
caffe_gpu_asum(dimSize, prediction + i * dimSize, predictionSum.mutable_cpu_data() + i);
caffe_gpu_asum(dimSize, label + i * dimSize, labelSum.mutable_cpu_data() + i);
}
caffe_gpu_mul(labelCount, prediction, label, bottom[1]->mutable_gpu_diff());
caffe_gpu_set(batchSize, Dtype(0), intersectionSum.mutable_gpu_data());
for (int i = 0; i < batchSize; i++) {
caffe_gpu_asum(dimSize, bottom[1]->gpu_diff() + i * dimSize, intersectionSum.mutable_cpu_data() + i);
}
// total dice - it's simple so we directly compute on cpu
top[0]->mutable_cpu_data()[0] = Dtype(0);
for (int i = 0; i < batchSize; i++) {
// printf("i: %f, p: %f, l: %f\n", intersectionSum.cpu_data()[i], predictionSum.cpu_data()[i], labelSum.cpu_data()[i]);
top[0]->mutable_cpu_data()[0] += 2.0 * intersectionSum.cpu_data()[i] / (predictionSum.cpu_data()[i] + labelSum.cpu_data()[i]);
}
}
template <typename Dtype>
__global__ void SegmentDiff(const int dimSize, const int batchIndex, const Dtype* data, const Dtype* label, const Dtype* u, const Dtype* u2, const Dtype* intersectionSum, Dtype* diff) {
CUDA_KERNEL_LOOP(i, dimSize) {
// printf("data[i]: %f, data[i+labelCount]: %f\n", data[i], data[i + labelCount]);
// diff[i] = 2.0 * ((label[i] * u) / (u * u) - 2.0 * (data[i + dimSize] * intersectionSum[batchIndex]) / (u * u));
// diff[i + dimSize] = -2.0 * ((label[i] * u) / (u * u) - 2.0 * (data[i + dimSize] * intersectionSum[batchIndex]) / (u * u));
diff[i] = 2.0 * ((label[i] * u[batchIndex] - 2.0 * data[i + dimSize] * intersectionSum[batchIndex]) / u2[batchIndex]);
diff[i + dimSize] = -1.0 * diff[i];
}
}
template <typename Dtype>
void DiceLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
const Dtype* data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int labelCount = bottom[1]->count();
const int batchSize = bottom[0]->shape(0);
const int dimSize = labelCount / batchSize;
// use label / prediction sum to save union and union ^ 2
caffe_gpu_add(batchSize, predictionSum.gpu_data(), labelSum.gpu_data(), predictionSum.mutable_gpu_data());
caffe_gpu_mul(batchSize, predictionSum.gpu_data(), predictionSum.gpu_data(), labelSum.mutable_gpu_data());
for (int i = 0; i < batchSize; i++) {
// NOLINT_NEXT_LINE(whitespace/operators)
SegmentDiff<Dtype> << <CAFFE_GET_BLOCKS(dimSize), CAFFE_CUDA_NUM_THREADS >> >(
dimSize, i,
data + i * 2 * dimSize, label + i * dimSize,
predictionSum.gpu_data(), labelSum.gpu_data(), intersectionSum.gpu_data(),
bottom[0]->mutable_gpu_diff() + i * 2 * dimSize
);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DiceLossLayer);
} // namespace caffe | ca5f3fc17dcb103a78517d82396d35fb613552b8.cu | #include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/dice_loss_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ArgMax(const int n, const Dtype* data, Dtype* prediction) {
CUDA_KERNEL_LOOP(i, n) {
prediction[i] = data[i] > data[i + n] ? 0 : 1;
}
}
template <typename Dtype>
void DiceLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int labelCount = bottom[1]->count();
const int batchSize = bottom[1]->shape(0);
const int dimSize = labelCount / batchSize;
// printf("labelCount: %d, batchSize: %d, dimSize: %d, num(): %d\n", labelCount, batchSize, dimSize, bottom[1]->shape(0));
// call cuda method to compute prediction
for (int i = 0; i < batchSize; i++) {
// NOLINT_NEXT_LINE(whitespace/operators)
ArgMax<Dtype> << <CAFFE_GET_BLOCKS(dimSize), CAFFE_CUDA_NUM_THREADS >> >(
dimSize,
data + i * 2 * dimSize,
bottom[1]->mutable_gpu_diff() + i * dimSize
);
}
const Dtype* prediction = bottom[1]->gpu_diff();
caffe_gpu_set(batchSize, Dtype(0), predictionSum.mutable_gpu_data());
caffe_gpu_set(batchSize, Dtype(0), labelSum.mutable_gpu_data());
for (int i = 0; i < batchSize; i++) {
caffe_gpu_asum(dimSize, prediction + i * dimSize, predictionSum.mutable_cpu_data() + i);
caffe_gpu_asum(dimSize, label + i * dimSize, labelSum.mutable_cpu_data() + i);
}
caffe_gpu_mul(labelCount, prediction, label, bottom[1]->mutable_gpu_diff());
caffe_gpu_set(batchSize, Dtype(0), intersectionSum.mutable_gpu_data());
for (int i = 0; i < batchSize; i++) {
caffe_gpu_asum(dimSize, bottom[1]->gpu_diff() + i * dimSize, intersectionSum.mutable_cpu_data() + i);
}
// total dice - it's simple so we directly compute on cpu
top[0]->mutable_cpu_data()[0] = Dtype(0);
for (int i = 0; i < batchSize; i++) {
// printf("i: %f, p: %f, l: %f\n", intersectionSum.cpu_data()[i], predictionSum.cpu_data()[i], labelSum.cpu_data()[i]);
top[0]->mutable_cpu_data()[0] += 2.0 * intersectionSum.cpu_data()[i] / (predictionSum.cpu_data()[i] + labelSum.cpu_data()[i]);
}
}
template <typename Dtype>
__global__ void SegmentDiff(const int dimSize, const int batchIndex, const Dtype* data, const Dtype* label, const Dtype* u, const Dtype* u2, const Dtype* intersectionSum, Dtype* diff) {
CUDA_KERNEL_LOOP(i, dimSize) {
// printf("data[i]: %f, data[i+labelCount]: %f\n", data[i], data[i + labelCount]);
// diff[i] = 2.0 * ((label[i] * u) / (u * u) - 2.0 * (data[i + dimSize] * intersectionSum[batchIndex]) / (u * u));
// diff[i + dimSize] = -2.0 * ((label[i] * u) / (u * u) - 2.0 * (data[i + dimSize] * intersectionSum[batchIndex]) / (u * u));
diff[i] = 2.0 * ((label[i] * u[batchIndex] - 2.0 * data[i + dimSize] * intersectionSum[batchIndex]) / u2[batchIndex]);
diff[i + dimSize] = -1.0 * diff[i];
}
}
template <typename Dtype>
void DiceLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
const Dtype* data = bottom[0]->gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int labelCount = bottom[1]->count();
const int batchSize = bottom[0]->shape(0);
const int dimSize = labelCount / batchSize;
// use label / prediction sum to save union and union ^ 2
caffe_gpu_add(batchSize, predictionSum.gpu_data(), labelSum.gpu_data(), predictionSum.mutable_gpu_data());
caffe_gpu_mul(batchSize, predictionSum.gpu_data(), predictionSum.gpu_data(), labelSum.mutable_gpu_data());
for (int i = 0; i < batchSize; i++) {
// NOLINT_NEXT_LINE(whitespace/operators)
SegmentDiff<Dtype> << <CAFFE_GET_BLOCKS(dimSize), CAFFE_CUDA_NUM_THREADS >> >(
dimSize, i,
data + i * 2 * dimSize, label + i * dimSize,
predictionSum.gpu_data(), labelSum.gpu_data(), intersectionSum.gpu_data(),
bottom[0]->mutable_gpu_diff() + i * 2 * dimSize
);
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(DiceLossLayer);
} // namespace caffe |
92e9d788eb344f6fd6f1328af77a87297db5928a.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "common.h"
#include "naive.h"
#include <memory>
#include <iostream>
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
/* Copy initial data over and pad 0's if out of scope of initial size
* aka the input array has a smaller initial size than the final array,
* and anything larger than index [size of input array] will be 0 in the output array
*/
__global__ void formatInitData(int initSize, int finalSize, int* data) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= initSize && index < finalSize) {
data[index] = 0;
}
}
__global__ void add(int n, int ignoreIndexCount, int* odata, const int* idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < ignoreIndexCount) {
odata[index] = idata[index];
} else if (index < n) {
int x1 = idata[index - ignoreIndexCount];
int x2 = idata[index];
odata[index] = x1 + x2;
}
}
// Careful with non-power of 2
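// Naive (Hillis-Steele style) scan: ilog2ceil(n) passes over the zero-padded power-of-two
// array, each pass adding the element 2^(i-1) positions to the left, double-buffered by
// swapping dev_idata and dev_odata; total work is O(n log n) versus O(n) for a work-efficient scan.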
void scan(int n, int *odata, const int *idata) {
if (n < 1) {
return;
}
// Round the element count up to the next power of two so the input can be treated as a power-of-two-sized array
int kernelInvokeCount = ilog2ceil(n);
int n2 = pow(2, kernelInvokeCount);
// Declare data to be on the gpu
int* dev_odata;
int* dev_idata;
std::unique_ptr<int[]> tdata{ new int[n2] };
// Allocate data to be on the gpu
hipMalloc((void**)&dev_odata, n2 * sizeof(int));
checkCUDAError("hipMalloc dev_odata failed!");
hipMalloc((void**)&dev_idata, n2 * sizeof(int));
checkCUDAError("hipMalloc dev_tdata failed!");
// Transfer data from cpu to gpu
hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
checkCUDAError("hipMemcpy dev_idata failed!");
timer().startGpuTimer();
int blockSize = 128;
dim3 blockCount((n2 + blockSize - 1) / blockSize);
// Format input data (pad 0s to the closest power of two elements, inclusively)
StreamCompaction::Common::formatInitData << <blockCount, blockSize >> > (n, n2, dev_idata);
std::cout << "kernel invoke count: " << kernelInvokeCount << std::endl;
for (int i = 1; i <= kernelInvokeCount; i++) {
int ignoreIndexCount = pow(2, i - 1);
add << <blockCount, blockSize >> > (n2, ignoreIndexCount, dev_odata, dev_idata);
int* temp = dev_idata;
dev_idata = dev_odata;
dev_odata = temp;
}
// Shift things to the right to turn the inclusive scan into an exclusive scan
StreamCompaction::Common::shiftRight<< <blockCount, blockSize >> > (n, dev_idata, dev_odata);
// Transfer data from gpu to cpu
hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost);
checkCUDAError("hipMemcpy dev_odata failed!");
hipFree(dev_odata);
checkCUDAError("hipFree dev_odata failed!");
hipFree(dev_idata);
checkCUDAError("hipFree dev_idata failed!");
// Calculate the number of blocks and threads per block
timer().endGpuTimer();
}
}
}
| 92e9d788eb344f6fd6f1328af77a87297db5928a.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include "common.h"
#include "naive.h"
#include <memory>
#include <iostream>
namespace StreamCompaction {
namespace Naive {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
// TODO: __global__
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
/* Copy initial data over and pad 0's if out of scope of initial size
* aka the input array has a smaller initial size than the final array,
* and anything larger than index [size of input array] will be 0 in the output array
*/
__global__ void formatInitData(int initSize, int finalSize, int* data) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= initSize && index < finalSize) {
data[index] = 0;
}
}
__global__ void add(int n, int ignoreIndexCount, int* odata, const int* idata) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < ignoreIndexCount) {
odata[index] = idata[index];
} else if (index < n) {
int x1 = idata[index - ignoreIndexCount];
int x2 = idata[index];
odata[index] = x1 + x2;
}
}
// Careful with non-power of 2
void scan(int n, int *odata, const int *idata) {
if (n < 1) {
return;
}
// Round the element count up to the next power of two so the input can be treated as a power-of-two-sized array
int kernelInvokeCount = ilog2ceil(n);
int n2 = pow(2, kernelInvokeCount);
// Declare data to be on the gpu
int* dev_odata;
int* dev_idata;
std::unique_ptr<int[]> tdata{ new int[n2] };
// Allocate data to be on the gpu
cudaMalloc((void**)&dev_odata, n2 * sizeof(int));
checkCUDAError("cudaMalloc dev_odata failed!");
cudaMalloc((void**)&dev_idata, n2 * sizeof(int));
checkCUDAError("cudaMalloc dev_tdata failed!");
// Transfer data from cpu to gpu
cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
checkCUDAError("cudaMemcpy dev_idata failed!");
timer().startGpuTimer();
int blockSize = 128;
dim3 blockCount((n2 + blockSize - 1) / blockSize);
// Format input data (pad 0s to the closest power of two elements, inclusively)
StreamCompaction::Common::formatInitData << <blockCount, blockSize >> > (n, n2, dev_idata);
std::cout << "kernel invoke count: " << kernelInvokeCount << std::endl;
for (int i = 1; i <= kernelInvokeCount; i++) {
int ignoreIndexCount = pow(2, i - 1);
add << <blockCount, blockSize >> > (n2, ignoreIndexCount, dev_odata, dev_idata);
int* temp = dev_idata;
dev_idata = dev_odata;
dev_odata = temp;
}
// Shift things to the right to turn the inclusive scan into an exclusive scan
StreamCompaction::Common::shiftRight<< <blockCount, blockSize >> > (n, dev_idata, dev_odata);
// Transfer data from gpu to cpu
cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
checkCUDAError("cudaMemcpy dev_odata failed!");
cudaFree(dev_odata);
checkCUDAError("cudaFree dev_odata failed!");
cudaFree(dev_idata);
checkCUDAError("cudaFree dev_idata failed!");
// Calculate the number of blocks and threads per block
timer().endGpuTimer();
}
}
}
|
e20957bae33488353bd3cfc0263f0dc88ea53709.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
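// This variant software-pipelines the tile loads: while the partial product is accumulated
// from the tile currently in shared memory, the next tile is prefetched into registers
// (M_reg/N_reg) and only committed to shared memory after a barrier.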
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[TILED_WIDTH][TILED_WIDTH];
__shared__ float Nds[TILED_WIDTH][TILED_WIDTH];
//float Mds[TILED_WIDTH][TILED_WIDTH];
//float Nds[TILED_WIDTH][TILED_WIDTH];
float M_reg;
float N_reg;
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int row = by * TILED_WIDTH + ty;
int col = bx * TILED_WIDTH + tx;
float p_sum=0.0f;
Mds[ty][tx]=M.elements[row*M.width+tx];
Nds[ty][tx]=N.elements[ty*N.width+col];
__syncthreads();
for(int m=0; m<(M.width-1)/TILED_WIDTH+1; m++)
{
for(int n=0; n<TILED_WIDTH; n++)
p_sum += Mds[ty][n] * Nds[n][tx];
if(((m+1)*TILED_WIDTH+tx)<M.width&&row<M.height)
{
M_reg=M.elements[row*M.width+((m+1)*TILED_WIDTH+tx)];
}
else
M_reg=0.0;
if(((m+1)*TILED_WIDTH+ty)<N.height&&col<N.width)
{
N_reg=N.elements[((m+1)*TILED_WIDTH+ty)*N.width+col];
}
else
N_reg = 0.0;
__syncthreads();
Mds[ty][tx]=M_reg;
Nds[ty][tx]=N_reg;
__syncthreads();
}
if(row<P.height&&col<P.width)
P.elements[ row*P.width + col ] = p_sum;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| e20957bae33488353bd3cfc0263f0dc88ea53709.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
__shared__ float Mds[TILED_WIDTH][TILED_WIDTH];
__shared__ float Nds[TILED_WIDTH][TILED_WIDTH];
//float Mds[TILED_WIDTH][TILED_WIDTH];
//float Nds[TILED_WIDTH][TILED_WIDTH];
float M_reg;
float N_reg;
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int row = by * TILED_WIDTH + ty;
int col = bx * TILED_WIDTH + tx;
float p_sum=0.0f;
Mds[ty][tx]=M.elements[row*M.width+tx];
Nds[ty][tx]=N.elements[ty*N.width+col];
__syncthreads();
for(int m=0; m<(M.width-1)/TILED_WIDTH+1; m++)
{
for(int n=0; n<TILED_WIDTH; n++)
p_sum += Mds[ty][n] * Nds[n][tx];
if(((m+1)*TILED_WIDTH+tx)<M.width&&row<M.height)
{
M_reg=M.elements[row*M.width+((m+1)*TILED_WIDTH+tx)];
}
else
M_reg=0.0;
if(((m+1)*TILED_WIDTH+ty)<N.height&&col<N.width)
{
N_reg=N.elements[((m+1)*TILED_WIDTH+ty)*N.width+col];
}
else
N_reg = 0.0;
__syncthreads();
Mds[ty][tx]=M_reg;
Nds[ty][tx]=N_reg;
__syncthreads();
}
if(row<P.height&&col<P.width)
P.elements[ row*P.width + col ] = p_sum;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
5dbaec51528e41593397240cbae6b7a70976e001.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_data.h"
#include "helper_math.h"
__device__ float4 matMul(const Mat& m, const float4& v)
{
return make_float4(dot(m.rows[0], v), dot(m.rows[1], v), dot(m.rows[2], v), dot(m.rows[3], v));
}
__device__ bool isInsideFrustum(const Mat& m, float3 point)
{
float4 pos = matMul(m, make_float4(point, 1.0f));
if(pos.w <= 0.0f || pos.x < -1.4f*pos.w || pos.x > 1.2f*pos.w || pos.y < -1.2f*pos.w || pos.y > 1.2f*pos.w){
return false;
}else{
return true;
}
}
#define FACTOR_9BIT 512.0
#define FACTOR_19BIT 524288.0
#define MASK_10BIT 1023
#define MASK_20BIT 1048575
static const float SCALE_19BIT = (1.0 / FACTOR_19BIT);
static const float SCALE_9BIT = (1.0 / FACTOR_9BIT);
static const float STEPS_10BIT = 1024;
static const unsigned int SPECTRAL[5] = {
0x00ba832b,
0x00a4ddab,
0x00bfffff,
0x0061aefd,
0x001c19d7
};
extern "C" __global__
void kernel(const ChangingRenderData data,
unsigned long long int* framebuffer,
XYZBatch* batches,
float* xyz12,
uint2* xyz8,
uint4* xyz4,
unsigned int* rgba)
{
unsigned int batchIndex = blockIdx.x;
unsigned int numPointsPerBatch = data.uPointsPerThread * blockDim.x;
unsigned int wgFirstPoint = batchIndex * numPointsPerBatch;
XYZBatch batch = batches[batchIndex];
float3 wgMin = make_float3(batch.min_x, batch.min_y, batch.min_z);
float3 wgMax = make_float3(batch.max_x, batch.max_y, batch.max_z);
float3 boxSize = wgMax - wgMin;
float3 boxSizeScaled = boxSize / STEPS_10BIT;
float3 bigBoxSize = data.uBoxMax - data.uBoxMin;
float3 bigBoxSizeScaled = bigBoxSize * SCALE_19BIT;
// FRUSTUM CULLING
if((data.uEnableFrustumCulling != 0) && (!isInsideFrustum(data.uTransform, wgMin) && !isInsideFrustum(data.uTransform, wgMax))){
return;
}
float3 wgCenter = 0.5f * (wgMin + wgMax);
__shared__ int sLevel;
if(threadIdx.x == 0)
{
sLevel = 0;
// LOD CULLING
float wgRadius = length(wgMin - wgMax);
float4 viewCenter = matMul(data.uWorldView, make_float4(wgCenter, 1.0f));
float4 viewEdge = viewCenter + make_float4(wgRadius, 0.0f, 0.0f, 0.0f);
float4 projCenter = matMul(data.uProj, viewCenter);
float4 projEdge = matMul(data.uProj, viewEdge);
float2 projCenter2D = make_float2(projCenter.x, projCenter.y);
float2 projEdge2D = make_float2(projEdge.x, projEdge.y);
projCenter2D /= projCenter.w;
projEdge2D /= projEdge.w;
float w_depth = length(projCenter2D - projEdge2D);
float d_screen = length(projCenter2D);
float w_screen = expf(- (d_screen * d_screen));
float w = w_depth * w_screen;
if(w < 0.01f){
sLevel = 4;
}else if(w < 0.02f){
sLevel = 3;
}else if(w < 0.05f){
sLevel = 2;
}else if(w < 0.1f){
sLevel = 1;
}
}
__syncthreads();
int level = sLevel;
if (blockIdx.x == gridDim.x -1)
return;
int loopSize = data.uPointsPerThread;
uint4 encodedl = xyz4[wgFirstPoint / 4 + threadIdx.x];
for(int i = 0; i < loopSize/4; i++)
{
//unsigned int index = wgFirstPoint + i * blockDim.x + threadIdx.x;
unsigned int index = wgFirstPoint / 4 + i * blockDim.x + threadIdx.x;
//if(index >= data.uNumPoints/4){
// return;
//}
float3 point;
/*if (level == 0)
{
point.x = xyz12[index];
point.y = xyz12[index + data.uNumPoints];
point.z = xyz12[index + 2*data.uNumPoints];
}
else if(level == 1)
{
uint2 ab = xyz8[index];
unsigned int X = ab.x & MASK_20BIT;
unsigned int Y = ab.y & MASK_20BIT;
unsigned int Z_a = (ab.x >> 20) & MASK_10BIT;
unsigned int Z_b = (ab.y >> 20) & MASK_10BIT;
unsigned int Z = Z_a | (Z_b << 10);
point.x = X * bigBoxSizeScaled.x + data.uBoxMin.x;
point.y = Y * bigBoxSizeScaled.y + data.uBoxMin.y;
point.z = Z * bigBoxSizeScaled.z + data.uBoxMin.z;
}
else
{
unsigned int encoded = xyz4[index];
int X = (encoded >> 0) & MASK_10BIT;
int Y = (encoded >> 10) & MASK_10BIT;
int Z = (encoded >> 20) & MASK_10BIT;
point.x = X * boxSizeScaled.x + wgMin.x;
point.y = Y * boxSizeScaled.y + wgMin.y;
point.z = Z * boxSizeScaled.z + wgMin.z;
}*/
//uint4 encodedl = xyz4[index];
uint encodedw[4] = {encodedl.x, encodedl.y, encodedl.z, encodedl.w};
if (i < (loopSize/4 - 1))
encodedl = xyz4[wgFirstPoint / 4 + threadIdx.x + (i+1)*blockDim.x];
for (int j = 0; j < 4; j++)
{
uint encoded = encodedw[j];
int X = (encoded >> 0) & MASK_10BIT;
int Y = (encoded >> 10) & MASK_10BIT;
int Z = (encoded >> 20) & MASK_10BIT;
point.x = X * boxSizeScaled.x + wgMin.x;
point.y = Y * boxSizeScaled.y + wgMin.y;
point.z = Z * boxSizeScaled.z + wgMin.z;
float4 pos = matMul(data.uTransform, make_float4(point, 1.0f));
pos.x = pos.x / pos.w;
pos.y = pos.y / pos.w;
float2 imgPos = {(pos.x * 0.5f + 0.5f) * data.uImageSize.x, (pos.y * 0.5f + 0.5f) * data.uImageSize.y};
int2 pixelCoords = make_int2(imgPos.x, imgPos.y);
int pixelID = pixelCoords.x + pixelCoords.y * data.uImageSize.x;
unsigned int depth = *((int*)&pos.w);
unsigned long long int newPoint = (((unsigned long long int)depth) << 32) | (index * 4 + j);
if(!(pos.w <= 0.0 || pos.x < -1 || pos.x > 1 || pos.y < -1|| pos.y > 1)){
unsigned long long int oldPoint = framebuffer[pixelID];
if(newPoint < oldPoint){
atomicMin(&framebuffer[pixelID], newPoint);
}
}
}
}
}
| 5dbaec51528e41593397240cbae6b7a70976e001.cu | #include "kernel_data.h"
#include "helper_math.h"
__device__ float4 matMul(const Mat& m, const float4& v)
{
return make_float4(dot(m.rows[0], v), dot(m.rows[1], v), dot(m.rows[2], v), dot(m.rows[3], v));
}
__device__ bool isInsideFrustum(const Mat& m, float3 point)
{
float4 pos = matMul(m, make_float4(point, 1.0f));
if(pos.w <= 0.0f || pos.x < -1.4f*pos.w || pos.x > 1.2f*pos.w || pos.y < -1.2f*pos.w || pos.y > 1.2f*pos.w){
return false;
}else{
return true;
}
}
#define FACTOR_9BIT 512.0
#define FACTOR_19BIT 524288.0
#define MASK_10BIT 1023
#define MASK_20BIT 1048575
static const float SCALE_19BIT = (1.0 / FACTOR_19BIT);
static const float SCALE_9BIT = (1.0 / FACTOR_9BIT);
static const float STEPS_10BIT = 1024;
static const unsigned int SPECTRAL[5] = {
0x00ba832b,
0x00a4ddab,
0x00bfffff,
0x0061aefd,
0x001c19d7
};
extern "C" __global__
void kernel(const ChangingRenderData data,
unsigned long long int* framebuffer,
XYZBatch* batches,
float* xyz12,
uint2* xyz8,
uint4* xyz4,
unsigned int* rgba)
{
unsigned int batchIndex = blockIdx.x;
unsigned int numPointsPerBatch = data.uPointsPerThread * blockDim.x;
unsigned int wgFirstPoint = batchIndex * numPointsPerBatch;
XYZBatch batch = batches[batchIndex];
float3 wgMin = make_float3(batch.min_x, batch.min_y, batch.min_z);
float3 wgMax = make_float3(batch.max_x, batch.max_y, batch.max_z);
float3 boxSize = wgMax - wgMin;
float3 boxSizeScaled = boxSize / STEPS_10BIT;
float3 bigBoxSize = data.uBoxMax - data.uBoxMin;
float3 bigBoxSizeScaled = bigBoxSize * SCALE_19BIT;
// FRUSTUM CULLING
if((data.uEnableFrustumCulling != 0) && (!isInsideFrustum(data.uTransform, wgMin) && !isInsideFrustum(data.uTransform, wgMax))){
return;
}
float3 wgCenter = 0.5f * (wgMin + wgMax);
__shared__ int sLevel;
if(threadIdx.x == 0)
{
sLevel = 0;
// LOD CULLING
float wgRadius = length(wgMin - wgMax);
float4 viewCenter = matMul(data.uWorldView, make_float4(wgCenter, 1.0f));
float4 viewEdge = viewCenter + make_float4(wgRadius, 0.0f, 0.0f, 0.0f);
float4 projCenter = matMul(data.uProj, viewCenter);
float4 projEdge = matMul(data.uProj, viewEdge);
float2 projCenter2D = make_float2(projCenter.x, projCenter.y);
float2 projEdge2D = make_float2(projEdge.x, projEdge.y);
projCenter2D /= projCenter.w;
projEdge2D /= projEdge.w;
float w_depth = length(projCenter2D - projEdge2D);
float d_screen = length(projCenter2D);
float w_screen = expf(- (d_screen * d_screen));
float w = w_depth * w_screen;
if(w < 0.01f){
sLevel = 4;
}else if(w < 0.02f){
sLevel = 3;
}else if(w < 0.05f){
sLevel = 2;
}else if(w < 0.1f){
sLevel = 1;
}
}
__syncthreads();
int level = sLevel;
if (blockIdx.x == gridDim.x -1)
return;
int loopSize = data.uPointsPerThread;
uint4 encodedl = xyz4[wgFirstPoint / 4 + threadIdx.x];
for(int i = 0; i < loopSize/4; i++)
{
//unsigned int index = wgFirstPoint + i * blockDim.x + threadIdx.x;
unsigned int index = wgFirstPoint / 4 + i * blockDim.x + threadIdx.x;
//if(index >= data.uNumPoints/4){
// return;
//}
float3 point;
/*if (level == 0)
{
point.x = xyz12[index];
point.y = xyz12[index + data.uNumPoints];
point.z = xyz12[index + 2*data.uNumPoints];
}
else if(level == 1)
{
uint2 ab = xyz8[index];
unsigned int X = ab.x & MASK_20BIT;
unsigned int Y = ab.y & MASK_20BIT;
unsigned int Z_a = (ab.x >> 20) & MASK_10BIT;
unsigned int Z_b = (ab.y >> 20) & MASK_10BIT;
unsigned int Z = Z_a | (Z_b << 10);
point.x = X * bigBoxSizeScaled.x + data.uBoxMin.x;
point.y = Y * bigBoxSizeScaled.y + data.uBoxMin.y;
point.z = Z * bigBoxSizeScaled.z + data.uBoxMin.z;
}
else
{
unsigned int encoded = xyz4[index];
int X = (encoded >> 0) & MASK_10BIT;
int Y = (encoded >> 10) & MASK_10BIT;
int Z = (encoded >> 20) & MASK_10BIT;
point.x = X * boxSizeScaled.x + wgMin.x;
point.y = Y * boxSizeScaled.y + wgMin.y;
point.z = Z * boxSizeScaled.z + wgMin.z;
}*/
//uint4 encodedl = xyz4[index];
uint encodedw[4] = {encodedl.x, encodedl.y, encodedl.z, encodedl.w};
if (i < (loopSize/4 - 1))
encodedl = xyz4[wgFirstPoint / 4 + threadIdx.x + (i+1)*blockDim.x];
for (int j = 0; j < 4; j++)
{
uint encoded = encodedw[j];
int X = (encoded >> 0) & MASK_10BIT;
int Y = (encoded >> 10) & MASK_10BIT;
int Z = (encoded >> 20) & MASK_10BIT;
point.x = X * boxSizeScaled.x + wgMin.x;
point.y = Y * boxSizeScaled.y + wgMin.y;
point.z = Z * boxSizeScaled.z + wgMin.z;
float4 pos = matMul(data.uTransform, make_float4(point, 1.0f));
pos.x = pos.x / pos.w;
pos.y = pos.y / pos.w;
float2 imgPos = {(pos.x * 0.5f + 0.5f) * data.uImageSize.x, (pos.y * 0.5f + 0.5f) * data.uImageSize.y};
int2 pixelCoords = make_int2(imgPos.x, imgPos.y);
int pixelID = pixelCoords.x + pixelCoords.y * data.uImageSize.x;
unsigned int depth = *((int*)&pos.w);
unsigned long long int newPoint = (((unsigned long long int)depth) << 32) | (index * 4 + j);
if(!(pos.w <= 0.0 || pos.x < -1 || pos.x > 1 || pos.y < -1|| pos.y > 1)){
unsigned long long int oldPoint = framebuffer[pixelID];
if(newPoint < oldPoint){
atomicMin(&framebuffer[pixelID], newPoint);
}
}
}
}
}
|
26bb25dd0f32db14d6f80e0a651c6b704f296dd8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Performs surface reconstruction, i.e. updates the internal volume with data from the current frame
// This is CUDA code; compile with nvcc
// Author: Christian Diller, [email protected]
#include "include/common.h"
using Vec2ida = Eigen::Matrix<int, 2, 1, Eigen::DontAlign>;
namespace kinectfusion {
namespace internal {
namespace cuda {
__global__
void update_tsdf_kernel(const PtrStepSz<float> depth_image, const PtrStepSz<uchar3> color_image,
PtrStepSz<short2> tsdf_volume, PtrStepSz<uchar3> color_volume,
int3 volume_size, float voxel_scale,
CameraParameters cam_params, const float truncation_distance,
Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation, Vec3fda translation)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume_size.x || y >= volume_size.y)
return;
for (int z = 0; z < volume_size.z; ++z) {
const Vec3fda position((static_cast<float>(x) + 0.5f) * voxel_scale,
(static_cast<float>(y) + 0.5f) * voxel_scale,
(static_cast<float>(z) + 0.5f) * voxel_scale);
const Vec3fda camera_pos = rotation * position + translation;
if (camera_pos.z() <= 0)
continue;
const Vec2ida uv(
__float2int_rn(camera_pos.x() / camera_pos.z() * cam_params.focal_x + cam_params.principal_x),
__float2int_rn(camera_pos.y() / camera_pos.z() * cam_params.focal_y + cam_params.principal_y));
if (uv.x() < 0 || uv.x() >= depth_image.cols || uv.y() < 0 || uv.y() >= depth_image.rows)
continue;
const float depth = depth_image.ptr(uv.y())[uv.x()];
if (depth <= 0)
continue;
const Vec3fda xylambda(
(uv.x() - cam_params.principal_x) / cam_params.focal_x,
(uv.y() - cam_params.principal_y) / cam_params.focal_y,
1.f);
const float lambda = xylambda.norm();
const float sdf = (-1.f) * ((1.f / lambda) * camera_pos.norm() - depth);
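// sdf is the projective signed distance along the pixel ray: camera_pos.norm() / lambda is
// the voxel's depth as seen by the camera, so sdf > 0 in front of the measured surface and
// sdf < 0 behind it.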
if (sdf >= -truncation_distance) {
const float new_tsdf = fmin(1.f, sdf / truncation_distance);
short2 voxel_tuple = tsdf_volume.ptr(z * volume_size.y + y)[x];
const float current_tsdf = static_cast<float>(voxel_tuple.x) * DIVSHORTMAX;
const int current_weight = voxel_tuple.y;
const int add_weight = 1;
const float updated_tsdf = (current_weight * current_tsdf + add_weight * new_tsdf) /
(current_weight + add_weight);
const int new_weight = min(current_weight + add_weight, MAX_WEIGHT);
const int new_value = max(-SHORTMAX, min(SHORTMAX, static_cast<int>(updated_tsdf * SHORTMAX)));
tsdf_volume.ptr(z * volume_size.y + y)[x] = make_short2(static_cast<short>(new_value),
static_cast<short>(new_weight));
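// Each voxel keeps a running weighted average of truncated SDF values, stored as 16-bit
// fixed point (scaled by SHORTMAX) with the accumulated weight capped at MAX_WEIGHT.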
if (sdf <= truncation_distance / 2 && sdf >= -truncation_distance / 2) {
uchar3& model_color = color_volume.ptr(z * volume_size.y + y)[x];
const uchar3 image_color = color_image.ptr(uv.y())[uv.x()];
model_color.x = static_cast<uchar>(
(current_weight * model_color.x + add_weight * image_color.x) /
(current_weight + add_weight));
model_color.y = static_cast<uchar>(
(current_weight * model_color.y + add_weight * image_color.y) /
(current_weight + add_weight));
model_color.z = static_cast<uchar>(
(current_weight * model_color.z + add_weight * image_color.z) /
(current_weight + add_weight));
}
}
}
}
void surface_reconstruction(const cv::cuda::GpuMat& depth_image, const cv::cuda::GpuMat& color_image,
VolumeData& volume,
const CameraParameters& cam_params, const float truncation_distance,
const Eigen::Matrix4f& model_view)
{
const dim3 threads(32, 32);
const dim3 blocks((volume.volume_size.x + threads.x - 1) / threads.x,
(volume.volume_size.y + threads.y - 1) / threads.y);
hipLaunchKernelGGL(( update_tsdf_kernel), dim3(blocks), dim3(threads), 0, 0, depth_image, color_image,
volume.tsdf_volume, volume.color_volume,
volume.volume_size, volume.voxel_scale,
cam_params, truncation_distance,
model_view.block(0, 0, 3, 3), model_view.block(0, 3, 3, 1));
hipDeviceSynchronize();
}
}
}
}
| 26bb25dd0f32db14d6f80e0a651c6b704f296dd8.cu | // Performs surface reconstruction, i.e. updates the internal volume with data from the current frame
// This is CUDA code; compile with nvcc
// Author: Christian Diller, [email protected]
#include "include/common.h"
using Vec2ida = Eigen::Matrix<int, 2, 1, Eigen::DontAlign>;
namespace kinectfusion {
namespace internal {
namespace cuda {
__global__
void update_tsdf_kernel(const PtrStepSz<float> depth_image, const PtrStepSz<uchar3> color_image,
PtrStepSz<short2> tsdf_volume, PtrStepSz<uchar3> color_volume,
int3 volume_size, float voxel_scale,
CameraParameters cam_params, const float truncation_distance,
Eigen::Matrix<float, 3, 3, Eigen::DontAlign> rotation, Vec3fda translation)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= volume_size.x || y >= volume_size.y)
return;
for (int z = 0; z < volume_size.z; ++z) {
const Vec3fda position((static_cast<float>(x) + 0.5f) * voxel_scale,
(static_cast<float>(y) + 0.5f) * voxel_scale,
(static_cast<float>(z) + 0.5f) * voxel_scale);
const Vec3fda camera_pos = rotation * position + translation;
if (camera_pos.z() <= 0)
continue;
const Vec2ida uv(
__float2int_rn(camera_pos.x() / camera_pos.z() * cam_params.focal_x + cam_params.principal_x),
__float2int_rn(camera_pos.y() / camera_pos.z() * cam_params.focal_y + cam_params.principal_y));
if (uv.x() < 0 || uv.x() >= depth_image.cols || uv.y() < 0 || uv.y() >= depth_image.rows)
continue;
const float depth = depth_image.ptr(uv.y())[uv.x()];
if (depth <= 0)
continue;
const Vec3fda xylambda(
(uv.x() - cam_params.principal_x) / cam_params.focal_x,
(uv.y() - cam_params.principal_y) / cam_params.focal_y,
1.f);
const float lambda = xylambda.norm();
const float sdf = (-1.f) * ((1.f / lambda) * camera_pos.norm() - depth);
if (sdf >= -truncation_distance) {
const float new_tsdf = fmin(1.f, sdf / truncation_distance);
short2 voxel_tuple = tsdf_volume.ptr(z * volume_size.y + y)[x];
const float current_tsdf = static_cast<float>(voxel_tuple.x) * DIVSHORTMAX;
const int current_weight = voxel_tuple.y;
const int add_weight = 1;
const float updated_tsdf = (current_weight * current_tsdf + add_weight * new_tsdf) /
(current_weight + add_weight);
const int new_weight = min(current_weight + add_weight, MAX_WEIGHT);
const int new_value = max(-SHORTMAX, min(SHORTMAX, static_cast<int>(updated_tsdf * SHORTMAX)));
tsdf_volume.ptr(z * volume_size.y + y)[x] = make_short2(static_cast<short>(new_value),
static_cast<short>(new_weight));
if (sdf <= truncation_distance / 2 && sdf >= -truncation_distance / 2) {
uchar3& model_color = color_volume.ptr(z * volume_size.y + y)[x];
const uchar3 image_color = color_image.ptr(uv.y())[uv.x()];
model_color.x = static_cast<uchar>(
(current_weight * model_color.x + add_weight * image_color.x) /
(current_weight + add_weight));
model_color.y = static_cast<uchar>(
(current_weight * model_color.y + add_weight * image_color.y) /
(current_weight + add_weight));
model_color.z = static_cast<uchar>(
(current_weight * model_color.z + add_weight * image_color.z) /
(current_weight + add_weight));
}
}
}
}
void surface_reconstruction(const cv::cuda::GpuMat& depth_image, const cv::cuda::GpuMat& color_image,
VolumeData& volume,
const CameraParameters& cam_params, const float truncation_distance,
const Eigen::Matrix4f& model_view)
{
const dim3 threads(32, 32);
const dim3 blocks((volume.volume_size.x + threads.x - 1) / threads.x,
(volume.volume_size.y + threads.y - 1) / threads.y);
update_tsdf_kernel<<<blocks, threads>>>(depth_image, color_image,
volume.tsdf_volume, volume.color_volume,
volume.volume_size, volume.voxel_scale,
cam_params, truncation_distance,
model_view.block(0, 0, 3, 3), model_view.block(0, 3, 3, 1));
cudaThreadSynchronize();
}
}
}
}
|
51694f9a8a9d06788d5d3b5150ce0e0e6387d3b9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_multidim_copy_kernel;
int xdim0_multidim_copy_kernel_h = -1;
__constant__ int ydim0_multidim_copy_kernel;
int ydim0_multidim_copy_kernel_h = -1;
__constant__ int zdim0_multidim_copy_kernel;
int zdim0_multidim_copy_kernel_h = -1;
__constant__ int xdim1_multidim_copy_kernel;
int xdim1_multidim_copy_kernel_h = -1;
__constant__ int ydim1_multidim_copy_kernel;
int ydim1_multidim_copy_kernel_h = -1;
__constant__ int zdim1_multidim_copy_kernel;
int zdim1_multidim_copy_kernel_h = -1;
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
#define OPS_ACC_MD0(d, x, y, z) \
((x) + (xdim0_multidim_copy_kernel * (y)) + \
(xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel * (z)) + \
(d)*xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel * \
zdim0_multidim_copy_kernel)
#define OPS_ACC_MD1(d, x, y, z) \
((x) + (xdim1_multidim_copy_kernel * (y)) + \
(xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel * (z)) + \
(d)*xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel * \
zdim1_multidim_copy_kernel)
// user function
__device__
void
multidim_copy_kernel_gpu(const double *src, double *dest) {
dest[OPS_ACC_MD1(0, 0, 0, 0)] = src[OPS_ACC_MD0(0, 0, 0, 0)];
dest[OPS_ACC_MD1(1, 0, 0, 0)] = src[OPS_ACC_MD0(1, 0, 0, 0)];
dest[OPS_ACC_MD1(2, 0, 0, 0)] = src[OPS_ACC_MD0(2, 0, 0, 0)];
}
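// Illustrative note (added by hand, not emitted by ops.py): the three
// assignments above are an unrolled per-component copy of one 3-component
// multi-dim element; with the same accessor macros the generic form would be
//
//   for (int d = 0; d < 3; ++d)
//     dest[OPS_ACC_MD1(d, 0, 0, 0)] = src[OPS_ACC_MD0(d, 0, 0, 0)];
//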
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
__global__ void ops_multidim_copy_kernel(const double *__restrict arg0,
double *__restrict arg1, int size0,
int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_multidim_copy_kernel +
idx_z * 1 * xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_multidim_copy_kernel +
idx_z * 1 * xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
multidim_copy_kernel_gpu(arg0, arg1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
#else
void ops_par_loop_multidim_copy_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 2, range, 1))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1, "multidim_copy_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int zdim0 = args[0].dat->size[2];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int zdim1 = args[1].dat->size[2];
if (xdim0 != xdim0_multidim_copy_kernel_h ||
ydim0 != ydim0_multidim_copy_kernel_h ||
zdim0 != zdim0_multidim_copy_kernel_h ||
xdim1 != xdim1_multidim_copy_kernel_h ||
ydim1 != ydim1_multidim_copy_kernel_h ||
zdim1 != zdim1_multidim_copy_kernel_h) {
hipMemcpyToSymbol(xdim0_multidim_copy_kernel, &xdim0, sizeof(int));
xdim0_multidim_copy_kernel_h = xdim0;
hipMemcpyToSymbol(ydim0_multidim_copy_kernel, &ydim0, sizeof(int));
ydim0_multidim_copy_kernel_h = ydim0;
hipMemcpyToSymbol(zdim0_multidim_copy_kernel, &zdim0, sizeof(int));
zdim0_multidim_copy_kernel_h = zdim0;
hipMemcpyToSymbol(xdim1_multidim_copy_kernel, &xdim1, sizeof(int));
xdim1_multidim_copy_kernel_h = xdim1;
hipMemcpyToSymbol(ydim1_multidim_copy_kernel, &ydim1, sizeof(int));
ydim1_multidim_copy_kernel_h = ydim1;
hipMemcpyToSymbol(zdim1_multidim_copy_kernel, &zdim1, sizeof(int));
zdim1_multidim_copy_kernel_h = zdim1;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_multidim_copy_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[1].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_multidim_copy_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1, "multidim_copy_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
| 51694f9a8a9d06788d5d3b5150ce0e0e6387d3b9.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_multidim_copy_kernel;
int xdim0_multidim_copy_kernel_h = -1;
__constant__ int ydim0_multidim_copy_kernel;
int ydim0_multidim_copy_kernel_h = -1;
__constant__ int zdim0_multidim_copy_kernel;
int zdim0_multidim_copy_kernel_h = -1;
__constant__ int xdim1_multidim_copy_kernel;
int xdim1_multidim_copy_kernel_h = -1;
__constant__ int ydim1_multidim_copy_kernel;
int ydim1_multidim_copy_kernel_h = -1;
__constant__ int zdim1_multidim_copy_kernel;
int zdim1_multidim_copy_kernel_h = -1;
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
#define OPS_ACC_MD0(d, x, y, z) \
((x) + (xdim0_multidim_copy_kernel * (y)) + \
(xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel * (z)) + \
(d)*xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel * \
zdim0_multidim_copy_kernel)
#define OPS_ACC_MD1(d, x, y, z) \
((x) + (xdim1_multidim_copy_kernel * (y)) + \
(xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel * (z)) + \
(d)*xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel * \
zdim1_multidim_copy_kernel)
// user function
__device__
void
multidim_copy_kernel_gpu(const double *src, double *dest) {
dest[OPS_ACC_MD1(0, 0, 0, 0)] = src[OPS_ACC_MD0(0, 0, 0, 0)];
dest[OPS_ACC_MD1(1, 0, 0, 0)] = src[OPS_ACC_MD0(1, 0, 0, 0)];
dest[OPS_ACC_MD1(2, 0, 0, 0)] = src[OPS_ACC_MD0(2, 0, 0, 0)];
}
#undef OPS_ACC_MD0
#undef OPS_ACC_MD1
__global__ void ops_multidim_copy_kernel(const double *__restrict arg0,
double *__restrict arg1, int size0,
int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_multidim_copy_kernel +
idx_z * 1 * xdim0_multidim_copy_kernel * ydim0_multidim_copy_kernel;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_multidim_copy_kernel +
idx_z * 1 * xdim1_multidim_copy_kernel * ydim1_multidim_copy_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
multidim_copy_kernel_gpu(arg0, arg1);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
#else
void ops_par_loop_multidim_copy_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 2, range, 1))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(1, "multidim_copy_kernel");
OPS_kernels[1].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int zdim0 = args[0].dat->size[2];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int zdim1 = args[1].dat->size[2];
if (xdim0 != xdim0_multidim_copy_kernel_h ||
ydim0 != ydim0_multidim_copy_kernel_h ||
zdim0 != zdim0_multidim_copy_kernel_h ||
xdim1 != xdim1_multidim_copy_kernel_h ||
ydim1 != ydim1_multidim_copy_kernel_h ||
zdim1 != zdim1_multidim_copy_kernel_h) {
cudaMemcpyToSymbol(xdim0_multidim_copy_kernel, &xdim0, sizeof(int));
xdim0_multidim_copy_kernel_h = xdim0;
cudaMemcpyToSymbol(ydim0_multidim_copy_kernel, &ydim0, sizeof(int));
ydim0_multidim_copy_kernel_h = ydim0;
cudaMemcpyToSymbol(zdim0_multidim_copy_kernel, &zdim0, sizeof(int));
zdim0_multidim_copy_kernel_h = zdim0;
cudaMemcpyToSymbol(xdim1_multidim_copy_kernel, &xdim1, sizeof(int));
xdim1_multidim_copy_kernel_h = xdim1;
cudaMemcpyToSymbol(ydim1_multidim_copy_kernel, &ydim1, sizeof(int));
ydim1_multidim_copy_kernel_h = ydim1;
cudaMemcpyToSymbol(zdim1_multidim_copy_kernel, &zdim1, sizeof(int));
zdim1_multidim_copy_kernel_h = zdim1;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[2];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_multidim_copy_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[1].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[1].mpi_time += t2 - t1;
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[1].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_multidim_copy_kernel(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 1;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 1;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg *)malloc(2 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->function = ops_par_loop_multidim_copy_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(1, "multidim_copy_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
10346958644a133fc0b91bedacf458addd9dc18b.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int a = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
// These come from CLI arguments:
int BOX_SIZE; // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
__global__ void rotate_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){
int row = blockIdx.x * blockDim.x + threadIdx.x; //row of image
int col = blockIdx.y * blockDim.y + threadIdx.y; //column of image
int idx = row*N + col; //which pixel in full 1D array
uchar output = GPU_i[idx];
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
    double RotDegrees = 360.0 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = output;
}
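// Illustrative note (added for exposition; not part of the original code):
// rotate_kernel forward-maps every source pixel to a destination pixel, so
// destination pixels that no source pixel lands on keep the background value.
// A hole-free variant iterates over destination pixels and applies the
// inverse rotation instead, e.g. (sketch only, with the same scaling as above):
//
//   X = ( cos(RotAngle) * newX + sin(RotAngle) * newY) / ScaleFactor;
//   Y = (-sin(RotAngle) * newX + cos(RotAngle) * newY) / ScaleFactor;
//   // ...then read GPU_i at the source coordinate (X, Y) and write GPU_o[idx].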
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
    float ExecTotalTime = 0.0f, GPUTotalTime = 0.0f;
hipError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 4){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations>\n", argv[0]);
exit(EXIT_FAILURE);
}
BOX_SIZE = 1;
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
//start here????
// Create CPU memory to store the output;
/*Mat */zero = Mat(M,N,CV_8UC1, Scalar(255));
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != hipSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
    if (!imwrite(filename, result)){
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
hipEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
dim3 threadsPerBlock;
dim3 numBlocks;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0); // use the first GPU
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
hipEventCreate(&time1);
hipEventCreate(&time2);
hipEventCreate(&time3);
hipEventCreate(&time4);
hipEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = hipMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(GPU_odata, zero.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(GPU_idata, image.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( rotate_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N, a, NumRot);
// Check for errors immediately after kernel launch.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
hipEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = hipMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time4, 0);
hipEventSynchronize(time1);
hipEventSynchronize(time2);
hipEventSynchronize(time3);
hipEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecutionTime, time2, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
hipFree(GPU_odata);
hipFree(GPU_idata);
hipFree(GPU_zerodata);
hipEventDestroy(time1);
hipEventDestroy(time2);
hipEventDestroy(time3);
hipEventDestroy(time4);
a++;
return cudaStatus;
}
| 10346958644a133fc0b91bedacf458addd9dc18b.cu | #include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int a = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
// These come from CLI arguments:
int BOX_SIZE; // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
__global__ void rotate_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){
int row = blockIdx.x * blockDim.x + threadIdx.x; //row of image
int col = blockIdx.y * blockDim.y + threadIdx.y; //column of image
int idx = row*N + col; //which pixel in full 1D array
uchar output = GPU_i[idx];
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
    double RotDegrees = 360.0 / j * i; //in degrees
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of horizontal pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = output;
}
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
    float ExecTotalTime = 0.0f, GPUTotalTime = 0.0f;
cudaError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 4){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations>\n", argv[0]);
exit(EXIT_FAILURE);
}
BOX_SIZE = 1;
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
//start here????
// Create CPU memory to store the output;
/*Mat */zero = Mat(M,N,CV_8UC1, Scalar(255));
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != cudaSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
    if (!imwrite(filename, result)){
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
cudaEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
dim3 threadsPerBlock;
dim3 numBlocks;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0); // use the first GPU
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
cudaEventCreate(&time1);
cudaEventCreate(&time2);
cudaEventCreate(&time3);
cudaEventCreate(&time4);
cudaEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = cudaMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPU_odata, zero.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(GPU_idata, image.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
rotate_kernel<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
// Check for errors immediately after kernel launch.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
cudaEventSynchronize(time3);
cudaEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
cudaFree(GPU_odata);
cudaFree(GPU_idata);
cudaFree(GPU_zerodata);
cudaEventDestroy(time1);
cudaEventDestroy(time2);
cudaEventDestroy(time3);
cudaEventDestroy(time4);
a++;
return cudaStatus;
}
|
3d41c78aa4967a983dfc7031b6275cb1f9a6dc2c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Crop1d <T = ?, Device = CUDA> */
template<typename T>
__global__ void _Crop1d(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
const int ex_d = (idx / inner_dim) % ex_dim;
const int o = idx / inner_dim / ex_dim;
y[idx] = x[(o * dim + ex_d + start) * inner_dim + i];
}
}
/*! Crop1d <T = float32, Device = CUDA> */
template<> void Crop1d<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const float* x,
float* y,
CUDAContext* ctx) {
_Crop1d<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, x, y);
}
/*! Crop1d <T = int32, Device = CUDA> */
template<> void Crop1d<int, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int* x,
int* y,
CUDAContext* ctx) {
_Crop1d<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, x, y);
}
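/*! Illustrative CPU reference (added for exposition; not part of the original
    Dragon sources). Writing outer_dim = count / (ex_dim * inner_dim) -- a name
    introduced here only for clarity -- Crop1d copies the window
    [start, start + ex_dim) of the cropped axis:

    for (int o = 0; o < outer_dim; ++o)
      for (int ex_d = 0; ex_d < ex_dim; ++ex_d)
        for (int i = 0; i < inner_dim; ++i)
          y[(o * ex_dim + ex_d) * inner_dim + i] =
              x[(o * dim + ex_d + start) * inner_dim + i];
*/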
/*! Crop1dGrad <T = ?, Device = CUDA> */
template<typename T>
__global__ void _Crop1dGrad(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
const int d = (idx / inner_dim) % dim;
const int o = idx / inner_dim / dim;
dx[idx] = (d < start || d >= end) ? 0 :
dy[(o * ex_dim + d - start) * inner_dim + i];
}
}
/*! Crop1dGrad <T = float32, Device = CUDA> */
template<> void Crop1dGrad<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const float* dy,
float* dx,
CUDAContext* ctx) {
_Crop1dGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, end, dy, dx);
}
/*! Crop1dGrad <T = int32, Device = CUDA> */
template<> void Crop1dGrad<int, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const int* dy,
int* dx,
CUDAContext* ctx) {
_Crop1dGrad<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, end, dy, dx);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | 3d41c78aa4967a983dfc7031b6275cb1f9a6dc2c.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! Crop1d <T = ?, Device = CUDA> */
template<typename T>
__global__ void _Crop1d(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const T* x,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
const int ex_d = (idx / inner_dim) % ex_dim;
const int o = idx / inner_dim / ex_dim;
y[idx] = x[(o * dim + ex_d + start) * inner_dim + i];
}
}
/*! Crop1d <T = float32, Device = CUDA> */
template<> void Crop1d<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const float* x,
float* y,
CUDAContext* ctx) {
_Crop1d<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, x, y);
}
/*! Crop1d <T = int32, Device = CUDA> */
template<> void Crop1d<int, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int* x,
int* y,
CUDAContext* ctx) {
_Crop1d<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, x, y);
}
/*! Crop1dGrad <T = ?, Device = CUDA> */
template<typename T>
__global__ void _Crop1dGrad(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const T* dy,
T* dx) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const int i = idx % inner_dim;
const int d = (idx / inner_dim) % dim;
const int o = idx / inner_dim / dim;
dx[idx] = (d < start || d >= end) ? 0 :
dy[(o * ex_dim + d - start) * inner_dim + i];
}
}
/*! Crop1dGrad <T = float32, Device = CUDA> */
template<> void Crop1dGrad<float, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const float* dy,
float* dx,
CUDAContext* ctx) {
_Crop1dGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, end, dy, dx);
}
/*! Crop1dGrad <T = int32, Device = CUDA> */
template<> void Crop1dGrad<int, CUDAContext>(
const int count,
const int dim,
const int ex_dim,
const int inner_dim,
const int start,
const int end,
const int* dy,
int* dx,
CUDAContext* ctx) {
_Crop1dGrad<int>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, dim, ex_dim, inner_dim, start, end, dy, dx);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
503eb736c53ba2bd5f470c64d71e113aa9b04d12.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA programming
// Exercise n. 05
#include <errno.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#define BLOCKS 2
#define THREADS 4
// Prototype
__global__ void add(int *a, int *b, int *c);
__host__ void ints(int *m, int N);
__host__ void print_array(int *a, int N);
int main(void)
{
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int N = BLOCKS * THREADS;
int size = N * sizeof(int);
// Allocate space for host copies of a, b, c
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
// Setup input values
ints(a, N);
ints(b, N);
// Allocate space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, size);
// Copy inputs to device
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Call the kernel on GPU
hipLaunchKernelGGL(( add), dim3(BLOCKS), dim3(THREADS) , 0, 0, d_a, d_b, d_c);
// Copy result back to host
hipMemcpy(c, d_c, size, hipMemcpyDeviceToHost);
// Check the result
print_array(a, N);
print_array(b, N);
print_array(c, N);
// Cleanup
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
return(EXIT_SUCCESS);
}
// Vector addition (on device)
__global__ void add(int *a, int *b, int *c)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
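// Illustrative variant (added for exposition; not part of the original
// exercise): when the element count is not an exact multiple of
// BLOCKS * THREADS, a bounds guard keeps the surplus threads of the last
// block from writing out of range.
__global__ void add_guarded(int *a, int *b, int *c, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n)
        c[index] = a[index] + b[index];
}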
// Initialisation
__host__ void ints(int *m, int N)
{
int i;
for(i = 0; i < N; i++)
m[i] = i;
}
// Print the elements of the array
__host__ void print_array(int *a, int N)
{
for(int i = 0; i < N; i++)
{
printf("%d\t", a[i]);
}
printf("\n");
}
| 503eb736c53ba2bd5f470c64d71e113aa9b04d12.cu | // CUDA programming
// Exercise n. 05
#include <errno.h>
#include <cuda.h>
#include <stdio.h>
#define BLOCKS 2
#define THREADS 4
// Prototype
__global__ void add(int *a, int *b, int *c);
__host__ void ints(int *m, int N);
__host__ void print_array(int *a, int N);
int main(void)
{
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int N = BLOCKS * THREADS;
int size = N * sizeof(int);
// Allocate space for host copies of a, b, c
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
// Setup input values
ints(a, N);
ints(b, N);
// Allocate space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Copy inputs to device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Call the kernel on GPU
add<<< BLOCKS, THREADS >>>(d_a, d_b, d_c);
// Copy result back to host
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
// Check the result
print_array(a, N);
print_array(b, N);
print_array(c, N);
// Cleanup
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return(EXIT_SUCCESS);
}
// Vector addition (on device)
__global__ void add(int *a, int *b, int *c)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
c[index] = a[index] + b[index];
}
// Initialisation
__host__ void ints(int *m, int N)
{
int i;
for(i = 0; i < N; i++)
m[i] = i;
}
// Print the elements of the array
__host__ void print_array(int *a, int N)
{
for(int i = 0; i < N; i++)
{
printf("%d\t", a[i]);
}
printf("\n");
}
|
33d3858d8d77ed15ff7e5497afe4f2517bda5e20.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 4, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| 33d3858d8d77ed15ff7e5497afe4f2517bda5e20.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<16, 64, 8>;
using WarpShape = cutlass::gemm::GemmShape<16, 64, 8>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::conv::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::conv::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
2, 4, 4, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
5f98c21bd7aea0c5562a6e6c1dcc0acfdc802fa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*********************************************************************
* Copyrights (c) Marwan Abdellah. All rights reserved.
* This code is part of my Master's Thesis Project entitled "High
* Performance Fourier Volume Rendering on Graphics Processing Units
* (GPUs)" and submitted to the Systems & Biomedical Engineering
* Department, Faculty of Engineering, Cairo University.
* Please, don't use or distribute without authors' permission.
* File : cuda3DKernel.cu
* Author(s) : Marwan Abdellah <[email protected]>
* Created : April 2011
* Description : This module handles volume processing on the device.
* Note(s) :
*********************************************************************/
/* CUDA utilities & system includes */
/* #include <shrUtils.h> */
#include <cutil_inline.h>
/** Kernel globals **/
/* CUDA FFT plan */
hipfftHandle fftPlan3D;
/* Host array to validate GPU implementation results */
hipfftComplex* CPU_ARRAY;
/********************************************************************
* Name:
* __global__ fftShift3D_cufftComplex_i
*
* Description:
 * Performing the 3D fft-shift (wrap-around) of the volume on the device;
 * each launch processes the single XY slice selected by zIndex.
*
* Formal Parameters:
* devArrayInput (hipfftComplex*) : Input array,
* devArrayOutput (hipfftComplex*) : Output array,
 * arrSize1D (int) : Size of the volume in 1D assuming unified
dimensionality,
* zIndex (int) : To complete indexing an element in the volume.
*
* Returns:
* void.
*
* Note(s):
* This kernel executes in coordination with the function fftShift3D_i.
* This kernel executes on the device and callable from the host.
* "zIndex" does all the magic, try it yourself to believe it.
********************************************************************/
__global__
void fftShift3D_cufftComplex_i( hipfftComplex* devArrayInput,
hipfftComplex* devArrayOutput,
int arrSize1D,
int zIndex )
{
/* 3D volume & 2D slice & 1D line */
int sLine = arrSize1D;
int sSlice = arrSize1D * arrSize1D;
int sVolume = arrSize1D * arrSize1D * arrSize1D;
/* Transformation equations */
int sEq1 = ( sVolume + sSlice + sLine ) / 2;
int sEq2 = ( sVolume + sSlice - sLine ) / 2;
int sEq3 = ( sVolume - sSlice + sLine ) / 2;
int sEq4 = ( sVolume - sSlice - sLine ) / 2;
/* Thread index */
int xThreadIdx = threadIdx.x;
int yThreadIdx = threadIdx.y;
/* Block width & height */
int blockWidth = blockDim.x;
int blockHeight = blockDim.y;
/* Thread index 2D */
int xIndex = blockIdx.x * blockWidth + xThreadIdx;
int yIndex = blockIdx.y * blockHeight + yThreadIdx;
// Thread index converted into 1D index
int index = ( zIndex * sSlice ) + ( yIndex * sLine ) + xIndex;
if ( zIndex < arrSize1D / 2 )
{
if ( xIndex < arrSize1D / 2 )
{
if ( yIndex < arrSize1D / 2 )
{
/* First Quad */
devArrayOutput[index] = devArrayInput[index + sEq1];
}
else
{
/* Third Quad */
devArrayOutput[index] = devArrayInput[index + sEq3];
}
}
else
{
if ( yIndex < arrSize1D / 2 )
{
/* Second Quad */
devArrayOutput[index] = devArrayInput[index + sEq2];
}
else
{
/* Fourth Quad */
devArrayOutput[index] = devArrayInput[index + sEq4];
}
}
}
else
{
if ( xIndex < arrSize1D / 2 )
{
if ( yIndex < arrSize1D / 2 )
{
/* First Quad */
devArrayOutput[index] = devArrayInput[index - sEq4];
}
else
{
/* Third Quad */
devArrayOutput[index] = devArrayInput[index - sEq2];
}
}
else
{
if ( yIndex < arrSize1D / 2 )
{
/* Second Quad */
devArrayOutput[index] = devArrayInput[index - sEq3];
}
else
{
/* Fourth Quad */
devArrayOutput[index] = devArrayInput[index - sEq1];
}
}
}
}
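/* Illustrative 1D analogue (added for exposition; not part of the original
 * thesis code): in one dimension the quadrant swaps above reduce to a
 * half-length circular shift, which is the easiest way to read the sEq
 * transformation offsets. */
__global__
void fftShift1D_cufftComplex_i( hipfftComplex* devArrayInput,
                                hipfftComplex* devArrayOutput,
                                int arrSize1D )
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( index < arrSize1D )
        devArrayOutput[index] = devArrayInput[( index + arrSize1D / 2 ) % arrSize1D];
}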
/********************************************************************
* Name:
* fftShift3D_i
*
* Description:
 * Wrapping the volume around its centre (3D fft-shift) by launching the
 * fftShift3D_cufftComplex_i kernel once per XY slice.
*
* Formal Parameters:
* _arrayDeviceInput (hipfftComplex*) : Input array,
* _arrayDeviceOutput (hipfftComplex*) : Output array,
 * _arraySize1D (int) : Size of the volume in 1D assuming unified
dimensionality,
* _block (dim3) : CUDA block configuration,
* _grid (dim3) : CUDA grid configuration.
* Returns:
* void.
*
* Note(s):
* This function is callable from the CPU and executes on the device.
* This function is to be treated exactly as CUDA __global__ one.
* This function executes in coordination with the kernel
fftShift3D_cufftComplex_i.
********************************************************************/
void fftShift3D_i( hipfftComplex* _arrayDeviceInput,
hipfftComplex* _arrayDeviceOutput,
int _arraySize1D,
dim3 _block,
dim3 _grid )
{
for ( int i = 0; i < _arraySize1D; i++ )
hipLaunchKernelGGL(( fftShift3D_cufftComplex_i) , dim3(_grid), dim3(_block) , 0, 0,
_arrayDeviceInput, _arrayDeviceOutput, _arraySize1D, i );
}
/********************************************************************
* Name:
* __global__ Pack1DComplexArray
*
* Description:
 * Packing complex array of 2 float components into 1D float sorted in
* consecutive fashion, i.e. real value at odd indicies and imaginary
* values at even indicies.
*
* Formal Parameters:
* complexArray (hipfftComplex*) : Input complex array,
* array1D (float*) : Resulting float array,
 * arrSize1D (int) : Size of the volume in 1D assuming unified
* dimensionality.
*
* Returns:
* void.
*
* Note(s):
********************************************************************/
__global__
void Pack1DComplexArray( hipfftComplex* complexArray,
float* array1D,
int arrSize1D )
{
// Index (1D)
int index = blockIdx.x * blockDim.x + threadIdx.x;
    if ( index < ( arrSize1D * arrSize1D * arrSize1D ) )
{
/* Real value */
array1D[(2 * index) ] = complexArray[index].x;
/* Imaginary value */
array1D[(2 * index) + 1] = complexArray[index].y;
}
}
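/* Layout note (added for exposition; not part of the original thesis code):
 * after this kernel runs, element k of the complex volume is stored
 * interleaved as array1D[2 * k] = real part and array1D[2 * k + 1] =
 * imaginary part, which is exactly the layout a two-component (RG) float
 * OpenGL 3D texture expects. */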
/********************************************************************
* Name:
* LaunchVolumeProcessingCUDA
*
* Description:
* Volume processing on the GPU that includes shifting the spatial and
* the spectral volumes, doing the 3D FFT with cuFFT, repacking the
 * resulting shifted spectral volume into a 1D array to suit a 2-component
* (RG) OpenGL 3D texture.
*
* Formal Parameters:
* MainComplexArray (hipfftComplex*) : Main complex array residing on
the GPU,
* TempComplexArray (hipfftComplex*) : Temporary complex array used on
the fly,
 * fDataSize (int) : Size of the volume in 1D assuming unified
* dimensionality,
 * ptrTo1DArray (float*) : OpenGL-compatible array containing the shifted
spectral volume.
*
* Returns:
* void.
*
* Note(s):
 * Some testing code is commented out to reduce its overhead. Uncomment
 * it if needed.
********************************************************************/
extern "C"
void LaunchVolumeProcessingCUDA( hipfftComplex* MainComplexArray,
hipfftComplex* TempComplexArray,
int fDataSize,
float* ptrTo1DArray )
{
/* In this point, I have a pointer to the complex array residing
* in the GPU memory. This complex array will carry the spatial
* and the spectral volumes alternatively */
/* CUDA volume processing configuration */
dim3 sBlock(16, 16, 1);
dim3 sGrid((fDataSize / 16), (fDataSize / 16), 1);
/* Wrapping-around the spatial volume */
fftShift3D_i(TempComplexArray, MainComplexArray, fDataSize, sBlock, sGrid);
/* Allocating a CPU array to receive resulting volumes on the GPU
* for validation */
CPU_ARRAY = (hipfftComplex*) malloc
(fDataSize * fDataSize * fDataSize * sizeof(hipfftComplex));
/* Copy the final volume from the device to the host and test it */
cutilSafeCall(
hipMemcpy( CPU_ARRAY, MainComplexArray,
fDataSize * fDataSize * fDataSize * sizeof( hipfftComplex ),
hipMemcpyDeviceToHost ) );
/* Testing the last 100 elements of the spatial volume on the GPU */
/*
printf("Testing the spatial volume before doing the cuFFT \n");
for (int i = ( fDataSize * fDataSize * fDataSize) - 100; i < fDataSize * fDataSize * fDataSize; i++ )
printf("%f \n", CPU_ARRAY[i].x);
*/
/* Setup for 3D cuFFT plan */
hipfftPlan3d(&fftPlan3D, fDataSize, fDataSize, fDataSize, HIPFFT_C2C);
hipfftExecC2C(fftPlan3D, MainComplexArray, TempComplexArray, HIPFFT_FORWARD);
/* Copy the resulting spectrum from the cuFFT operation to the host
* and test it */
/* cutilSafeCall(hipMemcpy(CPU_ARRAY,
TempComplexArray,
(fDataSize * fDataSize * fDataSize * sizeof(hipfftComplex)),
hipMemcpyDeviceToHost)); */
/* Testing the last 100 elements of the spectral volume on the GPU */
/*
printf("Testing the resulting spectral volume after doing the cuFFT \n");
for (int i = ( fDataSize * fDataSize * fDataSize) - 100; i < fDataSize * fDataSize * fDataSize; i++ )
printf("%f \n", CPU_ARRAY[i].x);
*/
// Do 3D FFT Shift for the Generated Spectrum
// Save the Output in the Temp Array
// fftShift3D_cufftComplex <<< sGrid, sBlock >>>(TempComplexArray, MainComplexArray, fDataSize);
/* Wrapping-around the spectral volume to center its DC component */
fftShift3D_i( TempComplexArray, MainComplexArray, fDataSize, sBlock, sGrid );
/* Copy the resulting shifted spectrum from the "fftShift3D_i operation" to
* the host and test it */
/* cutilSafeCall(hipMemcpy(CPU_ARRAY,
TempComplexArray,
(fDataSize * fDataSize * fDataSize * sizeof(hipfftComplex)),
hipMemcpyDeviceToHost)); */
/* Testing the last 100 elements of the shifted spectral volume on the GPU */
/*
printf("Testing the resulting shifted spectral volume after wrapping it around \n");
for (int i = ( fDataSize * fDataSize * fDataSize) - 100; i < fDataSize * fDataSize * fDataSize; i++ )
printf("%f \n", CPU_ARRAY[i].x);
*/
/* Garbage */
/*
cutilSafeCall(hipMemcpy(CPU_ARRAY, MainComplexArray, 128 * 128 * 128 * sizeof(hipfftComplex), hipMemcpyDeviceToHost));
printf("Testing for a specific volume of 128 * 128 * 128 \n");
for (int i = 0; i < (128 * 128 * 128 / 2); i++)
printf("%f \n", CPU_ARRAY[i].x);
*/
/* Now, I have the spectral volume in the cufftComplex array. Save this
* spectrum to a 1D array on the device & link this array to OpenGL via
* a cudaGraphicsResource */
/* GPU configuration for the packing kernel */
dim3 sBlock1D( 512,1,1 );
dim3 sGrid1D( ( fDataSize * fDataSize * fDataSize ) / 512, 1, 1 );
/* Run 1D packing for texture array */
hipLaunchKernelGGL(( Pack1DComplexArray) , dim3(sGrid1D), dim3(sBlock1D) , 0, 0, MainComplexArray, ptrTo1DArray, fDataSize );
}
| 5f98c21bd7aea0c5562a6e6c1dcc0acfdc802fa2.cu | /*********************************************************************
* Copyrights (c) Marwan Abdellah. All rights reserved.
* This code is part of my Master's Thesis Project entitled "High
* Performance Fourier Volume Rendering on Graphics Processing Units
* (GPUs)" and submitted to the Systems & Biomedical Engineering
* Department, Faculty of Engineering, Cairo University.
* Please, don't use or distribute without authors' permission.
* File : cuda3DKernel.cu
* Author(s) : Marwan Abdellah <[email protected]>
* Created : April 2011
* Description : This module handles volume processing on the device.
* Note(s) :
*********************************************************************/
/* CUDA utilities & system includes */
/* #include <shrUtils.h> */
#include <cutil_inline.h>
/** Kernel globals **/
/* CUDA FFT plan */
cufftHandle fftPlan3D;
/* Host array to validate GPU implementation results */
cufftComplex* CPU_ARRAY;
/********************************************************************
* Name:
* __global__ fftShift3D_cufftComplex_i
*
* Description:
* Performs the 3D FFT-shift (wrap-around) of a single z-slice of the
* volume, mapping data from the input array to the output array.
*
* Formal Parameters:
* devArrayInput (cufftComplex*) : Input array,
* devArrayOutput (cufftComplex*) : Output array,
* arrSize1D (int) : Size of the volume in 1D assuming unified
dimensionality,
* zIndex (int) : To complete indexing an element in the volume.
*
* Returns:
* void.
*
* Note(s):
* This kernel executes in coordination with the function fftShift3D_i.
* This kernel executes on the device and callable from the host.
* "zIndex" does all the magic, try it yourself to believe it.
********************************************************************/
__global__
void fftShift3D_cufftComplex_i( cufftComplex* devArrayInput,
cufftComplex* devArrayOutput,
int arrSize1D,
int zIndex )
{
/* 3D volume & 2D slice & 1D line */
int sLine = arrSize1D;
int sSlice = arrSize1D * arrSize1D;
int sVolume = arrSize1D * arrSize1D * arrSize1D;
/* Transformation equations */
int sEq1 = ( sVolume + sSlice + sLine ) / 2;
int sEq2 = ( sVolume + sSlice - sLine ) / 2;
int sEq3 = ( sVolume - sSlice + sLine ) / 2;
int sEq4 = ( sVolume - sSlice - sLine ) / 2;
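/* Adding or subtracting one of these offsets moves an element by half a
 * volume +/- half a slice +/- half a line, i.e. into the diagonally
 * opposite octant -- which is exactly the 3D fftshift wrap-around. */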
/* Thread index */
int xThreadIdx = threadIdx.x;
int yThreadIdx = threadIdx.y;
/* Block width & height */
int blockWidth = blockDim.x;
int blockHeight = blockDim.y;
/* Thread index 2D */
int xIndex = blockIdx.x * blockWidth + xThreadIdx;
int yIndex = blockIdx.y * blockHeight + yThreadIdx;
// Thread index converted into 1D index
int index = ( zIndex * sSlice ) + ( yIndex * sLine ) + xIndex;
if ( zIndex < arrSize1D / 2 )
{
if ( xIndex < arrSize1D / 2 )
{
if ( yIndex < arrSize1D / 2 )
{
/* First Quad */
devArrayOutput[index] = devArrayInput[index + sEq1];
}
else
{
/* Third Quad */
devArrayOutput[index] = devArrayInput[index + sEq3];
}
}
else
{
if ( yIndex < arrSize1D / 2 )
{
/* Second Quad */
devArrayOutput[index] = devArrayInput[index + sEq2];
}
else
{
/* Fourth Quad */
devArrayOutput[index] = devArrayInput[index + sEq4];
}
}
}
else
{
if ( xIndex < arrSize1D / 2 )
{
if ( yIndex < arrSize1D / 2 )
{
/* First Quad */
devArrayOutput[index] = devArrayInput[index - sEq4];
}
else
{
/* Third Quad */
devArrayOutput[index] = devArrayInput[index - sEq2];
}
}
else
{
if ( yIndex < arrSize1D / 2 )
{
/* Second Quad */
devArrayOutput[index] = devArrayInput[index - sEq3];
}
else
{
/* Fourth Quad */
devArrayOutput[index] = devArrayInput[index - sEq1];
}
}
}
}
/********************************************************************
* Name:
* fftShift3D_i
*
* Description:
* Host-side wrapper that launches fftShift3D_cufftComplex_i once per
* z-slice so that the whole volume gets wrapped around (FFT-shifted).
*
* Formal Parameters:
* _arrayDeviceInput (cufftComplex*) : Input array,
* _arrayDeviceOutput (cufftComplex*) : Output array,
* _arraySize1D (int) : Size of the volume in 1D assuming unified
dimensionality,
* _block (dim3) : CUDA block configuration,
* _grid (dim3) : CUDA grid configuration.
* Returns:
* void.
*
* Note(s):
* This function is callable from the CPU and executes on the device.
* This function is to be treated exactly as CUDA __global__ one.
* This function executes in coordination with the kernel
fftShift3D_cufftComplex_i.
********************************************************************/
void fftShift3D_i( cufftComplex* _arrayDeviceInput,
cufftComplex* _arrayDeviceOutput,
int _arraySize1D,
dim3 _block,
dim3 _grid )
{
for ( int i = 0; i < _arraySize1D; i++ )
fftShift3D_cufftComplex_i <<< _grid, _block >>>
( _arrayDeviceInput, _arrayDeviceOutput, _arraySize1D, i );
}
/********************************************************************
* Name:
* __global__ Pack1DComplexArray
*
* Description:
* Packs a complex array of 2 float components into a 1D float array in
* interleaved fashion, i.e. real values at even indices and imaginary
* values at odd indices.
*
* Formal Parameters:
* complexArray (cufftComplex*) : Input complex array,
* array1D (float*) : Resulting float array,
* arrSize1D (int) : Size of the volume in 1D assuming unified
* dimensionality.
*
* Returns:
* void.
*
* Note(s):
********************************************************************/
__global__
void Pack1DComplexArray( cufftComplex* complexArray,
float* array1D,
int arrSize1D )
{
// Index (1D)
int index = blockIdx.x * blockDim.x + threadIdx.x;
if ( index < ( arrSize1D * arrSize1D * arrSize1D ) )
{
/* Real value */
array1D[(2 * index) ] = complexArray[index].x;
/* Imaginary value */
array1D[(2 * index) + 1] = complexArray[index].y;
}
}
/********************************************************************
* Name:
* LaunchVolumeProcessingCUDA
*
* Description:
* Volume processing on the GPU that includes shifting the spatial and
* the spectral volumes, doing the 3D FFT with cuFFT, repacking the
* resulting shifted spectral volume into a 1D array to suit a 2-component
* (RG) OpenGL 3D texture.
*
* Formal Parameters:
* MainComplexArray (cufftComplex*) : Main complex array residing on
the GPU,
* TempComplexArray (cufftComplex*) : Temporary complex array used on
the fly,
* fDataSize (int) : Size of the volume in 1D assuming unified
* dimensionality,
* ptrTo1DArray (float*) : OpenGL-compatible array containing the shifted
spectral volume.
*
* Returns:
* void.
*
* Note(s):
* Some testing code is commented out to reduce its overhead. Uncomment
* it if needed.
********************************************************************/
extern "C"
void LaunchVolumeProcessingCUDA( cufftComplex* MainComplexArray,
cufftComplex* TempComplexArray,
int fDataSize,
float* ptrTo1DArray )
{
/* At this point, I have a pointer to the complex array residing
* in the GPU memory. This complex array will carry the spatial
* and the spectral volumes alternatively */
/* CUDA volume processing configuration */
dim3 sBlock(16, 16, 1);
dim3 sGrid((fDataSize / 16), (fDataSize / 16), 1);
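/* Note: this launch configuration covers a single fDataSize x fDataSize slice
 * and assumes fDataSize is a multiple of 16; fftShift3D_i then launches the
 * shift kernel once per z-slice. */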
/* Wrapping-around the spatial volume */
fftShift3D_i(TempComplexArray, MainComplexArray, fDataSize, sBlock, sGrid);
/* Allocating a CPU array to receive resulting volumes on the GPU
* for validation */
CPU_ARRAY = (cufftComplex*) malloc
(fDataSize * fDataSize * fDataSize * sizeof(cufftComplex));
/* Copy the final volume from the device to the host and test it */
cutilSafeCall(
cudaMemcpy( CPU_ARRAY, MainComplexArray,
fDataSize * fDataSize * fDataSize * sizeof( cufftComplex ),
cudaMemcpyDeviceToHost ) );
/* Testing the last 100 elements of the spatial volume on the GPU */
/*
printf("Testing the spatial volume before doing the cuFFT \n");
for (int i = ( fDataSize * fDataSize * fDataSize) - 100; i < fDataSize * fDataSize * fDataSize; i++ )
printf("%f \n", CPU_ARRAY[i].x);
*/
/* Setup for 3D cuFFT plan */
cufftPlan3d(&fftPlan3D, fDataSize, fDataSize, fDataSize, CUFFT_C2C);
cufftExecC2C(fftPlan3D, MainComplexArray, TempComplexArray, CUFFT_FORWARD);
/* Copy the resulting spectrum from the cuFFT operation to the host
* and test it */
/* cutilSafeCall(cudaMemcpy(CPU_ARRAY,
TempComplexArray,
(fDataSize * fDataSize * fDataSize * sizeof(cufftComplex)),
cudaMemcpyDeviceToHost)); */
/* Testing the last 100 elements of the spectral volume on the GPU */
/*
printf("Testing the resulting spectral volume after doing the cuFFT \n");
for (int i = ( fDataSize * fDataSize * fDataSize) - 100; i < fDataSize * fDataSize * fDataSize; i++ )
printf("%f \n", CPU_ARRAY[i].x);
*/
// Do 3D FFT Shift for the Generated Spectrum
// Save the Output in the Temp Array
// fftShift3D_cufftComplex <<< sGrid, sBlock >>>(TempComplexArray, MainComplexArray, fDataSize);
/* Wrapping-around the spectral volume to center its DC component */
fftShift3D_i( TempComplexArray, MainComplexArray, fDataSize, sBlock, sGrid );
/* Copy the resulting shifted spectrum from the "fftShift3D_i operation" to
* the host and test it */
/* cutilSafeCall(cudaMemcpy(CPU_ARRAY,
TempComplexArray,
(fDataSize * fDataSize * fDataSize * sizeof(cufftComplex)),
cudaMemcpyDeviceToHost)); */
/* Testing the last 100 elements of the shifted spectral volume on the GPU */
/*
printf("Testing the resulting shifted spectral volume after wrapping it around \n");
for (int i = ( fDataSize * fDataSize * fDataSize) - 100; i < fDataSize * fDataSize * fDataSize; i++ )
printf("%f \n", CPU_ARRAY[i].x);
*/
/* Garbage */
/*
cutilSafeCall(cudaMemcpy(CPU_ARRAY, MainComplexArray, 128 * 128 * 128 * sizeof(cufftComplex), cudaMemcpyDeviceToHost));
printf("Testing for a specific volume of 128 * 128 * 128 \n");
for (int i = 0; i < (128 * 128 * 128 / 2); i++)
printf("%f \n", CPU_ARRAY[i].x);
*/
/* Now, I have the spectral volume in the cufftComplex array. Save this
* spectrum to a 1D array on the device & link this array to OpenGL via
* a cudaGraphicsResource */
/* GPU configuration for the packing kernel */
dim3 sBlock1D( 512,1,1 );
dim3 sGrid1D( ( fDataSize * fDataSize * fDataSize ) / 512, 1, 1 );
/* Run 1D packing for texture array */
Pack1DComplexArray <<< sGrid1D, sBlock1D >>> ( MainComplexArray, ptrTo1DArray, fDataSize );
}
|
d25476dca47677cf939220765560364e79d6238c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "sparse_matrix.h"
#include "sparse_formats.h"
#include "sparse_io.h"
#include "mem.h"
#include "spmv_host.h"
#include "sparse_operations.h"
__global__ void spmv_GPU(const unsigned int num_rows,const float *AxS,const float *xS,const double *xD,const int *AjS,const int *ApS,
double *y,const double *AxD,const int *AjD,const int *ApD);
#define THREADS_PER_BLOCK 32
#define THREADS_PER_VECTOR 32
size_t bytes_per_spmv(const csr_matrix<int,float>& mtxS, const csr_matrix<int,double>& mtxD)
{
size_t bytes = 0;
bytes += 2*sizeof(int) * mtxS.num_rows; // row pointer
bytes += 1*sizeof(int) * mtxS.num_nonzeros; // column index
bytes += 1*sizeof(int) * mtxD.num_nonzeros; // column index
bytes += 2*sizeof(float) * mtxS.num_nonzeros ; // A[i,j] and x[j]
bytes += 2*sizeof(double) * mtxD.num_nonzeros ; // A[i,j] and x[j]
bytes += 2*sizeof(double) * mtxD.num_rows; // y[i] = y[i] + ...
return bytes;
}
int main(int argc,char **argv)
{
struct sparse_matrix A;
struct sparse_matrixS AS;
struct sparse_matrix AD;
float *xs;
double *xd;
double *b;
double *b2;
int Range = 2;
hipEvent_t start_event, stop_event;
float cuda_elapsed_time;
hipEventCreate(&start_event);
hipEventCreate(&stop_event);
char * mm_filename = NULL;
for(int i = 1; i < argc; i++){
if(argv[i][0] != '-'){
mm_filename = argv[i];
break;
}
}
coo_matrix<int,double> coo = read_coo_matrix<int,double>(mm_filename);
coo_matrix<int,float> cooS;
coo_matrix<int,double> cooD;
size_t coo1_nnz=0;
size_t coo2_nnz=0;
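// Split the matrix by value magnitude: entries with |value| <= Range are kept
// in double precision (cooD); all other entries go to single precision (cooS).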
for( int i = 0; i < coo.num_nonzeros; i++ ){
if(coo.V[i] >= (-1*Range) && coo.V[i] <= Range )
{coo1_nnz++;}
else
{coo2_nnz++;}
}
cooD.num_cols = coo.num_cols;
cooD.num_rows = coo.num_rows;
cooD.num_nonzeros = coo1_nnz;
cooD.I = new_host_array<int>(coo1_nnz);
cooD.J = new_host_array<int>(coo1_nnz);
cooD.V = new_host_array<double>(coo1_nnz);
cooS.num_cols = coo.num_cols;
cooS.num_rows = coo.num_rows;
cooS.num_nonzeros = coo2_nnz;
cooS.I = new_host_array<int>(coo2_nnz);
cooS.J = new_host_array<int>(coo2_nnz);
cooS.V = new_host_array<float>(coo2_nnz);
printf("Inside nnz =%d Outside nnz =%d Total nnz =%d \n",coo2_nnz,coo1_nnz,coo.num_nonzeros);
coo1_nnz=0;
coo2_nnz=0;
//timing split loop
hipEventRecord(start_event, 0);
for(size_t i = 0; i < coo.num_nonzeros; i++)
{
if(coo.V[i] >= (-1*Range) && coo.V[i] <= Range)
{
cooD.I[coo1_nnz] = coo.I[i];
cooD.J[coo1_nnz] = coo.J[i];
cooD.V[coo1_nnz] = coo.V[i];
coo1_nnz++;
}
else
{
cooS.I[coo2_nnz] = coo.I[i];
cooS.J[coo2_nnz] = coo.J[i];
cooS.V[coo2_nnz] = coo.V[i];
coo2_nnz++;
}
}
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
printf("Spliting time : %8.4f ms \n", cuda_elapsed_time);
csr_matrix<int,double> csr = coo_to_csr(coo, false);
csr_matrix<int,float> csrS = coo_to_csr(cooS, false);
csr_matrix<int,double> csrD = coo_to_csr(cooD, false);
delete_host_matrix(coo);
delete_host_matrix(cooS);
delete_host_matrix(cooD);
A.nnz = csr.num_nonzeros;
A.ncols = csr.num_cols;
A.nrows = csr.num_rows;
A.cols = csr.Aj;
A.rows = csr.Ap;
A.vals = csr.Ax;
AS.nnz = csrS.num_nonzeros;
AS.ncols = csrS.num_cols;
AS.nrows = csrS.num_rows;
AS.cols = csrS.Aj;
AS.rows = csrS.Ap;
AS.vals = csrS.Ax;
AD.nnz = csrD.num_nonzeros;
AD.ncols = csrD.num_cols;
AD.nrows = csrD.num_rows;
AD.cols = csrD.Aj;
AD.rows = csrD.Ap;
AD.vals = csrD.Ax;
int i;
xs = ((float *)(malloc(sizeof(float ) * A . ncols)));
xd = ((double *)(malloc(sizeof(double ) * A . ncols)));
b = ((double *)(malloc(sizeof(double ) * A . nrows)));
b2 = ((double *)(malloc(sizeof(double ) * A . nrows)));
srand(2013);
for (i = 0; i < A . ncols; i++){
double tmp =1* rand() / (RAND_MAX + 1.0);
xs[i] = (float) tmp;//1.0;//
xd[i] = tmp;//1.0;//
}
for (i = 0; i < A . nrows; i++) {
b[i] = 0;
b2[i] = 0;
}
spmv_csr_serial_host<int,double>(csr, xd, b);
int *devI4Ptr;
int *devI3Ptr;
float *devI2Ptrs;
double *devI2Ptrd;
float *devI1Ptr;
int *devI4DPtr;
int *devI3DPtr;
double *devI1DPtr;
double *devO1DPtr;
hipMalloc(((void **)(&devI1Ptr)),AS.nnz* sizeof(float ));
hipMemcpy(devI1Ptr,AS.vals,AS.nnz* sizeof(float ),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devI2Ptrs)),A . nrows* sizeof(float ));
hipMemcpy(devI2Ptrs,xs,A . nrows* sizeof(float ),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devI2Ptrd)),A . nrows* sizeof(double ));
hipMemcpy(devI2Ptrd,xd,A . nrows* sizeof(double ),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devI3Ptr)),AS.nnz* sizeof(int ));
hipMemcpy(devI3Ptr,AS.cols,AS.nnz* sizeof(int ),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devI4Ptr)),(A . nrows+1)* sizeof(int ));
hipMemcpy(devI4Ptr,AS.rows,(A . nrows+1)* sizeof(int ),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devO1DPtr)),A . nrows* sizeof(double ));
hipMemcpy(devO1DPtr,b2,A . nrows* sizeof(double),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devI1DPtr)),AD . nnz* sizeof(double));
hipMemcpy(devI1DPtr,AD.vals,AD . nnz* sizeof(double),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devI3DPtr)),AD . nnz* sizeof(int ));
hipMemcpy(devI3DPtr,AD.cols,AD . nnz* sizeof(int ),hipMemcpyHostToDevice);
hipMalloc(((void **)(&devI4DPtr)),(A . nrows+1)* sizeof(int ));
hipMemcpy(devI4DPtr,AD.rows,(A . nrows+1)* sizeof(int ),hipMemcpyHostToDevice);
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
const size_t MAX_BLOCKS = 2048;//cusp::system::cuda::detail::max_active_blocks
const size_t NUM_BLOCKS = min(MAX_BLOCKS, (A . nrows + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
size_t num_iterations=500;
hipEventRecord(start_event, 0);
hipLaunchKernelGGL(( spmv_GPU), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK),0 , 0, A . nrows,devI1Ptr,devI2Ptrs,devI2Ptrd,devI3Ptr,devI4Ptr,devO1DPtr,devI1DPtr,devI3DPtr,devI4DPtr);
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
const double seconds = 3.0;
const size_t min_iterations = 1;
const size_t max_iterations = 1000;
double estimated_time = cuda_elapsed_time/1000.0;
if (estimated_time == 0)
num_iterations = max_iterations;
else
num_iterations = ::min(max_iterations, ::max(min_iterations, (size_t) (seconds / estimated_time)) );
hipEventRecord(start_event, 0);
for (i = 0; i< num_iterations; i++){
hipLaunchKernelGGL(( spmv_GPU), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK),0 , 0, A . nrows,devI1Ptr,devI2Ptrs,devI2Ptrd,devI3Ptr,devI4Ptr, devO1DPtr,devI1DPtr,devI3DPtr,devI4DPtr);
}
hipEventRecord(stop_event, 0);
hipEventSynchronize(stop_event);
hipEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
double msec_per_iteration = cuda_elapsed_time/num_iterations;
double sec_per_iteration = msec_per_iteration / 1000.0;
double GFLOPs = (sec_per_iteration == 0) ? 0 : (2.0 * (double) csr.num_nonzeros / sec_per_iteration) / 1e9;
double GBYTEs = (sec_per_iteration == 0) ? 0 : ((double) bytes_per_spmv(csrS,csrD) / sec_per_iteration) / 1e9;
printf("\tbenchmarking : %8.4f ms ( %5.2f GFLOP/s %5.1f GB/s)\n", msec_per_iteration, GFLOPs, GBYTEs);
hipMemcpy(b2,devO1DPtr,A . nrows* sizeof(double ),hipMemcpyDeviceToHost);
hipFree(devI1Ptr);
hipFree(devI2Ptrs);
hipFree(devI2Ptrd);
hipFree(devI3Ptr);
hipFree(devI4Ptr);
hipFree(devO1DPtr);
hipFree(devI1DPtr);
hipFree(devI3DPtr);
hipFree(devI4DPtr);
for (i = 0; i < A . nrows; i++) {
double kor = fabs(b2[i] - b[i]);
if (kor > 0.0001) {
printf("Values don't match at %d, expected %f obtained %f\n", i, b[i], b2[i]);
break;
}
}
int k0,k1,k2,k3,k4,k5,k6,k7,k8,ki;
k0=k1=k2=k3=k4=k5=k6=k7=k8=ki=0;
for (i = 0; i < A . nrows; i++) {
while ((int)b[i] != 0) // scale both values down until |b[i]| < 1 so significant digits line up (Hari S.'s idea)
{
b2[i]=b2[i]/10.0;
b[i]=b[i]/10.0;
}
double kor = fabs(b2[i] - b[i]);
if (kor<=0.0000000001) ki++;
else if (kor<=0.000000001) k8++;
else if (kor<=0.00000001) k7++;
else if (kor<=0.0000001) k6++;
else if (kor<=0.000001) k5++;
else if (kor<=0.00001) k4++;
else if (kor<=0.0001) k3++;
else if (kor<=0.001) k2++;
else if (kor<=0.01) k1++;
else k0++;
}
// SDD stands for significant decimal digit
printf("Out of %d ,SDD0= %d ,SDD1= %d ,SDD2= %d ,SDD3= %d ,SDD4= %d ,SDD5= %d ,SDD6= %d ,SDD7= %d ,SDD8= %d ,SDDi= %d\n",A . nrows,k0,k1,k2,k3,k4,k5,k6,k7,k8,ki);
free(xs);
free(xd);
free(b);
free(b2);
free(A . rows); free(AS . rows); free(AD . rows);
free(A . cols); free(AS . cols); free(AD . cols);
free(A . vals); free(AS . vals); free(AD . vals);
if (i != A . nrows)
exit(1);
return 0;
}
__global__ void spmv_GPU(const unsigned int num_rows,
const float *AxS,
const float *xS,
const double *xD,
const int *AjS,
const int *ApS,
double *y,
const double *AxD,
const int *AjD,
const int *ApD)
{
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
__shared__ volatile double sdata[VECTORS_PER_BLOCK * THREADS_PER_VECTOR + THREADS_PER_VECTOR / 2]; // padded to avoid reduction conditionals
__shared__ volatile int ptrsS[VECTORS_PER_BLOCK][2];
__shared__ volatile int ptrsD[VECTORS_PER_BLOCK][2];
const int thread_id = THREADS_PER_BLOCK * blockIdx.x + threadIdx.x; // global thread index
const int thread_lane = threadIdx.x & (THREADS_PER_VECTOR - 1); // thread index within the vector
const int vector_id = thread_id / THREADS_PER_VECTOR; // global vector index
const int vector_lane = threadIdx.x / THREADS_PER_VECTOR; // vector index within the block
const int num_vectors = VECTORS_PER_BLOCK * gridDim.x; // total number of active vectors
for(int row = vector_id; row < num_rows; row += num_vectors)
{
// use two threads to fetch Ap[row] and Ap[row+1]
// this is considerably faster than the straightforward version
if(thread_lane < 2){
ptrsS[vector_lane][thread_lane] = ApS[row + thread_lane];
ptrsD[vector_lane][thread_lane] = ApD[row + thread_lane];
}
const int row_startS = ptrsS[vector_lane][0]; //same as: row_start = Ap[row];
const int row_endS = ptrsS[vector_lane][1]; //same as: row_end = Ap[row+1];
const int row_startD = ptrsD[vector_lane][0]; //same as: row_start = Ap[row];
const int row_endD = ptrsD[vector_lane][1]; //same as: row_end = Ap[row+1];
// initialize local sum
double sum = 0.0;
// accumulate local sums
//Single precision
if ( row_endS - row_startS > 32)
{
// ensure aligned memory access to Aj and Ax
int jj = row_startS - (row_startS & (THREADS_PER_VECTOR - 1)) + thread_lane;
// accumulate local sums
if(jj >= row_startS && jj < row_endS)
sum += AxS[jj]* xS[AjS[jj]];
// accumulate local sums
for(jj += THREADS_PER_VECTOR; jj < row_endS; jj += THREADS_PER_VECTOR)
sum += AxS[jj]* xS[AjS[jj]];
}
else
{
// accumulate local sums
for(int jj = row_startS + thread_lane; jj < row_endS; jj += THREADS_PER_VECTOR)
sum += AxS[jj]* xS[AjS[jj]];
}
//Double precision
if ( row_endD - row_startD > 32)
{
// ensure aligned memory access to Aj and Ax
int jj = row_startD - (row_startD & (THREADS_PER_VECTOR - 1)) + thread_lane;
// accumulate local sums
if(jj >= row_startD && jj < row_endD)
sum += AxD[jj]* xD[AjD[jj]];
// accumulate local sums
for(jj += THREADS_PER_VECTOR; jj < row_endD; jj += THREADS_PER_VECTOR)
sum += AxD[jj]* xD[AjD[jj]];
}
else
{
// accumulate local sums
for(int jj = row_startD + thread_lane; jj < row_endD; jj += THREADS_PER_VECTOR)
sum += AxD[jj]* xD[AjD[jj]];
}
// store local sum in shared memory
sdata[threadIdx.x] = sum;
double temp=0;
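// Warp-synchronous tree reduction across the vector: the code relies on
// THREADS_PER_VECTOR being no larger than a warp (32) and on sdata being
// declared volatile, so no explicit synchronization is used between steps.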
// reduce local sums to row sum
if (THREADS_PER_VECTOR > 16) {temp = sdata[threadIdx.x + 16]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 8) {temp = sdata[threadIdx.x + 8]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 4) {temp = sdata[threadIdx.x + 4]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 2) {temp = sdata[threadIdx.x + 2]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 1) {temp = sdata[threadIdx.x + 1]; sdata[threadIdx.x] = sum += temp;}
// first thread writes the result
if (threadIdx.x == 0)
y[row] = sdata[threadIdx.x];
}
}
| d25476dca47677cf939220765560364e79d6238c.cu | /*
* Copyright NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "sparse_matrix.h"
#include "sparse_formats.h"
#include "sparse_io.h"
#include "mem.h"
#include "spmv_host.h"
#include "sparse_operations.h"
__global__ void spmv_GPU(const unsigned int num_rows,const float *AxS,const float *xS,const double *xD,const int *AjS,const int *ApS,
double *y,const double *AxD,const int *AjD,const int *ApD);
#define THREADS_PER_BLOCK 32
#define THREADS_PER_VECTOR 32
size_t bytes_per_spmv(const csr_matrix<int,float>& mtxS, const csr_matrix<int,double>& mtxD)
{
size_t bytes = 0;
bytes += 2*sizeof(int) * mtxS.num_rows; // row pointer
bytes += 1*sizeof(int) * mtxS.num_nonzeros; // column index
bytes += 1*sizeof(int) * mtxD.num_nonzeros; // column index
bytes += 2*sizeof(float) * mtxS.num_nonzeros ; // A[i,j] and x[j]
bytes += 2*sizeof(double) * mtxD.num_nonzeros ; // A[i,j] and x[j]
bytes += 2*sizeof(double) * mtxD.num_rows; // y[i] = y[i] + ...
return bytes;
}
int main(int argc,char **argv)
{
struct sparse_matrix A;
struct sparse_matrixS AS;
struct sparse_matrix AD;
float *xs;
double *xd;
double *b;
double *b2;
int Range = 2;
cudaEvent_t start_event, stop_event;
float cuda_elapsed_time;
cudaEventCreate(&start_event);
cudaEventCreate(&stop_event);
char * mm_filename = NULL;
for(int i = 1; i < argc; i++){
if(argv[i][0] != '-'){
mm_filename = argv[i];
break;
}
}
coo_matrix<int,double> coo = read_coo_matrix<int,double>(mm_filename);
coo_matrix<int,float> cooS;
coo_matrix<int,double> cooD;
size_t coo1_nnz=0;
size_t coo2_nnz=0;
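// Split the matrix by value magnitude: entries with |value| <= Range are kept
// in double precision (cooD); all other entries go to single precision (cooS).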
for( int i = 0; i < coo.num_nonzeros; i++ ){
if(coo.V[i] >= (-1*Range) && coo.V[i] <= Range )
{coo1_nnz++;}
else
{coo2_nnz++;}
}
cooD.num_cols = coo.num_cols;
cooD.num_rows = coo.num_rows;
cooD.num_nonzeros = coo1_nnz;
cooD.I = new_host_array<int>(coo1_nnz);
cooD.J = new_host_array<int>(coo1_nnz);
cooD.V = new_host_array<double>(coo1_nnz);
cooS.num_cols = coo.num_cols;
cooS.num_rows = coo.num_rows;
cooS.num_nonzeros = coo2_nnz;
cooS.I = new_host_array<int>(coo2_nnz);
cooS.J = new_host_array<int>(coo2_nnz);
cooS.V = new_host_array<float>(coo2_nnz);
printf("Inside nnz =%d Outside nnz =%d Total nnz =%d \n",coo2_nnz,coo1_nnz,coo.num_nonzeros);
coo1_nnz=0;
coo2_nnz=0;
//timing split loop
cudaEventRecord(start_event, 0);
for(size_t i = 0; i < coo.num_nonzeros; i++)
{
if(coo.V[i] >= (-1*Range) && coo.V[i] <= Range)
{
cooD.I[coo1_nnz] = coo.I[i];
cooD.J[coo1_nnz] = coo.J[i];
cooD.V[coo1_nnz] = coo.V[i];
coo1_nnz++;
}
else
{
cooS.I[coo2_nnz] = coo.I[i];
cooS.J[coo2_nnz] = coo.J[i];
cooS.V[coo2_nnz] = coo.V[i];
coo2_nnz++;
}
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
printf("Spliting time : %8.4f ms \n", cuda_elapsed_time);
csr_matrix<int,double> csr = coo_to_csr(coo, false);
csr_matrix<int,float> csrS = coo_to_csr(cooS, false);
csr_matrix<int,double> csrD = coo_to_csr(cooD, false);
delete_host_matrix(coo);
delete_host_matrix(cooS);
delete_host_matrix(cooD);
A.nnz = csr.num_nonzeros;
A.ncols = csr.num_cols;
A.nrows = csr.num_rows;
A.cols = csr.Aj;
A.rows = csr.Ap;
A.vals = csr.Ax;
AS.nnz = csrS.num_nonzeros;
AS.ncols = csrS.num_cols;
AS.nrows = csrS.num_rows;
AS.cols = csrS.Aj;
AS.rows = csrS.Ap;
AS.vals = csrS.Ax;
AD.nnz = csrD.num_nonzeros;
AD.ncols = csrD.num_cols;
AD.nrows = csrD.num_rows;
AD.cols = csrD.Aj;
AD.rows = csrD.Ap;
AD.vals = csrD.Ax;
int i;
xs = ((float *)(malloc(sizeof(float ) * A . ncols)));
xd = ((double *)(malloc(sizeof(double ) * A . ncols)));
b = ((double *)(malloc(sizeof(double ) * A . nrows)));
b2 = ((double *)(malloc(sizeof(double ) * A . nrows)));
srand(2013);
for (i = 0; i < A . ncols; i++){
double tmp =1* rand() / (RAND_MAX + 1.0);
xs[i] = (float) tmp;//1.0;//
xd[i] = tmp;//1.0;//
}
for (i = 0; i < A . nrows; i++) {
b[i] = 0;
b2[i] = 0;
}
spmv_csr_serial_host<int,double>(csr, xd, b);
int *devI4Ptr;
int *devI3Ptr;
float *devI2Ptrs;
double *devI2Ptrd;
float *devI1Ptr;
int *devI4DPtr;
int *devI3DPtr;
double *devI1DPtr;
double *devO1DPtr;
cudaMalloc(((void **)(&devI1Ptr)),AS.nnz* sizeof(float ));
cudaMemcpy(devI1Ptr,AS.vals,AS.nnz* sizeof(float ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI2Ptrs)),A . nrows* sizeof(float ));
cudaMemcpy(devI2Ptrs,xs,A . nrows* sizeof(float ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI2Ptrd)),A . nrows* sizeof(double ));
cudaMemcpy(devI2Ptrd,xd,A . nrows* sizeof(double ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI3Ptr)),AS.nnz* sizeof(int ));
cudaMemcpy(devI3Ptr,AS.cols,AS.nnz* sizeof(int ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI4Ptr)),(A . nrows+1)* sizeof(int ));
cudaMemcpy(devI4Ptr,AS.rows,(A . nrows+1)* sizeof(int ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devO1DPtr)),A . nrows* sizeof(double ));
cudaMemcpy(devO1DPtr,b2,A . nrows* sizeof(double),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI1DPtr)),AD . nnz* sizeof(double));
cudaMemcpy(devI1DPtr,AD.vals,AD . nnz* sizeof(double),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI3DPtr)),AD . nnz* sizeof(int ));
cudaMemcpy(devI3DPtr,AD.cols,AD . nnz* sizeof(int ),cudaMemcpyHostToDevice);
cudaMalloc(((void **)(&devI4DPtr)),(A . nrows+1)* sizeof(int ));
cudaMemcpy(devI4DPtr,AD.rows,(A . nrows+1)* sizeof(int ),cudaMemcpyHostToDevice);
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
const size_t MAX_BLOCKS = 2048;//cusp::system::cuda::detail::max_active_blocks
const size_t NUM_BLOCKS = min(MAX_BLOCKS, (A . nrows + (VECTORS_PER_BLOCK - 1)) / VECTORS_PER_BLOCK);
size_t num_iterations=500;
cudaEventRecord(start_event, 0);
spmv_GPU<<<NUM_BLOCKS,THREADS_PER_BLOCK,0 >>>(A . nrows,devI1Ptr,devI2Ptrs,devI2Ptrd,devI3Ptr,devI4Ptr,devO1DPtr,devI1DPtr,devI3DPtr,devI4DPtr);
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
const double seconds = 3.0;
const size_t min_iterations = 1;
const size_t max_iterations = 1000;
double estimated_time = cuda_elapsed_time/1000.0;
if (estimated_time == 0)
num_iterations = max_iterations;
else
num_iterations = std::min(max_iterations, std::max(min_iterations, (size_t) (seconds / estimated_time)) );
cudaEventRecord(start_event, 0);
for (i = 0; i< num_iterations; i++){
spmv_GPU<<<NUM_BLOCKS,THREADS_PER_BLOCK,0 >>>(A . nrows,devI1Ptr,devI2Ptrs,devI2Ptrd,devI3Ptr,devI4Ptr, devO1DPtr,devI1DPtr,devI3DPtr,devI4DPtr);
}
cudaEventRecord(stop_event, 0);
cudaEventSynchronize(stop_event);
cudaEventElapsedTime(&cuda_elapsed_time, start_event, stop_event);
double msec_per_iteration = cuda_elapsed_time/num_iterations;
double sec_per_iteration = msec_per_iteration / 1000.0;
double GFLOPs = (sec_per_iteration == 0) ? 0 : (2.0 * (double) csr.num_nonzeros / sec_per_iteration) / 1e9;
double GBYTEs = (sec_per_iteration == 0) ? 0 : ((double) bytes_per_spmv(csrS,csrD) / sec_per_iteration) / 1e9;
printf("\tbenchmarking : %8.4f ms ( %5.2f GFLOP/s %5.1f GB/s)\n", msec_per_iteration, GFLOPs, GBYTEs);
cudaMemcpy(b2,devO1DPtr,A . nrows* sizeof(double ),cudaMemcpyDeviceToHost);
cudaFree(devI1Ptr);
cudaFree(devI2Ptrs);
cudaFree(devI2Ptrd);
cudaFree(devI3Ptr);
cudaFree(devI4Ptr);
cudaFree(devO1DPtr);
cudaFree(devI1DPtr);
cudaFree(devI3DPtr);
cudaFree(devI4DPtr);
for (i = 0; i < A . nrows; i++) {
double kor = fabs(b2[i] - b[i]);
if (kor > 0.0001) {
printf("Values don't match at %d, expected %f obtained %f\n", i, b[i], b2[i]);
break;
}
}
int k0,k1,k2,k3,k4,k5,k6,k7,k8,ki;
k0=k1=k2=k3=k4=k5=k6=k7=k8=ki=0;
for (i = 0; i < A . nrows; i++) {
while ((int)b[i] != 0) // scale both values down until |b[i]| < 1 so significant digits line up (Hari S.'s idea)
{
b2[i]=b2[i]/10.0;
b[i]=b[i]/10.0;
}
double kor = fabs(b2[i] - b[i]);
if (kor<=0.0000000001) ki++;
else if (kor<=0.000000001) k8++;
else if (kor<=0.00000001) k7++;
else if (kor<=0.0000001) k6++;
else if (kor<=0.000001) k5++;
else if (kor<=0.00001) k4++;
else if (kor<=0.0001) k3++;
else if (kor<=0.001) k2++;
else if (kor<=0.01) k1++;
else k0++;
}
// SDD stands for significant decimal digit
printf("Out of %d ,SDD0= %d ,SDD1= %d ,SDD2= %d ,SDD3= %d ,SDD4= %d ,SDD5= %d ,SDD6= %d ,SDD7= %d ,SDD8= %d ,SDDi= %d\n",A . nrows,k0,k1,k2,k3,k4,k5,k6,k7,k8,ki);
free(xs);
free(xd);
free(b);
free(b2);
free(A . rows); free(AS . rows); free(AD . rows);
free(A . cols); free(AS . cols); free(AD . cols);
free(A . vals); free(AS . vals); free(AD . vals);
if (i != A . nrows)
exit(1);
return 0;
}
__global__ void spmv_GPU(const unsigned int num_rows,
const float *AxS,
const float *xS,
const double *xD,
const int *AjS,
const int *ApS,
double *y,
const double *AxD,
const int *AjD,
const int *ApD)
{
const size_t VECTORS_PER_BLOCK = THREADS_PER_BLOCK / THREADS_PER_VECTOR;
__shared__ volatile double sdata[VECTORS_PER_BLOCK * THREADS_PER_VECTOR + THREADS_PER_VECTOR / 2]; // padded to avoid reduction conditionals
__shared__ volatile int ptrsS[VECTORS_PER_BLOCK][2];
__shared__ volatile int ptrsD[VECTORS_PER_BLOCK][2];
const int thread_id = THREADS_PER_BLOCK * blockIdx.x + threadIdx.x; // global thread index
const int thread_lane = threadIdx.x & (THREADS_PER_VECTOR - 1); // thread index within the vector
const int vector_id = thread_id / THREADS_PER_VECTOR; // global vector index
const int vector_lane = threadIdx.x / THREADS_PER_VECTOR; // vector index within the block
const int num_vectors = VECTORS_PER_BLOCK * gridDim.x; // total number of active vectors
for(int row = vector_id; row < num_rows; row += num_vectors)
{
// use two threads to fetch Ap[row] and Ap[row+1]
// this is considerably faster than the straightforward version
if(thread_lane < 2){
ptrsS[vector_lane][thread_lane] = ApS[row + thread_lane];
ptrsD[vector_lane][thread_lane] = ApD[row + thread_lane];
}
const int row_startS = ptrsS[vector_lane][0]; //same as: row_start = Ap[row];
const int row_endS = ptrsS[vector_lane][1]; //same as: row_end = Ap[row+1];
const int row_startD = ptrsD[vector_lane][0]; //same as: row_start = Ap[row];
const int row_endD = ptrsD[vector_lane][1]; //same as: row_end = Ap[row+1];
// initialize local sum
double sum = 0.0;
// accumulate local sums
//Single precision
if ( row_endS - row_startS > 32)
{
// ensure aligned memory access to Aj and Ax
int jj = row_startS - (row_startS & (THREADS_PER_VECTOR - 1)) + thread_lane;
// accumulate local sums
if(jj >= row_startS && jj < row_endS)
sum += AxS[jj]* xS[AjS[jj]];
// accumulate local sums
for(jj += THREADS_PER_VECTOR; jj < row_endS; jj += THREADS_PER_VECTOR)
sum += AxS[jj]* xS[AjS[jj]];
}
else
{
// accumulate local sums
for(int jj = row_startS + thread_lane; jj < row_endS; jj += THREADS_PER_VECTOR)
sum += AxS[jj]* xS[AjS[jj]];
}
//Double precision
if ( row_endD - row_startD > 32)
{
// ensure aligned memory access to Aj and Ax
int jj = row_startD - (row_startD & (THREADS_PER_VECTOR - 1)) + thread_lane;
// accumulate local sums
if(jj >= row_startD && jj < row_endD)
sum += AxD[jj]* xD[AjD[jj]];
// accumulate local sums
for(jj += THREADS_PER_VECTOR; jj < row_endD; jj += THREADS_PER_VECTOR)
sum += AxD[jj]* xD[AjD[jj]];
}
else
{
// accumulate local sums
for(int jj = row_startD + thread_lane; jj < row_endD; jj += THREADS_PER_VECTOR)
sum += AxD[jj]* xD[AjD[jj]];
}
// store local sum in shared memory
sdata[threadIdx.x] = sum;
double temp=0;
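// Warp-synchronous tree reduction across the vector: the code relies on
// THREADS_PER_VECTOR being no larger than a warp (32) and on sdata being
// declared volatile, so no explicit synchronization is used between steps.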
// reduce local sums to row sum
if (THREADS_PER_VECTOR > 16) {temp = sdata[threadIdx.x + 16]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 8) {temp = sdata[threadIdx.x + 8]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 4) {temp = sdata[threadIdx.x + 4]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 2) {temp = sdata[threadIdx.x + 2]; sdata[threadIdx.x] = sum += temp;}
if (THREADS_PER_VECTOR > 1) {temp = sdata[threadIdx.x + 1]; sdata[threadIdx.x] = sum += temp;}
// first thread writes the result
if (threadIdx.x == 0)
y[row] = sdata[threadIdx.x];
}
}
|
20af2a0b2eb2dbd6d0f3e8fe95b7731cb0011f16.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <THHUNN/THHUNN.h>
#include <THH/THHTensor.hpp>
#include <THHUNN/common.h>
#include <THHUNN/upsampling.h>
#include <THH/THHDeviceTensor.cuh>
#include <THH/THHDeviceTensorUtils.cuh>
#include <THH/THHDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THHUNN/THHHalfAutoNumerics.cuh>
#include <THH/THHAtomics.cuh>
template<typename Dtype, typename Acctype>
__launch_bounds__(1024)
__global__ void caffe_gpu_interp2_kernel(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
const THCDeviceTensor<Dtype, 5> data1, THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][t1][h1][w1];
data2[n][c][t2][h2][w2] = val;
}
}
return;
}
//
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
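// Trilinear interpolation: the output voxel is a weighted sum of the 8
// neighbouring input voxels, using the per-axis weights (t0lambda, t1lambda),
// (h0lambda, h1lambda) and (w0lambda, w1lambda).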
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Acctype val = t0lambda * (h0lambda * (w0lambda * data1[n][c][t1][h1][w1]
+ w1lambda * data1[n][c][t1][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1][h1+h1p][w1]
+ w1lambda * data1[n][c][t1][h1+h1p][w1+w1p]))
+ t1lambda * (h0lambda * (w0lambda * data1[n][c][t1+t1p][h1][w1]
+ w1lambda * data1[n][c][t1+t1p][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1+t1p][h1+h1p][w1]
+ w1lambda * data1[n][c][t1+t1p][h1+h1p][w1+w1p]));
data2[n][c][t2][h2][w2] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, typename Acctype>
__launch_bounds__(1024)
__global__ void caffe_gpu_interp2_kernel_backward(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
THCDeviceTensor<Dtype, 5> data1, const THCDeviceTensor<Dtype, 5> data2){
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][t1][h1][w1];
data1[n][c][t2][h2][w2] += val;
}
}
return;
}
//
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
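// Backward pass scatters the incoming gradient to the same 8 input voxels,
// weighted identically to the forward pass; atomicAdd is needed because
// several output voxels can map to the same input voxel.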
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][t2][h2][w2];
atomicAdd(data1[n][c][t1][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w1lambda * d2val));
}
}
}
/////////////////////////////////////////////////////////
}
#include <THHUNN/generic/VolumetricUpSamplingTrilinear.hip>
#include <THH/THHGenerateFloatTypes.h>
| 20af2a0b2eb2dbd6d0f3e8fe95b7731cb0011f16.cu | // Adapted from interp.cpp from Caffe util by Pauline Luc
// Originally developed by George Papandreou
#include <THCUNN/THCUNN.h>
#include <THC/THCTensor.hpp>
#include <THCUNN/common.h>
#include <THCUNN/upsampling.h>
#include <THC/THCDeviceTensor.cuh>
#include <THC/THCDeviceTensorUtils.cuh>
#include <THC/THCDeviceUtils.cuh>
#include <TH/THHalf.h>
#include <THCUNN/THCHalfAutoNumerics.cuh>
#include <THC/THCAtomics.cuh>
template<typename Dtype, typename Acctype>
__launch_bounds__(1024)
__global__ void caffe_gpu_interp2_kernel(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
const THCDeviceTensor<Dtype, 5> data1, THCDeviceTensor<Dtype, 5> data2) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data1[n][c][t1][h1][w1];
data2[n][c][t2][h2][w2] = val;
}
}
return;
}
//
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
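// Trilinear interpolation: the output voxel is a weighted sum of the 8
// neighbouring input voxels, using the per-axis weights (t0lambda, t1lambda),
// (h0lambda, h1lambda) and (w0lambda, w1lambda).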
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Acctype val = t0lambda * (h0lambda * (w0lambda * data1[n][c][t1][h1][w1]
+ w1lambda * data1[n][c][t1][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1][h1+h1p][w1]
+ w1lambda * data1[n][c][t1][h1+h1p][w1+w1p]))
+ t1lambda * (h0lambda * (w0lambda * data1[n][c][t1+t1p][h1][w1]
+ w1lambda * data1[n][c][t1+t1p][h1][w1+w1p])
+ h1lambda * (w0lambda * data1[n][c][t1+t1p][h1+h1p][w1]
+ w1lambda * data1[n][c][t1+t1p][h1+h1p][w1+w1p]));
data2[n][c][t2][h2][w2] = ScalarConvert<Acctype, Dtype>::to(val);
}
}
}
}
// Backward (adjoint) operation 1 <- 2 (accumulates)
template <typename Dtype, typename Acctype>
__launch_bounds__(1024)
__global__ void caffe_gpu_interp2_kernel_backward(const int n,
const Acctype rdepth, const Acctype rheight, const Acctype rwidth, const bool align_corners,
THCDeviceTensor<Dtype, 5> data1, const THCDeviceTensor<Dtype, 5> data2){
int index = threadIdx.x + blockIdx.x * blockDim.x;
const int batchsize = data1.getSize(0);
const int channels = data1.getSize(1);
const int depth1 = data1.getSize(2);
const int height1 = data1.getSize(3);
const int width1 = data1.getSize(4);
const int depth2 = data2.getSize(2);
const int height2 = data2.getSize(3);
const int width2 = data2.getSize(4);
if (index < n) {
const int w2 = (index % (height2*width2)) % width2; // 0:width2-1
const int h2 = (index % (height2*width2)) / width2; // 0:height2-1
const int t2 = index / (height2*width2); // 0:depth2-1
// special case: just copy
if (depth1 == depth2 && height1 == height2 && width1 == width2) {
const int t1 = t2;
const int h1 = h2;
const int w1 = w2;
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype val = data2[n][c][t1][h1][w1];
data1[n][c][t2][h2][w2] += val;
}
}
return;
}
//
const Acctype t1r = linear_upsampling_compute_source_index<Acctype>(rdepth, t2, align_corners);
const int t1 = t1r;
const int t1p = (t1 < depth1 - 1) ? 1 : 0;
const Acctype t1lambda = t1r - t1;
const Acctype t0lambda = Acctype(1) - t1lambda;
//
const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners);
const int h1 = h1r;
const int h1p = (h1 < height1 - 1) ? 1 : 0;
const Acctype h1lambda = h1r - h1;
const Acctype h0lambda = Acctype(1) - h1lambda;
//
const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners);
const int w1 = w1r;
const int w1p = (w1 < width1 - 1) ? 1 : 0;
const Acctype w1lambda = w1r - w1;
const Acctype w0lambda = Acctype(1) - w1lambda;
//
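// Backward pass scatters the incoming gradient to the same 8 input voxels,
// weighted identically to the forward pass; atomicAdd is needed because
// several output voxels can map to the same input voxel.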
for (int n = 0; n < batchsize ; n++){
for (int c = 0; c < channels; ++c) {
const Dtype d2val = data2[n][c][t2][h2][w2];
atomicAdd(data1[n][c][t1][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t0lambda * h1lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h0lambda * w1lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w0lambda * d2val));
atomicAdd(data1[n][c][t1+t1p][h1+h1p][w1+w1p].data(),
ScalarConvert<Acctype, Dtype>::to(t1lambda * h1lambda * w1lambda * d2val));
}
}
}
/////////////////////////////////////////////////////////
}
#include <THCUNN/generic/VolumetricUpSamplingTrilinear.cu>
#include <THC/THCGenerateFloatTypes.h>
|
56d2432ae3da6ebba30236a1b1aacd205c1e2c4d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <device_launch_parameters.h>
#include <image.h>
#include <assert.h>
#include <cell_grid.cuh>
#include <cstring>
#include <string>
#include <limits>
constexpr int ThreadsPerBlock = 32;
constexpr int NumberOfEvolutions = 5000;
constexpr int CellGridDimension = 2048 * 4; //10000
constexpr size_t CellCount = CellGridDimension * CellGridDimension;
#define ENABLE_OGL 0
#if ENABLE_OGL
#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
typedef unsigned char uchar;
constexpr int BlockDim = 32;
constexpr ushort MaxFitness = std::numeric_limits<ushort>::max();
int viewportWidth = CellGridDimension;
int viewportHeight = CellGridDimension;
uchar *renderTextureData;
unsigned int pboId;
unsigned int textureId;
cudaGraphicsResource_t cudaPBOResource;
cudaGraphicsResource_t cudaTexResource;
CellGrid grid;
float fitness = 0.0;
float lastFitness = -1.0f;
uint iter = 0;
double averageEvolveTime = 0.0f;
double averageFitnessTime = 0.0f;
size_t sameFitnessValue = 0;
void my_display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textureId);
glBegin(GL_QUADS);
glTexCoord2d(0, 0);
glVertex2d(0, 0);
glTexCoord2d(1, 0);
glVertex2d(viewportWidth, 0);
glTexCoord2d(1, 1);
glVertex2d(viewportWidth, viewportHeight);
glTexCoord2d(0, 1);
glVertex2d(0, viewportHeight);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h)
{
viewportWidth = w;
viewportHeight = h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, viewportWidth, viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, viewportWidth, 0, viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
__global__ void clear_kernel(unsigned char *pbo)
{
uint tIdX = (blockIdx.x * blockDim.x) + threadIdx.x;
uint tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
uint strideX = blockDim.x * gridDim.x;
uint strideY = blockDim.y * gridDim.y;
while (tIdX < CellGridDimension)
{
tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
while (tIdY < CellGridDimension)
{
pbo[(tIdY * CellGridDimension * 4) + tIdX] = 0;
pbo[(tIdY * CellGridDimension * 4) + tIdX + 1] = 0;
pbo[(tIdY * CellGridDimension * 4) + tIdX + 2] = 0;
pbo[(tIdY * CellGridDimension * 4) + tIdX + 3] = 0;
tIdY += strideY;
}
tIdX += strideX;
}
}
__global__ void draw(const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo, Cell *population)
{
for (int cellX = 0; cellX < CellGridDimension; cellX++)
{
for (int cellY = 0; cellY < CellGridDimension; cellY++)
{
Cell cell = population[(cellY * CellGridDimension) + cellX];
float p = cell.fitness / static_cast<float>(MaxFitness);
pbo[(cell.x * CellGridDimension * 4) + cell.y] = 255;
pbo[(cell.x * CellGridDimension * 4) + cell.y + 1] = 255;
pbo[(cell.x * CellGridDimension * 4) + cell.y + 2] = 255;
pbo[(cell.x * CellGridDimension * 4) + cell.y + 3] = 0; //(uchar)(255 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x] = static_cast<uchar>(255 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x + 1] = static_cast<uchar>(125 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x + 2] = static_cast<uchar>(255 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x + 3] = 255;
}
}
}
void cudaWorker()
{
if (iter < NumberOfEvolutions && sameFitnessValue < 5)
{
float evolveTime, fitnessTime;
lastFitness = fitness;
++iter;
grid.evolve(evolveTime);
averageEvolveTime += evolveTime;
fitness = grid.get_average_fitness(fitnessTime);
averageFitnessTime += fitnessTime;
if (fitness == lastFitness)
sameFitnessValue++;
else
sameFitnessValue = 0;
printf("Finished iteration %u, fitness: %.6f\n", iter + 1, fitness);
unsigned char *pboData;
size_t pboSize;
CUDA_CALL(hipGraphicsMapResources(1, &cudaPBOResource, 0));
CUDA_CALL(hipGraphicsResourceGetMappedPointer((void **)&pboData, &pboSize, cudaPBOResource));
KernelSettings ks = grid.get_kernel_settings();
Cell *cellMemory = grid.get_device_population_memory();
hipLaunchKernelGGL(( clear_kernel), dim3(ks.gridDimension), dim3(ks.blockDimension), 0, 0, pboData);
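// Note: draw is launched with a single thread (grid and block of 1) and
// walks the whole cell grid serially; it only fills the PBO for display.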
hipLaunchKernelGGL(( draw), dim3(1), dim3(1), 0, 0, CellGridDimension, CellGridDimension, pboData, cellMemory);
CUDA_CALL(hipGraphicsUnmapResources(1, &cudaPBOResource, 0));
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, CellGridDimension, CellGridDimension, GL_BGRA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
}
}
void my_idle()
{
cudaWorker();
glutPostRedisplay();
}
void initGL(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewportWidth, viewportHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow("Cellular genetic algorithm");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, viewportWidth, viewportHeight);
glFlush();
}
void create_render_texture()
{
size_t allocSize = sizeof(uchar) * 4 * CellGridDimension * CellGridDimension;
renderTextureData = static_cast<uchar *>(::operator new(allocSize));
memset(renderTextureData, 0, allocSize);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
//WARNING: Just some of inner format are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, CellGridDimension, CellGridDimension, 0, GL_RGBA, GL_UNSIGNED_BYTE, renderTextureData);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
}
void preparePBO()
{
glGenBuffers(1, &pboId);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboId); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, CellGridDimension * CellGridDimension * 4, NULL, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
}
void initCUDAtex()
{
CUDA_CALL(hipGraphicsGLRegisterBuffer(&cudaPBOResource, pboId, hipGraphicsRegisterFlagsWriteDiscard));
}
void releaseOpenGL()
{
if (textureId > 0)
glDeleteTextures(1, &textureId);
if (pboId > 0)
glDeleteBuffers(1, &pboId);
}
void releaseCUDA()
{
hipGraphicsUnregisterResource(cudaPBOResource);
hipGraphicsUnregisterResource(cudaTexResource);
}
void releaseResources()
{
releaseCUDA();
releaseOpenGL();
averageEvolveTime /= (double)iter;
averageFitnessTime /= (double)iter;
printf("Average evolve time: %f ms\n", averageEvolveTime);
printf("Average fitness time: %f ms\n", averageFitnessTime);
}
#endif
int main(int argc, char **argv)
{
std::string inputFile = "/home/mor0146/github/CudaCourse/project/images/radial16bit_2.png";
if (argc > 1)
{
inputFile = argv[1];
}
#if ENABLE_OGL
// Initialize CellGrid
fprintf(stdout, "Cell count: %lu\n", CellCount);
KernelSettings ks = {};
ks.blockDimension = dim3(ThreadsPerBlock, ThreadsPerBlock, 1);
ks.gridDimension = dim3(get_number_of_parts(CellGridDimension, ThreadsPerBlock), get_number_of_parts(CellGridDimension, ThreadsPerBlock), 1);
grid = CellGrid(CellGridDimension, CellGridDimension, ks);
//Image fitnessImage = Image("/home/mor0146/github/CudaCourse/project/images/radial16bit_2.png", ImageType_GrayScale_16bpp);
fprintf(stdout, "Loading %s as fitness image.\n", inputFile.c_str());
Image fitnessImage = Image(inputFile.c_str(), ImageType_GrayScale_16bpp);
grid.initialize_grid(fitnessImage);
// Start OpenGL
initGL(argc, argv);
create_render_texture();
preparePBO();
initCUDAtex();
//start rendering mainloop
glutMainLoop();
atexit(releaseResources);
free(renderTextureData);
fprintf(stdout, "terminated\n");
return 0;
#else
fprintf(stdout, "Cell count: %lu\n", CellCount);
KernelSettings ks = {};
ks.blockDimension = dim3(ThreadsPerBlock, ThreadsPerBlock, 1);
ks.gridDimension = dim3(get_number_of_parts(CellGridDimension, ThreadsPerBlock), get_number_of_parts(CellGridDimension, ThreadsPerBlock), 1);
CellGrid grid = CellGrid(CellGridDimension, CellGridDimension, ks);
Image fitnessImage = Image("/home/mor0146/github/CudaCourse/project/images/radial16bit_2.png", ImageType_GrayScale_16bpp);
grid.initialize_grid(fitnessImage);
float fitness = 0.0;
float lastFitness = -1.0f;
uint iter = 0;
double diff = 0;
double averageEvolveTime = 0.0f;
double averageFitnessTime = 0.0f;
size_t sameFitnessValue = 0;
while (iter < NumberOfEvolutions && sameFitnessValue < 5)
{
float evolveTime, fitnessTime;
lastFitness = fitness;
++iter;
grid.evolve(evolveTime);
averageEvolveTime += evolveTime;
fitness = grid.get_average_fitness(fitnessTime);
averageFitnessTime += fitnessTime;
diff = fitness - lastFitness;
if (fitness == lastFitness)
sameFitnessValue++;
else
sameFitnessValue = 0;
printf("Finished iteration %u, fitness: %.6f\t %.6f\n", iter + 1, fitness, diff); //diff >= 0 ? "+" : "-",
}
averageEvolveTime /= (double)iter;
averageFitnessTime /= (double)iter;
printf("Average evolve time: %f ms\n", averageEvolveTime);
printf("Average fitness time: %f ms\n", averageFitnessTime);
#endif
}
| 56d2432ae3da6ebba30236a1b1aacd205c1e2c4d.cu | #include <device_launch_parameters.h>
#include <image.h>
#include <assert.h>
#include <cell_grid.cuh>
#include <cstring>
#include <string>
#include <limits>
constexpr int ThreadsPerBlock = 32;
constexpr int NumberOfEvolutions = 5000;
constexpr int CellGridDimension = 2048 * 4; //10000
constexpr size_t CellCount = CellGridDimension * CellGridDimension;
#define ENABLE_OGL 0
#if ENABLE_OGL
#include <GL/glew.h>
#include <GL/glut.h>
#include <cuda_gl_interop.h>
typedef unsigned char uchar;
constexpr int BlockDim = 32;
constexpr ushort MaxFitness = std::numeric_limits<ushort>::max();
int viewportWidth = CellGridDimension;
int viewportHeight = CellGridDimension;
uchar *renderTextureData;
unsigned int pboId;
unsigned int textureId;
cudaGraphicsResource_t cudaPBOResource;
cudaGraphicsResource_t cudaTexResource;
CellGrid grid;
float fitness = 0.0;
float lastFitness = -1.0f;
uint iter = 0;
double averageEvolveTime = 0.0f;
double averageFitnessTime = 0.0f;
size_t sameFitnessValue = 0;
void my_display()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textureId);
glBegin(GL_QUADS);
glTexCoord2d(0, 0);
glVertex2d(0, 0);
glTexCoord2d(1, 0);
glVertex2d(viewportWidth, 0);
glTexCoord2d(1, 1);
glVertex2d(viewportWidth, viewportHeight);
glTexCoord2d(0, 1);
glVertex2d(0, viewportHeight);
glEnd();
glDisable(GL_TEXTURE_2D);
glFlush();
glutSwapBuffers();
}
void my_resize(GLsizei w, GLsizei h)
{
viewportWidth = w;
viewportHeight = h;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, viewportWidth, viewportHeight);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluOrtho2D(0, viewportWidth, 0, viewportHeight);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glutPostRedisplay();
}
__global__ void clear_kernel(unsigned char *pbo)
{
uint tIdX = (blockIdx.x * blockDim.x) + threadIdx.x;
uint tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
uint strideX = blockDim.x * gridDim.x;
uint strideY = blockDim.y * gridDim.y;
while (tIdX < CellGridDimension)
{
tIdY = (blockIdx.y * blockDim.y) + threadIdx.y;
while (tIdY < CellGridDimension)
{
pbo[(tIdY * CellGridDimension * 4) + tIdX] = 0;
pbo[(tIdY * CellGridDimension * 4) + tIdX + 1] = 0;
pbo[(tIdY * CellGridDimension * 4) + tIdX + 2] = 0;
pbo[(tIdY * CellGridDimension * 4) + tIdX + 3] = 0;
tIdY += strideY;
}
tIdX += strideX;
}
}
__global__ void draw(const unsigned int pboWidth, const unsigned int pboHeight, unsigned char *pbo, Cell *population)
{
for (int cellX = 0; cellX < CellGridDimension; cellX++)
{
for (int cellY = 0; cellY < CellGridDimension; cellY++)
{
Cell cell = population[(cellY * CellGridDimension) + cellX];
float p = cell.fitness / static_cast<float>(MaxFitness);
pbo[(cell.x * CellGridDimension * 4) + cell.y] = 255;
pbo[(cell.x * CellGridDimension * 4) + cell.y + 1] = 255;
pbo[(cell.x * CellGridDimension * 4) + cell.y + 2] = 255;
pbo[(cell.x * CellGridDimension * 4) + cell.y + 3] = 0; //(uchar)(255 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x] = static_cast<uchar>(255 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x + 1] = static_cast<uchar>(125 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x + 2] = static_cast<uchar>(255 * p);
// pbo[(cell.y * CellGridDimension * 4) + cell.x + 3] = 255;
}
}
}
void cudaWorker()
{
if (iter < NumberOfEvolutions && sameFitnessValue < 5)
{
float evolveTime, fitnessTime;
lastFitness = fitness;
++iter;
grid.evolve(evolveTime);
averageEvolveTime += evolveTime;
fitness = grid.get_average_fitness(fitnessTime);
averageFitnessTime += fitnessTime;
if (fitness == lastFitness)
sameFitnessValue++;
else
sameFitnessValue = 0;
printf("Finished iteration %u, fitness: %.6f\n", iter + 1, fitness);
unsigned char *pboData;
size_t pboSize;
CUDA_CALL(cudaGraphicsMapResources(1, &cudaPBOResource, 0));
CUDA_CALL(cudaGraphicsResourceGetMappedPointer((void **)&pboData, &pboSize, cudaPBOResource));
KernelSettings ks = grid.get_kernel_settings();
Cell *cellMemory = grid.get_device_population_memory();
clear_kernel<<<ks.gridDimension, ks.blockDimension>>>(pboData);
draw<<<1, 1>>>(CellGridDimension, CellGridDimension, pboData, cellMemory);
CUDA_CALL(cudaGraphicsUnmapResources(1, &cudaPBOResource, 0));
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboId);
glBindTexture(GL_TEXTURE_2D, textureId);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, CellGridDimension, CellGridDimension, GL_BGRA, GL_UNSIGNED_BYTE, NULL); //Source parameter is NULL, Data is coming from a PBO, not host memory
}
}
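// Note on the interop flow in cudaWorker above: each idle callback advances the cellular
// genetic algorithm on the GPU, maps the registered pixel buffer object to obtain a device
// pointer, lets clear_kernel and draw fill that buffer, unmaps it, and finally refreshes the
// texture with glTexSubImage2D using a NULL data pointer so the pixels are pulled from the
// bound GL_PIXEL_UNPACK_BUFFER rather than from host memory.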
void my_idle()
{
cudaWorker();
glutPostRedisplay();
}
void initGL(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(viewportWidth, viewportHeight);
glutInitWindowPosition(0, 0);
glutCreateWindow("Cellular genetic algorithm");
glutDisplayFunc(my_display);
glutReshapeFunc(my_resize);
glutIdleFunc(my_idle);
glutSetCursor(GLUT_CURSOR_CROSSHAIR);
glewInit();
glClearColor(0.0, 0.0, 0.0, 1.0);
glShadeModel(GL_SMOOTH);
glViewport(0, 0, viewportWidth, viewportHeight);
glFlush();
}
void create_render_texture()
{
size_t allocSize = sizeof(uchar) * 4 * CellGridDimension * CellGridDimension;
renderTextureData = static_cast<uchar *>(::operator new(allocSize));
memset(renderTextureData, 0, allocSize);
//OpenGL Texture
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &textureId);
glBindTexture(GL_TEXTURE_2D, textureId);
    //WARNING: Only some internal formats are supported by CUDA!!!
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, CellGridDimension, CellGridDimension, 0, GL_RGBA, GL_UNSIGNED_BYTE, renderTextureData);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP);
}
void preparePBO()
{
glGenBuffers(1, &pboId);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER, pboId); // Make this the current UNPACK buffer (OpenGL is state-based)
glBufferData(GL_PIXEL_UNPACK_BUFFER, CellGridDimension * CellGridDimension * 4, NULL, GL_DYNAMIC_COPY); // Allocate data for the buffer. 4-channel 8-bit image
}
void initCUDAtex()
{
CUDA_CALL(cudaGraphicsGLRegisterBuffer(&cudaPBOResource, pboId, cudaGraphicsRegisterFlagsWriteDiscard));
}
void releaseOpenGL()
{
if (textureId > 0)
glDeleteTextures(1, &textureId);
if (pboId > 0)
glDeleteBuffers(1, &pboId);
}
void releaseCUDA()
{
cudaGraphicsUnregisterResource(cudaPBOResource);
cudaGraphicsUnregisterResource(cudaTexResource);
}
void releaseResources()
{
releaseCUDA();
releaseOpenGL();
averageEvolveTime /= (double)iter;
averageFitnessTime /= (double)iter;
printf("Average evolve time: %f ms\n", averageEvolveTime);
printf("Average fitness time: %f ms\n", averageFitnessTime);
}
#endif
int main(int argc, char **argv)
{
std::string inputFile = "/home/mor0146/github/CudaCourse/project/images/radial16bit_2.png";
if (argc > 1)
{
inputFile = argv[1];
}
#if ENABLE_OGL
// Initialize CellGrid
fprintf(stdout, "Cell count: %lu\n", CellCount);
KernelSettings ks = {};
ks.blockDimension = dim3(ThreadsPerBlock, ThreadsPerBlock, 1);
ks.gridDimension = dim3(get_number_of_parts(CellGridDimension, ThreadsPerBlock), get_number_of_parts(CellGridDimension, ThreadsPerBlock), 1);
grid = CellGrid(CellGridDimension, CellGridDimension, ks);
//Image fitnessImage = Image("/home/mor0146/github/CudaCourse/project/images/radial16bit_2.png", ImageType_GrayScale_16bpp);
fprintf(stdout, "Loading %s as fitness image.\n", inputFile.c_str());
Image fitnessImage = Image(inputFile.c_str(), ImageType_GrayScale_16bpp);
grid.initialize_grid(fitnessImage);
// Start OpenGL
initGL(argc, argv);
create_render_texture();
preparePBO();
initCUDAtex();
//start rendering mainloop
glutMainLoop();
atexit(releaseResources);
free(renderTextureData);
fprintf(stdout, "terminated\n");
return 0;
#else
fprintf(stdout, "Cell count: %lu\n", CellCount);
KernelSettings ks = {};
ks.blockDimension = dim3(ThreadsPerBlock, ThreadsPerBlock, 1);
ks.gridDimension = dim3(get_number_of_parts(CellGridDimension, ThreadsPerBlock), get_number_of_parts(CellGridDimension, ThreadsPerBlock), 1);
CellGrid grid = CellGrid(CellGridDimension, CellGridDimension, ks);
Image fitnessImage = Image("/home/mor0146/github/CudaCourse/project/images/radial16bit_2.png", ImageType_GrayScale_16bpp);
grid.initialize_grid(fitnessImage);
float fitness = 0.0;
float lastFitness = -1.0f;
uint iter = 0;
double diff = 0;
double averageEvolveTime = 0.0f;
double averageFitnessTime = 0.0f;
size_t sameFitnessValue = 0;
while (iter < NumberOfEvolutions && sameFitnessValue < 5)
{
float evolveTime, fitnessTime;
lastFitness = fitness;
++iter;
grid.evolve(evolveTime);
averageEvolveTime += evolveTime;
fitness = grid.get_average_fitness(fitnessTime);
averageFitnessTime += fitnessTime;
diff = fitness - lastFitness;
if (fitness == lastFitness)
sameFitnessValue++;
else
sameFitnessValue = 0;
printf("Finished iteration %u, fitness: %.6f\t %.6f\n", iter + 1, fitness, diff); //diff >= 0 ? "+" : "-",
}
averageEvolveTime /= (double)iter;
averageFitnessTime /= (double)iter;
printf("Average evolve time: %f ms\n", averageEvolveTime);
printf("Average fitness time: %f ms\n", averageFitnessTime);
#endif
}
|
cc1103840e8bef7ca0ebd2306203812d746fd8ea.hip | // !!! This is a file automatically generated by hipify!!!
// //
// ----------------------------------------------------------------------------
// // Gunrock -- Fast and Efficient GPU Graph Library
// //
// ----------------------------------------------------------------------------
// // This source code is distributed under the terms of LICENSE.TXT
// // in the root directory of this source distribution.
// //
// ----------------------------------------------------------------------------
// /**
// * @file bc_app.cu
// *
// * @brief Betweenness Centrality (BC) application
// */
#include <iostream>
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// betweenness centrality path includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_test.cuh>
namespace gunrock {
namespace app {
namespace bc {
hipError_t UseParameters(util::Parameters ¶meters) {
hipError_t retval = hipSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"0",
"<Vertex-ID|random|largestdegree> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"src-seed",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"seed to generate random sources", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run BC tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Where to perform the BC computation
* \return hipError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT,
typename VertexT = typename GraphT::VertexT>
hipError_t RunTests(util::Parameters ¶meters, GraphT &graph,
ValueT **reference_bc_values = NULL,
ValueT **reference_sigmas = NULL,
VertexT **reference_labels = NULL,
util::Location target = util::DEVICE) {
std::cout << "--- RunTests ---" << std::endl;
hipError_t retval = hipSuccess;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// parse configurations from parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("bc", parameters, graph); // initialize Info structure
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
// Allocate host-side array (for both reference and GPU-computed results)
ValueT *h_bc_values = new ValueT[graph.nodes];
ValueT *h_sigmas = new ValueT[graph.nodes];
VertexT *h_labels = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// perform the algorithm
VertexT src;
for (int run_num = 0; run_num < num_runs; ++run_num) {
auto run_index = run_num % num_srcs;
src = srcs[run_index];
#if 1
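    // src == -1 is treated as a sentinel meaning "enact from every vertex in turn";
    // any other value runs a single-source pass for this iteration.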
if (src == -1) {
for (src = 0 ; src < graph.nodes ; ++src) {
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
}
} else {
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
}
#else
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
#endif
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
" ms, src = " + std::to_string(src) + ", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
SizeT num_errors = app::bc::Validate_Results(
parameters, graph, src, h_bc_values, h_sigmas, h_labels,
reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
reference_labels == NULL ? NULL : reference_labels[run_index], true);
}
}
cpu_timer.Start();
// Copy out results
GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
if (validation == "last") {
auto run_index = (num_runs - 1) % num_srcs;
SizeT num_errors = app::bc::Validate_Results(
parameters, graph, src, h_bc_values, h_sigmas, h_labels,
reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
reference_labels == NULL ? NULL : reference_labels[run_index], true);
}
// compute running statistics
info.ComputeTraversalStats(enactor, h_labels);
// Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
// #endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_bc_values;
h_bc_values = NULL;
delete[] h_sigmas;
h_sigmas = NULL;
delete[] h_labels;
h_labels = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace bc
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_bc function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the BC values
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_bc(gunrock::util::Parameters ¶meters, GraphT &graph,
ValueT **bc_values, ValueT **sigmas,
typename GraphT::VertexT **labels) {
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::bc::Problem<GraphT> ProblemT;
typedef gunrock::app::bc::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num) {
int src_num = run_num % num_srcs;
VertexT src = srcs[src_num];
#if 1
if (src == -1) {
for (src = 0 ; src < graph.nodes ; ++src) {
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
}
} else {
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
}
#else
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
#endif
problem.Extract(bc_values[src_num], sigmas[src_num], labels[src_num]);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
/*
 * @brief Simple interface taking in a graph in CSR format
 * @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform BC
 * @param[in] sources Sources to begin traversal, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
typename GValueT = float, typename BCValueT = GValueT>
float bc(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets,
const VertexT *col_indices, const int num_runs, VertexT *sources,
BCValueT **bc_values, BCValueT **sigmas, VertexT **labels) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("bc");
gunrock::graphio::UseParameters(parameters);
gunrock::app::bc::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST);
graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true);
// Run BC
double elapsed_time =
gunrock_bc(parameters, graph, bc_values, sigmas, labels);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
/*
 * @brief Simple interface taking in a graph in CSR format
 * @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] num_runs Number of runs to perform BC
 * @param[in] sources Sources to begin traversal, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
double bc(int num_nodes, int num_edges, const int *row_offsets,
const int *col_indices, int source, float *bc_values, float *sigmas,
int *labels) {
return bc(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&source, &bc_values, &sigmas, &labels);
}
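//
// Minimal usage sketch of the simple interface above (hypothetical toy graph, for
// illustration only):
//
//   int row_offsets[] = {0, 2, 3, 4, 4};  // 4 vertices, 4 directed edges in CSR form
//   int col_indices[] = {1, 2, 3, 3};
//   float bc_values[4], sigmas[4];
//   int labels[4];
//   double elapsed = bc(4, 4, row_offsets, col_indices, 0 /* source */, bc_values, sigmas, labels);
//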
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
| cc1103840e8bef7ca0ebd2306203812d746fd8ea.cu | // //
// ----------------------------------------------------------------------------
// // Gunrock -- Fast and Efficient GPU Graph Library
// //
// ----------------------------------------------------------------------------
// // This source code is distributed under the terms of LICENSE.TXT
// // in the root directory of this source distribution.
// //
// ----------------------------------------------------------------------------
// /**
// * @file bc_app.cu
// *
// * @brief Betweenness Centrality (BC) application
// */
#include <iostream>
#include <gunrock/gunrock.h>
// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>
// Graph definitions
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>
// betweenness centrality path includes
#include <gunrock/app/bc/bc_enactor.cuh>
#include <gunrock/app/bc/bc_test.cuh>
namespace gunrock {
namespace app {
namespace bc {
cudaError_t UseParameters(util::Parameters ¶meters) {
cudaError_t retval = cudaSuccess;
GUARD_CU(UseParameters_app(parameters));
GUARD_CU(UseParameters_problem(parameters));
GUARD_CU(UseParameters_enactor(parameters));
GUARD_CU(parameters.Use<std::string>(
"src",
util::REQUIRED_ARGUMENT | util::MULTI_VALUE | util::OPTIONAL_PARAMETER,
"0",
"<Vertex-ID|random|largestdegree> The source vertices\n"
"\tIf random, randomly select non-zero degree vertices;\n"
"\tIf largestdegree, select vertices with largest degrees",
__FILE__, __LINE__));
GUARD_CU(parameters.Use<int>(
"src-seed",
util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::OPTIONAL_PARAMETER,
util::PreDefinedValues<int>::InvalidValue,
"seed to generate random sources", __FILE__, __LINE__));
return retval;
}
/**
* @brief Run BC tests
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the distances
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[in] ref_distances Reference distances
* @param[in] target Where to perform the BC computation
* \return cudaError_t error message(s), if any
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT,
typename VertexT = typename GraphT::VertexT>
cudaError_t RunTests(util::Parameters ¶meters, GraphT &graph,
ValueT **reference_bc_values = NULL,
ValueT **reference_sigmas = NULL,
VertexT **reference_labels = NULL,
util::Location target = util::DEVICE) {
std::cout << "--- RunTests ---" << std::endl;
cudaError_t retval = cudaSuccess;
typedef typename GraphT::SizeT SizeT;
typedef Problem<GraphT> ProblemT;
typedef Enactor<ProblemT> EnactorT;
util::CpuTimer cpu_timer, total_timer;
cpu_timer.Start();
total_timer.Start();
// parse configurations from parameters
bool quiet_mode = parameters.Get<bool>("quiet");
int num_runs = parameters.Get<int>("num-runs");
std::string validation = parameters.Get<std::string>("validation");
util::Info info("bc", parameters, graph); // initialize Info structure
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
// Allocate host-side array (for both reference and GPU-computed results)
ValueT *h_bc_values = new ValueT[graph.nodes];
ValueT *h_sigmas = new ValueT[graph.nodes];
VertexT *h_labels = new VertexT[graph.nodes];
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
GUARD_CU(problem.Init(graph, target));
GUARD_CU(enactor.Init(problem, target));
cpu_timer.Stop();
parameters.Set("preprocess-time", cpu_timer.ElapsedMillis());
// perform the algorithm
VertexT src;
for (int run_num = 0; run_num < num_runs; ++run_num) {
auto run_index = run_num % num_srcs;
src = srcs[run_index];
#if 1
if (src == -1) {
for (src = 0 ; src < graph.nodes ; ++src) {
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
}
} else {
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
}
#else
GUARD_CU(problem.Reset(src, target));
GUARD_CU(enactor.Reset(src, target));
util::PrintMsg("__________________________", !quiet_mode);
cpu_timer.Start();
GUARD_CU(enactor.Enact(src));
cpu_timer.Stop();
info.CollectSingleRun(cpu_timer.ElapsedMillis());
#endif
util::PrintMsg(
"--------------------------\nRun " + std::to_string(run_num) +
" elapsed: " + std::to_string(cpu_timer.ElapsedMillis()) +
" ms, src = " + std::to_string(src) + ", #iterations = " +
std::to_string(enactor.enactor_slices[0].enactor_stats.iteration),
!quiet_mode);
if (validation == "each") {
GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
SizeT num_errors = app::bc::Validate_Results(
parameters, graph, src, h_bc_values, h_sigmas, h_labels,
reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
reference_labels == NULL ? NULL : reference_labels[run_index], true);
}
}
cpu_timer.Start();
// Copy out results
GUARD_CU(problem.Extract(h_bc_values, h_sigmas, h_labels));
if (validation == "last") {
auto run_index = (num_runs - 1) % num_srcs;
SizeT num_errors = app::bc::Validate_Results(
parameters, graph, src, h_bc_values, h_sigmas, h_labels,
reference_bc_values == NULL ? NULL : reference_bc_values[run_index],
reference_sigmas == NULL ? NULL : reference_sigmas[run_index],
reference_labels == NULL ? NULL : reference_labels[run_index], true);
}
// compute running statistics
info.ComputeTraversalStats(enactor, h_labels);
// Display_Memory_Usage(problem);
// #ifdef ENABLE_PERFORMANCE_PROFILING
// Display_Performance_Profiling(&enactor);
// #endif
// Clean up
GUARD_CU(enactor.Release(target));
GUARD_CU(problem.Release(target));
delete[] h_bc_values;
h_bc_values = NULL;
delete[] h_sigmas;
h_sigmas = NULL;
delete[] h_labels;
h_labels = NULL;
cpu_timer.Stop();
total_timer.Stop();
info.Finalize(cpu_timer.ElapsedMillis(), total_timer.ElapsedMillis());
return retval;
}
} // namespace bc
} // namespace app
} // namespace gunrock
/*
* @brief Entry of gunrock_bc function
* @tparam GraphT Type of the graph
* @tparam ValueT Type of the BC values
 * @param[in] parameters Execution parameters
* @param[in] graph Input graph
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gunrock_bc(gunrock::util::Parameters ¶meters, GraphT &graph,
ValueT **bc_values, ValueT **sigmas,
typename GraphT::VertexT **labels) {
typedef typename GraphT::VertexT VertexT;
typedef gunrock::app::bc::Problem<GraphT> ProblemT;
typedef gunrock::app::bc::Enactor<ProblemT> EnactorT;
gunrock::util::CpuTimer cpu_timer;
gunrock::util::Location target = gunrock::util::DEVICE;
double total_time = 0;
if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);
// Allocate problem and enactor on GPU, and initialize them
ProblemT problem(parameters);
EnactorT enactor;
problem.Init(graph, target);
enactor.Init(problem, target);
int num_runs = parameters.Get<int>("num-runs");
std::vector<VertexT> srcs = parameters.Get<std::vector<VertexT>>("srcs");
int num_srcs = srcs.size();
for (int run_num = 0; run_num < num_runs; ++run_num) {
int src_num = run_num % num_srcs;
VertexT src = srcs[src_num];
#if 1
if (src == -1) {
for (src = 0 ; src < graph.nodes ; ++src) {
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
}
} else {
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
}
#else
problem.Reset(src, target);
enactor.Reset(src, target);
cpu_timer.Start();
enactor.Enact(src);
cpu_timer.Stop();
total_time += cpu_timer.ElapsedMillis();
#endif
problem.Extract(bc_values[src_num], sigmas[src_num], labels[src_num]);
}
enactor.Release(target);
problem.Release(target);
srcs.clear();
return total_time;
}
/*
 * @brief Simple interface taking in a graph in CSR format
 * @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] edge_values CSR-formatted graph input edge weights
* @param[in] num_runs Number of runs to perform BC
 * @param[in] sources Sources to begin traversal, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
template <typename VertexT = int, typename SizeT = int,
typename GValueT = float, typename BCValueT = GValueT>
float bc(const SizeT num_nodes, const SizeT num_edges, const SizeT *row_offsets,
const VertexT *col_indices, const int num_runs, VertexT *sources,
BCValueT **bc_values, BCValueT **sigmas, VertexT **labels) {
typedef typename gunrock::app::TestGraph<VertexT, SizeT, GValueT,
gunrock::graph::HAS_CSR>
GraphT;
typedef typename GraphT::CsrT CsrT;
// Setup parameters
gunrock::util::Parameters parameters("bc");
gunrock::graphio::UseParameters(parameters);
gunrock::app::bc::UseParameters(parameters);
gunrock::app::UseParameters_test(parameters);
parameters.Parse_CommandLine(0, NULL);
parameters.Set("graph-type", "by-pass");
parameters.Set("num-runs", num_runs);
std::vector<VertexT> srcs;
for (int i = 0; i < num_runs; i++) srcs.push_back(sources[i]);
parameters.Set("srcs", srcs);
bool quiet = parameters.Get<bool>("quiet");
GraphT graph;
// Assign pointers into gunrock graph format
graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
graph.CsrT::row_offsets.SetPointer((SizeT *)row_offsets, num_nodes + 1, gunrock::util::HOST);
graph.CsrT::column_indices.SetPointer((VertexT *)col_indices, num_edges, gunrock::util::HOST);
graph.FromCsr(graph.csr(), gunrock::util::HOST, 0, quiet, true);
// Run BC
double elapsed_time =
gunrock_bc(parameters, graph, bc_values, sigmas, labels);
// Cleanup
graph.Release();
srcs.clear();
return elapsed_time;
}
/*
 * @brief Simple interface taking in a graph in CSR format
 * @param[in] num_nodes Number of vertices in the input graph
* @param[in] num_edges Number of edges in the input graph
* @param[in] row_offsets CSR-formatted graph input row offsets
* @param[in] col_indices CSR-formatted graph input column indices
* @param[in] num_runs Number of runs to perform BC
 * @param[in] sources Sources to begin traversal, one for each run
* @param[out] bc_values Return betweenness centrality values per vertex
* @param[out] sigmas Return sigma of each vertex
* @param[out] labels Return label of each vertex
* \return double Return accumulated elapsed times for all runs
*/
double bc(int num_nodes, int num_edges, const int *row_offsets,
const int *col_indices, int source, float *bc_values, float *sigmas,
int *labels) {
return bc(num_nodes, num_edges, row_offsets, col_indices, 1 /* num_runs */,
&source, &bc_values, &sigmas, &labels);
}
// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
|
803b344159c9ddf24158aafb7f28b38d5e320f1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/******************************************************************************/
extern "C"
__global__ void
magma_zgemv_kernel1(int m, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ c,
magmaDoubleComplex *dwork)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Call
magma_zgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau)
to compute
ZGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
*/
extern "C"
__global__ void
magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv, magmaDoubleComplex *c,
magmaDoubleComplex *dwork, magmaDoubleComplex *tau)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
if (i == 0)
c[0] = MAGMA_Z_ONE;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
/******************************************************************************/
extern "C"
__global__ void
magma_zgemv_kernel2(int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaDoubleComplex lsum;
V += j;
lsum = MAGMA_Z_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_Z_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
/******************************************************************************/
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
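/*
    Data-flow sketch of the routine below, following its inline comments (note the
    conjugate transpose of T applied by magma_ztrmv_tkernel):
        dwork = V**H c          (magma_zgemv_kernel1)
        dwork = T**H dwork      (magma_ztrmv_tkernel)
        c     = c - V dwork     (magma_zgemv_kernel2)
    so c is overwritten with (I - V T**H V**H) c.
*/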
extern "C" void
magma_zlarfbx_gpu_q(
magma_int_t m, magma_int_t k,
magmaDoubleComplex_ptr V, magma_int_t ldv,
magmaDoubleComplex_ptr dT, magma_int_t ldt,
magmaDoubleComplex_ptr c,
magmaDoubleComplex_ptr dwork,
magma_queue_t queue )
{
/* dwork = V**H c */
hipLaunchKernelGGL(( magma_zgemv_kernel1)
, dim3(k), dim3(BLOCK_SIZE), 0, queue->cuda_stream() ,
m, V, ldv, c, dwork);
/* dwork = T**H dwork */
hipLaunchKernelGGL(( magma_ztrmv_tkernel)
, dim3(k), dim3(k), 0, queue->cuda_stream() ,
dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
hipLaunchKernelGGL(( magma_zgemv_kernel2)
, dim3(blocks3), dim3(threads3), 0, queue->cuda_stream() ,
m, k, V, ldv, dwork+k, c);
}
| 803b344159c9ddf24158aafb7f28b38d5e320f1e.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#include "commonblas_z.h"
#include "magma_templates.h"
// 512 is maximum number of threads for CUDA capability 1.x
#define BLOCK_SIZE 512
/******************************************************************************/
extern "C"
__global__ void
magma_zgemv_kernel1(int m, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ c,
magmaDoubleComplex *dwork)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = sum[0];
}
/******************************************************************************/
/*
Call
magma_zgemv_kernel3<<< n, BLOCK_SIZE, 0, queue->cuda_stream() >>>(m, V, ldv, c, dwork, tau)
to compute
ZGEMV( "Conjugate transpose", m, n, -tau[0], V, ldv, c, 1, zero, dwork, 1)
and to set c[0] to 1.
i.e.,
work = -tau[0] V**H c
*/
extern "C"
__global__ void
magma_zgemv_kernel3(int m, const magmaDoubleComplex * __restrict__ V, int ldv, magmaDoubleComplex *c,
magmaDoubleComplex *dwork, magmaDoubleComplex *tau)
{
const int i = threadIdx.x;
const magmaDoubleComplex *dV = V + (blockIdx.x) * ldv;
__shared__ magmaDoubleComplex sum[ BLOCK_SIZE ];
magmaDoubleComplex lsum;
if (i == 0)
c[0] = MAGMA_Z_ONE;
/* lsum := v**H * C */
lsum = MAGMA_Z_ZERO;
for (int j = i; j < m; j += BLOCK_SIZE)
lsum += MAGMA_Z_MUL( MAGMA_Z_CONJ( dV[j] ), c[j] );
sum[i] = lsum;
magma_sum_reduce< BLOCK_SIZE >( i, sum );
__syncthreads();
if (i == 0)
dwork [blockIdx.x] = -tau[0]*sum[0];
}
/******************************************************************************/
extern "C"
__global__ void
magma_zgemv_kernel2(int m, int n, const magmaDoubleComplex * __restrict__ V, int ldv,
const magmaDoubleComplex * __restrict__ x, magmaDoubleComplex *c)
{
const int i = threadIdx.x;
const int j = i + BLOCK_SIZE * blockIdx.x;
magmaDoubleComplex lsum;
V += j;
lsum = MAGMA_Z_ZERO;
if (j < m) {
for (int k=0; k < n; k++)
lsum += MAGMA_Z_MUL( V[k*ldv], x[k]);
c[j] -= lsum;
}
}
/******************************************************************************/
/*
Apply a complex block reflector H to a complex vector C from the left
(i.e., C = H C). H is represented in the form
H = I - V T V**H
where T is the complex k-by-k upper triangular matrix in the
representation of the block reflector, and V is a complex block of
k elementary reflectors.
*/
extern "C" void
magma_zlarfbx_gpu_q(
magma_int_t m, magma_int_t k,
magmaDoubleComplex_ptr V, magma_int_t ldv,
magmaDoubleComplex_ptr dT, magma_int_t ldt,
magmaDoubleComplex_ptr c,
magmaDoubleComplex_ptr dwork,
magma_queue_t queue )
{
/* dwork = V**H c */
magma_zgemv_kernel1
<<< k, BLOCK_SIZE, 0, queue->cuda_stream() >>>
(m, V, ldv, c, dwork);
/* dwork = T**H dwork */
magma_ztrmv_tkernel
<<< k, k, 0, queue->cuda_stream() >>>
( dT, ldt, dwork, dwork+k);
/* c = c - V dwork */
dim3 blocks3( magma_ceildiv( m, BLOCK_SIZE ) );
dim3 threads3( BLOCK_SIZE );
magma_zgemv_kernel2
<<< blocks3, threads3, 0, queue->cuda_stream() >>>
( m, k, V, ldv, dwork+k, c);
}
|
dd817f8af435505959ba445bc842ede9e1984af3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include "shared.hpp"
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
static const dim_type THREADS = 256;
static const dim_type THREADS_X = 16;
static const dim_type THREADS_Y = 16;
static const dim_type CUBE_X = 8;
static const dim_type CUBE_Y = 8;
static const dim_type CUBE_Z = 4;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const dim_type MAX_CONV1_FILTER_LEN = 129;
static const dim_type MAX_CONV2_FILTER_LEN = 17;
static const dim_type MAX_CONV3_FILTER_LEN = 5;
// we declare the maximum size required by all three cases above
// and re-use the same constant memory locations for every case
__constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)];
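// Rough arithmetic behind the limits above, assuming the widest element type (cdouble,
// 16 bytes) and the launch shapes defined earlier:
//   1D: (256 + 2*(129-1)) = 512 elements                          -> 8 KB of shared memory per block
//   2D: (16 + 2*(17-1)) * (16 + 2*(17-1)) = 2304 elements         -> 36 KB
//   3D: (8 + 2*(5-1)) * (8 + 2*(5-1)) * (4 + 2*(5-1)) = 3072 elements -> 48 KB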
__inline__ __device__
dim_type index(dim_type i, dim_type j, dim_type k, dim_type jstride, dim_type kstride)
{
return i+j*jstride+k*kstride;
}
template<typename T, typename accType, bool expand>
__global__
void convolve1(Param<T> out, CParam<T> signal, dim_type fLen, dim_type nBBS,
dim_type oStep, dim_type sStep)
{
SharedMemory<T> shared;
T * shrdMem = shared.getPointer();
const dim_type padding = fLen-1;
const dim_type shrdLen = blockDim.x + 2*padding;
const unsigned batchId = blockIdx.x/nBBS;
T *dst = (T *)out.ptr + oStep +(batchId*out.strides[1]);
const T *src = (const T *)signal.ptr + sStep +(batchId*signal.strides[1]);
const accType *impulse = (const accType *)cFilter;
dim_type gx = blockDim.x*(blockIdx.x-batchId*nBBS);
dim_type s0 = signal.strides[0];
dim_type d0 = signal.dims[0];
for (dim_type i=threadIdx.x; i<shrdLen; i+=blockDim.x) {
dim_type idx= gx-padding + i;
shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0);
}
__syncthreads();
gx += threadIdx.x;
if (gx<out.dims[0]) {
dim_type lx = threadIdx.x + padding + (expand ? 0 : fLen>>1);
accType accum = scalar<accType>(0);
for(dim_type f=0; f<fLen; ++f) {
accum = accum + (shrdMem[lx-f]*impulse[f]);
}
dst[gx] = (T)accum;
}
}
template<typename T, typename accType, bool expand, dim_type fLen0, dim_type fLen1>
__global__
void convolve2(Param<T> out, CParam<T> signal, dim_type nBBS, dim_type oStep, dim_type sStep)
{
const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1));
__shared__ T shrdMem[C_SIZE];
const dim_type radius0 = fLen0-1;
const dim_type radius1 = fLen1-1;
const dim_type padding0 = 2*radius0;
const dim_type padding1 = 2*radius1;
const dim_type shrdLen0 = THREADS_X + padding0;
const dim_type shrdLen1 = THREADS_Y + padding1;
unsigned batchId = blockIdx.x/nBBS;
T *dst = (T *)out.ptr + oStep + (batchId*out.strides[2]);
const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[2]);
const accType *impulse = (const accType *)cFilter;
dim_type lx = threadIdx.x;
dim_type ly = threadIdx.y;
dim_type gx = THREADS_X * (blockIdx.x-batchId*nBBS) + lx;
dim_type gy = THREADS_Y * blockIdx.y + ly;
dim_type s0 = signal.strides[0];
dim_type s1 = signal.strides[1];
dim_type d0 = signal.dims[0];
dim_type d1 = signal.dims[1];
    // below loops are traditional loops; they only run multiple
    // times when the filter length is more than the launch size
#pragma unroll
for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) {
dim_type j = gy2-radius1;
bool is_j = j>=0 && j<d1;
        // move row_set THREADS_Y along columns
#pragma unroll
for (dim_type a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) {
dim_type i = gx2-radius0;
bool is_i = i>=0 && i<d0;
shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0));
}
}
__syncthreads();
if (gx<out.dims[0] && gy<out.dims[1]) {
dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1);
dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1);
accType accum = scalar<accType>(0);
#pragma unroll
for(dim_type fj=0; fj<fLen1; ++fj) {
#pragma unroll
for(dim_type fi=0; fi<fLen0; ++fi) {
accType f_val = impulse[fj*fLen0+fi];
T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)];
accum = accum + s_val*f_val;
}
}
dst[gy*out.strides[1]+gx] = (T)accum;
}
}
template<typename T>
__device__
T readSrc(T const *src, dim_type i, dim_type j, dim_type k, dim_type dims[], dim_type strides[])
{
bool is_i = i>=0 && i<dims[0];
bool is_j = j>=0 && j<dims[1];
bool is_k = k>=0 && k<dims[2];
if (is_i && is_j && is_k)
return src[(i*strides[0] + j*strides[1] + k*strides[2])];
else
return scalar<T>(0);
}
template<typename T, typename accType, bool expand>
__global__
void convolve3(Param<T> out, CParam<T> signal, dim_type fLen0, dim_type fLen1,
dim_type fLen2, dim_type nBBS, dim_type oStep, dim_type sStep)
{
SharedMemory<T> shared;
T * shrdMem = shared.getPointer();
dim_type radius0 = fLen0-1;
dim_type radius1 = fLen1-1;
dim_type radius2 = fLen2-1;
dim_type padding0 = 2*radius0;
dim_type padding1 = 2*radius1;
dim_type padding2 = 2*radius2;
dim_type shrdLen0 = blockDim.x + padding0;
dim_type skStride = shrdLen0 * (blockDim.y + padding1);
dim_type fStride = fLen0 * fLen1;
unsigned batchId = blockIdx.x/nBBS;
T *dst = (T *)out.ptr + oStep + (batchId*out.strides[3]);
const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[3]);
const accType *impulse = (const accType *)cFilter;
dim_type lx = threadIdx.x;
dim_type ly = threadIdx.y;
dim_type lz = threadIdx.z;
dim_type gx = blockDim.x * (blockIdx.x-batchId*nBBS) + lx;
dim_type gy = blockDim.y * blockIdx.y + ly;
dim_type gz = blockDim.z * blockIdx.z + lz;
dim_type lx2 = lx + blockDim.x;
dim_type ly2 = ly + blockDim.y;
dim_type lz2 = lz + blockDim.z;
dim_type gx2 = gx + blockDim.x;
dim_type gy2 = gy + blockDim.y;
dim_type gz2 = gz + blockDim.z;
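    // Cooperative halo load: each thread stores its own element and, when it lies within the
    // padding region, up to seven additional elements covering the faces, edges and corner of
    // the padded tile, so the (blockDim + 2*radius) shared-memory tile is fully populated
    // before the convolution loops run.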
shrdMem[index(lx, ly, lz, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides);
if (lx < padding0) {
shrdMem[index(lx2, ly, lz, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides);
}
if (ly < padding1) {
shrdMem[index(lx, ly2, lz, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides);
}
if (lz < padding2) {
shrdMem[index(lx, ly, lz2, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides);
}
if (lx < padding0 && ly < padding1) {
shrdMem[index(lx2, ly2, lz, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides);
}
if (ly < padding1 && lz < padding2) {
shrdMem[index(lx, ly2, lz2, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides);
}
if (lz < padding2 && lx < padding0) {
shrdMem[index(lx2, ly, lz2, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides);
}
if (lx < padding0 && ly < padding1 && lz < padding2) {
shrdMem[index(lx2, ly2, lz2, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides);
}
__syncthreads();
if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) {
dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1);
dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1);
dim_type ck = lz + radius2 + (expand ? 0 : fLen2>>1);
accType accum = scalar<accType>(0);
#pragma unroll
for(dim_type fk=0; fk<fLen2; ++fk) {
#pragma unroll
for(dim_type fj=0; fj<fLen1; ++fj) {
#pragma unroll
for(dim_type fi=0; fi<fLen0; ++fi) {
accType f_val = impulse[index(fi, fj, fk, fLen0, fStride)];
T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)];
accum = accum + s_val*f_val;
}
}
}
dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum;
}
}
template<typename T, dim_type baseDim>
void prepareKernelArgs(dim3 &blocks, dim3 &threads, size_t &sharedSize, dim_type &blk_x,
ConvolveBatchKind kind, dim_type oDims[], dim_type sDims[], dim_type fDims[])
{
dim_type blk_y, blk_z;
if (baseDim==1) {
threads = dim3(THREADS, 1);
blk_x = divup(oDims[0], threads.x);
blocks = dim3(blk_x, 1);
if (kind==MANY2ONE)
blocks.x *= sDims[1];
sharedSize = (threads.x+2*(fDims[0]-1)) * sizeof(T);
} else if (baseDim==2) {
threads = dim3(THREADS_X, THREADS_Y);
blk_x = divup(oDims[0], threads.x);
blk_y = divup(oDims[1], threads.y);
blocks = dim3(blk_x, blk_y);
if (kind==MANY2ONE)
blocks.x *= sDims[2];
} else if (baseDim==3) {
threads = dim3(CUBE_X, CUBE_Y, CUBE_Z);
blk_x = divup(oDims[0], threads.x);
blk_y = divup(oDims[1], threads.y);
blk_z = divup(oDims[2], threads.z);
blocks = dim3(blk_x, blk_y, blk_z);
if (kind==MANY2ONE)
blocks.x *= sDims[3];
sharedSize = (threads.x+2*(fDims[0]-1)) * (threads.y+2*(fDims[1]-1)) *
(threads.z+2*(fDims[2]-1)) * sizeof(T);
}
}
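// The conv2Helper overloads below map the runtime filter extents onto compile-time template
// arguments: any filter with both sides <= 5 is instantiated directly, square filters from
// 6x6 up to 11x11 are also covered, and every other shape ends up in CUDA_NOT_SUPPORTED().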
template<typename T, typename aT, bool expand, dim_type f0, dim_type f1>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig,
dim_type nBBS, dim_type oStp, dim_type sStp)
{
    hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve2<T, aT, expand, f0, f1>), dim3(blks), dim3(thrds), 0, 0, out, sig, nBBS, oStp, sStp);
}
template<typename T, typename aT, bool expand, dim_type f0>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig,
dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp)
{
switch(f1) {
case 1: conv2Helper<T, aT, expand, f0, 1>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 2: conv2Helper<T, aT, expand, f0, 2>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 3: conv2Helper<T, aT, expand, f0, 3>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 4: conv2Helper<T, aT, expand, f0, 4>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 5: conv2Helper<T, aT, expand, f0, 5>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
default: CUDA_NOT_SUPPORTED();
}
}
template<typename T, typename aT, bool expand>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig,
dim_type f0, dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp)
{
switch(f0) {
case 1: conv2Helper<T, aT, expand, 1>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 2: conv2Helper<T, aT, expand, 2>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 3: conv2Helper<T, aT, expand, 3>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 4: conv2Helper<T, aT, expand, 4>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 5: conv2Helper<T, aT, expand, 5>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
default: {
if (f0==f1) {
switch(f1) {
case 6: conv2Helper<T, aT, expand, 6, 6>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 7: conv2Helper<T, aT, expand, 7, 7>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 8: conv2Helper<T, aT, expand, 8, 8>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 9: conv2Helper<T, aT, expand, 9, 9>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 10: conv2Helper<T, aT, expand, 10, 10>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 11: conv2Helper<T, aT, expand, 11, 11>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
default: CUDA_NOT_SUPPORTED();
}
} else
CUDA_NOT_SUPPORTED();
} break;
}
}
template<typename T, typename accType, dim_type baseDim, bool expand>
void convolve_nd(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind)
{
bool callKernel = true;
dim_type MCFL2 = kernel::MAX_CONV2_FILTER_LEN;
dim_type MCFL3 = kernel::MAX_CONV3_FILTER_LEN;
switch(baseDim) {
case 1:
if (filter.dims[0]>kernel::MAX_CONV1_FILTER_LEN)
callKernel = false;
break;
case 2:
if ((filter.dims[0]*filter.dims[1]) > (MCFL2 * MCFL2))
callKernel = false;
break;
case 3:
if ((filter.dims[0]*filter.dims[1]*filter.dims[2]) > (MCFL3 * MCFL3 * MCFL3))
callKernel = false;
break;
}
if (!callKernel) {
CUDA_NOT_SUPPORTED();
}
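    // Batch handling: MANY2ONE is resolved inside the kernels themselves (prepareKernelArgs
    // widens blocks.x and the kernels index by batchId), while MANY2MANY (i-th signal with
    // i-th filter) and ONE2ALL (one signal against every filter) are looped over on the host
    // below, re-uploading the corresponding filter slice to constant memory on each pass.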
dim_type bCount = 1;
dim_type steps[3] = { 0, 0, 0 };
// [0] - output step, [1] - signal step, [2] - filter step
if (kind==MANY2MANY) {
steps[0] = out.strides[baseDim];
steps[1] = signal.strides[baseDim];
steps[2] = filter.strides[baseDim];
bCount = signal.dims[baseDim];
} else if (kind==ONE2ALL) {
steps[0] = out.strides[baseDim];
steps[2] = filter.strides[baseDim];
bCount = filter.dims[baseDim];
}
dim3 blocks, threads;
dim_type blk_x;
size_t sharedSize;
prepareKernelArgs<T, baseDim>(blocks, threads, sharedSize, blk_x,
kind, out.dims, signal.dims, filter.dims);
dim_type filterLen = filter.dims[0];
for(int i=1; i<baseDim; ++i) filterLen *= filter.dims[i];
for (dim_type b=0; b<bCount; ++b) {
// FIXME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(hipMemcpyToSymbol(kernel::cFilter, filter.ptr+b*steps[2], filterLen*sizeof(accType), 0, hipMemcpyDeviceToDevice));
switch(baseDim) {
case 1:
                hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve1<T, accType, expand>),
                    dim3(blocks), dim3(threads), sharedSize, 0, out, signal, filter.dims[0], blk_x, b*steps[0], b*steps[1]);
break;
case 2:
conv2Helper<T, accType, expand>(blocks, threads, out, signal, filter.dims[0],
filter.dims[1], blk_x, b*steps[0], b*steps[1]);
break;
case 3:
                hipLaunchKernelGGL(HIP_KERNEL_NAME(convolve3<T, accType, expand>),
                    dim3(blocks), dim3(threads), sharedSize, 0, out, signal, filter.dims[0], filter.dims[1], filter.dims[2],
                    blk_x, b*steps[0], b*steps[1]);
break;
}
}
POST_LAUNCH_CHECK();
}
#define INSTANTIATE(T, accType) \
template void convolve_nd<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 2, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 2, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 3, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 3, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float , float)
INSTANTIATE(uint , float)
INSTANTIATE(int , float)
INSTANTIATE(uchar , float)
INSTANTIATE(char , float)
}
}
| dd817f8af435505959ba445bc842ede9e1984af3.cu | /*******************************************************
* Copyright (c) 2014, ArrayFire
* All rights reserved.
*
* This file is distributed under 3-clause BSD license.
* The complete license agreement can be obtained at:
* http://arrayfire.com/licenses/BSD-3-Clause
********************************************************/
#include <af/defines.h>
#include <backend.hpp>
#include <dispatch.hpp>
#include <Param.hpp>
#include <debug_cuda.hpp>
#include <math.hpp>
#include "shared.hpp"
#include <convolve.hpp>
namespace cuda
{
namespace kernel
{
static const dim_type THREADS = 256;
static const dim_type THREADS_X = 16;
static const dim_type THREADS_Y = 16;
static const dim_type CUBE_X = 8;
static const dim_type CUBE_Y = 8;
static const dim_type CUBE_Z = 4;
// below shared MAX_*_LEN's are calculated based on
// a maximum shared memory configuration of 48KB per block
// considering complex types as well
static const dim_type MAX_CONV1_FILTER_LEN = 129;
static const dim_type MAX_CONV2_FILTER_LEN = 17;
static const dim_type MAX_CONV3_FILTER_LEN = 5;
// we declare the maximum size required by all three cases above
// and re-use the same constant memory locations for every case
__constant__ char cFilter[2*(2*(MAX_CONV1_FILTER_LEN-1)+THREADS)*sizeof(double)];
__inline__ __device__
dim_type index(dim_type i, dim_type j, dim_type k, dim_type jstride, dim_type kstride)
{
return i+j*jstride+k*kstride;
}
template<typename T, typename accType, bool expand>
__global__
void convolve1(Param<T> out, CParam<T> signal, dim_type fLen, dim_type nBBS,
dim_type oStep, dim_type sStep)
{
SharedMemory<T> shared;
T * shrdMem = shared.getPointer();
const dim_type padding = fLen-1;
const dim_type shrdLen = blockDim.x + 2*padding;
const unsigned batchId = blockIdx.x/nBBS;
T *dst = (T *)out.ptr + oStep +(batchId*out.strides[1]);
const T *src = (const T *)signal.ptr + sStep +(batchId*signal.strides[1]);
const accType *impulse = (const accType *)cFilter;
dim_type gx = blockDim.x*(blockIdx.x-batchId*nBBS);
dim_type s0 = signal.strides[0];
dim_type d0 = signal.dims[0];
for (dim_type i=threadIdx.x; i<shrdLen; i+=blockDim.x) {
dim_type idx= gx-padding + i;
shrdMem[i] = (idx>=0 && idx<d0) ? src[idx*s0] : scalar<T>(0);
}
__syncthreads();
gx += threadIdx.x;
if (gx<out.dims[0]) {
dim_type lx = threadIdx.x + padding + (expand ? 0 : fLen>>1);
accType accum = scalar<accType>(0);
for(dim_type f=0; f<fLen; ++f) {
accum = accum + (shrdMem[lx-f]*impulse[f]);
}
dst[gx] = (T)accum;
}
}
template<typename T, typename accType, bool expand, dim_type fLen0, dim_type fLen1>
__global__
void convolve2(Param<T> out, CParam<T> signal, dim_type nBBS, dim_type oStep, dim_type sStep)
{
const size_t C_SIZE = (THREADS_X+2*(fLen0-1))* (THREADS_Y+2*(fLen1-1));
__shared__ T shrdMem[C_SIZE];
const dim_type radius0 = fLen0-1;
const dim_type radius1 = fLen1-1;
const dim_type padding0 = 2*radius0;
const dim_type padding1 = 2*radius1;
const dim_type shrdLen0 = THREADS_X + padding0;
const dim_type shrdLen1 = THREADS_Y + padding1;
unsigned batchId = blockIdx.x/nBBS;
T *dst = (T *)out.ptr + oStep + (batchId*out.strides[2]);
const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[2]);
const accType *impulse = (const accType *)cFilter;
dim_type lx = threadIdx.x;
dim_type ly = threadIdx.y;
dim_type gx = THREADS_X * (blockIdx.x-batchId*nBBS) + lx;
dim_type gy = THREADS_Y * blockIdx.y + ly;
dim_type s0 = signal.strides[0];
dim_type s1 = signal.strides[1];
dim_type d0 = signal.dims[0];
dim_type d1 = signal.dims[1];
// below loops are traditional loops, they only run multiple
// times when the filter length is more than the launch size
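// e.g. with THREADS_Y = 16 and fLen1 = 17, shrdLen1 = 16 + 2*16 = 48, so each
// thread loads three rows of the shared tile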
#pragma unroll
for (dim_type b=ly, gy2=gy; b<shrdLen1; b+=THREADS_Y, gy2+=THREADS_Y) {
dim_type j = gy2-radius1;
bool is_j = j>=0 && j<d1;
// move row_set THREADS_Y along columns
#pragma unroll
for (dim_type a=lx, gx2=gx; a<shrdLen0; a+=THREADS_X, gx2+=THREADS_X) {
dim_type i = gx2-radius0;
bool is_i = i>=0 && i<d0;
shrdMem[b*shrdLen0+a] = (is_i && is_j ? src[i*s0+j*s1] : scalar<T>(0));
}
}
__syncthreads();
if (gx<out.dims[0] && gy<out.dims[1]) {
dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1);
dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1);
accType accum = scalar<accType>(0);
#pragma unroll
for(dim_type fj=0; fj<fLen1; ++fj) {
#pragma unroll
for(dim_type fi=0; fi<fLen0; ++fi) {
accType f_val = impulse[fj*fLen0+fi];
T s_val = shrdMem[(cj-fj)*shrdLen0 + (ci-fi)];
accum = accum + s_val*f_val;
}
}
dst[gy*out.strides[1]+gx] = (T)accum;
}
}
template<typename T>
__device__
T readSrc(T const *src, dim_type i, dim_type j, dim_type k, dim_type dims[], dim_type strides[])
{
bool is_i = i>=0 && i<dims[0];
bool is_j = j>=0 && j<dims[1];
bool is_k = k>=0 && k<dims[2];
if (is_i && is_j && is_k)
return src[(i*strides[0] + j*strides[1] + k*strides[2])];
else
return scalar<T>(0);
}
template<typename T, typename accType, bool expand>
__global__
void convolve3(Param<T> out, CParam<T> signal, dim_type fLen0, dim_type fLen1,
dim_type fLen2, dim_type nBBS, dim_type oStep, dim_type sStep)
{
SharedMemory<T> shared;
T * shrdMem = shared.getPointer();
dim_type radius0 = fLen0-1;
dim_type radius1 = fLen1-1;
dim_type radius2 = fLen2-1;
dim_type padding0 = 2*radius0;
dim_type padding1 = 2*radius1;
dim_type padding2 = 2*radius2;
dim_type shrdLen0 = blockDim.x + padding0;
dim_type skStride = shrdLen0 * (blockDim.y + padding1);
dim_type fStride = fLen0 * fLen1;
unsigned batchId = blockIdx.x/nBBS;
T *dst = (T *)out.ptr + oStep + (batchId*out.strides[3]);
const T *src = (const T *)signal.ptr + sStep + (batchId*signal.strides[3]);
const accType *impulse = (const accType *)cFilter;
dim_type lx = threadIdx.x;
dim_type ly = threadIdx.y;
dim_type lz = threadIdx.z;
dim_type gx = blockDim.x * (blockIdx.x-batchId*nBBS) + lx;
dim_type gy = blockDim.y * blockIdx.y + ly;
dim_type gz = blockDim.z * blockIdx.z + lz;
dim_type lx2 = lx + blockDim.x;
dim_type ly2 = ly + blockDim.y;
dim_type lz2 = lz + blockDim.z;
dim_type gx2 = gx + blockDim.x;
dim_type gy2 = gy + blockDim.y;
dim_type gz2 = gz + blockDim.z;
shrdMem[index(lx, ly, lz, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides);
if (lx < padding0) {
shrdMem[index(lx2, ly, lz, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy-radius1, gz-radius2, signal.dims, signal.strides);
}
if (ly < padding1) {
shrdMem[index(lx, ly2, lz, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides);
}
if (lz < padding2) {
shrdMem[index(lx, ly, lz2, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides);
}
if (lx < padding0 && ly < padding1) {
shrdMem[index(lx2, ly2, lz, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy2-radius1, gz-radius2, signal.dims, signal.strides);
}
if (ly < padding1 && lz < padding2) {
shrdMem[index(lx, ly2, lz2, shrdLen0, skStride)] =
readSrc(src, gx-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides);
}
if (lz < padding2 && lx < padding0) {
shrdMem[index(lx2, ly, lz2, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy-radius1, gz2-radius2, signal.dims, signal.strides);
}
if (lx < padding0 && ly < padding1 && lz < padding2) {
shrdMem[index(lx2, ly2, lz2, shrdLen0, skStride)] =
readSrc(src, gx2-radius0, gy2-radius1, gz2-radius2, signal.dims, signal.strides);
}
__syncthreads();
if (gx<out.dims[0] && gy<out.dims[1] && gz<out.dims[2]) {
dim_type ci = lx + radius0 + (expand ? 0 : fLen0>>1);
dim_type cj = ly + radius1 + (expand ? 0 : fLen1>>1);
dim_type ck = lz + radius2 + (expand ? 0 : fLen2>>1);
accType accum = scalar<accType>(0);
#pragma unroll
for(dim_type fk=0; fk<fLen2; ++fk) {
#pragma unroll
for(dim_type fj=0; fj<fLen1; ++fj) {
#pragma unroll
for(dim_type fi=0; fi<fLen0; ++fi) {
accType f_val = impulse[index(fi, fj, fk, fLen0, fStride)];
T s_val = shrdMem[index(ci-fi, cj-fj, ck-fk, shrdLen0, skStride)];
accum = accum + s_val*f_val;
}
}
}
dst[index(gx, gy, gz, out.strides[1], out.strides[2])] = (T)accum;
}
}
template<typename T, dim_type baseDim>
void prepareKernelArgs(dim3 &blocks, dim3 &threads, size_t &sharedSize, dim_type &blk_x,
ConvolveBatchKind kind, dim_type oDims[], dim_type sDims[], dim_type fDims[])
{
dim_type blk_y, blk_z;
if (baseDim==1) {
threads = dim3(THREADS, 1);
blk_x = divup(oDims[0], threads.x);
blocks = dim3(blk_x, 1);
if (kind==MANY2ONE)
blocks.x *= sDims[1];
sharedSize = (threads.x+2*(fDims[0]-1)) * sizeof(T);
} else if (baseDim==2) {
threads = dim3(THREADS_X, THREADS_Y);
blk_x = divup(oDims[0], threads.x);
blk_y = divup(oDims[1], threads.y);
blocks = dim3(blk_x, blk_y);
if (kind==MANY2ONE)
blocks.x *= sDims[2];
} else if (baseDim==3) {
threads = dim3(CUBE_X, CUBE_Y, CUBE_Z);
blk_x = divup(oDims[0], threads.x);
blk_y = divup(oDims[1], threads.y);
blk_z = divup(oDims[2], threads.z);
blocks = dim3(blk_x, blk_y, blk_z);
if (kind==MANY2ONE)
blocks.x *= sDims[3];
sharedSize = (threads.x+2*(fDims[0]-1)) * (threads.y+2*(fDims[1]-1)) *
(threads.z+2*(fDims[2]-1)) * sizeof(T);
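// e.g. an 8x8x4 block with a 5x5x5 filter needs (8+8)*(8+8)*(4+8) = 3072
// elements, i.e. 24 KB for double and exactly 48 KB for cdouble, the
// shared-memory budget mentioned above.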
}
}
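// The conv2Helper overloads below map the run-time filter extents onto the
// compile-time template parameters f0/f1 of convolve2, so its inner loops and
// shared-memory tile size are known at compile time; unsupported sizes fall
// through to CUDA_NOT_SUPPORTED().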
template<typename T, typename aT, bool expand, dim_type f0, dim_type f1>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig,
dim_type nBBS, dim_type oStp, dim_type sStp)
{
(convolve2<T, aT, expand, f0, f1>)<<<blks, thrds>>>(out, sig, nBBS, oStp, sStp);
}
template<typename T, typename aT, bool expand, dim_type f0>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig,
dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp)
{
switch(f1) {
case 1: conv2Helper<T, aT, expand, f0, 1>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 2: conv2Helper<T, aT, expand, f0, 2>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 3: conv2Helper<T, aT, expand, f0, 3>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 4: conv2Helper<T, aT, expand, f0, 4>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 5: conv2Helper<T, aT, expand, f0, 5>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
default: CUDA_NOT_SUPPORTED();
}
}
template<typename T, typename aT, bool expand>
void conv2Helper(dim3 blks, dim3 thrds, Param<T> out, CParam<T> sig,
dim_type f0, dim_type f1, dim_type nBBS, dim_type oStp, dim_type sStp)
{
switch(f0) {
case 1: conv2Helper<T, aT, expand, 1>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 2: conv2Helper<T, aT, expand, 2>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 3: conv2Helper<T, aT, expand, 3>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 4: conv2Helper<T, aT, expand, 4>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
case 5: conv2Helper<T, aT, expand, 5>(blks, thrds, out, sig, f1, nBBS, oStp, sStp); break;
default: {
if (f0==f1) {
switch(f1) {
case 6: conv2Helper<T, aT, expand, 6, 6>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 7: conv2Helper<T, aT, expand, 7, 7>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 8: conv2Helper<T, aT, expand, 8, 8>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 9: conv2Helper<T, aT, expand, 9, 9>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 10: conv2Helper<T, aT, expand, 10, 10>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
case 11: conv2Helper<T, aT, expand, 11, 11>(blks, thrds, out, sig, nBBS, oStp, sStp); break;
default: CUDA_NOT_SUPPORTED();
}
} else
CUDA_NOT_SUPPORTED();
} break;
}
}
template<typename T, typename accType, dim_type baseDim, bool expand>
void convolve_nd(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind)
{
bool callKernel = true;
dim_type MCFL2 = kernel::MAX_CONV2_FILTER_LEN;
dim_type MCFL3 = kernel::MAX_CONV3_FILTER_LEN;
switch(baseDim) {
case 1:
if (filter.dims[0]>kernel::MAX_CONV1_FILTER_LEN)
callKernel = false;
break;
case 2:
if ((filter.dims[0]*filter.dims[1]) > (MCFL2 * MCFL2))
callKernel = false;
break;
case 3:
if ((filter.dims[0]*filter.dims[1]*filter.dims[2]) > (MCFL3 * MCFL3 * MCFL3))
callKernel = false;
break;
}
if (!callKernel) {
CUDA_NOT_SUPPORTED();
}
dim_type bCount = 1;
dim_type steps[3] = { 0, 0, 0 };
// [0] - output step, [1] - signal step, [2] - filter step
if (kind==MANY2MANY) {
steps[0] = out.strides[baseDim];
steps[1] = signal.strides[baseDim];
steps[2] = filter.strides[baseDim];
bCount = signal.dims[baseDim];
} else if (kind==ONE2ALL) {
steps[0] = out.strides[baseDim];
steps[2] = filter.strides[baseDim];
bCount = filter.dims[baseDim];
}
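// MANY2ONE batching is not handled here: prepareKernelArgs already scaled
// blocks.x by the batch count and the kernels recover the batch index as
// blockIdx.x / nBBS, so bCount stays 1 and the offsets stay 0.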
dim3 blocks, threads;
dim_type blk_x;
size_t sharedSize;
prepareKernelArgs<T, baseDim>(blocks, threads, sharedSize, blk_x,
kind, out.dims, signal.dims, filter.dims);
dim_type filterLen = filter.dims[0];
for(int i=1; i<baseDim; ++i) filterLen *= filter.dims[i];
for (dim_type b=0; b<bCount; ++b) {
// FIXME: if the filter array is strided, direct copy of symbols
// might cause issues
CUDA_CHECK(cudaMemcpyToSymbol(kernel::cFilter, filter.ptr+b*steps[2], filterLen*sizeof(accType), 0, cudaMemcpyDeviceToDevice));
switch(baseDim) {
case 1:
(convolve1<T, accType, expand>)
<<<blocks, threads, sharedSize>>>(out, signal, filter.dims[0], blk_x, b*steps[0], b*steps[1]);
break;
case 2:
conv2Helper<T, accType, expand>(blocks, threads, out, signal, filter.dims[0],
filter.dims[1], blk_x, b*steps[0], b*steps[1]);
break;
case 3:
(convolve3<T, accType, expand>)
<<<blocks, threads, sharedSize>>>(out, signal, filter.dims[0], filter.dims[1], filter.dims[2],
blk_x, b*steps[0], b*steps[1]);
break;
}
}
POST_LAUNCH_CHECK();
}
#define INSTANTIATE(T, accType) \
template void convolve_nd<T, accType, 1, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 1, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 2, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 2, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 3, true >(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);\
template void convolve_nd<T, accType, 3, false>(Param<T> out, CParam<T> signal, CParam<accType> filter, ConvolveBatchKind kind);
INSTANTIATE(cdouble, cdouble)
INSTANTIATE(cfloat , cfloat)
INSTANTIATE(double , double)
INSTANTIATE(float , float)
INSTANTIATE(uint , float)
INSTANTIATE(int , float)
INSTANTIATE(uchar , float)
INSTANTIATE(char , float)
}
}
|
92a206b74bba4e1894d7125ebec8846619742c58.hip | // !!! This is a file automatically generated by hipify!!!
// include libraries
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "rocblas.h"
#include "hip/hip_runtime.h"
#define nstreams 1
int main () {
// banner
printf ("\n\nGPU DGEMM Exercise\n");
printf ( "==========================================\n");
printf ( "\nTiled Matrix-Matrix Multiplication\n");
printf ( "Using NVIDIA cuBLAS Library\n");
// echo device data
int idevice = 0;
hipSetDevice(idevice);
hipDeviceProp_t dprops;
hipGetDeviceProperties( &dprops, idevice );
printf ("\nDevice name = %s, with compute capability %d.%d \n",
dprops.name, dprops.major, dprops.minor);
// define parameters
int n = 32768; // matrix dimension - all matrices being multiplied will be square
int m = 4096; // tile size - tiles will be square, n must be divisible by m !!
printf ("\nMatrix sizes: %d x %d, tile size: %d x %d\n", n,n,m,m);
if ( ( n % m ) != 0 ) {
printf ("\nmatrix size (n) has to be devisible by tile size (m) !");
return 0 ;
}
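// With the defaults above, ntiles = n/m = 32768/4096 = 8, so the triple tile
// loop below issues 8*8*8 = 512 tile multiplications of 4096 x 4096 each
// (about 2*n^3 ~ 7.0e13 floating point operations in total).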
printf ("Number of Streams: %d (default stream)", nstreams);
// allocate arrays
double *a;
double *b;
double *c;
a = (double *) malloc ( n*n*sizeof(double) );
b = (double *) malloc ( n*n*sizeof(double) );
c = (double *) malloc ( n*n*sizeof(double) );
// initialize data
#pragma omp parallel for
for ( int row = 0; row<n; row++ ) {
for ( int col = 0; col<n; col++ ) {
// data in row-major format
a[row*n+col] = row + col;
b[row*n+col] = (row == col ) ? 1.0 : 0.0;
c[row*n+col] = 0.0;
}
}
// create communications arrays
double *pa;
double *pb;
double *pc;
hipHostMalloc ( &pa, m*m*sizeof(double) );
hipHostMalloc ( &pb, m*m*sizeof(double) );
hipHostMalloc ( &pc, m*m*sizeof(double) );
// create a handle to cuBlas
hipblasHandle_t cublasHandle;
hipblasCreate( &cublasHandle );
// allocate space on device - 3 tiles for a, b, c
double *d_a;
double *d_b;
double *d_c;
hipMalloc ( &d_a, m*m*sizeof(double) );
hipMalloc ( &d_b, m*m*sizeof(double) );
hipMalloc ( &d_c, m*m*sizeof(double) );
int ntiles = n/m;
// record start time
hipEvent_t t_start;
hipEvent_t t_end;
hipEventCreate (&t_start);
hipEventCreate (&t_end);
hipEventRecord (t_start,0);
// caches for indices of previous tile to write back results
// from pinned buffer to original result matrix
int prowtile;
int pcoltile;
// PERFORM MULTIPLICATION
{
double alpha = 1.0;
double beta = 1.0;
int itile = 0;
// loop over inner tile dimension
for ( int iktile = 0; iktile < ntiles; iktile++ ) {
// loop over row tiles
for ( int irowtile = 0; irowtile < ntiles; irowtile++ ) {
// loop over column tiles
for ( int icoltile = 0; icoltile < ntiles; icoltile++ ) {
if ( itile >= 1 ) {
// copy result in pinned buffer back to global matrix
# pragma omp parallel for
for ( int i=0; i<m; i++ ) {
for ( int j=0; j<m; j++ ) {
c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j];
}
}
}
// copy next tile to pinned buffer
# pragma omp parallel for
for ( int i=0; i<m; i++ ) {
for ( int j=0; j<m; j++ ) {
pa[i*m+j] = a[(irowtile*m+i)*n+iktile*m+j];
pb[i*m+j] = b[(iktile*m+i)*n+icoltile*m+j];
pc[i*m+j] = c[(irowtile*m+i)*n+icoltile*m+j];
}
}
// copy tile data to device
hipMemcpy ( d_a, pa, m*m*sizeof(double), hipMemcpyHostToDevice );
hipMemcpy ( d_b, pb, m*m*sizeof(double), hipMemcpyHostToDevice );
hipMemcpy ( d_c, pc, m*m*sizeof(double), hipMemcpyHostToDevice );
// perform dgemm
hipblasDgemm ( cublasHandle, HIPBLAS_OP_T, HIPBLAS_OP_T, m, m, m, &alpha, d_a, m, d_b, m, &beta, d_c, m );
prowtile = irowtile;
pcoltile = icoltile;
// copy result back to host
hipMemcpy ( pc, d_c, m*m*sizeof(double), hipMemcpyDeviceToHost );
// go to next tile
itile++;
}
}
}
// copy result in pinned buffer back to source
# pragma omp parallel for
for ( int i=0; i<m; i++ ) {
for ( int j=0; j<m; j++ ) {
c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j];
}
}
} // END OF PERFORM MULTIPLICATION
// record end time
hipEventRecord (t_end,0);
hipEventSynchronize(t_end);
float et;
hipEventElapsedTime (&et, t_start, t_end);
// check results
printf ("\nchecking results: ");
bool correct = true;
double abs_error, sum_abs_errors = 0;
# pragma omp parallel for private(abs_error) reduction(+:sum_abs_errors)
for ( int row = 0; row < n; row++ ) {
for ( int col = 0; col < n; col++ ) {
abs_error = fabs(c[row * n + col] - a[row * n + col] );
sum_abs_errors += abs_error;
if ( abs_error > 10e-5 ) {
printf ("FAILED\n\nerror: c[%d]: %f != a[%d]: %f",
row * n + col, c[row * n + col], row * n + col, a[row * n + col]);
correct = false;
break;
}
}
}
// report results
if ( correct ) {
printf ("SUCCESS");
printf ("\nSum abs errors: %f", sum_abs_errors);
printf("\nExecution time: %4.4f seconds\n", (double)et/1000.); // hipEventElapsedTime is in milliseconds
printf( "Gflop/s: %4.4f \n\n\n", 2.0e-6*n*n*n/et); // 2( * and + ) *n (inner dimension)*n^2(result size)/(time in ms.)
} else {
printf ("\nResult not correct, check your code !\n");
}
// clean up
hipblasDestroy ( cublasHandle );
hipEventDestroy ( t_start );
hipEventDestroy ( t_end );
hipHostFree ( pa );
hipHostFree ( pb );
hipHostFree ( pc );
hipFree ( d_a );
hipFree ( d_b );
hipFree ( d_c );
free (a);
free (b);
free (c);
}
| 92a206b74bba4e1894d7125ebec8846619742c58.cu | // include libraries
#include <stdio.h>
#include <math.h>
#include <omp.h>
#include "cublas_v2.h"
#include "cuda.h"
#define nstreams 1
int main () {
// banner
printf ("\n\nGPU DGEMM Exercise\n");
printf ( "==========================================\n");
printf ( "\nTiled Matrix-Matrix Multiplication\n");
printf ( "Using NVIDIA cuBLAS Library\n");
// echo device data
int idevice = 0;
cudaSetDevice(idevice);
cudaDeviceProp dprops;
cudaGetDeviceProperties( &dprops, idevice );
printf ("\nDevice name = %s, with compute capability %d.%d \n",
dprops.name, dprops.major, dprops.minor);
// define parameters
int n = 32768; // matrix dimension - all matrices being multiplied will be square
int m = 4096; // tile size - tiles will be square, n must be divisible by m !!
printf ("\nMatrix sizes: %d x %d, tile size: %d x %d\n", n,n,m,m);
if ( ( n % m ) != 0 ) {
printf ("\nmatrix size (n) has to be devisible by tile size (m) !");
return 0 ;
}
printf ("Number of Streams: %d (default stream)", nstreams);
// allocate arrays
double *a;
double *b;
double *c;
a = (double *) malloc ( n*n*sizeof(double) );
b = (double *) malloc ( n*n*sizeof(double) );
c = (double *) malloc ( n*n*sizeof(double) );
// initialize data
#pragma omp parallel for
for ( int row = 0; row<n; row++ ) {
for ( int col = 0; col<n; col++ ) {
// data in row-major format
a[row*n+col] = row + col;
b[row*n+col] = (row == col ) ? 1.0 : 0.0;
c[row*n+col] = 0.0;
}
}
// create communications arrays
double *pa;
double *pb;
double *pc;
cudaMallocHost ( &pa, m*m*sizeof(double) );
cudaMallocHost ( &pb, m*m*sizeof(double) );
cudaMallocHost ( &pc, m*m*sizeof(double) );
// create a handle to cuBlas
cublasHandle_t cublasHandle;
cublasCreate( &cublasHandle );
// allocate space on device - 3 tiles for a, b, c
double *d_a;
double *d_b;
double *d_c;
cudaMalloc ( &d_a, m*m*sizeof(double) );
cudaMalloc ( &d_b, m*m*sizeof(double) );
cudaMalloc ( &d_c, m*m*sizeof(double) );
int ntiles = n/m;
// record start time
cudaEvent_t t_start;
cudaEvent_t t_end;
cudaEventCreate (&t_start);
cudaEventCreate (&t_end);
cudaEventRecord (t_start,0);
// caches for indices of previous tile to write back results
// from pinned buffer to original result matrix
int prowtile;
int pcoltile;
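// Note: the write-back of tile (prowtile, pcoltile) is deferred by one loop
// iteration, but with nstreams == 1 (default stream) and blocking cudaMemcpy
// calls it does not actually overlap any GPU work; true overlap would need
// cudaMemcpyAsync on multiple streams.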
// PERFORM MULTIPLICATION
{
double alpha = 1.0;
double beta = 1.0;
int itile = 0;
// loop over inner tile dimension
for ( int iktile = 0; iktile < ntiles; iktile++ ) {
// loop over row tiles
for ( int irowtile = 0; irowtile < ntiles; irowtile++ ) {
// loop over column tiles
for ( int icoltile = 0; icoltile < ntiles; icoltile++ ) {
if ( itile >= 1 ) {
// copy result in pinned buffer back to global matrix
# pragma omp parallel for
for ( int i=0; i<m; i++ ) {
for ( int j=0; j<m; j++ ) {
c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j];
}
}
}
// copy next tile to pinned buffer
# pragma omp parallel for
for ( int i=0; i<m; i++ ) {
for ( int j=0; j<m; j++ ) {
pa[i*m+j] = a[(irowtile*m+i)*n+iktile*m+j];
pb[i*m+j] = b[(iktile*m+i)*n+icoltile*m+j];
pc[i*m+j] = c[(irowtile*m+i)*n+icoltile*m+j];
}
}
// copy tile data to device
cudaMemcpy ( d_a, pa, m*m*sizeof(double), cudaMemcpyHostToDevice );
cudaMemcpy ( d_b, pb, m*m*sizeof(double), cudaMemcpyHostToDevice );
cudaMemcpy ( d_c, pc, m*m*sizeof(double), cudaMemcpyHostToDevice );
// perform dgemm
cublasDgemm ( cublasHandle, CUBLAS_OP_T, CUBLAS_OP_T, m, m, m, &alpha, d_a, m, d_b, m, &beta, d_c, m );
prowtile = irowtile;
pcoltile = icoltile;
// copy result back to host
cudaMemcpy ( pc, d_c, m*m*sizeof(double), cudaMemcpyDeviceToHost );
// go to next tile
itile++;
}
}
}
// copy result in pinned buffer back to source
# pragma omp parallel for
for ( int i=0; i<m; i++ ) {
for ( int j=0; j<m; j++ ) {
c[(prowtile*m+i)*n+pcoltile*m+j] = pc[i*m+j];
}
}
} // END OF PERFORM MULTIPLICATION
// record end time
cudaEventRecord (t_end,0);
cudaEventSynchronize(t_end);
float et;
cudaEventElapsedTime (&et, t_start, t_end);
// check results
printf ("\nchecking results: ");
bool correct = true;
double abs_error, sum_abs_errors = 0;
# pragma omp parallel for private(abs_error) reduction(+:sum_abs_errors)
for ( int row = 0; row < n; row++ ) {
for ( int col = 0; col < n; col++ ) {
abs_error = fabs(c[row * n + col] - a[row * n + col] );
sum_abs_errors += abs_error;
if ( abs_error > 10e-5 ) {
printf ("FAILED\n\nerror: c[%d]: %f != a[%d]: %f",
row * n + col, c[row * n + col], row * n + col, a[row * n + col]);
correct = false;
break;
}
}
}
// report results
if ( correct ) {
printf ("SUCCESS");
printf ("\nSum abs errors: %f", sum_abs_errors);
printf("\nExecution time: %4.4f seconds\n", (double)et/1000.); // cudaEventElapsedTime is in milliseconds
printf( "Gflop/s: %4.4f \n\n\n", 2.0e-6*n*n*n/et); // 2( * and + ) *n (inner dimension)*n^2(result size)/(time in ms.)
} else {
printf ("\nResult not correct, check your code !\n");
}
// clean up
cublasDestroy ( cublasHandle );
cudaEventDestroy ( t_start );
cudaEventDestroy ( t_end );
cudaFreeHost ( pa );
cudaFreeHost ( pb );
cudaFreeHost ( pc );
cudaFree ( d_a );
cudaFree ( d_b );
cudaFree ( d_c );
free (a);
free (b);
free (c);
}
|
d77b13dc3918e2e3c4ff9faadd4484b856114e3d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ENTRYWISE_SCALE_BIAS_LAYER_INSTANTIATE
#include "lbann/layers/learning/entrywise_scale_bias.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bsizex x bsizey x 1
*
* Grid dimensions: (height / bsizex) x (width / bsizey) x num_channels
*/
template <typename TensorDataType>
__global__ void fp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ scale,
const TensorDataType* __restrict__ bias) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto a = scale[row];
const auto b = bias[row];
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& x = input[row + col*input_ldim];
auto& y = output[row + col*output_ldim];
y = a * x + b;
}
}
}
/**
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (height / bsize) x 1 x 1
*/
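// Gradient math implemented below: with y = scale[row] * x + bias[row],
// dL/dx = scale[row] * dL/dy, dL/dscale[row] = sum over columns of x * dL/dy,
// and dL/dbias[row] = sum over columns of dL/dy (one thread reduces each row).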
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim,
const TensorDataType* __restrict__ scale,
TensorDataType* __restrict__ gradient_wrt_scale,
TensorDataType* __restrict__ gradient_wrt_bias) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t row = gid; row < height; row += nthreads) {
const auto a = scale[row];
TensorDataType da{0}, db{0};
for (size_t col = 0; col < width; ++col) {
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = a * dy;
da += x * dy;
db += dy;
}
gradient_wrt_scale[row] = da;
gradient_wrt_bias[row] = db;
}
}
template <typename TensorDataType>
void fp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
El::Matrix<TensorDataType, El::Device::GPU>& local_output,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
const auto local_bias = El::LockedView(local_scale_bias,
El::ALL, El::IR(1));
// Apply entry-wise scale and bias
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
if (!local_input.IsEmpty()) {
constexpr size_t block_size_x = 256;
constexpr size_t block_size_y = 1;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
grid_dims.x = (local_height + block_size_x - 1) / block_size_x;
grid_dims.y = (local_width + block_size_y - 1) / block_size_y;
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_input),
gpu::get_sync_info(local_scale_bias));
hydrogen::gpu::LaunchKernel(
fp_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
local_scale.LockedBuffer(),
local_bias.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
const El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_output,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_input,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_scale = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_bias = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(1));
// Compute gradients
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
El::Zero(local_gradient_wrt_scale_bias);
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
auto multisync = El::MakeMultiSync(
gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_input),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_scale_bias),
gpu::get_sync_info(local_gradient_wrt_scale_bias));
hydrogen::gpu::LaunchKernel(
bp_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(),
local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(),
local_scale.LockedBuffer(),
local_gradient_wrt_scale.Buffer(),
local_gradient_wrt_bias.Buffer());
}
}
} // namespace
// Template instantiation
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::fp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
fp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<LocalMatType&>(this->get_local_activations()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()));
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::bp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
auto& scale_bias = this->get_weights(0);
auto& gradient_wrt_scale_bias = *this->m_weights_gradient;
bp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<const LocalMatType&>(this->get_local_prev_error_signals()),
dynamic_cast<LocalMatType&>(this->get_local_error_signals()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()),
dynamic_cast<LocalMatType&>(gradient_wrt_scale_bias.Matrix()));
// Update optimizer with gradient
auto* opt = scale_bias.get_optimizer();
if (opt != nullptr) {
opt->add_to_gradient(gradient_wrt_scale_bias, TensorDataType{1}, true);
}
}
LBANN_LAYER_DEFAULT_BUILDER(entrywise_scale_bias)
#define PROTO(T) \
template class entrywise_scale_bias_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class entrywise_scale_bias_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>; \
LBANN_LAYER_BUILDER_ETI(entrywise_scale_bias, T, El::Device::GPU)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
| d77b13dc3918e2e3c4ff9faadd4484b856114e3d.cu | ////////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2014-2019, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory.
// Written by the LBANN Research Team (B. Van Essen, et al.) listed in
// the CONTRIBUTORS file. <[email protected]>
//
// LLNL-CODE-697807.
// All rights reserved.
//
// This file is part of LBANN: Livermore Big Artificial Neural Network
// Toolkit. For details, see http://software.llnl.gov/LBANN or
// https://github.com/LLNL/LBANN.
//
// Licensed under the Apache License, Version 2.0 (the "Licensee"); you
// may not use this file except in compliance with the License. You may
// obtain a copy of the License at:
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the license.
////////////////////////////////////////////////////////////////////////////////
#define LBANN_ENTRYWISE_SCALE_BIAS_LAYER_INSTANTIATE
#include "lbann/layers/learning/entrywise_scale_bias.hpp"
#include "lbann/utils/gpu/helpers.hpp"
namespace lbann {
namespace {
/**
* Block dimensions: bsizex x bsizey x 1
*
* Grid dimensions: (height / bsizex) x (width / bsizey) x num_channels
*/
template <typename TensorDataType>
__global__ void fp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
TensorDataType* __restrict__ output,
size_t output_ldim,
const TensorDataType* __restrict__ scale,
const TensorDataType* __restrict__ bias) {
const size_t gidx = threadIdx.x + blockIdx.x * blockDim.x;
const size_t gidy = threadIdx.y + blockIdx.y * blockDim.y;
const size_t nthreadsx = blockDim.x * gridDim.x;
const size_t nthreadsy = blockDim.y * gridDim.y;
for (size_t row = gidx; row < height; row += nthreadsx) {
const auto a = scale[row];
const auto b = bias[row];
for (size_t col = gidy; col < width; col += nthreadsy) {
const auto& x = input[row + col*input_ldim];
auto& y = output[row + col*output_ldim];
y = a * x + b;
}
}
}
/**
* Block dimensions: bsize x 1 x 1
*
* Grid dimensions: (height / bsize) x 1 x 1
*/
template <typename TensorDataType>
__global__ void bp_kernel(size_t height,
size_t width,
const TensorDataType* __restrict__ input,
size_t input_ldim,
const TensorDataType* __restrict__ gradient_wrt_output,
size_t gradient_wrt_output_ldim,
TensorDataType* __restrict__ gradient_wrt_input,
size_t gradient_wrt_input_ldim,
const TensorDataType* __restrict__ scale,
TensorDataType* __restrict__ gradient_wrt_scale,
TensorDataType* __restrict__ gradient_wrt_bias) {
const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
const size_t nthreads = blockDim.x * gridDim.x;
for (size_t row = gid; row < height; row += nthreads) {
const auto a = scale[row];
TensorDataType da{0}, db{0};
for (size_t col = 0; col < width; ++col) {
const auto& x = input[row + col * input_ldim];
const auto& dy = gradient_wrt_output[row + col * gradient_wrt_output_ldim];
auto& dx = gradient_wrt_input[row + col * gradient_wrt_input_ldim];
dx = a * dy;
da += x * dy;
db += dy;
}
gradient_wrt_scale[row] = da;
gradient_wrt_bias[row] = db;
}
}
template <typename TensorDataType>
void fp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
El::Matrix<TensorDataType, El::Device::GPU>& local_output,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
const auto local_bias = El::LockedView(local_scale_bias,
El::ALL, El::IR(1));
// Apply entry-wise scale and bias
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
if (!local_input.IsEmpty()) {
constexpr size_t block_size_x = 256;
constexpr size_t block_size_y = 1;
dim3 block_dims, grid_dims;
block_dims.x = block_size_x;
block_dims.y = block_size_y;
grid_dims.x = (local_height + block_size_x - 1) / block_size_x;
grid_dims.y = (local_width + block_size_y - 1) / block_size_y;
auto multisync = El::MakeMultiSync(gpu::get_sync_info(local_output),
gpu::get_sync_info(local_input),
gpu::get_sync_info(local_scale_bias));
hydrogen::gpu::LaunchKernel(
fp_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_output.Buffer(), local_output.LDim(),
local_scale.LockedBuffer(),
local_bias.LockedBuffer());
}
}
template <typename TensorDataType>
void bp_impl(
const El::Matrix<TensorDataType, El::Device::GPU>& local_input,
const El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_output,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_input,
El::Matrix<TensorDataType, El::Device::GPU> const& local_scale_bias,
El::Matrix<TensorDataType, El::Device::GPU>& local_gradient_wrt_scale_bias) {
// Local matrices
const auto local_scale = El::LockedView(local_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_scale = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(0));
auto local_gradient_wrt_bias = El::View(local_gradient_wrt_scale_bias,
El::ALL, El::IR(1));
// Compute gradients
const El::Int local_height = local_input.Height();
const El::Int local_width = local_input.Width();
El::Zero(local_gradient_wrt_scale_bias);
if (!local_input.IsEmpty()) {
constexpr size_t block_size = 256;
dim3 block_dims, grid_dims;
block_dims.x = block_size;
grid_dims.x = (local_height + block_size - 1) / block_size;
auto multisync = El::MakeMultiSync(
gpu::get_sync_info(local_gradient_wrt_input),
gpu::get_sync_info(local_input),
gpu::get_sync_info(local_gradient_wrt_output),
gpu::get_sync_info(local_scale_bias),
gpu::get_sync_info(local_gradient_wrt_scale_bias));
hydrogen::gpu::LaunchKernel(
bp_kernel<TensorDataType>,
grid_dims, block_dims, 0, multisync,
local_height, local_width,
local_input.LockedBuffer(), local_input.LDim(),
local_gradient_wrt_output.LockedBuffer(), local_gradient_wrt_output.LDim(),
local_gradient_wrt_input.Buffer(), local_gradient_wrt_input.LDim(),
local_scale.LockedBuffer(),
local_gradient_wrt_scale.Buffer(),
local_gradient_wrt_bias.Buffer());
}
}
} // namespace
// Template instantiation
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::fp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
fp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<LocalMatType&>(this->get_local_activations()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()));
}
template <typename TensorDataType, data_layout Layout, El::Device Device>
void entrywise_scale_bias_layer<TensorDataType, Layout, Device>::bp_compute() {
using LocalMatType = El::Matrix<TensorDataType, Device>;
auto& scale_bias = this->get_weights(0);
auto& gradient_wrt_scale_bias = *this->m_weights_gradient;
bp_impl(dynamic_cast<const LocalMatType&>(this->get_local_prev_activations()),
dynamic_cast<const LocalMatType&>(this->get_local_prev_error_signals()),
dynamic_cast<LocalMatType&>(this->get_local_error_signals()),
dynamic_cast<LocalMatType const&>(
this->weights_values(0).LockedMatrix()),
dynamic_cast<LocalMatType&>(gradient_wrt_scale_bias.Matrix()));
// Update optimizer with gradient
auto* opt = scale_bias.get_optimizer();
if (opt != nullptr) {
opt->add_to_gradient(gradient_wrt_scale_bias, TensorDataType{1}, true);
}
}
LBANN_LAYER_DEFAULT_BUILDER(entrywise_scale_bias)
#define PROTO(T) \
template class entrywise_scale_bias_layer< \
T, data_layout::DATA_PARALLEL, El::Device::GPU>; \
template class entrywise_scale_bias_layer< \
T, data_layout::MODEL_PARALLEL, El::Device::GPU>; \
LBANN_LAYER_BUILDER_ETI(entrywise_scale_bias, T, El::Device::GPU)
#define LBANN_INSTANTIATE_GPU_HALF
#include "lbann/macros/instantiate.hpp"
} // namespace lbann
|
e4c9df6595cf1e27d19e2dd049aace414485a89d.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright 2013-10 sxniu
#include "include/four_direction_scan.h"
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <string>
#include "include/ConstValue.h"
#include "include/utils.h"
namespace four_direction_scan {
__global__ void TopToEndScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen + image_width] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (y + i + 1 > image_height - 3) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen+(i + 1) * image_width] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen+(i + 1) * image_width] != reference_colour)
break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen+(i + 1) * image_width] = render_colour;
}
}
}
}
}
__global__ void LeftToRightScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen + 1] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (x + i + 1 > image_width - 3) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen + i + 1] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen + i + 1] != reference_colour) break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen + i + 1] = render_colour;
}
}
}
}
}
__global__ void EndToTopScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen - image_width] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (y-(i + 1) < 2) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen-(i + 1) * image_width] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen-(i + 1) * image_width] != reference_colour)
break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen-(i + 1) * image_width] = render_colour;
}
}
}
}
}
__global__ void RightToLeftScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen - 1] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (x - i - 1 < 2) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen - i - 1] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen - i - 1] != reference_colour) break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen - i - 1] = render_colour;
}
}
}
}
}
__global__ void RmExtraColour(int* edge_image, int image_width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
if (edge_image[index_cen] != WHITE &&
edge_image[index_cen] != 0 &&
edge_image[index_cen] != COLOUR_RED &&
edge_image[index_cen] != COLOUR_YELLOW &&
edge_image[index_cen] != COLOUR_PURPLE &&
edge_image[index_cen] != COLOUR_CYAN &&
edge_image[index_cen] != COLOUR_GREEN &&
edge_image[index_cen] != COLOUR_BLUE &&
edge_image[index_cen] != COLOUR_LIGHT_GRAY &&
edge_image[index_cen] != COLOUR_LIGHT_BLUE &&
edge_image[index_cen] != COLOUR_PURPLE_RED &&
edge_image[index_cen] != COLOUR_LIGHT_PURPLE &&
edge_image[index_cen] != COLOUR_DARK_YELLOW &&
edge_image[index_cen] != COLOUR_LIGHT_GREEN &&
edge_image[index_cen] != COLOUR_DARK_GREEN &&
edge_image[index_cen] != COLOUR_DARK_BLUE &&
edge_image[index_cen] != COLOUR_DARK_RED &&
edge_image[index_cen] != COLOUR_GRAY) {
edge_image[index_cen] = 0;
}
}
void FourDirectionScan(int* edge_image, int image_width,
int image_height, int s_length,
int repeat_num, int fill_colour) {
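// Each directional pass recolours runs of reference_colour that start next to
// a WHITE pixel and end at another WHITE pixel (or the image border) within
// s_length steps. Chaining the four directions with a fresh temporary colour
// per pass gradually fills regions enclosed by WHITE edges; the final pass
// writes fill_colour and RmExtraColour clears any colour outside the fixed
// palette.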
dim3 block(g_block_x, g_block_y);
dim3 grid(g_grid_x, g_grid_y);
int _host_exist = 8;
int a = 0;
int b = 0;
for (int i = 0; i < repeat_num; i++) {
a = b;
b = 2000; // random big number
hipLaunchKernelGGL(( TopToEndScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
hipLaunchKernelGGL(( LeftToRightScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
hipLaunchKernelGGL(( EndToTopScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
a = b;
if (i == repeat_num - 1) {
b = fill_colour;
} else {
b++;
}
hipLaunchKernelGGL(( RightToLeftScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
}
hipLaunchKernelGGL(( RmExtraColour), dim3(grid), dim3(block), 0, 0, edge_image, image_width);
}
#ifdef FOURDIRECTIONSCAN_WITH_FEEDBACK
__global__ void exist_identify(int* exist) {
if (threadIdx.x == 0 && threadIdx.y == 0 &&
blockIdx.x == 0 && blockIdx.y == 0) {
if (*exist > 1)
*exist = 1;
else
*exist = 0;
}
}
__global__ void exist_set(int* exist) {
if (threadIdx.x == 0 && threadIdx.y == 0 &&
blockIdx.x == 0 && blockIdx.y == 0) {
*exist = 1;
}
}
__global__ void colour_exist(int* des_mem, int ref_col, int image_width,
int image_height, int* exist) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
if (des_mem[index_cen] == ref_col) {
atomicAdd(exist, 1);
}
}
void FourDirectionScan(int* edge_image, int image_width,
int image_height, int s_length, int* dev_exist) {
dim3 block(g_block_x, g_block_y);
dim3 grid(g_grid_x, g_grid_y);
int _host_exist = 8;
hipLaunchKernelGGL(( exist_set), dim3(grid), dim3(block), 0, 0, dev_exist);
int a = 0;
int b = 0;
for (int i = 0; i < 500; i++) {
a = b;
b = 2000;
hipLaunchKernelGGL(( TopToEndScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
hipLaunchKernelGGL(( LeftToRightScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
hipLaunchKernelGGL(( EndToTopScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
a = b;
b = 0x0000ffff * i; // kColourArray[i];
hipLaunchKernelGGL(( RightToLeftScan), dim3(grid), dim3(block), 0, 0, edge_image, image_width,
image_height, s_length, a, b);
//hipLaunchKernelGGL(( RmExtraColour), dim3(grid), dim3(block), 0, 0, edge_image, image_width);
if (i > 0) {
hipLaunchKernelGGL(( colour_exist), dim3(grid), dim3(block), 0, 0, edge_image, 0x0000ffff * (i - 1), // kColourArray[i - 1],
image_width, image_height, dev_exist);
hipLaunchKernelGGL(( exist_identify), dim3(grid), dim3(block), 0, 0, dev_exist);
hipMemcpy(&_host_exist, dev_exist, sizeof(int), hipMemcpyDeviceToHost);
if (_host_exist == 0) {
utils::ShowNum(i+1);
break;
}
}
}
}
#endif
} // namespace four_direction_scan
| e4c9df6595cf1e27d19e2dd049aace414485a89d.cu | // Copyright 2013-10 sxniu
#include "include/four_direction_scan.h"
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <string>
#include "include/ConstValue.h"
#include "include/utils.h"
namespace four_direction_scan {
__global__ void TopToEndScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen + image_width] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (y + i + 1 > image_height - 3) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen+(i + 1) * image_width] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen+(i + 1) * image_width] != reference_colour)
break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen+(i + 1) * image_width] = render_colour;
}
}
}
}
}
__global__ void LeftToRightScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen + 1] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (x + i + 1 > image_width - 3) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen + i + 1] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen + i + 1] != reference_colour) break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen + i + 1] = render_colour;
}
}
}
}
}
__global__ void EndToTopScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen - image_width] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (y-(i + 1) < 2) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen-(i + 1) * image_width] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen-(i + 1) * image_width] != reference_colour)
break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen-(i + 1) * image_width] = render_colour;
}
}
}
}
}
__global__ void RightToLeftScan(int* edge_image, int image_width,
int image_height, int search_length,
int reference_colour, int render_colour) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
int enable_render = false;
int pos_num = 0;
if (x > 1 && x < image_width - 2 && y > 1 && y < image_height - 2) {
if (edge_image[index_cen] == WHITE &&
edge_image[index_cen - 1] == reference_colour) {
for (int i = 0; i < search_length; i++) {
if (x - i - 1 < 2) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen - i - 1] == WHITE) {
enable_render = true;
pos_num = i;
break;
}
if (edge_image[index_cen - i - 1] != reference_colour) break;
}
if (enable_render) {
for (int i = 0; i < pos_num; i++) {
edge_image[index_cen - i - 1] = render_colour;
}
}
}
}
}
__global__ void RmExtraColour(int* edge_image, int image_width) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
if (edge_image[index_cen] != WHITE &&
edge_image[index_cen] != 0 &&
edge_image[index_cen] != COLOUR_RED &&
edge_image[index_cen] != COLOUR_YELLOW &&
edge_image[index_cen] != COLOUR_PURPLE &&
edge_image[index_cen] != COLOUR_CYAN &&
edge_image[index_cen] != COLOUR_GREEN &&
edge_image[index_cen] != COLOUR_BLUE &&
edge_image[index_cen] != COLOUR_LIGHT_GRAY &&
edge_image[index_cen] != COLOUR_LIGHT_BLUE &&
edge_image[index_cen] != COLOUR_PURPLE_RED &&
edge_image[index_cen] != COLOUR_LIGHT_PURPLE &&
edge_image[index_cen] != COLOUR_DARK_YELLOW &&
edge_image[index_cen] != COLOUR_LIGHT_GREEN &&
edge_image[index_cen] != COLOUR_DARK_GREEN &&
edge_image[index_cen] != COLOUR_DARK_BLUE &&
edge_image[index_cen] != COLOUR_DARK_RED &&
edge_image[index_cen] != COLOUR_GRAY) {
edge_image[index_cen] = 0;
}
}
void FourDirectionScan(int* edge_image, int image_width,
int image_height, int s_length,
int repeat_num, int fill_colour) {
dim3 block(g_block_x, g_block_y);
dim3 grid(g_grid_x, g_grid_y);
int _host_exist = 8;
int a = 0;
int b = 0;
for (int i = 0; i < repeat_num; i++) {
a = b;
b = 2000; // random big number
TopToEndScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
LeftToRightScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
EndToTopScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
a = b;
if (i == repeat_num - 1) {
b = fill_colour;
} else {
b++;
}
RightToLeftScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
}
RmExtraColour<<<grid, block>>>(edge_image, image_width);
}
#ifdef FOURDIRECTIONSCAN_WITH_FEEDBACK
__global__ void exist_identify(int* exist) {
if (threadIdx.x == 0 && threadIdx.y == 0 &&
blockIdx.x == 0 && blockIdx.y == 0) {
if (*exist > 1)
*exist = 1;
else
*exist = 0;
}
}
__global__ void exist_set(int* exist) {
if (threadIdx.x == 0 && threadIdx.y == 0 &&
blockIdx.x == 0 && blockIdx.y == 0) {
*exist = 1;
}
}
__global__ void colour_exist(int* des_mem, int ref_col, int image_width,
int image_height, int* exist) {
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int index_cen = y * image_width + x;
if (des_mem[index_cen] == ref_col) {
atomicAdd(exist, 1);
}
}
void FourDirectionScan(int* edge_image, int image_width,
int image_height, int s_length, int* dev_exist) {
dim3 block(g_block_x, g_block_y);
dim3 grid(g_grid_x, g_grid_y);
int _host_exist = 8;
exist_set<<<grid, block>>>(dev_exist);
int a = 0;
int b = 0;
for (int i = 0; i < 500; i++) {
a = b;
b = 2000;
TopToEndScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
LeftToRightScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
a = b;
b++;
EndToTopScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
a = b;
b = 0x0000ffff * i; // kColourArray[i];
RightToLeftScan<<<grid, block>>>(edge_image, image_width,
image_height, s_length, a, b);
// RmExtraColour<<<grid, block>>>(edge_image, image_width);
if (i > 0) {
colour_exist<<<grid, block>>>(edge_image, 0x0000ffff * (i - 1), // kColourArray[i - 1],
image_width, image_height, dev_exist);
exist_identify<<<grid, block>>>(dev_exist);
cudaMemcpy(&_host_exist, dev_exist, sizeof(int), cudaMemcpyDeviceToHost);
if (_host_exist == 0) {
utils::ShowNum(i+1);
break;
}
}
}
}
#endif
} // namespace four_direction_scan
|
f09d6660c1436471272d07b897975c46bccadf65.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#define NUM_BANKS 16
#define N_ELEMENTS 16384
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
//#include <cutil_inline.h>
#include <helper_cuda.h>
// includes, kernels
__global__ void sum_kernel(float *g_odata, float *g_idata, int n)
{
int i;
int tid = threadIdx.x; //Calculate a thread ID based on this thread's position within the block
//int tid = threadIdx.y * blockDim.x + threadIdx.x; //Another thread ID example for a 2-D thread block
//int tid = blockIdx.x * blockDim.x + threadIdx.x; //Another thread ID example for assigning unique thread IDs across
//different blocks
g_odata[0] = 0;
//A single thread adds up all N_ELEMENTS (16384) array elements serially.
//This is a poor use of parallel hardware - your job is to increase the number of threads, split up the work, and communicate
//data between threads as necessary to improve the kernel performance.
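//One possible refactoring (a sketch, not the required solution): give each
//thread a strided slice, e.g. for (i = tid; i < n; i += blockDim.x) partial += g_idata[i];
//then combine the per-thread partials, for instance with atomicAdd(&g_odata[0], partial);
//in that case g_odata[0] must be zeroed once (e.g. by thread 0 followed by a
//__syncthreads()) rather than by every thread, to avoid a race.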
for(i = 0;i < N_ELEMENTS;i++)
{
g_odata[0] += g_idata[i];
}
__syncthreads(); //Syncthreads forces all threads within a block to reach this point before continuing past. Note this is
//necessary within blocks because not all threads can physically execute at the same time.
//Syncthreads does NOT synchronize different blocks (but you should not need to for this project).
}
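//------------------------------------------------------------------------------
// Editor's sketch (not part of the original assignment): one way to split the
// serial loop above across many threads is a grid-stride loop in which each
// thread accumulates a private partial sum and then folds it into g_odata[0]
// with a single atomicAdd. The name sum_kernel_gridstride is introduced here
// purely for illustration, and it assumes the caller zeroes g_odata[0] before
// the launch (unlike the serial kernel above, which zeroes it inside the kernel).
__global__ void sum_kernel_gridstride(float *g_odata, float *g_idata, int n)
{
float local = 0.0f;
// Each thread strides over the input so any grid size covers all n elements.
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
{
local += g_idata[i];
}
// Fold the per-thread partial sums into the single output slot.
atomicAdd(&g_odata[0], local);
}
//------------------------------------------------------------------------------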
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
hipSetDevice( cutGetMaxGflopsDeviceId() );
int num_elements = N_ELEMENTS;
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements);
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int num_threads = 1;
const unsigned int mem_size = sizeof( float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
printf("INPUT: ");
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
printf(" %f ", h_data[i]);
}
printf("\n");
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
cutilSafeCall( hipMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( hipMalloc( (void**) &d_odata, mem_size));
// copy host memory to device input array
cutilSafeCall( hipMemcpy( d_idata, h_data, mem_size, hipMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
#ifndef __DEVICE_EMULATION__
dim3 grid(1, 1, 1);
#else
dim3 grid(1, 1, 1); // only run one block in device emulation mode or it will be too slow
#endif
dim3 threads(num_threads, 1, 1);
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("Kernel execution failed");
printf("Running sum of %d elements\n", num_elements);
// execute the kernels
unsigned int numIterations = 100;
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
hipLaunchKernelGGL(( sum_kernel), dim3(grid), dim3(threads) , 0, 0,
d_odata, d_idata, num_elements);
}
hipDeviceSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
// check for any errors
cutilCheckMsg("Kernel execution failed");
// check results
// copy result from device to host
cutilSafeCall(hipMemcpy( h_data, d_odata, sizeof(float) * num_elements,
hipMemcpyDeviceToHost));
printf("OUTPUT: ");
printf(" %f ", h_data[0]);
printf("\n");
printf("REFERENCE: ");
printf(" %f ", reference[0]);
printf("\n");
// custom output handling when no regression test running
// in this case check if the result is equivalent to the expected solution
// Due to the large number of additions, a non-zero epsilon is necessary to
// mask floating point precision errors.
float epsilon = 0.0f;
unsigned int result_regtest = cutComparefe( reference, h_data, 1, epsilon);
printf( "sum: Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
cutilSafeCall(hipFree(d_idata));
cutilSafeCall(hipFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
}
| f09d6660c1436471272d07b897975c46bccadf65.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
#ifdef _WIN32
# define NOMINMAX
#endif
#define NUM_BANKS 16
#define N_ELEMENTS 16384
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
// includes, project
//#include <cutil_inline.h>
#include <helper_cuda.h>
// includes, kernels
__global__ void sum_kernel(float *g_odata, float *g_idata, int n)
{
int i;
int tid = threadIdx.x; //Calculate a thread ID based on this thread's position within the block
//int tid = threadIdx.y * blockDim.x + threadIdx.x; //Another thread ID example for a 2-D thread block
//int tid = blockIdx.x * blockDim.x + threadIdx.x; //Another thread ID example for assigning unique thread IDs across
//different blocks
g_odata[0] = 0;
//A single thread adds up all 1M array elements serially.
//This is a poor use of parallel hardware - your job is to increase the number of threads, split up the work, and communicate
//data between threads as necessary to improve the kernel performance.
for(i = 0;i < N_ELEMENTS;i++)
{
g_odata[0] += g_idata[i];
}
__syncthreads(); //Syncthreads forces all threads within a block to reach this point before continuing past. Note this is
//necessary within blocks because not all threads can physically execute at the same time.
//Syncthreads does NOT synchronize different blocks (but you should not need to for this project).
}
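//------------------------------------------------------------------------------
// Editor's sketch (not part of the original assignment): another way to spread
// the work hinted at in the comments above is to combine a grid-stride loop
// with a warp-level reduction, so only one atomicAdd per warp reaches global
// memory. The name sum_kernel_warp is introduced here purely for illustration;
// it assumes blockDim.x is a multiple of warpSize and that g_odata[0] is
// zeroed by the host before the launch.
__global__ void sum_kernel_warp(float *g_odata, float *g_idata, int n)
{
float local = 0.0f;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x)
local += g_idata[i];
// Reduce the partial sums within each warp using shuffle intrinsics.
for (int offset = warpSize / 2; offset > 0; offset /= 2)
local += __shfl_down_sync(0xffffffff, local, offset);
// Lane 0 of every warp contributes its warp total to the global result.
if ((threadIdx.x & (warpSize - 1)) == 0)
atomicAdd(&g_odata[0], local);
}
//------------------------------------------------------------------------------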
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// regression test functionality
extern "C"
unsigned int compare( const float* reference, const float* data,
const unsigned int len);
extern "C"
void computeGold( float* reference, float* idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
runTest( argc, argv);
cutilExit(argc, argv);
}
////////////////////////////////////////////////////////////////////////////////
//! Run a scan test for CUDA
////////////////////////////////////////////////////////////////////////////////
void
runTest( int argc, char** argv)
{
cudaSetDevice( cutGetMaxGflopsDeviceId() );
int num_elements = N_ELEMENTS;
cutGetCmdLineArgumenti( argc, (const char**) argv, "n", &num_elements);
unsigned int timer;
cutilCheckError( cutCreateTimer(&timer));
const unsigned int num_threads = 1;
const unsigned int mem_size = sizeof( float) * num_elements;
// allocate host memory to store the input data
float* h_data = (float*) malloc( mem_size);
// initialize the input data on the host to be integer values
// between 0 and 1000
printf("INPUT: ");
for( unsigned int i = 0; i < num_elements; ++i)
{
h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
printf(" %f ", h_data[i]);
}
printf("\n");
// compute reference solution
float* reference = (float*) malloc( mem_size);
computeGold( reference, h_data, num_elements);
// allocate device memory input and output arrays
float* d_idata;
float* d_odata;
cutilSafeCall( cudaMalloc( (void**) &d_idata, mem_size));
cutilSafeCall( cudaMalloc( (void**) &d_odata, mem_size));
// copy host memory to device input array
cutilSafeCall( cudaMemcpy( d_idata, h_data, mem_size, cudaMemcpyHostToDevice) );
// setup execution parameters
// Note that these scans only support a single thread-block worth of data,
// but we invoke them here on many blocks so that we can accurately compare
// performance
#ifndef __DEVICE_EMULATION__
dim3 grid(1, 1, 1);
#else
dim3 grid(1, 1, 1); // only run one block in device emulation mode or it will be too slow
#endif
dim3 threads(num_threads, 1, 1);
// make sure there are no CUDA errors before we start
CUT_CHECK_ERROR("Kernel execution failed");
printf("Running sum of %d elements\n", num_elements);
// execute the kernels
unsigned int numIterations = 100;
cutStartTimer(timer);
for (int i = 0; i < numIterations; ++i)
{
sum_kernel<<< grid, threads >>>
(d_odata, d_idata, num_elements);
}
cudaThreadSynchronize();
cutStopTimer(timer);
printf("Average time: %f ms\n\n", cutGetTimerValue(timer) / numIterations);
cutResetTimer(timer);
// check for any errors
cutilCheckMsg("Kernel execution failed");
// check results
// copy result from device to host
cutilSafeCall(cudaMemcpy( h_data, d_odata, sizeof(float) * num_elements,
cudaMemcpyDeviceToHost));
printf("OUTPUT: ");
printf(" %f ", h_data[0]);
printf("\n");
printf("REFERENCE: ");
printf(" %f ", reference[0]);
printf("\n");
// custom output handling when no regression test running
// in this case check if the result is equivalent to the expected solution
// Due to the large number of additions, a non-zero epsilon is necessary to
// mask floating point precision errors.
float epsilon = 0.0f;
unsigned int result_regtest = cutComparefe( reference, h_data, 1, epsilon);
printf( "sum: Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
// cleanup memory
free( h_data);
free( reference);
cutilSafeCall(cudaFree(d_idata));
cutilSafeCall(cudaFree(d_odata));
cutilCheckError(cutDeleteTimer(timer));
}
|
99678fc0a43ee48c408ede53462d2120185c72e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "HelAmps_sm.h"
#include <complex>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <thrust/complex.h>
using namespace std;
namespace MG5_sm
{
__device__ void ixxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fi[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomega[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int ip, im, nh;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fi[0] = thrust::complex<double> (-p[0] * nsf, -p[3] * nsf);
fi[1] = thrust::complex<double> (-p[1] * nsf, -p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.0)
{
pp = min(p[0], sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]));
if (pp == 0.0)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = (1 + nh)/2;
im = (1 - nh)/2;
fi[2] = ip * sqm[ip];
fi[3] = im * nsf * sqm[ip];
fi[4] = ip * nsf * sqm[im];
fi[5] = im * sqm[im];
}
else
{
sf[0] = (1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = (1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomega[0] = sf[0] * omega[ip];
sfomega[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.0);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0);
if (pp3 == 0.0)
{
chi[1] = thrust::complex<double> (-nh, 0);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], p[2])/sqrt(2.0 * pp * pp3);
}
fi[2] = sfomega[0] * chi[im];
fi[3] = sfomega[0] * chi[ip];
fi[4] = sfomega[1] * chi[im];
fi[5] = sfomega[1] * chi[ip];
}
}
else
{
if (p[1] == 0.0 and p[2] == 0.0 and p[3] < 0.0)
{
sqp0p3 = 0.0;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.0)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.0);
if (sqp0p3 == 0.0)
{
chi[1] = thrust::complex<double> (-nhel * sqrt(2.0 * p[0]), 0.0);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], p[2])/sqp0p3;
}
if (nh == 1)
{
fi[2] = thrust::complex<double> (0.0, 0.0);
fi[3] = thrust::complex<double> (0.0, 0.0);
fi[4] = chi[0];
fi[5] = chi[1];
}
else
{
fi[2] = chi[1];
fi[3] = chi[0];
fi[4] = thrust::complex<double> (0.0, 0.0);
fi[5] = thrust::complex<double> (0.0, 0.0);
}
}
return;
}
__device__ void txxxxx(double pvec[3], double tmass, int nhel, int nst,
thrust::complex<double> tc[18])
{
thrust::complex<double> ft[6][4], ep[4], em[4], e0[4];
double pt, pt2, pp, pzpt, emp, sqh, sqs;
int i, j;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + tmass * tmass);
sqh = sqrt(0.5);
sqs = sqrt(0.5/3);
pt2 = p[1] * p[1] + p[2] * p[2];
pp = min(p[0], sqrt(pt2 + p[3] * p[3]));
pt = min(pp, sqrt(pt2));
ft[4][0] = thrust::complex<double> (p[0] * nst, p[3] * nst);
ft[5][0] = thrust::complex<double> (p[1] * nst, p[2] * nst);
// construct eps+
if (nhel >= 0)
{
if (pp == 0)
{
ep[0] = thrust::complex<double> (0, 0);
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] = thrust::complex<double> (0, nst * sqh);
ep[3] = thrust::complex<double> (0, 0);
}
else
{
ep[0] = thrust::complex<double> (0, 0);
ep[3] = thrust::complex<double> (pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = p[3]/(pp * pt) * sqh;
ep[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
ep[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps-
if (nhel <= 0)
{
if (pp == 0)
{
em[0] = thrust::complex<double> (0, 0);
em[1] = thrust::complex<double> (sqh, 0);
em[2] = thrust::complex<double> (0, nst * sqh);
em[3] = thrust::complex<double> (0, 0);
}
else
{
em[0] = thrust::complex<double> (0, 0);
em[3] = thrust::complex<double> (-pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = -p[3]/(pp * pt) * sqh;
em[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
em[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
em[1] = thrust::complex<double> (sqh, 0);
em[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps0
if (std::labs(nhel) <= 1)
{
if (pp == 0)
{
e0[0] = thrust::complex<double> (0, 0);
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
e0[3] = thrust::complex<double> (1, 0);
}
else
{
emp = p[0]/(tmass * pp);
e0[0] = thrust::complex<double> (pp/tmass, 0);
e0[3] = thrust::complex<double> (p[3] * emp, 0);
if (pt != 0)
{
e0[1] = thrust::complex<double> (p[1] * emp, 0);
e0[2] = thrust::complex<double> (p[2] * emp, 0);
}
else
{
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
}
}
}
if (nhel == 2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = ep[i] * ep[j];
}
}
else if (nhel == -2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = em[i] * em[j];
}
}
else if (tmass == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = 0;
}
}
else if (tmass != 0)
{
if (nhel == 1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (ep[i] * e0[j] + e0[i] * ep[j]);
}
}
else if (nhel == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] =
sqs * (ep[i] * em[j] + em[i] * ep[j] + 2.0 * e0[i] * e0[j]);
}
}
else if (nhel == -1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (em[i] * e0[j] + e0[i] * em[j]);
}
}
else
{
// sr fixme // std::cerr << "Invalid helicity in txxxxx.\n";
// sr fixme // std::exit(1);
}
}
tc[0] = ft[4][0];
tc[1] = ft[5][0];
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
tc[j * 4 + i + 2] = ft[j][i];
}
}
__device__ void vxxxxx(double pvec[3], double vmass, int nhel, int nsv,
thrust::complex<double> vc[6])
{
double hel, hel0, pt, pt2, pp, pzpt, emp, sqh;
int nsvahl;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + vmass * vmass);
sqh = sqrt(0.5);
hel = double(nhel);
nsvahl = nsv * std::abs(hel);
pt2 = (p[1] * p[1]) + (p[2] * p[2]);
pp = min(p[0], sqrt(pt2 + (p[3] * p[3])));
pt = min(pp, sqrt(pt2));
vc[0] = thrust::complex<double> (p[0] * nsv, p[3] * nsv);
vc[1] = thrust::complex<double> (p[1] * nsv, p[2] * nsv);
if (vmass != 0.0)
{
hel0 = 1.0 - std::abs(hel);
if (pp == 0.0)
{
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * sqh);
vc[5] = thrust::complex<double> (hel0, 0.0);
}
else
{
emp = p[0]/(vmass * pp);
vc[2] = thrust::complex<double> (hel0 * pp/vmass, 0.0);
vc[5] =
thrust::complex<double> (hel0 * p[3] * emp + hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (hel0 * p[1] * emp - p[1] * pzpt,
- nsvahl * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (hel0 * p[2] * emp - p[2] * pzpt,
nsvahl * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * (p[3] < 0) ? - abs(sqh)
: abs(sqh));
}
}
}
else
{
pp = p[0];
pt = sqrt((p[1] * p[1]) + (p[2] * p[2]));
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[5] = thrust::complex<double> (hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (-p[1] * pzpt, -nsv * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (-p[2] * pzpt, nsv * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] =
thrust::complex<double> (0.0, nsv * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
return;
}
__device__ void sxxxxx(double pvec[3], int nss, thrust::complex<double> sc[3])
{
// double p[4] = {0, pvec[0], pvec[1], pvec[2]};
// p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]+fmass*fmass);
double p[4] = {0, 0, 0, 0};
printf("scalar not supported so far. to do: fix mass issue");
sc[2] = thrust::complex<double> (1.00, 0.00);
sc[0] = thrust::complex<double> (p[0] * nss, p[3] * nss);
sc[1] = thrust::complex<double> (p[1] * nss, p[2] * nss);
return;
}
__device__ void oxxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fo[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomeg[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int nh, ip, im;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fo[0] = thrust::complex<double> (p[0] * nsf, p[3] * nsf);
fo[1] = thrust::complex<double> (p[1] * nsf, p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.000)
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
if (pp == 0.000)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = -((1 - nh)/2) * nhel;
im = (1 + nh)/2 * nhel;
fo[2] = im * sqm[std::abs(ip)];
fo[3] = ip * nsf * sqm[std::abs(ip)];
fo[4] = im * nsf * sqm[std::abs(im)];
fo[5] = ip * sqm[std::abs(im)];
}
else
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
sf[0] = double(1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = double(1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomeg[0] = sf[0] * omega[ip];
sfomeg[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.00);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0.00);
if (pp3 == 0.00)
{
chi[1] = thrust::complex<double> (-nh, 0.00);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], -p[2])/sqrt(2.0 * pp * pp3);
}
fo[2] = sfomeg[1] * chi[im];
fo[3] = sfomeg[1] * chi[ip];
fo[4] = sfomeg[0] * chi[im];
fo[5] = sfomeg[0] * chi[ip];
}
}
else
{
if ((p[1] == 0.00) and (p[2] == 0.00) and (p[3] < 0.00))
{
sqp0p3 = 0.00;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.00)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.00);
if (sqp0p3 == 0.000)
{
chi[1] = thrust::complex<double> (-nhel, 0.00) * sqrt(2.0 * p[0]);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], -p[2])/sqp0p3;
}
if (nh == 1)
{
fo[2] = chi[0];
fo[3] = chi[1];
fo[4] = thrust::complex<double> (0.00, 0.00);
fo[5] = thrust::complex<double> (0.00, 0.00);
}
else
{
fo[2] = thrust::complex<double> (0.00, 0.00);
fo[3] = thrust::complex<double> (0.00, 0.00);
fo[4] = chi[1];
fo[5] = chi[0];
}
}
return;
}
__device__ void VVVV3_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> V4[], const thrust::complex<double> COUP,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP1;
thrust::complex<double> TMP2;
thrust::complex<double> TMP3;
TMP3 = (V1[2] * V2[2] - V1[3] * V2[3] - V1[4] * V2[4] - V1[5] * V2[5]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
TMP0 = (V4[2] * V1[2] - V4[3] * V1[3] - V4[4] * V1[4] - V4[5] * V1[5]);
(*vertex) = COUP * (-cI * (TMP0 * TMP1) + cI * (TMP2 * TMP3));
}
__device__ void VVVV3P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> V4[], const
thrust::complex<double> COUP, const double M1, const double W1,
thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP2;
thrust::complex<double> denom;
V1[0] = +V2[0] + V3[0] + V4[0];
V1[1] = +V2[1] + V3[1] + V4[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (-cI * (V4[2] * TMP1) + cI * (V2[2] * TMP2));
V1[3] = denom * (-cI * (V4[3] * TMP1) + cI * (V2[3] * TMP2));
V1[4] = denom * (-cI * (V4[4] * TMP1) + cI * (V2[4] * TMP2));
V1[5] = denom * (-cI * (V4[5] * TMP1) + cI * (V2[5] * TMP2));
}
__device__ void VVVV1_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> V4[], const thrust::complex<double> COUP,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP1;
thrust::complex<double> TMP4;
thrust::complex<double> TMP5;
TMP5 = (V1[2] * V3[2] - V1[3] * V3[3] - V1[4] * V3[4] - V1[5] * V3[5]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP0 = (V4[2] * V1[2] - V4[3] * V1[3] - V4[4] * V1[4] - V4[5] * V1[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
(*vertex) = COUP * (-cI * (TMP0 * TMP1) + cI * (TMP4 * TMP5));
}
__device__ void VVVV1P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> V4[], const
thrust::complex<double> COUP, const double M1, const double W1,
thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP4;
thrust::complex<double> denom;
V1[0] = +V2[0] + V3[0] + V4[0];
V1[1] = +V2[1] + V3[1] + V4[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (-cI * (V4[2] * TMP1) + cI * (V3[2] * TMP4));
V1[3] = denom * (-cI * (V4[3] * TMP1) + cI * (V3[3] * TMP4));
V1[4] = denom * (-cI * (V4[4] * TMP1) + cI * (V3[4] * TMP4));
V1[5] = denom * (-cI * (V4[5] * TMP1) + cI * (V3[5] * TMP4));
}
__device__ void FFV1_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP6;
TMP6 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
(F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])) +
(F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])))));
(*vertex) = COUP * - cI * TMP6;
}
__device__ void FFV1_1(thrust::complex<double> F2[], const
thrust::complex<double> V3[], const thrust::complex<double> COUP, const
double M1, const double W1, thrust::complex<double> F1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> denom;
F1[0] = +F2[0] + V3[0];
F1[1] = +F2[1] + V3[1];
P1[0] = -F1[0].real();
P1[1] = -F1[1].real();
P1[2] = -F1[1].imag();
P1[3] = -F1[0].imag();
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
F1[2] = denom * cI * (F2[2] * (P1[0] * (-V3[2] + V3[5]) + (P1[1] * (V3[3] -
cI * (V3[4])) + (P1[2] * (+cI * (V3[3]) + V3[4]) + P1[3] * (-V3[2] +
V3[5])))) + (F2[3] * (P1[0] * (V3[3] + cI * (V3[4])) + (P1[1] * (-1.) *
(V3[2] + V3[5]) + (P1[2] * (-1.) * (+cI * (V3[2] + V3[5])) + P1[3] *
(V3[3] + cI * (V3[4]))))) + M1 * (F2[4] * (V3[2] + V3[5]) + F2[5] *
(V3[3] + cI * (V3[4])))));
F1[3] = denom * (-cI) * (F2[2] * (P1[0] * (-V3[3] + cI * (V3[4])) + (P1[1] *
(V3[2] - V3[5]) + (P1[2] * (-cI * (V3[2]) + cI * (V3[5])) + P1[3] *
(V3[3] - cI * (V3[4]))))) + (F2[3] * (P1[0] * (V3[2] + V3[5]) + (P1[1] *
(-1.) * (V3[3] + cI * (V3[4])) + (P1[2] * (+cI * (V3[3]) - V3[4]) - P1[3]
* (V3[2] + V3[5])))) + M1 * (F2[4] * (-V3[3] + cI * (V3[4])) + F2[5] *
(-V3[2] + V3[5]))));
F1[4] = denom * (-cI) * (F2[4] * (P1[0] * (V3[2] + V3[5]) + (P1[1] * (-V3[3]
+ cI * (V3[4])) + (P1[2] * (-1.) * (+cI * (V3[3]) + V3[4]) - P1[3] *
(V3[2] + V3[5])))) + (F2[5] * (P1[0] * (V3[3] + cI * (V3[4])) + (P1[1] *
(-V3[2] + V3[5]) + (P1[2] * (-cI * (V3[2]) + cI * (V3[5])) - P1[3] *
(V3[3] + cI * (V3[4]))))) + M1 * (F2[2] * (-V3[2] + V3[5]) + F2[3] *
(V3[3] + cI * (V3[4])))));
F1[5] = denom * cI * (F2[4] * (P1[0] * (-V3[3] + cI * (V3[4])) + (P1[1] *
(V3[2] + V3[5]) + (P1[2] * (-1.) * (+cI * (V3[2] + V3[5])) + P1[3] *
(-V3[3] + cI * (V3[4]))))) + (F2[5] * (P1[0] * (-V3[2] + V3[5]) + (P1[1]
* (V3[3] + cI * (V3[4])) + (P1[2] * (-cI * (V3[3]) + V3[4]) + P1[3] *
(-V3[2] + V3[5])))) + M1 * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] *
(V3[2] + V3[5]))));
}
__device__ void FFV1_2(thrust::complex<double> F1[], const
thrust::complex<double> V3[], const thrust::complex<double> COUP, const
double M2, const double W2, thrust::complex<double> F2[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P2[4];
thrust::complex<double> denom;
F2[0] = +F1[0] + V3[0];
F2[1] = +F1[1] + V3[1];
P2[0] = -F2[0].real();
P2[1] = -F2[1].real();
P2[2] = -F2[1].imag();
P2[3] = -F2[0].imag();
denom = COUP/((P2[0] * P2[0]) - (P2[1] * P2[1]) - (P2[2] * P2[2]) - (P2[3] *
P2[3]) - M2 * (M2 - cI * W2));
F2[2] = denom * cI * (F1[2] * (P2[0] * (V3[2] + V3[5]) + (P2[1] * (-1.) *
(V3[3] + cI * (V3[4])) + (P2[2] * (+cI * (V3[3]) - V3[4]) - P2[3] *
(V3[2] + V3[5])))) + (F1[3] * (P2[0] * (V3[3] - cI * (V3[4])) + (P2[1] *
(-V3[2] + V3[5]) + (P2[2] * (+cI * (V3[2]) - cI * (V3[5])) + P2[3] *
(-V3[3] + cI * (V3[4]))))) + M2 * (F1[4] * (V3[2] - V3[5]) + F1[5] *
(-V3[3] + cI * (V3[4])))));
F2[3] = denom * (-cI) * (F1[2] * (P2[0] * (-1.) * (V3[3] + cI * (V3[4])) +
(P2[1] * (V3[2] + V3[5]) + (P2[2] * (+cI * (V3[2] + V3[5])) - P2[3] *
(V3[3] + cI * (V3[4]))))) + (F1[3] * (P2[0] * (-V3[2] + V3[5]) + (P2[1] *
(V3[3] - cI * (V3[4])) + (P2[2] * (+cI * (V3[3]) + V3[4]) + P2[3] *
(-V3[2] + V3[5])))) + M2 * (F1[4] * (V3[3] + cI * (V3[4])) - F1[5] *
(V3[2] + V3[5]))));
F2[4] = denom * (-cI) * (F1[4] * (P2[0] * (-V3[2] + V3[5]) + (P2[1] * (V3[3]
+ cI * (V3[4])) + (P2[2] * (-cI * (V3[3]) + V3[4]) + P2[3] * (-V3[2] +
V3[5])))) + (F1[5] * (P2[0] * (V3[3] - cI * (V3[4])) + (P2[1] * (-1.) *
(V3[2] + V3[5]) + (P2[2] * (+cI * (V3[2] + V3[5])) + P2[3] * (V3[3] - cI
* (V3[4]))))) + M2 * (F1[2] * (-1.) * (V3[2] + V3[5]) + F1[3] * (-V3[3] +
cI * (V3[4])))));
F2[5] = denom * cI * (F1[4] * (P2[0] * (-1.) * (V3[3] + cI * (V3[4])) +
(P2[1] * (V3[2] - V3[5]) + (P2[2] * (+cI * (V3[2]) - cI * (V3[5])) +
P2[3] * (V3[3] + cI * (V3[4]))))) + (F1[5] * (P2[0] * (V3[2] + V3[5]) +
(P2[1] * (-V3[3] + cI * (V3[4])) + (P2[2] * (-1.) * (+cI * (V3[3]) +
V3[4]) - P2[3] * (V3[2] + V3[5])))) + M2 * (F1[2] * (V3[3] + cI *
(V3[4])) + F1[3] * (V3[2] - V3[5]))));
}
__device__ void FFV1P0_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P3[4];
thrust::complex<double> denom;
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] + F1[4] * F2[2] +
F1[5] * F2[3]);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] + F1[4] * F2[3] +
F1[5] * F2[2]);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5] + F1[5] * F2[2]) + cI * (F1[3]
* F2[4] + F1[4] * F2[3]));
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - F1[5] * F2[3] + F1[3] * F2[5] +
F1[4] * F2[2]);
}
__device__ void VVVV4_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> V4[], const thrust::complex<double> COUP,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP2;
thrust::complex<double> TMP3;
thrust::complex<double> TMP4;
thrust::complex<double> TMP5;
TMP3 = (V1[2] * V2[2] - V1[3] * V2[3] - V1[4] * V2[4] - V1[5] * V2[5]);
TMP5 = (V1[2] * V3[2] - V1[3] * V3[3] - V1[4] * V3[4] - V1[5] * V3[5]);
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
(*vertex) = COUP * (-cI * (TMP4 * TMP5) + cI * (TMP2 * TMP3));
}
__device__ void VVVV4P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> V4[], const
thrust::complex<double> COUP, const double M1, const double W1,
thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> TMP2;
thrust::complex<double> TMP4;
thrust::complex<double> denom;
V1[0] = +V2[0] + V3[0] + V4[0];
V1[1] = +V2[1] + V3[1] + V4[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (-cI * (V3[2] * TMP4) + cI * (V2[2] * TMP2));
V1[3] = denom * (-cI * (V3[3] * TMP4) + cI * (V2[3] * TMP2));
V1[4] = denom * (-cI * (V3[4] * TMP4) + cI * (V2[4] * TMP2));
V1[5] = denom * (-cI * (V3[5] * TMP4) + cI * (V2[5] * TMP2));
}
__device__ void VVV1_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
double P2[4];
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP10;
thrust::complex<double> TMP11;
thrust::complex<double> TMP12;
thrust::complex<double> TMP3;
thrust::complex<double> TMP5;
thrust::complex<double> TMP7;
thrust::complex<double> TMP8;
thrust::complex<double> TMP9;
P1[0] = V1[0].real();
P1[1] = V1[1].real();
P1[2] = V1[1].imag();
P1[3] = V1[0].imag();
P2[0] = V2[0].real();
P2[1] = V2[1].real();
P2[2] = V2[1].imag();
P2[3] = V2[0].imag();
P3[0] = V3[0].real();
P3[1] = V3[1].real();
P3[2] = V3[1].imag();
P3[3] = V3[0].imag();
TMP9 = (V2[2] * P1[0] - V2[3] * P1[1] - V2[4] * P1[2] - V2[5] * P1[3]);
TMP8 = (V3[2] * P2[0] - V3[3] * P2[1] - V3[4] * P2[2] - V3[5] * P2[3]);
TMP3 = (V1[2] * V2[2] - V1[3] * V2[3] - V1[4] * V2[4] - V1[5] * V2[5]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP7 = (V3[2] * P1[0] - V3[3] * P1[1] - V3[4] * P1[2] - V3[5] * P1[3]);
TMP5 = (V1[2] * V3[2] - V1[3] * V3[3] - V1[4] * V3[4] - V1[5] * V3[5]);
TMP10 = (V2[2] * P3[0] - V2[3] * P3[1] - V2[4] * P3[2] - V2[5] * P3[3]);
TMP11 = (V1[2] * P2[0] - V1[3] * P2[1] - V1[4] * P2[2] - V1[5] * P2[3]);
TMP12 = (V1[2] * P3[0] - V1[3] * P3[1] - V1[4] * P3[2] - V1[5] * P3[3]);
(*vertex) = COUP * (TMP1 * (-cI * (TMP11) + cI * (TMP12)) + (TMP3 * (-cI *
(TMP7) + cI * (TMP8)) + TMP5 * (+cI * (TMP9) - cI * (TMP10))));
}
__device__ void VVV1P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> COUP, const
double M1, const double W1, thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
double P2[4];
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP10;
thrust::complex<double> TMP7;
thrust::complex<double> TMP8;
thrust::complex<double> TMP9;
thrust::complex<double> denom;
P2[0] = V2[0].real();
P2[1] = V2[1].real();
P2[2] = V2[1].imag();
P2[3] = V2[0].imag();
P3[0] = V3[0].real();
P3[1] = V3[1].real();
P3[2] = V3[1].imag();
P3[3] = V3[0].imag();
V1[0] = +V2[0] + V3[0];
V1[1] = +V2[1] + V3[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP9 = (V2[2] * P1[0] - V2[3] * P1[1] - V2[4] * P1[2] - V2[5] * P1[3]);
TMP8 = (V3[2] * P2[0] - V3[3] * P2[1] - V3[4] * P2[2] - V3[5] * P2[3]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP7 = (V3[2] * P1[0] - V3[3] * P1[1] - V3[4] * P1[2] - V3[5] * P1[3]);
TMP10 = (V2[2] * P3[0] - V2[3] * P3[1] - V2[4] * P3[2] - V2[5] * P3[3]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (TMP1 * (-cI * (P2[0]) + cI * (P3[0])) + (V2[2] * (-cI *
(TMP7) + cI * (TMP8)) + V3[2] * (+cI * (TMP9) - cI * (TMP10))));
V1[3] = denom * (TMP1 * (-cI * (P2[1]) + cI * (P3[1])) + (V2[3] * (-cI *
(TMP7) + cI * (TMP8)) + V3[3] * (+cI * (TMP9) - cI * (TMP10))));
V1[4] = denom * (TMP1 * (-cI * (P2[2]) + cI * (P3[2])) + (V2[4] * (-cI *
(TMP7) + cI * (TMP8)) + V3[4] * (+cI * (TMP9) - cI * (TMP10))));
V1[5] = denom * (TMP1 * (-cI * (P2[3]) + cI * (P3[3])) + (V2[5] * (-cI *
(TMP7) + cI * (TMP8)) + V3[5] * (+cI * (TMP9) - cI * (TMP10))));
}
} // end namespace MG5_sm
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include <algorithm>
#include <iostream>
#include <thrust/complex.h>
using namespace MG5_sm;
//==========================================================================
// Class member functions for calculating the matrix elements for
// Process: g g > t t~ g g WEIGHTED<=4 @1
__constant__ int cHel[64][6];
// __constant__ double cmME[6]; value hardcoded now
// extern __constant__ int cPerm[4];
//
__constant__ double cIPC[6]; // coupling ?
__constant__ double cIPD[2];
// Evaluate |M|^2 for each subprocess
__device__ void calculate_wavefunctions(int ihel, double local_mom[6][3],
double &matrix)
{
thrust::complex<double> amp[159];
// Calculate wavefunctions for all processes
thrust::complex<double> w[26][6];
vxxxxx(local_mom[0], 0., cHel[ihel][0], -1, w[0]);
vxxxxx(local_mom[1], 0., cHel[ihel][1], -1, w[1]);
oxxxxx(local_mom[2], cIPD[0], cHel[ihel][2], +1, w[2]);
ixxxxx(local_mom[3], cIPD[0], cHel[ihel][3], -1, w[3]);
vxxxxx(local_mom[4], 0., cHel[ihel][4], +1, w[4]);
vxxxxx(local_mom[5], 0., cHel[ihel][5], +1, w[5]);
VVV1P0_1(w[0], w[1], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[6]);
FFV1P0_3(w[3], w[2], thrust::complex<double> (cIPC[2], cIPC[3]), 0., 0.,
w[7]);
// Amplitude(s) for diagram number 1
VVVV1_0(w[6], w[7], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[0]);
VVVV3_0(w[6], w[7], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[1]);
VVVV4_0(w[6], w[7], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[2]);
VVV1P0_1(w[6], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[8]);
// Amplitude(s) for diagram number 2
VVV1_0(w[7], w[5], w[8], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[3]);
VVV1P0_1(w[6], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[9]);
// Amplitude(s) for diagram number 3
VVV1_0(w[7], w[4], w[9], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[4]);
VVV1P0_1(w[4], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[10]);
// Amplitude(s) for diagram number 4
VVV1_0(w[6], w[7], w[10], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[5]);
FFV1_1(w[2], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[11]);
FFV1_2(w[3], w[6], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
// Amplitude(s) for diagram number 5
FFV1_0(w[12], w[11], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[6]);
// Amplitude(s) for diagram number 6
FFV1_0(w[3], w[11], w[9], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[7]);
FFV1_2(w[3], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[13]);
// Amplitude(s) for diagram number 7
FFV1_0(w[13], w[11], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[8]);
FFV1_1(w[2], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[14]);
// Amplitude(s) for diagram number 8
FFV1_0(w[12], w[14], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[9]);
// Amplitude(s) for diagram number 9
FFV1_0(w[3], w[14], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[10]);
FFV1_2(w[3], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[15]);
// Amplitude(s) for diagram number 10
FFV1_0(w[15], w[14], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[11]);
FFV1_1(w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[16]);
// Amplitude(s) for diagram number 11
FFV1_0(w[15], w[16], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[12]);
// Amplitude(s) for diagram number 12
FFV1_0(w[15], w[2], w[9], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[13]);
// Amplitude(s) for diagram number 13
FFV1_0(w[13], w[16], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[14]);
// Amplitude(s) for diagram number 14
FFV1_0(w[13], w[2], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[15]);
// Amplitude(s) for diagram number 15
FFV1_0(w[3], w[16], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[16]);
// Amplitude(s) for diagram number 16
FFV1_0(w[12], w[2], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[17]);
FFV1_1(w[2], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
FFV1_2(w[3], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[16]);
FFV1_1(w[12], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[8]);
// Amplitude(s) for diagram number 17
FFV1_0(w[16], w[8], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[18]);
FFV1_1(w[12], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[9]);
// Amplitude(s) for diagram number 18
FFV1_0(w[16], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[19]);
// Amplitude(s) for diagram number 19
FFV1_0(w[16], w[12], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[20]);
VVV1P0_1(w[1], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[6]);
FFV1P0_3(w[3], w[12], thrust::complex<double> (cIPC[2], cIPC[3]), 0., 0.,
w[17]);
// Amplitude(s) for diagram number 20
VVV1_0(w[6], w[5], w[17], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[21]);
// Amplitude(s) for diagram number 21
FFV1_0(w[3], w[9], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[22]);
// Amplitude(s) for diagram number 22
FFV1_0(w[13], w[12], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[23]);
VVV1P0_1(w[1], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[18]);
// Amplitude(s) for diagram number 23
VVV1_0(w[18], w[4], w[17], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[24]);
// Amplitude(s) for diagram number 24
FFV1_0(w[3], w[8], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[25]);
// Amplitude(s) for diagram number 25
FFV1_0(w[15], w[12], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[26]);
FFV1_1(w[12], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[19]);
// Amplitude(s) for diagram number 26
FFV1_0(w[15], w[19], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[27]);
// Amplitude(s) for diagram number 27
FFV1_0(w[15], w[9], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[28]);
// Amplitude(s) for diagram number 28
FFV1_0(w[13], w[19], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[29]);
// Amplitude(s) for diagram number 29
FFV1_0(w[13], w[8], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[30]);
// Amplitude(s) for diagram number 30
FFV1_0(w[3], w[19], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[31]);
// Amplitude(s) for diagram number 31
VVV1_0(w[1], w[10], w[17], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[32]);
VVVV1P0_1(w[1], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[17]);
VVVV3P0_1(w[1], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[19]);
VVVV4P0_1(w[1], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[8]);
// Amplitude(s) for diagram number 32
FFV1_0(w[3], w[12], w[17], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[33]);
FFV1_0(w[3], w[12], w[19], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[34]);
FFV1_0(w[3], w[12], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[35]);
FFV1_2(w[3], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
FFV1_1(w[2], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[9]);
FFV1_2(w[12], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[20]);
// Amplitude(s) for diagram number 33
FFV1_0(w[20], w[9], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[36]);
FFV1_2(w[12], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[21]);
// Amplitude(s) for diagram number 34
FFV1_0(w[21], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[37]);
// Amplitude(s) for diagram number 35
FFV1_0(w[12], w[9], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[38]);
FFV1P0_3(w[12], w[2], thrust::complex<double> (cIPC[2], cIPC[3]), 0., 0.,
w[22]);
// Amplitude(s) for diagram number 36
VVV1_0(w[6], w[5], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[39]);
// Amplitude(s) for diagram number 37
FFV1_0(w[21], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[40]);
// Amplitude(s) for diagram number 38
FFV1_0(w[12], w[14], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[41]);
// Amplitude(s) for diagram number 39
VVV1_0(w[18], w[4], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[42]);
// Amplitude(s) for diagram number 40
FFV1_0(w[20], w[2], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[43]);
// Amplitude(s) for diagram number 41
FFV1_0(w[12], w[11], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[44]);
FFV1_2(w[12], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[23]);
// Amplitude(s) for diagram number 42
FFV1_0(w[23], w[11], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[45]);
// Amplitude(s) for diagram number 43
FFV1_0(w[21], w[11], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[46]);
// Amplitude(s) for diagram number 44
FFV1_0(w[23], w[14], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[47]);
// Amplitude(s) for diagram number 45
FFV1_0(w[20], w[14], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[48]);
// Amplitude(s) for diagram number 46
FFV1_0(w[23], w[2], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[49]);
// Amplitude(s) for diagram number 47
VVV1_0(w[1], w[10], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[50]);
// Amplitude(s) for diagram number 48
FFV1_0(w[12], w[2], w[17], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[51]);
FFV1_0(w[12], w[2], w[19], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[52]);
FFV1_0(w[12], w[2], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[53]);
VVV1P0_1(w[0], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[12]);
FFV1_2(w[3], w[12], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[22]);
// Amplitude(s) for diagram number 49
FFV1_0(w[22], w[9], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[54]);
VVV1P0_1(w[12], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[23]);
// Amplitude(s) for diagram number 50
FFV1_0(w[3], w[9], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[55]);
// Amplitude(s) for diagram number 51
FFV1_0(w[13], w[9], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[56]);
FFV1_1(w[2], w[12], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[20]);
// Amplitude(s) for diagram number 52
FFV1_0(w[16], w[20], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[57]);
// Amplitude(s) for diagram number 53
FFV1_0(w[16], w[2], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[58]);
// Amplitude(s) for diagram number 54
FFV1_0(w[16], w[14], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[59]);
// Amplitude(s) for diagram number 55
FFV1_0(w[3], w[20], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[60]);
// Amplitude(s) for diagram number 56
FFV1_0(w[22], w[2], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[61]);
// Amplitude(s) for diagram number 57
VVV1_0(w[12], w[18], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[62]);
// Amplitude(s) for diagram number 58
VVVV1_0(w[12], w[1], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[63]);
VVVV3_0(w[12], w[1], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[64]);
VVVV4_0(w[12], w[1], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[65]);
VVV1P0_1(w[12], w[1], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[21]);
// Amplitude(s) for diagram number 59
VVV1_0(w[7], w[5], w[21], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[66]);
// Amplitude(s) for diagram number 60
VVV1_0(w[1], w[7], w[23], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[67]);
// Amplitude(s) for diagram number 61
FFV1_0(w[3], w[14], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[68]);
// Amplitude(s) for diagram number 62
FFV1_0(w[22], w[14], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[69]);
// Amplitude(s) for diagram number 63
FFV1_0(w[13], w[2], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[70]);
// Amplitude(s) for diagram number 64
FFV1_0(w[13], w[20], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[71]);
VVV1P0_1(w[0], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[20]);
FFV1_2(w[3], w[20], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[21]);
// Amplitude(s) for diagram number 65
FFV1_0(w[21], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[72]);
VVV1P0_1(w[20], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[22]);
// Amplitude(s) for diagram number 66
FFV1_0(w[3], w[9], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[73]);
// Amplitude(s) for diagram number 67
FFV1_0(w[15], w[9], w[20], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[74]);
FFV1_1(w[2], w[20], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[23]);
// Amplitude(s) for diagram number 68
FFV1_0(w[16], w[23], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[75]);
// Amplitude(s) for diagram number 69
FFV1_0(w[16], w[2], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[76]);
// Amplitude(s) for diagram number 70
FFV1_0(w[16], w[11], w[20], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[77]);
// Amplitude(s) for diagram number 71
FFV1_0(w[3], w[23], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[78]);
// Amplitude(s) for diagram number 72
FFV1_0(w[21], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[79]);
// Amplitude(s) for diagram number 73
VVV1_0(w[20], w[6], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[80]);
// Amplitude(s) for diagram number 74
VVVV1_0(w[20], w[1], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[81]);
VVVV3_0(w[20], w[1], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[82]);
VVVV4_0(w[20], w[1], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[83]);
VVV1P0_1(w[20], w[1], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[12]);
// Amplitude(s) for diagram number 75
VVV1_0(w[7], w[4], w[12], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[84]);
// Amplitude(s) for diagram number 76
VVV1_0(w[1], w[7], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[85]);
// Amplitude(s) for diagram number 77
FFV1_0(w[3], w[11], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[86]);
// Amplitude(s) for diagram number 78
FFV1_0(w[21], w[11], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[87]);
// Amplitude(s) for diagram number 79
FFV1_0(w[15], w[2], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[88]);
// Amplitude(s) for diagram number 80
FFV1_0(w[15], w[23], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[89]);
FFV1_1(w[9], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[23]);
// Amplitude(s) for diagram number 81
FFV1_0(w[15], w[23], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[90]);
FFV1_2(w[15], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
// Amplitude(s) for diagram number 82
FFV1_0(w[12], w[9], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[91]);
// Amplitude(s) for diagram number 83
FFV1_0(w[13], w[23], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[92]);
FFV1_2(w[13], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[21]);
// Amplitude(s) for diagram number 84
FFV1_0(w[21], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[93]);
// Amplitude(s) for diagram number 85
FFV1_0(w[3], w[23], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[94]);
VVV1P0_1(w[0], w[10], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[23]);
// Amplitude(s) for diagram number 86
FFV1_0(w[3], w[9], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[95]);
FFV1_2(w[16], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[22]);
// Amplitude(s) for diagram number 87
FFV1_0(w[22], w[11], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[96]);
FFV1_1(w[11], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[20]);
// Amplitude(s) for diagram number 88
FFV1_0(w[16], w[20], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[97]);
// Amplitude(s) for diagram number 89
FFV1_0(w[22], w[14], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[98]);
FFV1_1(w[14], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[24]);
// Amplitude(s) for diagram number 90
FFV1_0(w[16], w[24], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[99]);
// Amplitude(s) for diagram number 91
FFV1_0(w[22], w[2], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[100]);
// Amplitude(s) for diagram number 92
FFV1_0(w[16], w[2], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[101]);
// Amplitude(s) for diagram number 93
VVVV1_0(w[0], w[6], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[102]);
VVVV3_0(w[0], w[6], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[103]);
VVVV4_0(w[0], w[6], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[104]);
VVV1P0_1(w[0], w[6], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[22]);
// Amplitude(s) for diagram number 94
VVV1_0(w[7], w[5], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[105]);
VVV1P0_1(w[0], w[7], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[25]);
// Amplitude(s) for diagram number 95
VVV1_0(w[6], w[5], w[25], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[106]);
// Amplitude(s) for diagram number 96
FFV1_0(w[3], w[14], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[107]);
// Amplitude(s) for diagram number 97
FFV1_0(w[3], w[24], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[108]);
// Amplitude(s) for diagram number 98
FFV1_0(w[13], w[2], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[109]);
// Amplitude(s) for diagram number 99
FFV1_0(w[21], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[110]);
// Amplitude(s) for diagram number 100
VVVV1_0(w[0], w[18], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[111]);
VVVV3_0(w[0], w[18], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[112]);
VVVV4_0(w[0], w[18], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[113]);
VVV1P0_1(w[0], w[18], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[6]);
// Amplitude(s) for diagram number 101
VVV1_0(w[7], w[4], w[6], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[114]);
// Amplitude(s) for diagram number 102
VVV1_0(w[18], w[4], w[25], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[115]);
// Amplitude(s) for diagram number 103
FFV1_0(w[3], w[11], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[116]);
// Amplitude(s) for diagram number 104
FFV1_0(w[3], w[20], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[117]);
// Amplitude(s) for diagram number 105
FFV1_0(w[15], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[118]);
// Amplitude(s) for diagram number 106
FFV1_0(w[12], w[2], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[119]);
// Amplitude(s) for diagram number 107
VVVV1_0(w[0], w[1], w[7], w[10], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[120]);
VVVV3_0(w[0], w[1], w[7], w[10], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[121]);
VVVV4_0(w[0], w[1], w[7], w[10], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[122]);
// Amplitude(s) for diagram number 108
VVV1_0(w[1], w[10], w[25], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[123]);
// Amplitude(s) for diagram number 109
VVV1_0(w[1], w[7], w[23], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[124]);
// Amplitude(s) for diagram number 110
FFV1_0(w[13], w[20], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[125]);
// Amplitude(s) for diagram number 111
FFV1_0(w[21], w[11], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[126]);
// Amplitude(s) for diagram number 112
FFV1_0(w[15], w[24], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[127]);
// Amplitude(s) for diagram number 113
FFV1_0(w[12], w[14], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[128]);
VVVV1P0_1(w[0], w[1], w[4], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[12]);
VVVV3P0_1(w[0], w[1], w[4], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[24]);
VVVV4P0_1(w[0], w[1], w[4], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[21]);
// Amplitude(s) for diagram number 114
VVV1_0(w[12], w[7], w[5], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[129]);
VVV1_0(w[24], w[7], w[5], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[130]);
VVV1_0(w[21], w[7], w[5], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[131]);
// Amplitude(s) for diagram number 115
FFV1_0(w[3], w[14], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[132]);
FFV1_0(w[3], w[14], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[133]);
FFV1_0(w[3], w[14], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[134]);
// Amplitude(s) for diagram number 116
FFV1_0(w[13], w[2], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[135]);
FFV1_0(w[13], w[2], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[136]);
FFV1_0(w[13], w[2], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[137]);
VVVV1P0_1(w[0], w[1], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[21]);
VVVV3P0_1(w[0], w[1], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[13]);
VVVV4P0_1(w[0], w[1], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[24]);
// Amplitude(s) for diagram number 117
VVV1_0(w[21], w[7], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[138]);
VVV1_0(w[13], w[7], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[139]);
VVV1_0(w[24], w[7], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[140]);
// Amplitude(s) for diagram number 118
FFV1_0(w[3], w[11], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[141]);
FFV1_0(w[3], w[11], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[142]);
FFV1_0(w[3], w[11], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[143]);
// Amplitude(s) for diagram number 119
FFV1_0(w[15], w[2], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[144]);
FFV1_0(w[15], w[2], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[145]);
FFV1_0(w[15], w[2], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[146]);
VVVV1P0_1(w[0], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[24]);
VVVV3P0_1(w[0], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[15]);
VVVV4P0_1(w[0], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[13]);
// Amplitude(s) for diagram number 120
FFV1_0(w[3], w[9], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[147]);
FFV1_0(w[3], w[9], w[15], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[148]);
FFV1_0(w[3], w[9], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[149]);
// Amplitude(s) for diagram number 121
FFV1_0(w[16], w[2], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[150]);
FFV1_0(w[16], w[2], w[15], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[151]);
FFV1_0(w[16], w[2], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[152]);
// Amplitude(s) for diagram number 122
VVV1_0(w[24], w[1], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[153]);
VVV1_0(w[15], w[1], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[154]);
VVV1_0(w[13], w[1], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[155]);
// Amplitude(s) for diagram number 123
VVV1_0(w[0], w[17], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[156]);
VVV1_0(w[0], w[19], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[157]);
VVV1_0(w[0], w[8], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[158]);
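// At this point all helicity amplitudes amp[0]..amp[158] for the current
// helicity combination have been filled; the rest of this routine projects
// them onto the 24 color flows and accumulates the color-summed |M|^2.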
// double CPPProcess::matrix_1_gg_ttxgg() {
int i, j;
// Local variables
// const int ngraphs = 2;
const int ncolor = 24;
thrust::complex<double> ztemp;
thrust::complex<double> jamp[ncolor];
// The color matrix;
static const double denom[ncolor] = {54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54};
static const double cf[ncolor][ncolor] = {{512, -64, -64, 8, 8, 80, -64, 8,
8, -1, -1, -10, 8, -1, 80, -10, 71, 62, -1, -10, -10, 62, 62, -28}, {-64,
512, 8, 80, -64, 8, 8, -64, -1, -10, 8, -1, -1, -10, -10, 62, 62, -28, 8,
-1, 80, -10, 71, 62}, {-64, 8, 512, -64, 80, 8, 8, -1, 80, -10, 71, 62,
-64, 8, 8, -1, -1, -10, -10, -1, 62, -28, -10, 62}, {8, 80, -64, 512, 8,
-64, -1, -10, -10, 62, 62, -28, 8, -64, -1, -10, 8, -1, -1, 8, 71, 62,
80, -10}, {8, -64, 80, 8, 512, -64, -1, 8, 71, 62, 80, -10, -10, -1, 62,
-28, -10, 62, -64, 8, 8, -1, -1, -10}, {80, 8, 8, -64, -64, 512, -10, -1,
62, -28, -10, 62, -1, 8, 71, 62, 80, -10, 8, -64, -1, -10, 8, -1}, {-64,
8, 8, -1, -1, -10, 512, -64, -64, 8, 8, 80, 80, -10, 8, -1, 62, 71, -10,
62, -1, -10, -28, 62}, {8, -64, -1, -10, 8, -1, -64, 512, 8, 80, -64, 8,
-10, 62, -1, -10, -28, 62, 80, -10, 8, -1, 62, 71}, {8, -1, 80, -10, 71,
62, -64, 8, 512, -64, 80, 8, 8, -1, -64, 8, -10, -1, 62, -28, -10, -1,
62, -10}, {-1, -10, -10, 62, 62, -28, 8, 80, -64, 512, 8, -64, -1, -10,
8, -64, -1, 8, 71, 62, -1, 8, -10, 80}, {-1, 8, 71, 62, 80, -10, 8, -64,
80, 8, 512, -64, 62, -28, -10, -1, 62, -10, 8, -1, -64, 8, -10, -1},
{-10, -1, 62, -28, -10, 62, 80, 8, 8, -64, -64, 512, 71, 62, -1, 8, -10,
80, -1, -10, 8, -64, -1, 8}, {8, -1, -64, 8, -10, -1, 80, -10, 8, -1, 62,
71, 512, -64, -64, 8, 8, 80, 62, -10, -28, 62, -1, -10}, {-1, -10, 8,
-64, -1, 8, -10, 62, -1, -10, -28, 62, -64, 512, 8, 80, -64, 8, -10, 80,
62, 71, 8, -1}, {80, -10, 8, -1, 62, 71, 8, -1, -64, 8, -10, -1, -64, 8,
512, -64, 80, 8, -28, 62, 62, -10, -10, -1}, {-10, 62, -1, -10, -28, 62,
-1, -10, 8, -64, -1, 8, 8, 80, -64, 512, 8, -64, 62, 71, -10, 80, -1, 8},
{71, 62, -1, 8, -10, 80, 62, -28, -10, -1, 62, -10, 8, -64, 80, 8, 512,
-64, -1, 8, -10, -1, -64, 8}, {62, -28, -10, -1, 62, -10, 71, 62, -1, 8,
-10, 80, 80, 8, 8, -64, -64, 512, -10, -1, -1, 8, 8, -64}, {-1, 8, -10,
-1, -64, 8, -10, 80, 62, 71, 8, -1, 62, -10, -28, 62, -1, -10, 512, -64,
-64, 8, 8, 80}, {-10, -1, -1, 8, 8, -64, 62, -10, -28, 62, -1, -10, -10,
80, 62, 71, 8, -1, -64, 512, 8, 80, -64, 8}, {-10, 80, 62, 71, 8, -1, -1,
8, -10, -1, -64, 8, -28, 62, 62, -10, -10, -1, -64, 8, 512, -64, 80, 8},
{62, -10, -28, 62, -1, -10, -10, -1, -1, 8, 8, -64, 62, 71, -10, 80, -1,
8, 8, 80, -64, 512, 8, -64}, {62, 71, -10, 80, -1, 8, -28, 62, 62, -10,
-10, -1, -1, 8, -10, -1, -64, 8, 8, -64, 80, 8, 512, -64}, {-28, 62, 62,
-10, -10, -1, 62, 71, -10, 80, -1, 8, -10, -1, -1, 8, 8, -64, 80, 8, 8,
-64, -64, 512}};
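// cf[i][j] is the (symmetric) color matrix in the color-flow basis and
// denom[i] its per-row normalisation; both are contracted against the jamp
// color flows in the loop further below.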
// Calculate color flows
jamp[0] = +thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[1] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[14] + amp[15] + amp[16] + amp[21] + thrust::complex<double> (0, 1)
* amp[23] - amp[29] + thrust::complex<double> (0, 1) * amp[31] + amp[32]
+ amp[33] - amp[35] + thrust::complex<double> (0, 1) * amp[102] +
thrust::complex<double> (0, 1) * amp[103] + thrust::complex<double> (0,
1) * amp[105] + thrust::complex<double> (0, 1) * amp[106] + amp[109] +
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[121] + thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[129] - thrust::complex<double> (0,
1) * amp[131] + amp[135] - amp[137] - thrust::complex<double> (0, 1) *
amp[156] + thrust::complex<double> (0, 1) * amp[158];
jamp[1] = -thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[4] -
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[12] + amp[13] - amp[16] + amp[24] + thrust::complex<double> (0, 1)
* amp[26] - amp[27] - thrust::complex<double> (0, 1) * amp[31] - amp[32]
- amp[33] - amp[34] + thrust::complex<double> (0, 1) * amp[111] +
thrust::complex<double> (0, 1) * amp[112] + thrust::complex<double> (0,
1) * amp[114] + thrust::complex<double> (0, 1) * amp[115] + amp[118] -
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[121] - thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[138] - thrust::complex<double> (0,
1) * amp[140] + amp[144] - amp[146] + thrust::complex<double> (0, 1) *
amp[156] + thrust::complex<double> (0, 1) * amp[157];
jamp[2] = -amp[21] - thrust::complex<double> (0, 1) * amp[23] - amp[24] +
thrust::complex<double> (0, 1) * amp[25] - amp[30] + amp[34] + amp[35] +
amp[60] - thrust::complex<double> (0, 1) * amp[62] +
thrust::complex<double> (0, 1) * amp[63] + thrust::complex<double> (0, 1)
* amp[64] + thrust::complex<double> (0, 1) * amp[66] + amp[70] +
thrust::complex<double> (0, 1) * amp[71] - thrust::complex<double> (0, 1)
* amp[102] - thrust::complex<double> (0, 1) * amp[103] -
thrust::complex<double> (0, 1) * amp[105] - thrust::complex<double> (0,
1) * amp[106] - amp[109] - thrust::complex<double> (0, 1) * amp[112] -
thrust::complex<double> (0, 1) * amp[113] - thrust::complex<double> (0,
1) * amp[115] - thrust::complex<double> (0, 1) * amp[129] -
thrust::complex<double> (0, 1) * amp[130] - amp[135] - amp[136] -
thrust::complex<double> (0, 1) * amp[157] - thrust::complex<double> (0,
1) * amp[158];
jamp[3] = -amp[18] + thrust::complex<double> (0, 1) * amp[20] + amp[24] -
thrust::complex<double> (0, 1) * amp[25] - amp[32] - amp[33] - amp[34] +
thrust::complex<double> (0, 1) * amp[57] + amp[58] - amp[60] +
thrust::complex<double> (0, 1) * amp[62] - thrust::complex<double> (0, 1)
* amp[64] - thrust::complex<double> (0, 1) * amp[65] -
thrust::complex<double> (0, 1) * amp[67] + amp[101] +
thrust::complex<double> (0, 1) * amp[112] + thrust::complex<double> (0,
1) * amp[113] + thrust::complex<double> (0, 1) * amp[115] -
thrust::complex<double> (0, 1) * amp[121] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[124] + amp[150] - amp[152] -
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[155] + thrust::complex<double> (0, 1) * amp[156] +
thrust::complex<double> (0, 1) * amp[157];
jamp[4] = -amp[21] + thrust::complex<double> (0, 1) * amp[22] - amp[24] -
thrust::complex<double> (0, 1) * amp[26] - amp[28] + amp[34] + amp[35] +
amp[78] - thrust::complex<double> (0, 1) * amp[80] +
thrust::complex<double> (0, 1) * amp[81] + thrust::complex<double> (0, 1)
* amp[82] + thrust::complex<double> (0, 1) * amp[84] + amp[88] +
thrust::complex<double> (0, 1) * amp[89] - thrust::complex<double> (0, 1)
* amp[103] - thrust::complex<double> (0, 1) * amp[104] -
thrust::complex<double> (0, 1) * amp[106] - thrust::complex<double> (0,
1) * amp[111] - thrust::complex<double> (0, 1) * amp[112] -
thrust::complex<double> (0, 1) * amp[114] - thrust::complex<double> (0,
1) * amp[115] - amp[118] - thrust::complex<double> (0, 1) * amp[138] -
thrust::complex<double> (0, 1) * amp[139] - amp[144] - amp[145] -
thrust::complex<double> (0, 1) * amp[157] - thrust::complex<double> (0,
1) * amp[158];
jamp[5] = -amp[19] - thrust::complex<double> (0, 1) * amp[20] + amp[21] -
thrust::complex<double> (0, 1) * amp[22] + amp[32] + amp[33] - amp[35] +
thrust::complex<double> (0, 1) * amp[75] + amp[76] - amp[78] +
thrust::complex<double> (0, 1) * amp[80] - thrust::complex<double> (0, 1)
* amp[82] - thrust::complex<double> (0, 1) * amp[83] -
thrust::complex<double> (0, 1) * amp[85] - amp[101] +
thrust::complex<double> (0, 1) * amp[103] + thrust::complex<double> (0,
1) * amp[104] + thrust::complex<double> (0, 1) * amp[106] +
thrust::complex<double> (0, 1) * amp[121] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[124] - amp[150] - amp[151] +
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[154] - thrust::complex<double> (0, 1) * amp[156] +
thrust::complex<double> (0, 1) * amp[158];
jamp[6] = -thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[1] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[14] - amp[15] - amp[16] + amp[55] + thrust::complex<double> (0, 1)
* amp[56] - thrust::complex<double> (0, 1) * amp[63] +
thrust::complex<double> (0, 1) * amp[65] - thrust::complex<double> (0, 1)
* amp[66] + thrust::complex<double> (0, 1) * amp[67] - amp[70] - amp[92]
+ thrust::complex<double> (0, 1) * amp[94] + amp[95] -
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[124] +
thrust::complex<double> (0, 1) * amp[130] + thrust::complex<double> (0,
1) * amp[131] + amp[136] + amp[137] + amp[147] - amp[149] +
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[155];
jamp[7] = +thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[4] +
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[12] - amp[13] + amp[16] + amp[73] + thrust::complex<double> (0, 1)
* amp[74] - thrust::complex<double> (0, 1) * amp[81] +
thrust::complex<double> (0, 1) * amp[83] - thrust::complex<double> (0, 1)
* amp[84] + thrust::complex<double> (0, 1) * amp[85] - amp[88] - amp[90]
- thrust::complex<double> (0, 1) * amp[94] - amp[95] +
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[124] +
thrust::complex<double> (0, 1) * amp[139] + thrust::complex<double> (0,
1) * amp[140] + amp[145] + amp[146] - amp[147] - amp[148] -
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[154];
jamp[8] = -amp[55] - thrust::complex<double> (0, 1) * amp[56] +
thrust::complex<double> (0, 1) * amp[63] - thrust::complex<double> (0, 1)
* amp[65] + thrust::complex<double> (0, 1) * amp[66] -
thrust::complex<double> (0, 1) * amp[67] + amp[70] +
thrust::complex<double> (0, 1) * amp[72] - amp[73] + amp[79] +
thrust::complex<double> (0, 1) * amp[80] - thrust::complex<double> (0, 1)
* amp[82] - thrust::complex<double> (0, 1) * amp[83] -
thrust::complex<double> (0, 1) * amp[85] - amp[93] -
thrust::complex<double> (0, 1) * amp[102] + thrust::complex<double> (0,
1) * amp[104] - thrust::complex<double> (0, 1) * amp[105] - amp[109] +
thrust::complex<double> (0, 1) * amp[110] - thrust::complex<double> (0,
1) * amp[129] - thrust::complex<double> (0, 1) * amp[130] - amp[135] -
amp[136] + amp[148] + amp[149] + thrust::complex<double> (0, 1) *
amp[154] + thrust::complex<double> (0, 1) * amp[155];
jamp[9] = -amp[37] + thrust::complex<double> (0, 1) * amp[38] + amp[39] +
thrust::complex<double> (0, 1) * amp[40] + amp[50] + amp[51] - amp[53] -
thrust::complex<double> (0, 1) * amp[72] + amp[73] - amp[79] -
thrust::complex<double> (0, 1) * amp[80] + thrust::complex<double> (0, 1)
* amp[82] + thrust::complex<double> (0, 1) * amp[83] +
thrust::complex<double> (0, 1) * amp[85] - amp[95] -
thrust::complex<double> (0, 1) * amp[103] - thrust::complex<double> (0,
1) * amp[104] - thrust::complex<double> (0, 1) * amp[106] -
thrust::complex<double> (0, 1) * amp[121] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[124] - amp[147] - amp[148] -
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[154] + thrust::complex<double> (0, 1) * amp[156] -
thrust::complex<double> (0, 1) * amp[158];
jamp[10] = +thrust::complex<double> (0, 1) * amp[54] - amp[55] + amp[61] +
thrust::complex<double> (0, 1) * amp[62] - thrust::complex<double> (0, 1)
* amp[64] - thrust::complex<double> (0, 1) * amp[65] -
thrust::complex<double> (0, 1) * amp[67] - amp[73] -
thrust::complex<double> (0, 1) * amp[74] + thrust::complex<double> (0, 1)
* amp[81] - thrust::complex<double> (0, 1) * amp[83] +
thrust::complex<double> (0, 1) * amp[84] - thrust::complex<double> (0, 1)
* amp[85] + amp[88] - amp[91] - thrust::complex<double> (0, 1) * amp[111]
+ thrust::complex<double> (0, 1) * amp[113] - thrust::complex<double> (0,
1) * amp[114] - amp[118] + thrust::complex<double> (0, 1) * amp[119] -
thrust::complex<double> (0, 1) * amp[138] - thrust::complex<double> (0,
1) * amp[139] - amp[144] - amp[145] + amp[148] + amp[149] +
thrust::complex<double> (0, 1) * amp[154] + thrust::complex<double> (0,
1) * amp[155];
jamp[11] = -amp[36] - thrust::complex<double> (0, 1) * amp[38] + amp[42] +
thrust::complex<double> (0, 1) * amp[43] - amp[50] - amp[51] - amp[52] -
thrust::complex<double> (0, 1) * amp[54] + amp[55] - amp[61] -
thrust::complex<double> (0, 1) * amp[62] + thrust::complex<double> (0, 1)
* amp[64] + thrust::complex<double> (0, 1) * amp[65] +
thrust::complex<double> (0, 1) * amp[67] + amp[95] -
thrust::complex<double> (0, 1) * amp[112] - thrust::complex<double> (0,
1) * amp[113] - thrust::complex<double> (0, 1) * amp[115] +
thrust::complex<double> (0, 1) * amp[121] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[124] + amp[147] - amp[149] +
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[155] - thrust::complex<double> (0, 1) * amp[156] -
thrust::complex<double> (0, 1) * amp[157];
jamp[12] = -thrust::complex<double> (0, 1) * amp[1] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[4] + amp[7] +
thrust::complex<double> (0, 1) * amp[8] - amp[15] - amp[60] +
thrust::complex<double> (0, 1) * amp[62] - thrust::complex<double> (0, 1)
* amp[63] - thrust::complex<double> (0, 1) * amp[64] -
thrust::complex<double> (0, 1) * amp[66] - amp[70] -
thrust::complex<double> (0, 1) * amp[71] - thrust::complex<double> (0, 1)
* amp[111] + thrust::complex<double> (0, 1) * amp[113] -
thrust::complex<double> (0, 1) * amp[114] + amp[116] +
thrust::complex<double> (0, 1) * amp[117] - amp[125] +
thrust::complex<double> (0, 1) * amp[130] + thrust::complex<double> (0,
1) * amp[131] + amp[136] + amp[137] - thrust::complex<double> (0, 1) *
amp[138] + thrust::complex<double> (0, 1) * amp[140] + amp[141] -
amp[143];
jamp[13] = -thrust::complex<double> (0, 1) * amp[57] - amp[58] + amp[60] -
thrust::complex<double> (0, 1) * amp[62] + thrust::complex<double> (0, 1)
* amp[64] + thrust::complex<double> (0, 1) * amp[65] +
thrust::complex<double> (0, 1) * amp[67] - amp[76] +
thrust::complex<double> (0, 1) * amp[77] - thrust::complex<double> (0, 1)
* amp[81] + thrust::complex<double> (0, 1) * amp[83] -
thrust::complex<double> (0, 1) * amp[84] + thrust::complex<double> (0, 1)
* amp[85] + amp[86] - amp[97] + thrust::complex<double> (0, 1) * amp[111]
- thrust::complex<double> (0, 1) * amp[113] + thrust::complex<double> (0,
1) * amp[114] - amp[116] - thrust::complex<double> (0, 1) * amp[117] +
thrust::complex<double> (0, 1) * amp[138] + thrust::complex<double> (0,
1) * amp[139] - amp[141] - amp[142] + amp[151] + amp[152] -
thrust::complex<double> (0, 1) * amp[154] - thrust::complex<double> (0,
1) * amp[155];
jamp[14] = +thrust::complex<double> (0, 1) * amp[1] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[4] - amp[7] -
thrust::complex<double> (0, 1) * amp[8] + amp[15] - amp[79] -
thrust::complex<double> (0, 1) * amp[80] + thrust::complex<double> (0, 1)
* amp[81] + thrust::complex<double> (0, 1) * amp[82] +
thrust::complex<double> (0, 1) * amp[84] - amp[86] +
thrust::complex<double> (0, 1) * amp[87] + thrust::complex<double> (0, 1)
* amp[102] - thrust::complex<double> (0, 1) * amp[104] +
thrust::complex<double> (0, 1) * amp[105] + amp[109] -
thrust::complex<double> (0, 1) * amp[110] - amp[126] +
thrust::complex<double> (0, 1) * amp[129] - thrust::complex<double> (0,
1) * amp[131] + amp[135] - amp[137] - thrust::complex<double> (0, 1) *
amp[139] - thrust::complex<double> (0, 1) * amp[140] + amp[142] +
amp[143];
jamp[15] = -amp[39] - thrust::complex<double> (0, 1) * amp[40] - amp[42] +
thrust::complex<double> (0, 1) * amp[44] - amp[46] + amp[52] + amp[53] +
amp[79] + thrust::complex<double> (0, 1) * amp[80] -
thrust::complex<double> (0, 1) * amp[81] - thrust::complex<double> (0, 1)
* amp[82] - thrust::complex<double> (0, 1) * amp[84] + amp[86] -
thrust::complex<double> (0, 1) * amp[87] + thrust::complex<double> (0, 1)
* amp[103] + thrust::complex<double> (0, 1) * amp[104] +
thrust::complex<double> (0, 1) * amp[106] + thrust::complex<double> (0,
1) * amp[111] + thrust::complex<double> (0, 1) * amp[112] +
thrust::complex<double> (0, 1) * amp[114] + thrust::complex<double> (0,
1) * amp[115] - amp[116] + thrust::complex<double> (0, 1) * amp[138] +
thrust::complex<double> (0, 1) * amp[139] - amp[141] - amp[142] +
thrust::complex<double> (0, 1) * amp[157] + thrust::complex<double> (0,
1) * amp[158];
jamp[16] = -thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[4] -
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[6] - amp[7] + amp[17] + amp[76] - thrust::complex<double> (0, 1) *
amp[77] + thrust::complex<double> (0, 1) * amp[81] -
thrust::complex<double> (0, 1) * amp[83] + thrust::complex<double> (0, 1)
* amp[84] - thrust::complex<double> (0, 1) * amp[85] - amp[86] - amp[96]
+ thrust::complex<double> (0, 1) * amp[100] - amp[101] -
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[124] -
thrust::complex<double> (0, 1) * amp[139] - thrust::complex<double> (0,
1) * amp[140] + amp[142] + amp[143] - amp[150] - amp[151] +
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[154];
jamp[17] = +thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[4] +
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[6] + amp[7] - amp[17] + amp[42] - thrust::complex<double> (0, 1) *
amp[44] - amp[45] + thrust::complex<double> (0, 1) * amp[49] - amp[50] -
amp[51] - amp[52] - thrust::complex<double> (0, 1) * amp[111] -
thrust::complex<double> (0, 1) * amp[112] - thrust::complex<double> (0,
1) * amp[114] - thrust::complex<double> (0, 1) * amp[115] + amp[116] +
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[121] + thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[138] + thrust::complex<double> (0,
1) * amp[140] + amp[141] - amp[143] - thrust::complex<double> (0, 1) *
amp[156] - thrust::complex<double> (0, 1) * amp[157];
jamp[18] = -thrust::complex<double> (0, 1) * amp[1] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[4] + amp[10] +
thrust::complex<double> (0, 1) * amp[11] - amp[13] - amp[78] +
thrust::complex<double> (0, 1) * amp[80] - thrust::complex<double> (0, 1)
* amp[81] - thrust::complex<double> (0, 1) * amp[82] -
thrust::complex<double> (0, 1) * amp[84] - amp[88] -
thrust::complex<double> (0, 1) * amp[89] - thrust::complex<double> (0, 1)
* amp[102] + thrust::complex<double> (0, 1) * amp[104] -
thrust::complex<double> (0, 1) * amp[105] + amp[107] +
thrust::complex<double> (0, 1) * amp[108] - amp[127] -
thrust::complex<double> (0, 1) * amp[129] + thrust::complex<double> (0,
1) * amp[131] + amp[132] - amp[134] + thrust::complex<double> (0, 1) *
amp[139] + thrust::complex<double> (0, 1) * amp[140] + amp[145] +
amp[146];
jamp[19] = -amp[58] + thrust::complex<double> (0, 1) * amp[59] -
thrust::complex<double> (0, 1) * amp[63] + thrust::complex<double> (0, 1)
* amp[65] - thrust::complex<double> (0, 1) * amp[66] +
thrust::complex<double> (0, 1) * amp[67] + amp[68] -
thrust::complex<double> (0, 1) * amp[75] - amp[76] + amp[78] -
thrust::complex<double> (0, 1) * amp[80] + thrust::complex<double> (0, 1)
* amp[82] + thrust::complex<double> (0, 1) * amp[83] +
thrust::complex<double> (0, 1) * amp[85] - amp[99] +
thrust::complex<double> (0, 1) * amp[102] - thrust::complex<double> (0,
1) * amp[104] + thrust::complex<double> (0, 1) * amp[105] - amp[107] -
thrust::complex<double> (0, 1) * amp[108] + thrust::complex<double> (0,
1) * amp[129] + thrust::complex<double> (0, 1) * amp[130] - amp[132] -
amp[133] + amp[151] + amp[152] - thrust::complex<double> (0, 1) *
amp[154] - thrust::complex<double> (0, 1) * amp[155];
jamp[20] = +thrust::complex<double> (0, 1) * amp[1] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[4] - amp[10] -
thrust::complex<double> (0, 1) * amp[11] + amp[13] - amp[61] -
thrust::complex<double> (0, 1) * amp[62] + thrust::complex<double> (0, 1)
* amp[63] + thrust::complex<double> (0, 1) * amp[64] +
thrust::complex<double> (0, 1) * amp[66] - amp[68] +
thrust::complex<double> (0, 1) * amp[69] + thrust::complex<double> (0, 1)
* amp[111] - thrust::complex<double> (0, 1) * amp[113] +
thrust::complex<double> (0, 1) * amp[114] + amp[118] -
thrust::complex<double> (0, 1) * amp[119] - amp[128] -
thrust::complex<double> (0, 1) * amp[130] - thrust::complex<double> (0,
1) * amp[131] + amp[133] + amp[134] + thrust::complex<double> (0, 1) *
amp[138] - thrust::complex<double> (0, 1) * amp[140] + amp[144] -
amp[146];
jamp[21] = -amp[39] + thrust::complex<double> (0, 1) * amp[41] - amp[42] -
thrust::complex<double> (0, 1) * amp[43] - amp[48] + amp[52] + amp[53] +
amp[61] + thrust::complex<double> (0, 1) * amp[62] -
thrust::complex<double> (0, 1) * amp[63] - thrust::complex<double> (0, 1)
* amp[64] - thrust::complex<double> (0, 1) * amp[66] + amp[68] -
thrust::complex<double> (0, 1) * amp[69] + thrust::complex<double> (0, 1)
* amp[102] + thrust::complex<double> (0, 1) * amp[103] +
thrust::complex<double> (0, 1) * amp[105] + thrust::complex<double> (0,
1) * amp[106] - amp[107] + thrust::complex<double> (0, 1) * amp[112] +
thrust::complex<double> (0, 1) * amp[113] + thrust::complex<double> (0,
1) * amp[115] + thrust::complex<double> (0, 1) * amp[129] +
thrust::complex<double> (0, 1) * amp[130] - amp[132] - amp[133] +
thrust::complex<double> (0, 1) * amp[157] + thrust::complex<double> (0,
1) * amp[158];
jamp[22] = +thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[1] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[9] - amp[10] - amp[17] + amp[58] - thrust::complex<double> (0, 1) *
amp[59] + thrust::complex<double> (0, 1) * amp[63] -
thrust::complex<double> (0, 1) * amp[65] + thrust::complex<double> (0, 1)
* amp[66] - thrust::complex<double> (0, 1) * amp[67] - amp[68] - amp[98]
- thrust::complex<double> (0, 1) * amp[100] + amp[101] +
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[124] -
thrust::complex<double> (0, 1) * amp[130] - thrust::complex<double> (0,
1) * amp[131] + amp[133] + amp[134] + amp[150] - amp[152] -
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[155];
jamp[23] = -thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[1] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[9] + amp[10] + amp[17] + amp[39] - thrust::complex<double> (0, 1) *
amp[41] - amp[47] - thrust::complex<double> (0, 1) * amp[49] + amp[50] +
amp[51] - amp[53] - thrust::complex<double> (0, 1) * amp[102] -
thrust::complex<double> (0, 1) * amp[103] - thrust::complex<double> (0,
1) * amp[105] - thrust::complex<double> (0, 1) * amp[106] + amp[107] -
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[121] - thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[129] + thrust::complex<double> (0,
1) * amp[131] + amp[132] - amp[134] + thrust::complex<double> (0, 1) *
amp[156] - thrust::complex<double> (0, 1) * amp[158];
// Sum and square the color flows to get the matrix element
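// i.e. matrix += sum_i Re( (sum_j cf[i][j] * jamp[j]) * conj(jamp[i]) ) / denom[i]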
for(i = 0; i < ncolor; i++ )
{
ztemp = 0.;
for(j = 0; j < ncolor; j++ )
ztemp = ztemp + cf[i][j] * jamp[j];
matrix = matrix + (ztemp * conj(jamp[i])).real()/denom[i];
}
// Store the leading color flows for choice of color
// for(i=0;i < ncolor; i++)
// jamp2[0][i] += real(jamp[i]*conj(jamp[i]));
}
CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads,
bool verbose, bool debug)
: m_numiterations(numiterations), gpu_nblocks(gpublocks),
gpu_nthreads(gputhreads), dim(gpu_nblocks * gpu_nthreads)
{
// Helicities for the process - nodim
static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1, -1, -1}, {-1, -1,
-1, -1, -1, 1}, {-1, -1, -1, -1, 1, -1}, {-1, -1, -1, -1, 1, 1}, {-1, -1,
-1, 1, -1, -1}, {-1, -1, -1, 1, -1, 1}, {-1, -1, -1, 1, 1, -1}, {-1, -1,
-1, 1, 1, 1}, {-1, -1, 1, -1, -1, -1}, {-1, -1, 1, -1, -1, 1}, {-1, -1,
1, -1, 1, -1}, {-1, -1, 1, -1, 1, 1}, {-1, -1, 1, 1, -1, -1}, {-1, -1, 1,
1, -1, 1}, {-1, -1, 1, 1, 1, -1}, {-1, -1, 1, 1, 1, 1}, {-1, 1, -1, -1,
-1, -1}, {-1, 1, -1, -1, -1, 1}, {-1, 1, -1, -1, 1, -1}, {-1, 1, -1, -1,
1, 1}, {-1, 1, -1, 1, -1, -1}, {-1, 1, -1, 1, -1, 1}, {-1, 1, -1, 1, 1,
-1}, {-1, 1, -1, 1, 1, 1}, {-1, 1, 1, -1, -1, -1}, {-1, 1, 1, -1, -1, 1},
{-1, 1, 1, -1, 1, -1}, {-1, 1, 1, -1, 1, 1}, {-1, 1, 1, 1, -1, -1}, {-1,
1, 1, 1, -1, 1}, {-1, 1, 1, 1, 1, -1}, {-1, 1, 1, 1, 1, 1}, {1, -1, -1,
-1, -1, -1}, {1, -1, -1, -1, -1, 1}, {1, -1, -1, -1, 1, -1}, {1, -1, -1,
-1, 1, 1}, {1, -1, -1, 1, -1, -1}, {1, -1, -1, 1, -1, 1}, {1, -1, -1, 1,
1, -1}, {1, -1, -1, 1, 1, 1}, {1, -1, 1, -1, -1, -1}, {1, -1, 1, -1, -1,
1}, {1, -1, 1, -1, 1, -1}, {1, -1, 1, -1, 1, 1}, {1, -1, 1, 1, -1, -1},
{1, -1, 1, 1, -1, 1}, {1, -1, 1, 1, 1, -1}, {1, -1, 1, 1, 1, 1}, {1, 1,
-1, -1, -1, -1}, {1, 1, -1, -1, -1, 1}, {1, 1, -1, -1, 1, -1}, {1, 1, -1,
-1, 1, 1}, {1, 1, -1, 1, -1, -1}, {1, 1, -1, 1, -1, 1}, {1, 1, -1, 1, 1,
-1}, {1, 1, -1, 1, 1, 1}, {1, 1, 1, -1, -1, -1}, {1, 1, 1, -1, -1, 1},
{1, 1, 1, -1, 1, -1}, {1, 1, 1, -1, 1, 1}, {1, 1, 1, 1, -1, -1}, {1, 1,
1, 1, -1, 1}, {1, 1, 1, 1, 1, -1}, {1, 1, 1, 1, 1, 1}};
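// Copy the 2^6 = 64 helicity combinations of the 6 external legs into
// device constant memory (cHel) once at construction time.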
hipMemcpyToSymbol(cHel, tHel, ncomb * nexternal * sizeof(int));
// perm - nodim
// static int perm[nexternal] = {0, 1, 2, 3};
}
CPPProcess::~CPPProcess() {}
const std::vector<double> &CPPProcess::getMasses() const {return mME;}
//--------------------------------------------------------------------------
// Initialize process.
void CPPProcess::initProc(string param_card_name)
{
// Instantiate the model class and set parameters that stay fixed during run
pars = Parameters_sm::getInstance();
SLHAReader slha(param_card_name);
pars->setIndependentParameters(slha);
pars->setIndependentCouplings();
pars->printIndependentParameters();
pars->printIndependentCouplings();
pars->setDependentParameters();
pars->setDependentCouplings();
// Set external particle masses for this matrix element
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->mdl_MT);
mME.push_back(pars->mdl_MT);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
static thrust::complex<double> tIPC[3] = {pars->GC_10, pars->GC_11,
pars->GC_12};
static double tIPD[2] = {pars->mdl_MT, pars->mdl_WT};
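// Copy the three independent couplings (stored as real/imaginary pairs in
// cIPC) and the top mass/width (cIPD) to device constant memory; these stay
// fixed for the whole run.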
hipMemcpyToSymbol(cIPC, tIPC, 3 * sizeof(thrust::complex<double> ));
hipMemcpyToSymbol(cIPD, tIPD, 2 * sizeof(double));
}
//--------------------------------------------------------------------------
// Evaluate |M|^2, part independent of incoming flavour.
__global__ void sigmaKin(double * allmomenta, double * output)
{
// Set the parameters which change event by event
// Need to discuss this with Stefan
// pars->setDependentParameters();
// pars->setDependentCouplings();
// Reset color flows
// for (int xx = 0; xx < 384; ++xx) {
const int nprocesses = 1;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// char *devPtr = (char *)tp.ptr;
// size_t dpt = tp.pitch;
// size_t slicePitch = dpt * 6;
// char *dps = devPtr + dim * slicePitch;
double matrix_element[nprocesses];
thrust::complex<double> amp[159];
double local_m[6][3];
int DIM = blockDim.x * gridDim.x;
// for (int i=0; i<20;i++){
// printf(" %f ", allmomenta[i]);
// }
// printf("\n");
// printf("DIM is %i/%i\n", tid, DIM);
for (int i = 0; i < 6; i++ )
{
for (int j = 0; j < 3; j++ )
{
local_m[i][j] = allmomenta[i * 3 * DIM + j * DIM + tid];
// printf(" %f ", local_m[i][j]);
}
// printf("\n");
}
// Local variables and constants
const int ncomb = 64;
// static bool goodhel[ncomb] = {ncomb * false};
// static int ntry = 0, sum_hel = 0, ngood = 0;
// static int igood[ncomb];
// static int jhel;
// std::complex<double> **wfs;
// double t[1];
// Helicities for the process
// static const int helicities[ncomb][nexternal] =
// {{-1,-1,-1,-1,-1,-1},{-1,-1,-1,-1,-1,1},{-1,-1,-1,-1,1,-1},{-1,-1,-1,-1,1,1
// },{-1,-1,-1,1,-1,-1},{-1,-1,-1,1,-1,1},{-1,-1,-1,1,1,-1},{-1,-1,-1,1,1,1},{
// -1,-1,1,-1,-1,-1},{-1,-1,1,-1,-1,1},{-1,-1,1,-1,1,-1},{-1,-1,1,-1,1,1},{-1,
// -1,1,1,-1,-1},{-1,-1,1,1,-1,1},{-1,-1,1,1,1,-1},{-1,-1,1,1,1,1},{-1,1,-1,-1
// ,-1,-1},{-1,1,-1,-1,-1,1},{-1,1,-1,-1,1,-1},{-1,1,-1,-1,1,1},{-1,1,-1,1,-1,
// -1},{-1,1,-1,1,-1,1},{-1,1,-1,1,1,-1},{-1,1,-1,1,1,1},{-1,1,1,-1,-1,-1},{-1
// ,1,1,-1,-1,1},{-1,1,1,-1,1,-1},{-1,1,1,-1,1,1},{-1,1,1,1,-1,-1},{-1,1,1,1,-
// 1,1},{-1,1,1,1,1,-1},{-1,1,1,1,1,1},{1,-1,-1,-1,-1,-1},{1,-1,-1,-1,-1,1},{1
// ,-1,-1,-1,1,-1},{1,-1,-1,-1,1,1},{1,-1,-1,1,-1,-1},{1,-1,-1,1,-1,1},{1,-1,-
// 1,1,1,-1},{1,-1,-1,1,1,1},{1,-1,1,-1,-1,-1},{1,-1,1,-1,-1,1},{1,-1,1,-1,1,-
// 1},{1,-1,1,-1,1,1},{1,-1,1,1,-1,-1},{1,-1,1,1,-1,1},{1,-1,1,1,1,-1},{1,-1,1
// ,1,1,1},{1,1,-1,-1,-1,-1},{1,1,-1,-1,-1,1},{1,1,-1,-1,1,-1},{1,1,-1,-1,1,1}
// ,{1,1,-1,1,-1,-1},{1,1,-1,1,-1,1},{1,1,-1,1,1,-1},{1,1,-1,1,1,1},{1,1,1,-1,
// -1,-1},{1,1,1,-1,-1,1},{1,1,1,-1,1,-1},{1,1,1,-1,1,1},{1,1,1,1,-1,-1},{1,1,
// 1,1,-1,1},{1,1,1,1,1,-1},{1,1,1,1,1,1}};
// Denominators: spins, colors and identical particles
const int denominators[1] = {512};
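// 512 presumably combines the initial-state average and final-state symmetry
// factors: 2 spins x 2 spins x 8 colors x 8 colors for the incoming gluons,
// times 2! for the two identical outgoing gluons.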
// Reset the matrix elements
for(int i = 0; i < nprocesses; i++ )
{
matrix_element[i] = 0.;
}
// Define permutation
// int perm[nexternal];
// for(int i = 0; i < nexternal; i++){
// perm[i]=i;
// }
for (int ihel = 0; ihel < ncomb; ihel++ )
{
calculate_wavefunctions(ihel, local_m, matrix_element[0]);
}
for (int i = 0; i < nprocesses; ++ i)
{
matrix_element[i] /= denominators[i];
}
for (int i = 0; i < nprocesses; ++ i)
{
output[i * nprocesses + tid] = matrix_element[i];
// printf("output %i %i %i %f", tid, i, i*nprocesses+tid,
// output[i*nprocesses+tid]);
}
}
//==========================================================================
// Private class member functions
//--------------------------------------------------------------------------
| 99678fc0a43ee48c408ede53462d2120185c72e7.cu | //==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "HelAmps_sm.h"
#include <complex>
#include <cmath>
#include <iostream>
#include <cstdlib>
#include <thrust/complex.h>
using namespace std;
namespace MG5_sm
{
__device__ void ixxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fi[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomega[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int ip, im, nh;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fi[0] = thrust::complex<double> (-p[0] * nsf, -p[3] * nsf);
fi[1] = thrust::complex<double> (-p[1] * nsf, -p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.0)
{
pp = min(p[0], sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]));
if (pp == 0.0)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = (1 + nh)/2;
im = (1 - nh)/2;
fi[2] = ip * sqm[ip];
fi[3] = im * nsf * sqm[ip];
fi[4] = ip * nsf * sqm[im];
fi[5] = im * sqm[im];
}
else
{
sf[0] = (1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = (1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomega[0] = sf[0] * omega[ip];
sfomega[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.0);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0);
if (pp3 == 0.0)
{
chi[1] = thrust::complex<double> (-nh, 0);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], p[2])/sqrt(2.0 * pp * pp3);
}
fi[2] = sfomega[0] * chi[im];
fi[3] = sfomega[0] * chi[ip];
fi[4] = sfomega[1] * chi[im];
fi[5] = sfomega[1] * chi[ip];
}
}
else
{
if (p[1] == 0.0 and p[2] == 0.0 and p[3] < 0.0)
{
sqp0p3 = 0.0;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.0)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.0);
if (sqp0p3 == 0.0)
{
chi[1] = thrust::complex<double> (-nhel * sqrt(2.0 * p[0]), 0.0);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], p[2])/sqp0p3;
}
if (nh == 1)
{
fi[2] = thrust::complex<double> (0.0, 0.0);
fi[3] = thrust::complex<double> (0.0, 0.0);
fi[4] = chi[0];
fi[5] = chi[1];
}
else
{
fi[2] = chi[1];
fi[3] = chi[0];
fi[4] = thrust::complex<double> (0.0, 0.0);
fi[5] = thrust::complex<double> (0.0, 0.0);
}
}
return;
}
__device__ void txxxxx(double pvec[3], double tmass, int nhel, int nst,
thrust::complex<double> tc[18])
{
thrust::complex<double> ft[6][4], ep[4], em[4], e0[4];
double pt, pt2, pp, pzpt, emp, sqh, sqs;
int i, j;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + tmass * tmass);
sqh = sqrt(0.5);
sqs = sqrt(0.5/3);
pt2 = p[1] * p[1] + p[2] * p[2];
pp = min(p[0], sqrt(pt2 + p[3] * p[3]));
pt = min(pp, sqrt(pt2));
ft[4][0] = thrust::complex<double> (p[0] * nst, p[3] * nst);
ft[5][0] = thrust::complex<double> (p[1] * nst, p[2] * nst);
// construct eps+
if (nhel >= 0)
{
if (pp == 0)
{
ep[0] = thrust::complex<double> (0, 0);
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] = thrust::complex<double> (0, nst * sqh);
ep[3] = thrust::complex<double> (0, 0);
}
else
{
ep[0] = thrust::complex<double> (0, 0);
ep[3] = thrust::complex<double> (pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = p[3]/(pp * pt) * sqh;
ep[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
ep[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
ep[1] = thrust::complex<double> (-sqh, 0);
ep[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps-
if (nhel <= 0)
{
if (pp == 0)
{
em[0] = thrust::complex<double> (0, 0);
em[1] = thrust::complex<double> (sqh, 0);
em[2] = thrust::complex<double> (0, nst * sqh);
em[3] = thrust::complex<double> (0, 0);
}
else
{
em[0] = thrust::complex<double> (0, 0);
em[3] = thrust::complex<double> (-pt/pp * sqh, 0);
if (pt != 0)
{
pzpt = -p[3]/(pp * pt) * sqh;
em[1] = thrust::complex<double> (-p[1] * pzpt, -nst * p[2]/pt * sqh);
em[2] = thrust::complex<double> (-p[2] * pzpt, nst * p[1]/pt * sqh);
}
else
{
em[1] = thrust::complex<double> (sqh, 0);
em[2] =
thrust::complex<double> (0, nst * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
}
// construct eps0
if (std::labs(nhel) <= 1)
{
if (pp == 0)
{
e0[0] = thrust::complex<double> (0, 0);
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
e0[3] = thrust::complex<double> (1, 0);
}
else
{
emp = p[0]/(tmass * pp);
e0[0] = thrust::complex<double> (pp/tmass, 0);
e0[3] = thrust::complex<double> (p[3] * emp, 0);
if (pt != 0)
{
e0[1] = thrust::complex<double> (p[1] * emp, 0);
e0[2] = thrust::complex<double> (p[2] * emp, 0);
}
else
{
e0[1] = thrust::complex<double> (0, 0);
e0[2] = thrust::complex<double> (0, 0);
}
}
}
if (nhel == 2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = ep[i] * ep[j];
}
}
else if (nhel == -2)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = em[i] * em[j];
}
}
else if (tmass == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = 0;
}
}
else if (tmass != 0)
{
if (nhel == 1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (ep[i] * e0[j] + e0[i] * ep[j]);
}
}
else if (nhel == 0)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] =
sqs * (ep[i] * em[j] + em[i] * ep[j] + 2.0 * e0[i] * e0[j]);
}
}
else if (nhel == -1)
{
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
ft[i][j] = sqh * (em[i] * e0[j] + e0[i] * em[j]);
}
}
else
{
// sr fixme // std::cerr << "Invalid helicity in txxxxx.\n";
// sr fixme // std::exit(1);
}
}
tc[0] = ft[4][0];
tc[1] = ft[5][0];
for (j = 0; j < 4; j++ )
{
for (i = 0; i < 4; i++ )
tc[j * 4 + i + 2] = ft[j][i];
}
}
__device__ void vxxxxx(double pvec[3], double vmass, int nhel, int nsv,
thrust::complex<double> vc[6])
{
double hel, hel0, pt, pt2, pp, pzpt, emp, sqh;
int nsvahl;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + vmass * vmass);
sqh = sqrt(0.5);
hel = double(nhel);
nsvahl = nsv * std::abs(hel);
pt2 = (p[1] * p[1]) + (p[2] * p[2]);
pp = min(p[0], sqrt(pt2 + (p[3] * p[3])));
pt = min(pp, sqrt(pt2));
vc[0] = thrust::complex<double> (p[0] * nsv, p[3] * nsv);
vc[1] = thrust::complex<double> (p[1] * nsv, p[2] * nsv);
if (vmass != 0.0)
{
hel0 = 1.0 - std::abs(hel);
if (pp == 0.0)
{
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * sqh);
vc[5] = thrust::complex<double> (hel0, 0.0);
}
else
{
emp = p[0]/(vmass * pp);
vc[2] = thrust::complex<double> (hel0 * pp/vmass, 0.0);
vc[5] =
thrust::complex<double> (hel0 * p[3] * emp + hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (hel0 * p[1] * emp - p[1] * pzpt,
- nsvahl * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (hel0 * p[2] * emp - p[2] * pzpt,
nsvahl * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] = thrust::complex<double> (0.0, nsvahl * (p[3] < 0) ? - abs(sqh)
: abs(sqh));
}
}
}
else
{
pp = p[0];
pt = sqrt((p[1] * p[1]) + (p[2] * p[2]));
vc[2] = thrust::complex<double> (0.0, 0.0);
vc[5] = thrust::complex<double> (hel * pt/pp * sqh, 0.0);
if (pt != 0.0)
{
pzpt = p[3]/(pp * pt) * sqh * hel;
vc[3] = thrust::complex<double> (-p[1] * pzpt, -nsv * p[2]/pt * sqh);
vc[4] = thrust::complex<double> (-p[2] * pzpt, nsv * p[1]/pt * sqh);
}
else
{
vc[3] = thrust::complex<double> (-hel * sqh, 0.0);
vc[4] =
thrust::complex<double> (0.0, nsv * (p[3] < 0) ? - abs(sqh) : abs(sqh));
}
}
return;
}
__device__ void sxxxxx(double pvec[3], int nss, thrust::complex<double> sc[3])
{
// double p[4] = {0, pvec[0], pvec[1], pvec[2]};
// p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3]+fmass*fmass);
double p[4] = {0, 0, 0, 0};
printf("scalar not supported so far. to do: fix mass issue");
sc[2] = thrust::complex<double> (1.00, 0.00);
sc[0] = thrust::complex<double> (p[0] * nss, p[3] * nss);
sc[1] = thrust::complex<double> (p[1] * nss, p[2] * nss);
return;
}
__device__ void oxxxxx(double pvec[3], double fmass, int nhel, int nsf,
thrust::complex<double> fo[6])
{
thrust::complex<double> chi[2];
double sf[2], sfomeg[2], omega[2], pp, pp3, sqp0p3, sqm[2];
int nh, ip, im;
double p[4] = {0, pvec[0], pvec[1], pvec[2]};
p[0] = sqrt(p[1] * p[1] + p[2] * p[2] + p[3] * p[3] + fmass * fmass);
fo[0] = thrust::complex<double> (p[0] * nsf, p[3] * nsf);
fo[1] = thrust::complex<double> (p[1] * nsf, p[2] * nsf);
nh = nhel * nsf;
if (fmass != 0.000)
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
if (pp == 0.000)
{
sqm[0] = sqrt(std::abs(fmass));
sqm[1] = (fmass < 0) ? - abs(sqm[0]) : abs(sqm[0]);
ip = -((1 - nh)/2) * nhel;
im = (1 + nh)/2 * nhel;
fo[2] = im * sqm[std::abs(ip)];
fo[3] = ip * nsf * sqm[std::abs(ip)];
fo[4] = im * nsf * sqm[std::abs(im)];
fo[5] = ip * sqm[std::abs(im)];
}
else
{
pp = min(p[0], sqrt((p[1] * p[1]) + (p[2] * p[2]) + (p[3] * p[3])));
sf[0] = double(1 + nsf + (1 - nsf) * nh) * 0.5;
sf[1] = double(1 + nsf - (1 - nsf) * nh) * 0.5;
omega[0] = sqrt(p[0] + pp);
omega[1] = fmass/omega[0];
ip = (1 + nh)/2;
im = (1 - nh)/2;
sfomeg[0] = sf[0] * omega[ip];
sfomeg[1] = sf[1] * omega[im];
pp3 = max(pp + p[3], 0.00);
chi[0] = thrust::complex<double> (sqrt(pp3 * 0.5/pp), 0.00);
if (pp3 == 0.00)
{
chi[1] = thrust::complex<double> (-nh, 0.00);
}
else
{
chi[1] =
thrust::complex<double> (nh * p[1], -p[2])/sqrt(2.0 * pp * pp3);
}
fo[2] = sfomeg[1] * chi[im];
fo[3] = sfomeg[1] * chi[ip];
fo[4] = sfomeg[0] * chi[im];
fo[5] = sfomeg[0] * chi[ip];
}
}
else
{
if ((p[1] == 0.00) and (p[2] == 0.00) and (p[3] < 0.00))
{
sqp0p3 = 0.00;
}
else
{
sqp0p3 = sqrt(max(p[0] + p[3], 0.00)) * nsf;
}
chi[0] = thrust::complex<double> (sqp0p3, 0.00);
if (sqp0p3 == 0.000)
{
chi[1] = thrust::complex<double> (-nhel, 0.00) * sqrt(2.0 * p[0]);
}
else
{
chi[1] = thrust::complex<double> (nh * p[1], -p[2])/sqp0p3;
}
if (nh == 1)
{
fo[2] = chi[0];
fo[3] = chi[1];
fo[4] = thrust::complex<double> (0.00, 0.00);
fo[5] = thrust::complex<double> (0.00, 0.00);
}
else
{
fo[2] = thrust::complex<double> (0.00, 0.00);
fo[3] = thrust::complex<double> (0.00, 0.00);
fo[4] = chi[1];
fo[5] = chi[0];
}
}
return;
}
__device__ void VVVV3_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> V4[], const thrust::complex<double> COUP,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP1;
thrust::complex<double> TMP2;
thrust::complex<double> TMP3;
TMP3 = (V1[2] * V2[2] - V1[3] * V2[3] - V1[4] * V2[4] - V1[5] * V2[5]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
TMP0 = (V4[2] * V1[2] - V4[3] * V1[3] - V4[4] * V1[4] - V4[5] * V1[5]);
(*vertex) = COUP * (-cI * (TMP0 * TMP1) + cI * (TMP2 * TMP3));
}
__device__ void VVVV3P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> V4[], const
thrust::complex<double> COUP, const double M1, const double W1,
thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP2;
thrust::complex<double> denom;
V1[0] = +V2[0] + V3[0] + V4[0];
V1[1] = +V2[1] + V3[1] + V4[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (-cI * (V4[2] * TMP1) + cI * (V2[2] * TMP2));
V1[3] = denom * (-cI * (V4[3] * TMP1) + cI * (V2[3] * TMP2));
V1[4] = denom * (-cI * (V4[4] * TMP1) + cI * (V2[4] * TMP2));
V1[5] = denom * (-cI * (V4[5] * TMP1) + cI * (V2[5] * TMP2));
}
__device__ void VVVV1_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> V4[], const thrust::complex<double> COUP,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP0;
thrust::complex<double> TMP1;
thrust::complex<double> TMP4;
thrust::complex<double> TMP5;
TMP5 = (V1[2] * V3[2] - V1[3] * V3[3] - V1[4] * V3[4] - V1[5] * V3[5]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP0 = (V4[2] * V1[2] - V4[3] * V1[3] - V4[4] * V1[4] - V4[5] * V1[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
(*vertex) = COUP * (-cI * (TMP0 * TMP1) + cI * (TMP4 * TMP5));
}
__device__ void VVVV1P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> V4[], const
thrust::complex<double> COUP, const double M1, const double W1,
thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP4;
thrust::complex<double> denom;
V1[0] = +V2[0] + V3[0] + V4[0];
V1[1] = +V2[1] + V3[1] + V4[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (-cI * (V4[2] * TMP1) + cI * (V3[2] * TMP4));
V1[3] = denom * (-cI * (V4[3] * TMP1) + cI * (V3[3] * TMP4));
V1[4] = denom * (-cI * (V4[4] * TMP1) + cI * (V3[4] * TMP4));
V1[5] = denom * (-cI * (V4[5] * TMP1) + cI * (V3[5] * TMP4));
}
__device__ void FFV1_0(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP6;
TMP6 = (F1[2] * (F2[4] * (V3[2] + V3[5]) + F2[5] * (V3[3] + cI * (V3[4]))) +
(F1[3] * (F2[4] * (V3[3] - cI * (V3[4])) + F2[5] * (V3[2] - V3[5])) +
(F1[4] * (F2[2] * (V3[2] - V3[5]) - F2[3] * (V3[3] + cI * (V3[4]))) +
F1[5] * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] * (V3[2] + V3[5])))));
(*vertex) = COUP * - cI * TMP6;
}
__device__ void FFV1_1(thrust::complex<double> F2[], const
thrust::complex<double> V3[], const thrust::complex<double> COUP, const
double M1, const double W1, thrust::complex<double> F1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> denom;
F1[0] = +F2[0] + V3[0];
F1[1] = +F2[1] + V3[1];
P1[0] = -F1[0].real();
P1[1] = -F1[1].real();
P1[2] = -F1[1].imag();
P1[3] = -F1[0].imag();
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
F1[2] = denom * cI * (F2[2] * (P1[0] * (-V3[2] + V3[5]) + (P1[1] * (V3[3] -
cI * (V3[4])) + (P1[2] * (+cI * (V3[3]) + V3[4]) + P1[3] * (-V3[2] +
V3[5])))) + (F2[3] * (P1[0] * (V3[3] + cI * (V3[4])) + (P1[1] * (-1.) *
(V3[2] + V3[5]) + (P1[2] * (-1.) * (+cI * (V3[2] + V3[5])) + P1[3] *
(V3[3] + cI * (V3[4]))))) + M1 * (F2[4] * (V3[2] + V3[5]) + F2[5] *
(V3[3] + cI * (V3[4])))));
F1[3] = denom * (-cI) * (F2[2] * (P1[0] * (-V3[3] + cI * (V3[4])) + (P1[1] *
(V3[2] - V3[5]) + (P1[2] * (-cI * (V3[2]) + cI * (V3[5])) + P1[3] *
(V3[3] - cI * (V3[4]))))) + (F2[3] * (P1[0] * (V3[2] + V3[5]) + (P1[1] *
(-1.) * (V3[3] + cI * (V3[4])) + (P1[2] * (+cI * (V3[3]) - V3[4]) - P1[3]
* (V3[2] + V3[5])))) + M1 * (F2[4] * (-V3[3] + cI * (V3[4])) + F2[5] *
(-V3[2] + V3[5]))));
F1[4] = denom * (-cI) * (F2[4] * (P1[0] * (V3[2] + V3[5]) + (P1[1] * (-V3[3]
+ cI * (V3[4])) + (P1[2] * (-1.) * (+cI * (V3[3]) + V3[4]) - P1[3] *
(V3[2] + V3[5])))) + (F2[5] * (P1[0] * (V3[3] + cI * (V3[4])) + (P1[1] *
(-V3[2] + V3[5]) + (P1[2] * (-cI * (V3[2]) + cI * (V3[5])) - P1[3] *
(V3[3] + cI * (V3[4]))))) + M1 * (F2[2] * (-V3[2] + V3[5]) + F2[3] *
(V3[3] + cI * (V3[4])))));
F1[5] = denom * cI * (F2[4] * (P1[0] * (-V3[3] + cI * (V3[4])) + (P1[1] *
(V3[2] + V3[5]) + (P1[2] * (-1.) * (+cI * (V3[2] + V3[5])) + P1[3] *
(-V3[3] + cI * (V3[4]))))) + (F2[5] * (P1[0] * (-V3[2] + V3[5]) + (P1[1]
* (V3[3] + cI * (V3[4])) + (P1[2] * (-cI * (V3[3]) + V3[4]) + P1[3] *
(-V3[2] + V3[5])))) + M1 * (F2[2] * (-V3[3] + cI * (V3[4])) + F2[3] *
(V3[2] + V3[5]))));
}
__device__ void FFV1_2(thrust::complex<double> F1[], const
thrust::complex<double> V3[], const thrust::complex<double> COUP, const
double M2, const double W2, thrust::complex<double> F2[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P2[4];
thrust::complex<double> denom;
F2[0] = +F1[0] + V3[0];
F2[1] = +F1[1] + V3[1];
P2[0] = -F2[0].real();
P2[1] = -F2[1].real();
P2[2] = -F2[1].imag();
P2[3] = -F2[0].imag();
denom = COUP/((P2[0] * P2[0]) - (P2[1] * P2[1]) - (P2[2] * P2[2]) - (P2[3] *
P2[3]) - M2 * (M2 - cI * W2));
F2[2] = denom * cI * (F1[2] * (P2[0] * (V3[2] + V3[5]) + (P2[1] * (-1.) *
(V3[3] + cI * (V3[4])) + (P2[2] * (+cI * (V3[3]) - V3[4]) - P2[3] *
(V3[2] + V3[5])))) + (F1[3] * (P2[0] * (V3[3] - cI * (V3[4])) + (P2[1] *
(-V3[2] + V3[5]) + (P2[2] * (+cI * (V3[2]) - cI * (V3[5])) + P2[3] *
(-V3[3] + cI * (V3[4]))))) + M2 * (F1[4] * (V3[2] - V3[5]) + F1[5] *
(-V3[3] + cI * (V3[4])))));
F2[3] = denom * (-cI) * (F1[2] * (P2[0] * (-1.) * (V3[3] + cI * (V3[4])) +
(P2[1] * (V3[2] + V3[5]) + (P2[2] * (+cI * (V3[2] + V3[5])) - P2[3] *
(V3[3] + cI * (V3[4]))))) + (F1[3] * (P2[0] * (-V3[2] + V3[5]) + (P2[1] *
(V3[3] - cI * (V3[4])) + (P2[2] * (+cI * (V3[3]) + V3[4]) + P2[3] *
(-V3[2] + V3[5])))) + M2 * (F1[4] * (V3[3] + cI * (V3[4])) - F1[5] *
(V3[2] + V3[5]))));
F2[4] = denom * (-cI) * (F1[4] * (P2[0] * (-V3[2] + V3[5]) + (P2[1] * (V3[3]
+ cI * (V3[4])) + (P2[2] * (-cI * (V3[3]) + V3[4]) + P2[3] * (-V3[2] +
V3[5])))) + (F1[5] * (P2[0] * (V3[3] - cI * (V3[4])) + (P2[1] * (-1.) *
(V3[2] + V3[5]) + (P2[2] * (+cI * (V3[2] + V3[5])) + P2[3] * (V3[3] - cI
* (V3[4]))))) + M2 * (F1[2] * (-1.) * (V3[2] + V3[5]) + F1[3] * (-V3[3] +
cI * (V3[4])))));
F2[5] = denom * cI * (F1[4] * (P2[0] * (-1.) * (V3[3] + cI * (V3[4])) +
(P2[1] * (V3[2] - V3[5]) + (P2[2] * (+cI * (V3[2]) - cI * (V3[5])) +
P2[3] * (V3[3] + cI * (V3[4]))))) + (F1[5] * (P2[0] * (V3[2] + V3[5]) +
(P2[1] * (-V3[3] + cI * (V3[4])) + (P2[2] * (-1.) * (+cI * (V3[3]) +
V3[4]) - P2[3] * (V3[2] + V3[5])))) + M2 * (F1[2] * (V3[3] + cI *
(V3[4])) + F1[3] * (V3[2] - V3[5]))));
}
__device__ void FFV1P0_3(thrust::complex<double> F1[], const
thrust::complex<double> F2[], const thrust::complex<double> COUP, const
double M3, const double W3, thrust::complex<double> V3[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P3[4];
thrust::complex<double> denom;
V3[0] = +F1[0] + F2[0];
V3[1] = +F1[1] + F2[1];
P3[0] = -V3[0].real();
P3[1] = -V3[1].real();
P3[2] = -V3[1].imag();
P3[3] = -V3[0].imag();
denom = COUP/((P3[0] * P3[0]) - (P3[1] * P3[1]) - (P3[2] * P3[2]) - (P3[3] *
P3[3]) - M3 * (M3 - cI * W3));
V3[2] = denom * (-cI) * (F1[2] * F2[4] + F1[3] * F2[5] + F1[4] * F2[2] +
F1[5] * F2[3]);
V3[3] = denom * (-cI) * (-F1[2] * F2[5] - F1[3] * F2[4] + F1[4] * F2[3] +
F1[5] * F2[2]);
V3[4] = denom * (-cI) * (-cI * (F1[2] * F2[5] + F1[5] * F2[2]) + cI * (F1[3]
* F2[4] + F1[4] * F2[3]));
V3[5] = denom * (-cI) * (-F1[2] * F2[4] - F1[5] * F2[3] + F1[3] * F2[5] +
F1[4] * F2[2]);
}
__device__ void VVVV4_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> V4[], const thrust::complex<double> COUP,
thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
thrust::complex<double> TMP2;
thrust::complex<double> TMP3;
thrust::complex<double> TMP4;
thrust::complex<double> TMP5;
TMP3 = (V1[2] * V2[2] - V1[3] * V2[3] - V1[4] * V2[4] - V1[5] * V2[5]);
TMP5 = (V1[2] * V3[2] - V1[3] * V3[3] - V1[4] * V3[4] - V1[5] * V3[5]);
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
(*vertex) = COUP * (-cI * (TMP4 * TMP5) + cI * (TMP2 * TMP3));
}
__device__ void VVVV4P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> V4[], const
thrust::complex<double> COUP, const double M1, const double W1,
thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
thrust::complex<double> TMP2;
thrust::complex<double> TMP4;
thrust::complex<double> denom;
V1[0] = +V2[0] + V3[0] + V4[0];
V1[1] = +V2[1] + V3[1] + V4[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP2 = (V4[2] * V3[2] - V4[3] * V3[3] - V4[4] * V3[4] - V4[5] * V3[5]);
TMP4 = (V4[2] * V2[2] - V4[3] * V2[3] - V4[4] * V2[4] - V4[5] * V2[5]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (-cI * (V3[2] * TMP4) + cI * (V2[2] * TMP2));
V1[3] = denom * (-cI * (V3[3] * TMP4) + cI * (V2[3] * TMP2));
V1[4] = denom * (-cI * (V3[4] * TMP4) + cI * (V2[4] * TMP2));
V1[5] = denom * (-cI * (V3[5] * TMP4) + cI * (V2[5] * TMP2));
}
__device__ void VVV1_0(thrust::complex<double> V1[], const
thrust::complex<double> V2[], const thrust::complex<double> V3[], const
thrust::complex<double> COUP, thrust::complex<double> * vertex)
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
double P2[4];
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP10;
thrust::complex<double> TMP11;
thrust::complex<double> TMP12;
thrust::complex<double> TMP3;
thrust::complex<double> TMP5;
thrust::complex<double> TMP7;
thrust::complex<double> TMP8;
thrust::complex<double> TMP9;
P1[0] = V1[0].real();
P1[1] = V1[1].real();
P1[2] = V1[1].imag();
P1[3] = V1[0].imag();
P2[0] = V2[0].real();
P2[1] = V2[1].real();
P2[2] = V2[1].imag();
P2[3] = V2[0].imag();
P3[0] = V3[0].real();
P3[1] = V3[1].real();
P3[2] = V3[1].imag();
P3[3] = V3[0].imag();
TMP9 = (V2[2] * P1[0] - V2[3] * P1[1] - V2[4] * P1[2] - V2[5] * P1[3]);
TMP8 = (V3[2] * P2[0] - V3[3] * P2[1] - V3[4] * P2[2] - V3[5] * P2[3]);
TMP3 = (V1[2] * V2[2] - V1[3] * V2[3] - V1[4] * V2[4] - V1[5] * V2[5]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP7 = (V3[2] * P1[0] - V3[3] * P1[1] - V3[4] * P1[2] - V3[5] * P1[3]);
TMP5 = (V1[2] * V3[2] - V1[3] * V3[3] - V1[4] * V3[4] - V1[5] * V3[5]);
TMP10 = (V2[2] * P3[0] - V2[3] * P3[1] - V2[4] * P3[2] - V2[5] * P3[3]);
TMP11 = (V1[2] * P2[0] - V1[3] * P2[1] - V1[4] * P2[2] - V1[5] * P2[3]);
TMP12 = (V1[2] * P3[0] - V1[3] * P3[1] - V1[4] * P3[2] - V1[5] * P3[3]);
(*vertex) = COUP * (TMP1 * (-cI * (TMP11) + cI * (TMP12)) + (TMP3 * (-cI *
(TMP7) + cI * (TMP8)) + TMP5 * (+cI * (TMP9) - cI * (TMP10))));
}
__device__ void VVV1P0_1(thrust::complex<double> V2[], const
thrust::complex<double> V3[], const thrust::complex<double> COUP, const
double M1, const double W1, thrust::complex<double> V1[])
{
thrust::complex<double> cI = thrust::complex<double> (0., 1.);
double P1[4];
double P2[4];
double P3[4];
thrust::complex<double> TMP1;
thrust::complex<double> TMP10;
thrust::complex<double> TMP7;
thrust::complex<double> TMP8;
thrust::complex<double> TMP9;
thrust::complex<double> denom;
P2[0] = V2[0].real();
P2[1] = V2[1].real();
P2[2] = V2[1].imag();
P2[3] = V2[0].imag();
P3[0] = V3[0].real();
P3[1] = V3[1].real();
P3[2] = V3[1].imag();
P3[3] = V3[0].imag();
V1[0] = +V2[0] + V3[0];
V1[1] = +V2[1] + V3[1];
P1[0] = -V1[0].real();
P1[1] = -V1[1].real();
P1[2] = -V1[1].imag();
P1[3] = -V1[0].imag();
TMP9 = (V2[2] * P1[0] - V2[3] * P1[1] - V2[4] * P1[2] - V2[5] * P1[3]);
TMP8 = (V3[2] * P2[0] - V3[3] * P2[1] - V3[4] * P2[2] - V3[5] * P2[3]);
TMP1 = (V3[2] * V2[2] - V3[3] * V2[3] - V3[4] * V2[4] - V3[5] * V2[5]);
TMP7 = (V3[2] * P1[0] - V3[3] * P1[1] - V3[4] * P1[2] - V3[5] * P1[3]);
TMP10 = (V2[2] * P3[0] - V2[3] * P3[1] - V2[4] * P3[2] - V2[5] * P3[3]);
denom = COUP/((P1[0] * P1[0]) - (P1[1] * P1[1]) - (P1[2] * P1[2]) - (P1[3] *
P1[3]) - M1 * (M1 - cI * W1));
V1[2] = denom * (TMP1 * (-cI * (P2[0]) + cI * (P3[0])) + (V2[2] * (-cI *
(TMP7) + cI * (TMP8)) + V3[2] * (+cI * (TMP9) - cI * (TMP10))));
V1[3] = denom * (TMP1 * (-cI * (P2[1]) + cI * (P3[1])) + (V2[3] * (-cI *
(TMP7) + cI * (TMP8)) + V3[3] * (+cI * (TMP9) - cI * (TMP10))));
V1[4] = denom * (TMP1 * (-cI * (P2[2]) + cI * (P3[2])) + (V2[4] * (-cI *
(TMP7) + cI * (TMP8)) + V3[4] * (+cI * (TMP9) - cI * (TMP10))));
V1[5] = denom * (TMP1 * (-cI * (P2[3]) + cI * (P3[3])) + (V2[5] * (-cI *
(TMP7) + cI * (TMP8)) + V3[5] * (+cI * (TMP9) - cI * (TMP10))));
}
} // end namespace $(namespace)s_sm
//==========================================================================
// This file has been automatically generated for C++ Standalone by
// MadGraph5_aMC@NLO v. 2.7.3.py3, 2020-06-28
// By the MadGraph5_aMC@NLO Development Team
// Visit launchpad.net/madgraph5 and amcatnlo.web.cern.ch
//==========================================================================
#include "CPPProcess.h"
#include "HelAmps_sm.h"
#include <algorithm>
#include <iostream>
#include <thrust/complex.h>
using namespace MG5_sm;
//==========================================================================
// Class member functions for calculating the matrix elements for
// Process: g g > t t~ g g WEIGHTED<=4 @1
__constant__ int cHel[64][6];
// __constant__ double cmME[6]; value hardcoded now
// extern __constant__ int cPerm[4];
//
__constant__ double cIPC[6]; // coupling ?
__constant__ double cIPD[2];
// Evaluate |M|^2 for each subprocess
__device__ void calculate_wavefunctions(int ihel, double local_mom[6][3],
double &matrix)
{
thrust::complex<double> amp[159];
// Calculate wavefunctions for all processes
thrust::complex<double> w[26][6];
vxxxxx(local_mom[0], 0., cHel[ihel][0], -1, w[0]);
vxxxxx(local_mom[1], 0., cHel[ihel][1], -1, w[1]);
oxxxxx(local_mom[2], cIPD[0], cHel[ihel][2], +1, w[2]);
ixxxxx(local_mom[3], cIPD[0], cHel[ihel][3], -1, w[3]);
vxxxxx(local_mom[4], 0., cHel[ihel][4], +1, w[4]);
vxxxxx(local_mom[5], 0., cHel[ihel][5], +1, w[5]);
VVV1P0_1(w[0], w[1], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[6]);
FFV1P0_3(w[3], w[2], thrust::complex<double> (cIPC[2], cIPC[3]), 0., 0.,
w[7]);
// Amplitude(s) for diagram number 1
VVVV1_0(w[6], w[7], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[0]);
VVVV3_0(w[6], w[7], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[1]);
VVVV4_0(w[6], w[7], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[2]);
VVV1P0_1(w[6], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[8]);
// Amplitude(s) for diagram number 2
VVV1_0(w[7], w[5], w[8], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[3]);
VVV1P0_1(w[6], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[9]);
// Amplitude(s) for diagram number 3
VVV1_0(w[7], w[4], w[9], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[4]);
VVV1P0_1(w[4], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[10]);
// Amplitude(s) for diagram number 4
VVV1_0(w[6], w[7], w[10], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[5]);
FFV1_1(w[2], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[11]);
FFV1_2(w[3], w[6], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
// Amplitude(s) for diagram number 5
FFV1_0(w[12], w[11], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[6]);
// Amplitude(s) for diagram number 6
FFV1_0(w[3], w[11], w[9], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[7]);
FFV1_2(w[3], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[13]);
// Amplitude(s) for diagram number 7
FFV1_0(w[13], w[11], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[8]);
FFV1_1(w[2], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[14]);
// Amplitude(s) for diagram number 8
FFV1_0(w[12], w[14], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[9]);
// Amplitude(s) for diagram number 9
FFV1_0(w[3], w[14], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[10]);
FFV1_2(w[3], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[15]);
// Amplitude(s) for diagram number 10
FFV1_0(w[15], w[14], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[11]);
FFV1_1(w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[16]);
// Amplitude(s) for diagram number 11
FFV1_0(w[15], w[16], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[12]);
// Amplitude(s) for diagram number 12
FFV1_0(w[15], w[2], w[9], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[13]);
// Amplitude(s) for diagram number 13
FFV1_0(w[13], w[16], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[14]);
// Amplitude(s) for diagram number 14
FFV1_0(w[13], w[2], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[15]);
// Amplitude(s) for diagram number 15
FFV1_0(w[3], w[16], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[16]);
// Amplitude(s) for diagram number 16
FFV1_0(w[12], w[2], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[17]);
FFV1_1(w[2], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
FFV1_2(w[3], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[16]);
FFV1_1(w[12], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[8]);
// Amplitude(s) for diagram number 17
FFV1_0(w[16], w[8], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[18]);
FFV1_1(w[12], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[9]);
// Amplitude(s) for diagram number 18
FFV1_0(w[16], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[19]);
// Amplitude(s) for diagram number 19
FFV1_0(w[16], w[12], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[20]);
VVV1P0_1(w[1], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[6]);
FFV1P0_3(w[3], w[12], thrust::complex<double> (cIPC[2], cIPC[3]), 0., 0.,
w[17]);
// Amplitude(s) for diagram number 20
VVV1_0(w[6], w[5], w[17], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[21]);
// Amplitude(s) for diagram number 21
FFV1_0(w[3], w[9], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[22]);
// Amplitude(s) for diagram number 22
FFV1_0(w[13], w[12], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[23]);
VVV1P0_1(w[1], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[18]);
// Amplitude(s) for diagram number 23
VVV1_0(w[18], w[4], w[17], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[24]);
// Amplitude(s) for diagram number 24
FFV1_0(w[3], w[8], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[25]);
// Amplitude(s) for diagram number 25
FFV1_0(w[15], w[12], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[26]);
FFV1_1(w[12], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[19]);
// Amplitude(s) for diagram number 26
FFV1_0(w[15], w[19], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[27]);
// Amplitude(s) for diagram number 27
FFV1_0(w[15], w[9], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[28]);
// Amplitude(s) for diagram number 28
FFV1_0(w[13], w[19], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[29]);
// Amplitude(s) for diagram number 29
FFV1_0(w[13], w[8], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[30]);
// Amplitude(s) for diagram number 30
FFV1_0(w[3], w[19], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[31]);
// Amplitude(s) for diagram number 31
VVV1_0(w[1], w[10], w[17], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[32]);
VVVV1P0_1(w[1], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[17]);
VVVV3P0_1(w[1], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[19]);
VVVV4P0_1(w[1], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[8]);
// Amplitude(s) for diagram number 32
FFV1_0(w[3], w[12], w[17], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[33]);
FFV1_0(w[3], w[12], w[19], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[34]);
FFV1_0(w[3], w[12], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[35]);
FFV1_2(w[3], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
FFV1_1(w[2], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[9]);
FFV1_2(w[12], w[4], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[20]);
// Amplitude(s) for diagram number 33
FFV1_0(w[20], w[9], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[36]);
FFV1_2(w[12], w[5], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[21]);
// Amplitude(s) for diagram number 34
FFV1_0(w[21], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[37]);
// Amplitude(s) for diagram number 35
FFV1_0(w[12], w[9], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[38]);
FFV1P0_3(w[12], w[2], thrust::complex<double> (cIPC[2], cIPC[3]), 0., 0.,
w[22]);
// Amplitude(s) for diagram number 36
VVV1_0(w[6], w[5], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[39]);
// Amplitude(s) for diagram number 37
FFV1_0(w[21], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[40]);
// Amplitude(s) for diagram number 38
FFV1_0(w[12], w[14], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[41]);
// Amplitude(s) for diagram number 39
VVV1_0(w[18], w[4], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[42]);
// Amplitude(s) for diagram number 40
FFV1_0(w[20], w[2], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[43]);
// Amplitude(s) for diagram number 41
FFV1_0(w[12], w[11], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[44]);
FFV1_2(w[12], w[1], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[23]);
// Amplitude(s) for diagram number 42
FFV1_0(w[23], w[11], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[45]);
// Amplitude(s) for diagram number 43
FFV1_0(w[21], w[11], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[46]);
// Amplitude(s) for diagram number 44
FFV1_0(w[23], w[14], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[47]);
// Amplitude(s) for diagram number 45
FFV1_0(w[20], w[14], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[48]);
// Amplitude(s) for diagram number 46
FFV1_0(w[23], w[2], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[49]);
// Amplitude(s) for diagram number 47
VVV1_0(w[1], w[10], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[50]);
// Amplitude(s) for diagram number 48
FFV1_0(w[12], w[2], w[17], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[51]);
FFV1_0(w[12], w[2], w[19], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[52]);
FFV1_0(w[12], w[2], w[8], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[53]);
VVV1P0_1(w[0], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[12]);
FFV1_2(w[3], w[12], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[22]);
// Amplitude(s) for diagram number 49
FFV1_0(w[22], w[9], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[54]);
VVV1P0_1(w[12], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[23]);
// Amplitude(s) for diagram number 50
FFV1_0(w[3], w[9], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[55]);
// Amplitude(s) for diagram number 51
FFV1_0(w[13], w[9], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[56]);
FFV1_1(w[2], w[12], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[20]);
// Amplitude(s) for diagram number 52
FFV1_0(w[16], w[20], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[57]);
// Amplitude(s) for diagram number 53
FFV1_0(w[16], w[2], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[58]);
// Amplitude(s) for diagram number 54
FFV1_0(w[16], w[14], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[59]);
// Amplitude(s) for diagram number 55
FFV1_0(w[3], w[20], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[60]);
// Amplitude(s) for diagram number 56
FFV1_0(w[22], w[2], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[61]);
// Amplitude(s) for diagram number 57
VVV1_0(w[12], w[18], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[62]);
// Amplitude(s) for diagram number 58
VVVV1_0(w[12], w[1], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[63]);
VVVV3_0(w[12], w[1], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[64]);
VVVV4_0(w[12], w[1], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[65]);
VVV1P0_1(w[12], w[1], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[21]);
// Amplitude(s) for diagram number 59
VVV1_0(w[7], w[5], w[21], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[66]);
// Amplitude(s) for diagram number 60
VVV1_0(w[1], w[7], w[23], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[67]);
// Amplitude(s) for diagram number 61
FFV1_0(w[3], w[14], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[68]);
// Amplitude(s) for diagram number 62
FFV1_0(w[22], w[14], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[69]);
// Amplitude(s) for diagram number 63
FFV1_0(w[13], w[2], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[70]);
// Amplitude(s) for diagram number 64
FFV1_0(w[13], w[20], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[71]);
VVV1P0_1(w[0], w[5], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[20]);
FFV1_2(w[3], w[20], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[21]);
// Amplitude(s) for diagram number 65
FFV1_0(w[21], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[72]);
VVV1P0_1(w[20], w[4], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[22]);
// Amplitude(s) for diagram number 66
FFV1_0(w[3], w[9], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[73]);
// Amplitude(s) for diagram number 67
FFV1_0(w[15], w[9], w[20], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[74]);
FFV1_1(w[2], w[20], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[23]);
// Amplitude(s) for diagram number 68
FFV1_0(w[16], w[23], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[75]);
// Amplitude(s) for diagram number 69
FFV1_0(w[16], w[2], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[76]);
// Amplitude(s) for diagram number 70
FFV1_0(w[16], w[11], w[20], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[77]);
// Amplitude(s) for diagram number 71
FFV1_0(w[3], w[23], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[78]);
// Amplitude(s) for diagram number 72
FFV1_0(w[21], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[79]);
// Amplitude(s) for diagram number 73
VVV1_0(w[20], w[6], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[80]);
// Amplitude(s) for diagram number 74
VVVV1_0(w[20], w[1], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[81]);
VVVV3_0(w[20], w[1], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[82]);
VVVV4_0(w[20], w[1], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[83]);
VVV1P0_1(w[20], w[1], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[12]);
// Amplitude(s) for diagram number 75
VVV1_0(w[7], w[4], w[12], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[84]);
// Amplitude(s) for diagram number 76
VVV1_0(w[1], w[7], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[85]);
// Amplitude(s) for diagram number 77
FFV1_0(w[3], w[11], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[86]);
// Amplitude(s) for diagram number 78
FFV1_0(w[21], w[11], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[87]);
// Amplitude(s) for diagram number 79
FFV1_0(w[15], w[2], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[88]);
// Amplitude(s) for diagram number 80
FFV1_0(w[15], w[23], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[89]);
FFV1_1(w[9], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[23]);
// Amplitude(s) for diagram number 81
FFV1_0(w[15], w[23], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[90]);
FFV1_2(w[15], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[12]);
// Amplitude(s) for diagram number 82
FFV1_0(w[12], w[9], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[91]);
// Amplitude(s) for diagram number 83
FFV1_0(w[13], w[23], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[92]);
FFV1_2(w[13], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[21]);
// Amplitude(s) for diagram number 84
FFV1_0(w[21], w[9], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[93]);
// Amplitude(s) for diagram number 85
FFV1_0(w[3], w[23], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[94]);
VVV1P0_1(w[0], w[10], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[23]);
// Amplitude(s) for diagram number 86
FFV1_0(w[3], w[9], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[95]);
FFV1_2(w[16], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[22]);
// Amplitude(s) for diagram number 87
FFV1_0(w[22], w[11], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[96]);
FFV1_1(w[11], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[20]);
// Amplitude(s) for diagram number 88
FFV1_0(w[16], w[20], w[5], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[97]);
// Amplitude(s) for diagram number 89
FFV1_0(w[22], w[14], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[98]);
FFV1_1(w[14], w[0], thrust::complex<double> (cIPC[2], cIPC[3]), cIPD[0],
cIPD[1], w[24]);
// Amplitude(s) for diagram number 90
FFV1_0(w[16], w[24], w[4], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[99]);
// Amplitude(s) for diagram number 91
FFV1_0(w[22], w[2], w[10], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[100]);
// Amplitude(s) for diagram number 92
FFV1_0(w[16], w[2], w[23], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[101]);
// Amplitude(s) for diagram number 93
VVVV1_0(w[0], w[6], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[102]);
VVVV3_0(w[0], w[6], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[103]);
VVVV4_0(w[0], w[6], w[7], w[5], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[104]);
VVV1P0_1(w[0], w[6], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[22]);
// Amplitude(s) for diagram number 94
VVV1_0(w[7], w[5], w[22], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[105]);
VVV1P0_1(w[0], w[7], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[25]);
// Amplitude(s) for diagram number 95
VVV1_0(w[6], w[5], w[25], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[106]);
// Amplitude(s) for diagram number 96
FFV1_0(w[3], w[14], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[107]);
// Amplitude(s) for diagram number 97
FFV1_0(w[3], w[24], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[108]);
// Amplitude(s) for diagram number 98
FFV1_0(w[13], w[2], w[22], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[109]);
// Amplitude(s) for diagram number 99
FFV1_0(w[21], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[110]);
// Amplitude(s) for diagram number 100
VVVV1_0(w[0], w[18], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[111]);
VVVV3_0(w[0], w[18], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[112]);
VVVV4_0(w[0], w[18], w[7], w[4], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[113]);
VVV1P0_1(w[0], w[18], thrust::complex<double> (cIPC[0], cIPC[1]), 0., 0.,
w[6]);
// Amplitude(s) for diagram number 101
VVV1_0(w[7], w[4], w[6], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[114]);
// Amplitude(s) for diagram number 102
VVV1_0(w[18], w[4], w[25], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[115]);
// Amplitude(s) for diagram number 103
FFV1_0(w[3], w[11], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[116]);
// Amplitude(s) for diagram number 104
FFV1_0(w[3], w[20], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[117]);
// Amplitude(s) for diagram number 105
FFV1_0(w[15], w[2], w[6], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[118]);
// Amplitude(s) for diagram number 106
FFV1_0(w[12], w[2], w[18], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[119]);
// Amplitude(s) for diagram number 107
VVVV1_0(w[0], w[1], w[7], w[10], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[120]);
VVVV3_0(w[0], w[1], w[7], w[10], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[121]);
VVVV4_0(w[0], w[1], w[7], w[10], thrust::complex<double> (cIPC[4], cIPC[5]),
&amp[122]);
// Amplitude(s) for diagram number 108
VVV1_0(w[1], w[10], w[25], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[123]);
// Amplitude(s) for diagram number 109
VVV1_0(w[1], w[7], w[23], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[124]);
// Amplitude(s) for diagram number 110
FFV1_0(w[13], w[20], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[125]);
// Amplitude(s) for diagram number 111
FFV1_0(w[21], w[11], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[126]);
// Amplitude(s) for diagram number 112
FFV1_0(w[15], w[24], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[127]);
// Amplitude(s) for diagram number 113
FFV1_0(w[12], w[14], w[1], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[128]);
VVVV1P0_1(w[0], w[1], w[4], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[12]);
VVVV3P0_1(w[0], w[1], w[4], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[24]);
VVVV4P0_1(w[0], w[1], w[4], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[21]);
// Amplitude(s) for diagram number 114
VVV1_0(w[12], w[7], w[5], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[129]);
VVV1_0(w[24], w[7], w[5], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[130]);
VVV1_0(w[21], w[7], w[5], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[131]);
// Amplitude(s) for diagram number 115
FFV1_0(w[3], w[14], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[132]);
FFV1_0(w[3], w[14], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[133]);
FFV1_0(w[3], w[14], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[134]);
// Amplitude(s) for diagram number 116
FFV1_0(w[13], w[2], w[12], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[135]);
FFV1_0(w[13], w[2], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[136]);
FFV1_0(w[13], w[2], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[137]);
VVVV1P0_1(w[0], w[1], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[21]);
VVVV3P0_1(w[0], w[1], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[13]);
VVVV4P0_1(w[0], w[1], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[24]);
// Amplitude(s) for diagram number 117
VVV1_0(w[21], w[7], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[138]);
VVV1_0(w[13], w[7], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[139]);
VVV1_0(w[24], w[7], w[4], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[140]);
// Amplitude(s) for diagram number 118
FFV1_0(w[3], w[11], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[141]);
FFV1_0(w[3], w[11], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[142]);
FFV1_0(w[3], w[11], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[143]);
// Amplitude(s) for diagram number 119
FFV1_0(w[15], w[2], w[21], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[144]);
FFV1_0(w[15], w[2], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[145]);
FFV1_0(w[15], w[2], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[146]);
VVVV1P0_1(w[0], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[24]);
VVVV3P0_1(w[0], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[15]);
VVVV4P0_1(w[0], w[4], w[5], thrust::complex<double> (cIPC[4], cIPC[5]), 0.,
0., w[13]);
// Amplitude(s) for diagram number 120
FFV1_0(w[3], w[9], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[147]);
FFV1_0(w[3], w[9], w[15], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[148]);
FFV1_0(w[3], w[9], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[149]);
// Amplitude(s) for diagram number 121
FFV1_0(w[16], w[2], w[24], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[150]);
FFV1_0(w[16], w[2], w[15], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[151]);
FFV1_0(w[16], w[2], w[13], thrust::complex<double> (cIPC[2], cIPC[3]),
&amp[152]);
// Amplitude(s) for diagram number 122
VVV1_0(w[24], w[1], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[153]);
VVV1_0(w[15], w[1], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[154]);
VVV1_0(w[13], w[1], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[155]);
// Amplitude(s) for diagram number 123
VVV1_0(w[0], w[17], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[156]);
VVV1_0(w[0], w[19], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[157]);
VVV1_0(w[0], w[8], w[7], thrust::complex<double> (cIPC[0], cIPC[1]),
&amp[158]);
// double CPPProcess::matrix_1_gg_ttxgg() {
int i, j;
// Local variables
// const int ngraphs = 2;
const int ncolor = 24;
thrust::complex<double> ztemp;
thrust::complex<double> jamp[ncolor];
// The color matrix;
static const double denom[ncolor] = {54, 54, 54, 54, 54, 54, 54, 54, 54, 54,
54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54, 54};
static const double cf[ncolor][ncolor] = {{512, -64, -64, 8, 8, 80, -64, 8,
8, -1, -1, -10, 8, -1, 80, -10, 71, 62, -1, -10, -10, 62, 62, -28}, {-64,
512, 8, 80, -64, 8, 8, -64, -1, -10, 8, -1, -1, -10, -10, 62, 62, -28, 8,
-1, 80, -10, 71, 62}, {-64, 8, 512, -64, 80, 8, 8, -1, 80, -10, 71, 62,
-64, 8, 8, -1, -1, -10, -10, -1, 62, -28, -10, 62}, {8, 80, -64, 512, 8,
-64, -1, -10, -10, 62, 62, -28, 8, -64, -1, -10, 8, -1, -1, 8, 71, 62,
80, -10}, {8, -64, 80, 8, 512, -64, -1, 8, 71, 62, 80, -10, -10, -1, 62,
-28, -10, 62, -64, 8, 8, -1, -1, -10}, {80, 8, 8, -64, -64, 512, -10, -1,
62, -28, -10, 62, -1, 8, 71, 62, 80, -10, 8, -64, -1, -10, 8, -1}, {-64,
8, 8, -1, -1, -10, 512, -64, -64, 8, 8, 80, 80, -10, 8, -1, 62, 71, -10,
62, -1, -10, -28, 62}, {8, -64, -1, -10, 8, -1, -64, 512, 8, 80, -64, 8,
-10, 62, -1, -10, -28, 62, 80, -10, 8, -1, 62, 71}, {8, -1, 80, -10, 71,
62, -64, 8, 512, -64, 80, 8, 8, -1, -64, 8, -10, -1, 62, -28, -10, -1,
62, -10}, {-1, -10, -10, 62, 62, -28, 8, 80, -64, 512, 8, -64, -1, -10,
8, -64, -1, 8, 71, 62, -1, 8, -10, 80}, {-1, 8, 71, 62, 80, -10, 8, -64,
80, 8, 512, -64, 62, -28, -10, -1, 62, -10, 8, -1, -64, 8, -10, -1},
{-10, -1, 62, -28, -10, 62, 80, 8, 8, -64, -64, 512, 71, 62, -1, 8, -10,
80, -1, -10, 8, -64, -1, 8}, {8, -1, -64, 8, -10, -1, 80, -10, 8, -1, 62,
71, 512, -64, -64, 8, 8, 80, 62, -10, -28, 62, -1, -10}, {-1, -10, 8,
-64, -1, 8, -10, 62, -1, -10, -28, 62, -64, 512, 8, 80, -64, 8, -10, 80,
62, 71, 8, -1}, {80, -10, 8, -1, 62, 71, 8, -1, -64, 8, -10, -1, -64, 8,
512, -64, 80, 8, -28, 62, 62, -10, -10, -1}, {-10, 62, -1, -10, -28, 62,
-1, -10, 8, -64, -1, 8, 8, 80, -64, 512, 8, -64, 62, 71, -10, 80, -1, 8},
{71, 62, -1, 8, -10, 80, 62, -28, -10, -1, 62, -10, 8, -64, 80, 8, 512,
-64, -1, 8, -10, -1, -64, 8}, {62, -28, -10, -1, 62, -10, 71, 62, -1, 8,
-10, 80, 80, 8, 8, -64, -64, 512, -10, -1, -1, 8, 8, -64}, {-1, 8, -10,
-1, -64, 8, -10, 80, 62, 71, 8, -1, 62, -10, -28, 62, -1, -10, 512, -64,
-64, 8, 8, 80}, {-10, -1, -1, 8, 8, -64, 62, -10, -28, 62, -1, -10, -10,
80, 62, 71, 8, -1, -64, 512, 8, 80, -64, 8}, {-10, 80, 62, 71, 8, -1, -1,
8, -10, -1, -64, 8, -28, 62, 62, -10, -10, -1, -64, 8, 512, -64, 80, 8},
{62, -10, -28, 62, -1, -10, -10, -1, -1, 8, 8, -64, 62, 71, -10, 80, -1,
8, 8, 80, -64, 512, 8, -64}, {62, 71, -10, 80, -1, 8, -28, 62, 62, -10,
-10, -1, -1, 8, -10, -1, -64, 8, 8, -64, 80, 8, 512, -64}, {-28, 62, 62,
-10, -10, -1, 62, 71, -10, 80, -1, 8, -10, -1, -1, 8, 8, -64, 80, 8, 8,
-64, -64, 512}};
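  // The 24 x 24 table cf[i][j], together with denom[i], is the colour matrix in
  // the colour-flow basis for this process; it is contracted with the jamp
  // colour-flow amplitudes assembled below.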
// Calculate color flows
jamp[0] = +thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[1] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[14] + amp[15] + amp[16] + amp[21] + thrust::complex<double> (0, 1)
* amp[23] - amp[29] + thrust::complex<double> (0, 1) * amp[31] + amp[32]
+ amp[33] - amp[35] + thrust::complex<double> (0, 1) * amp[102] +
thrust::complex<double> (0, 1) * amp[103] + thrust::complex<double> (0,
1) * amp[105] + thrust::complex<double> (0, 1) * amp[106] + amp[109] +
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[121] + thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[129] - thrust::complex<double> (0,
1) * amp[131] + amp[135] - amp[137] - thrust::complex<double> (0, 1) *
amp[156] + thrust::complex<double> (0, 1) * amp[158];
jamp[1] = -thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[4] -
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[12] + amp[13] - amp[16] + amp[24] + thrust::complex<double> (0, 1)
* amp[26] - amp[27] - thrust::complex<double> (0, 1) * amp[31] - amp[32]
- amp[33] - amp[34] + thrust::complex<double> (0, 1) * amp[111] +
thrust::complex<double> (0, 1) * amp[112] + thrust::complex<double> (0,
1) * amp[114] + thrust::complex<double> (0, 1) * amp[115] + amp[118] -
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[121] - thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[138] - thrust::complex<double> (0,
1) * amp[140] + amp[144] - amp[146] + thrust::complex<double> (0, 1) *
amp[156] + thrust::complex<double> (0, 1) * amp[157];
jamp[2] = -amp[21] - thrust::complex<double> (0, 1) * amp[23] - amp[24] +
thrust::complex<double> (0, 1) * amp[25] - amp[30] + amp[34] + amp[35] +
amp[60] - thrust::complex<double> (0, 1) * amp[62] +
thrust::complex<double> (0, 1) * amp[63] + thrust::complex<double> (0, 1)
* amp[64] + thrust::complex<double> (0, 1) * amp[66] + amp[70] +
thrust::complex<double> (0, 1) * amp[71] - thrust::complex<double> (0, 1)
* amp[102] - thrust::complex<double> (0, 1) * amp[103] -
thrust::complex<double> (0, 1) * amp[105] - thrust::complex<double> (0,
1) * amp[106] - amp[109] - thrust::complex<double> (0, 1) * amp[112] -
thrust::complex<double> (0, 1) * amp[113] - thrust::complex<double> (0,
1) * amp[115] - thrust::complex<double> (0, 1) * amp[129] -
thrust::complex<double> (0, 1) * amp[130] - amp[135] - amp[136] -
thrust::complex<double> (0, 1) * amp[157] - thrust::complex<double> (0,
1) * amp[158];
jamp[3] = -amp[18] + thrust::complex<double> (0, 1) * amp[20] + amp[24] -
thrust::complex<double> (0, 1) * amp[25] - amp[32] - amp[33] - amp[34] +
thrust::complex<double> (0, 1) * amp[57] + amp[58] - amp[60] +
thrust::complex<double> (0, 1) * amp[62] - thrust::complex<double> (0, 1)
* amp[64] - thrust::complex<double> (0, 1) * amp[65] -
thrust::complex<double> (0, 1) * amp[67] + amp[101] +
thrust::complex<double> (0, 1) * amp[112] + thrust::complex<double> (0,
1) * amp[113] + thrust::complex<double> (0, 1) * amp[115] -
thrust::complex<double> (0, 1) * amp[121] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[124] + amp[150] - amp[152] -
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[155] + thrust::complex<double> (0, 1) * amp[156] +
thrust::complex<double> (0, 1) * amp[157];
jamp[4] = -amp[21] + thrust::complex<double> (0, 1) * amp[22] - amp[24] -
thrust::complex<double> (0, 1) * amp[26] - amp[28] + amp[34] + amp[35] +
amp[78] - thrust::complex<double> (0, 1) * amp[80] +
thrust::complex<double> (0, 1) * amp[81] + thrust::complex<double> (0, 1)
* amp[82] + thrust::complex<double> (0, 1) * amp[84] + amp[88] +
thrust::complex<double> (0, 1) * amp[89] - thrust::complex<double> (0, 1)
* amp[103] - thrust::complex<double> (0, 1) * amp[104] -
thrust::complex<double> (0, 1) * amp[106] - thrust::complex<double> (0,
1) * amp[111] - thrust::complex<double> (0, 1) * amp[112] -
thrust::complex<double> (0, 1) * amp[114] - thrust::complex<double> (0,
1) * amp[115] - amp[118] - thrust::complex<double> (0, 1) * amp[138] -
thrust::complex<double> (0, 1) * amp[139] - amp[144] - amp[145] -
thrust::complex<double> (0, 1) * amp[157] - thrust::complex<double> (0,
1) * amp[158];
jamp[5] = -amp[19] - thrust::complex<double> (0, 1) * amp[20] + amp[21] -
thrust::complex<double> (0, 1) * amp[22] + amp[32] + amp[33] - amp[35] +
thrust::complex<double> (0, 1) * amp[75] + amp[76] - amp[78] +
thrust::complex<double> (0, 1) * amp[80] - thrust::complex<double> (0, 1)
* amp[82] - thrust::complex<double> (0, 1) * amp[83] -
thrust::complex<double> (0, 1) * amp[85] - amp[101] +
thrust::complex<double> (0, 1) * amp[103] + thrust::complex<double> (0,
1) * amp[104] + thrust::complex<double> (0, 1) * amp[106] +
thrust::complex<double> (0, 1) * amp[121] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[124] - amp[150] - amp[151] +
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[154] - thrust::complex<double> (0, 1) * amp[156] +
thrust::complex<double> (0, 1) * amp[158];
jamp[6] = -thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[1] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[14] - amp[15] - amp[16] + amp[55] + thrust::complex<double> (0, 1)
* amp[56] - thrust::complex<double> (0, 1) * amp[63] +
thrust::complex<double> (0, 1) * amp[65] - thrust::complex<double> (0, 1)
* amp[66] + thrust::complex<double> (0, 1) * amp[67] - amp[70] - amp[92]
+ thrust::complex<double> (0, 1) * amp[94] + amp[95] -
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[124] +
thrust::complex<double> (0, 1) * amp[130] + thrust::complex<double> (0,
1) * amp[131] + amp[136] + amp[137] + amp[147] - amp[149] +
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[155];
jamp[7] = +thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[4] +
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[12] - amp[13] + amp[16] + amp[73] + thrust::complex<double> (0, 1)
* amp[74] - thrust::complex<double> (0, 1) * amp[81] +
thrust::complex<double> (0, 1) * amp[83] - thrust::complex<double> (0, 1)
* amp[84] + thrust::complex<double> (0, 1) * amp[85] - amp[88] - amp[90]
- thrust::complex<double> (0, 1) * amp[94] - amp[95] +
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[124] +
thrust::complex<double> (0, 1) * amp[139] + thrust::complex<double> (0,
1) * amp[140] + amp[145] + amp[146] - amp[147] - amp[148] -
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[154];
jamp[8] = -amp[55] - thrust::complex<double> (0, 1) * amp[56] +
thrust::complex<double> (0, 1) * amp[63] - thrust::complex<double> (0, 1)
* amp[65] + thrust::complex<double> (0, 1) * amp[66] -
thrust::complex<double> (0, 1) * amp[67] + amp[70] +
thrust::complex<double> (0, 1) * amp[72] - amp[73] + amp[79] +
thrust::complex<double> (0, 1) * amp[80] - thrust::complex<double> (0, 1)
* amp[82] - thrust::complex<double> (0, 1) * amp[83] -
thrust::complex<double> (0, 1) * amp[85] - amp[93] -
thrust::complex<double> (0, 1) * amp[102] + thrust::complex<double> (0,
1) * amp[104] - thrust::complex<double> (0, 1) * amp[105] - amp[109] +
thrust::complex<double> (0, 1) * amp[110] - thrust::complex<double> (0,
1) * amp[129] - thrust::complex<double> (0, 1) * amp[130] - amp[135] -
amp[136] + amp[148] + amp[149] + thrust::complex<double> (0, 1) *
amp[154] + thrust::complex<double> (0, 1) * amp[155];
jamp[9] = -amp[37] + thrust::complex<double> (0, 1) * amp[38] + amp[39] +
thrust::complex<double> (0, 1) * amp[40] + amp[50] + amp[51] - amp[53] -
thrust::complex<double> (0, 1) * amp[72] + amp[73] - amp[79] -
thrust::complex<double> (0, 1) * amp[80] + thrust::complex<double> (0, 1)
* amp[82] + thrust::complex<double> (0, 1) * amp[83] +
thrust::complex<double> (0, 1) * amp[85] - amp[95] -
thrust::complex<double> (0, 1) * amp[103] - thrust::complex<double> (0,
1) * amp[104] - thrust::complex<double> (0, 1) * amp[106] -
thrust::complex<double> (0, 1) * amp[121] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[124] - amp[147] - amp[148] -
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[154] + thrust::complex<double> (0, 1) * amp[156] -
thrust::complex<double> (0, 1) * amp[158];
jamp[10] = +thrust::complex<double> (0, 1) * amp[54] - amp[55] + amp[61] +
thrust::complex<double> (0, 1) * amp[62] - thrust::complex<double> (0, 1)
* amp[64] - thrust::complex<double> (0, 1) * amp[65] -
thrust::complex<double> (0, 1) * amp[67] - amp[73] -
thrust::complex<double> (0, 1) * amp[74] + thrust::complex<double> (0, 1)
* amp[81] - thrust::complex<double> (0, 1) * amp[83] +
thrust::complex<double> (0, 1) * amp[84] - thrust::complex<double> (0, 1)
* amp[85] + amp[88] - amp[91] - thrust::complex<double> (0, 1) * amp[111]
+ thrust::complex<double> (0, 1) * amp[113] - thrust::complex<double> (0,
1) * amp[114] - amp[118] + thrust::complex<double> (0, 1) * amp[119] -
thrust::complex<double> (0, 1) * amp[138] - thrust::complex<double> (0,
1) * amp[139] - amp[144] - amp[145] + amp[148] + amp[149] +
thrust::complex<double> (0, 1) * amp[154] + thrust::complex<double> (0,
1) * amp[155];
jamp[11] = -amp[36] - thrust::complex<double> (0, 1) * amp[38] + amp[42] +
thrust::complex<double> (0, 1) * amp[43] - amp[50] - amp[51] - amp[52] -
thrust::complex<double> (0, 1) * amp[54] + amp[55] - amp[61] -
thrust::complex<double> (0, 1) * amp[62] + thrust::complex<double> (0, 1)
* amp[64] + thrust::complex<double> (0, 1) * amp[65] +
thrust::complex<double> (0, 1) * amp[67] + amp[95] -
thrust::complex<double> (0, 1) * amp[112] - thrust::complex<double> (0,
1) * amp[113] - thrust::complex<double> (0, 1) * amp[115] +
thrust::complex<double> (0, 1) * amp[121] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[123] +
thrust::complex<double> (0, 1) * amp[124] + amp[147] - amp[149] +
thrust::complex<double> (0, 1) * amp[153] - thrust::complex<double> (0,
1) * amp[155] - thrust::complex<double> (0, 1) * amp[156] -
thrust::complex<double> (0, 1) * amp[157];
jamp[12] = -thrust::complex<double> (0, 1) * amp[1] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[4] + amp[7] +
thrust::complex<double> (0, 1) * amp[8] - amp[15] - amp[60] +
thrust::complex<double> (0, 1) * amp[62] - thrust::complex<double> (0, 1)
* amp[63] - thrust::complex<double> (0, 1) * amp[64] -
thrust::complex<double> (0, 1) * amp[66] - amp[70] -
thrust::complex<double> (0, 1) * amp[71] - thrust::complex<double> (0, 1)
* amp[111] + thrust::complex<double> (0, 1) * amp[113] -
thrust::complex<double> (0, 1) * amp[114] + amp[116] +
thrust::complex<double> (0, 1) * amp[117] - amp[125] +
thrust::complex<double> (0, 1) * amp[130] + thrust::complex<double> (0,
1) * amp[131] + amp[136] + amp[137] - thrust::complex<double> (0, 1) *
amp[138] + thrust::complex<double> (0, 1) * amp[140] + amp[141] -
amp[143];
jamp[13] = -thrust::complex<double> (0, 1) * amp[57] - amp[58] + amp[60] -
thrust::complex<double> (0, 1) * amp[62] + thrust::complex<double> (0, 1)
* amp[64] + thrust::complex<double> (0, 1) * amp[65] +
thrust::complex<double> (0, 1) * amp[67] - amp[76] +
thrust::complex<double> (0, 1) * amp[77] - thrust::complex<double> (0, 1)
* amp[81] + thrust::complex<double> (0, 1) * amp[83] -
thrust::complex<double> (0, 1) * amp[84] + thrust::complex<double> (0, 1)
* amp[85] + amp[86] - amp[97] + thrust::complex<double> (0, 1) * amp[111]
- thrust::complex<double> (0, 1) * amp[113] + thrust::complex<double> (0,
1) * amp[114] - amp[116] - thrust::complex<double> (0, 1) * amp[117] +
thrust::complex<double> (0, 1) * amp[138] + thrust::complex<double> (0,
1) * amp[139] - amp[141] - amp[142] + amp[151] + amp[152] -
thrust::complex<double> (0, 1) * amp[154] - thrust::complex<double> (0,
1) * amp[155];
jamp[14] = +thrust::complex<double> (0, 1) * amp[1] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[4] - amp[7] -
thrust::complex<double> (0, 1) * amp[8] + amp[15] - amp[79] -
thrust::complex<double> (0, 1) * amp[80] + thrust::complex<double> (0, 1)
* amp[81] + thrust::complex<double> (0, 1) * amp[82] +
thrust::complex<double> (0, 1) * amp[84] - amp[86] +
thrust::complex<double> (0, 1) * amp[87] + thrust::complex<double> (0, 1)
* amp[102] - thrust::complex<double> (0, 1) * amp[104] +
thrust::complex<double> (0, 1) * amp[105] + amp[109] -
thrust::complex<double> (0, 1) * amp[110] - amp[126] +
thrust::complex<double> (0, 1) * amp[129] - thrust::complex<double> (0,
1) * amp[131] + amp[135] - amp[137] - thrust::complex<double> (0, 1) *
amp[139] - thrust::complex<double> (0, 1) * amp[140] + amp[142] +
amp[143];
jamp[15] = -amp[39] - thrust::complex<double> (0, 1) * amp[40] - amp[42] +
thrust::complex<double> (0, 1) * amp[44] - amp[46] + amp[52] + amp[53] +
amp[79] + thrust::complex<double> (0, 1) * amp[80] -
thrust::complex<double> (0, 1) * amp[81] - thrust::complex<double> (0, 1)
* amp[82] - thrust::complex<double> (0, 1) * amp[84] + amp[86] -
thrust::complex<double> (0, 1) * amp[87] + thrust::complex<double> (0, 1)
* amp[103] + thrust::complex<double> (0, 1) * amp[104] +
thrust::complex<double> (0, 1) * amp[106] + thrust::complex<double> (0,
1) * amp[111] + thrust::complex<double> (0, 1) * amp[112] +
thrust::complex<double> (0, 1) * amp[114] + thrust::complex<double> (0,
1) * amp[115] - amp[116] + thrust::complex<double> (0, 1) * amp[138] +
thrust::complex<double> (0, 1) * amp[139] - amp[141] - amp[142] +
thrust::complex<double> (0, 1) * amp[157] + thrust::complex<double> (0,
1) * amp[158];
jamp[16] = -thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[4] -
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[6] - amp[7] + amp[17] + amp[76] - thrust::complex<double> (0, 1) *
amp[77] + thrust::complex<double> (0, 1) * amp[81] -
thrust::complex<double> (0, 1) * amp[83] + thrust::complex<double> (0, 1)
* amp[84] - thrust::complex<double> (0, 1) * amp[85] - amp[86] - amp[96]
+ thrust::complex<double> (0, 1) * amp[100] - amp[101] -
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[122] + thrust::complex<double> (0, 1) * amp[124] -
thrust::complex<double> (0, 1) * amp[139] - thrust::complex<double> (0,
1) * amp[140] + amp[142] + amp[143] - amp[150] - amp[151] +
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[154];
jamp[17] = +thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[4] +
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[6] + amp[7] - amp[17] + amp[42] - thrust::complex<double> (0, 1) *
amp[44] - amp[45] + thrust::complex<double> (0, 1) * amp[49] - amp[50] -
amp[51] - amp[52] - thrust::complex<double> (0, 1) * amp[111] -
thrust::complex<double> (0, 1) * amp[112] - thrust::complex<double> (0,
1) * amp[114] - thrust::complex<double> (0, 1) * amp[115] + amp[116] +
thrust::complex<double> (0, 1) * amp[120] + thrust::complex<double> (0,
1) * amp[121] + thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[138] + thrust::complex<double> (0,
1) * amp[140] + amp[141] - amp[143] - thrust::complex<double> (0, 1) *
amp[156] - thrust::complex<double> (0, 1) * amp[157];
jamp[18] = -thrust::complex<double> (0, 1) * amp[1] - thrust::complex<double>
(0, 1) * amp[2] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[4] + amp[10] +
thrust::complex<double> (0, 1) * amp[11] - amp[13] - amp[78] +
thrust::complex<double> (0, 1) * amp[80] - thrust::complex<double> (0, 1)
* amp[81] - thrust::complex<double> (0, 1) * amp[82] -
thrust::complex<double> (0, 1) * amp[84] - amp[88] -
thrust::complex<double> (0, 1) * amp[89] - thrust::complex<double> (0, 1)
* amp[102] + thrust::complex<double> (0, 1) * amp[104] -
thrust::complex<double> (0, 1) * amp[105] + amp[107] +
thrust::complex<double> (0, 1) * amp[108] - amp[127] -
thrust::complex<double> (0, 1) * amp[129] + thrust::complex<double> (0,
1) * amp[131] + amp[132] - amp[134] + thrust::complex<double> (0, 1) *
amp[139] + thrust::complex<double> (0, 1) * amp[140] + amp[145] +
amp[146];
jamp[19] = -amp[58] + thrust::complex<double> (0, 1) * amp[59] -
thrust::complex<double> (0, 1) * amp[63] + thrust::complex<double> (0, 1)
* amp[65] - thrust::complex<double> (0, 1) * amp[66] +
thrust::complex<double> (0, 1) * amp[67] + amp[68] -
thrust::complex<double> (0, 1) * amp[75] - amp[76] + amp[78] -
thrust::complex<double> (0, 1) * amp[80] + thrust::complex<double> (0, 1)
* amp[82] + thrust::complex<double> (0, 1) * amp[83] +
thrust::complex<double> (0, 1) * amp[85] - amp[99] +
thrust::complex<double> (0, 1) * amp[102] - thrust::complex<double> (0,
1) * amp[104] + thrust::complex<double> (0, 1) * amp[105] - amp[107] -
thrust::complex<double> (0, 1) * amp[108] + thrust::complex<double> (0,
1) * amp[129] + thrust::complex<double> (0, 1) * amp[130] - amp[132] -
amp[133] + amp[151] + amp[152] - thrust::complex<double> (0, 1) *
amp[154] - thrust::complex<double> (0, 1) * amp[155];
jamp[20] = +thrust::complex<double> (0, 1) * amp[1] + thrust::complex<double>
(0, 1) * amp[2] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[4] - amp[10] -
thrust::complex<double> (0, 1) * amp[11] + amp[13] - amp[61] -
thrust::complex<double> (0, 1) * amp[62] + thrust::complex<double> (0, 1)
* amp[63] + thrust::complex<double> (0, 1) * amp[64] +
thrust::complex<double> (0, 1) * amp[66] - amp[68] +
thrust::complex<double> (0, 1) * amp[69] + thrust::complex<double> (0, 1)
* amp[111] - thrust::complex<double> (0, 1) * amp[113] +
thrust::complex<double> (0, 1) * amp[114] + amp[118] -
thrust::complex<double> (0, 1) * amp[119] - amp[128] -
thrust::complex<double> (0, 1) * amp[130] - thrust::complex<double> (0,
1) * amp[131] + amp[133] + amp[134] + thrust::complex<double> (0, 1) *
amp[138] - thrust::complex<double> (0, 1) * amp[140] + amp[144] -
amp[146];
jamp[21] = -amp[39] + thrust::complex<double> (0, 1) * amp[41] - amp[42] -
thrust::complex<double> (0, 1) * amp[43] - amp[48] + amp[52] + amp[53] +
amp[61] + thrust::complex<double> (0, 1) * amp[62] -
thrust::complex<double> (0, 1) * amp[63] - thrust::complex<double> (0, 1)
* amp[64] - thrust::complex<double> (0, 1) * amp[66] + amp[68] -
thrust::complex<double> (0, 1) * amp[69] + thrust::complex<double> (0, 1)
* amp[102] + thrust::complex<double> (0, 1) * amp[103] +
thrust::complex<double> (0, 1) * amp[105] + thrust::complex<double> (0,
1) * amp[106] - amp[107] + thrust::complex<double> (0, 1) * amp[112] +
thrust::complex<double> (0, 1) * amp[113] + thrust::complex<double> (0,
1) * amp[115] + thrust::complex<double> (0, 1) * amp[129] +
thrust::complex<double> (0, 1) * amp[130] - amp[132] - amp[133] +
thrust::complex<double> (0, 1) * amp[157] + thrust::complex<double> (0,
1) * amp[158];
jamp[22] = +thrust::complex<double> (0, 1) * amp[0] + thrust::complex<double>
(0, 1) * amp[1] + thrust::complex<double> (0, 1) * amp[3] +
thrust::complex<double> (0, 1) * amp[5] + thrust::complex<double> (0, 1)
* amp[9] - amp[10] - amp[17] + amp[58] - thrust::complex<double> (0, 1) *
amp[59] + thrust::complex<double> (0, 1) * amp[63] -
thrust::complex<double> (0, 1) * amp[65] + thrust::complex<double> (0, 1)
* amp[66] - thrust::complex<double> (0, 1) * amp[67] - amp[68] - amp[98]
- thrust::complex<double> (0, 1) * amp[100] + amp[101] +
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[122] - thrust::complex<double> (0, 1) * amp[124] -
thrust::complex<double> (0, 1) * amp[130] - thrust::complex<double> (0,
1) * amp[131] + amp[133] + amp[134] + amp[150] - amp[152] -
thrust::complex<double> (0, 1) * amp[153] + thrust::complex<double> (0,
1) * amp[155];
jamp[23] = -thrust::complex<double> (0, 1) * amp[0] - thrust::complex<double>
(0, 1) * amp[1] - thrust::complex<double> (0, 1) * amp[3] -
thrust::complex<double> (0, 1) * amp[5] - thrust::complex<double> (0, 1)
* amp[9] + amp[10] + amp[17] + amp[39] - thrust::complex<double> (0, 1) *
amp[41] - amp[47] - thrust::complex<double> (0, 1) * amp[49] + amp[50] +
amp[51] - amp[53] - thrust::complex<double> (0, 1) * amp[102] -
thrust::complex<double> (0, 1) * amp[103] - thrust::complex<double> (0,
1) * amp[105] - thrust::complex<double> (0, 1) * amp[106] + amp[107] -
thrust::complex<double> (0, 1) * amp[120] - thrust::complex<double> (0,
1) * amp[121] - thrust::complex<double> (0, 1) * amp[123] -
thrust::complex<double> (0, 1) * amp[129] + thrust::complex<double> (0,
1) * amp[131] + amp[132] - amp[134] + thrust::complex<double> (0, 1) *
amp[156] - thrust::complex<double> (0, 1) * amp[158];
// Sum and square the color flows to get the matrix element
for(i = 0; i < ncolor; i++ )
{
ztemp = 0.;
for(j = 0; j < ncolor; j++ )
ztemp = ztemp + cf[i][j] * jamp[j];
matrix = matrix + (ztemp * conj(jamp[i])).real()/denom[i];
}
// Store the leading color flows for choice of color
// for(i=0;i < ncolor; i++)
// jamp2[0][i] += real(jamp[i]*conj(jamp[i]));
}
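//--------------------------------------------------------------------------
// [Editorial sketch - not part of the generated MadGraph output.]
// The colour sum at the end of calculate_wavefunctions implements
//   |M|^2 += sum_i Re( conj(jamp[i]) * sum_j cf[i][j] * jamp[j] ) / denom[i].
// A self-contained restatement of just that reduction (hypothetical helper
// name; the ncolor = 24 layout of cf/denom/jamp is taken from the code above):
__device__ double color_sum_sketch(const thrust::complex<double> jamp[24],
                                   const double cf[24][24],
                                   const double denom[24])
{
  double me = 0.;
  for (int i = 0; i < 24; i++ )
  {
    thrust::complex<double> ztemp(0., 0.);
    for (int j = 0; j < 24; j++ )
      ztemp = ztemp + cf[i][j] * jamp[j];           // row i of the colour matrix times jamp
    me += (ztemp * conj(jamp[i])).real()/denom[i];  // project back onto colour flow i
  }
  return me;
}
//--------------------------------------------------------------------------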
CPPProcess::CPPProcess(int numiterations, int gpublocks, int gputhreads,
bool verbose, bool debug)
: m_numiterations(numiterations), gpu_nblocks(gpublocks),
gpu_nthreads(gputhreads), dim(gpu_nblocks * gpu_nthreads)
{
// Helicities for the process - nodim
static const int tHel[ncomb][nexternal] = {{-1, -1, -1, -1, -1, -1}, {-1, -1,
-1, -1, -1, 1}, {-1, -1, -1, -1, 1, -1}, {-1, -1, -1, -1, 1, 1}, {-1, -1,
-1, 1, -1, -1}, {-1, -1, -1, 1, -1, 1}, {-1, -1, -1, 1, 1, -1}, {-1, -1,
-1, 1, 1, 1}, {-1, -1, 1, -1, -1, -1}, {-1, -1, 1, -1, -1, 1}, {-1, -1,
1, -1, 1, -1}, {-1, -1, 1, -1, 1, 1}, {-1, -1, 1, 1, -1, -1}, {-1, -1, 1,
1, -1, 1}, {-1, -1, 1, 1, 1, -1}, {-1, -1, 1, 1, 1, 1}, {-1, 1, -1, -1,
-1, -1}, {-1, 1, -1, -1, -1, 1}, {-1, 1, -1, -1, 1, -1}, {-1, 1, -1, -1,
1, 1}, {-1, 1, -1, 1, -1, -1}, {-1, 1, -1, 1, -1, 1}, {-1, 1, -1, 1, 1,
-1}, {-1, 1, -1, 1, 1, 1}, {-1, 1, 1, -1, -1, -1}, {-1, 1, 1, -1, -1, 1},
{-1, 1, 1, -1, 1, -1}, {-1, 1, 1, -1, 1, 1}, {-1, 1, 1, 1, -1, -1}, {-1,
1, 1, 1, -1, 1}, {-1, 1, 1, 1, 1, -1}, {-1, 1, 1, 1, 1, 1}, {1, -1, -1,
-1, -1, -1}, {1, -1, -1, -1, -1, 1}, {1, -1, -1, -1, 1, -1}, {1, -1, -1,
-1, 1, 1}, {1, -1, -1, 1, -1, -1}, {1, -1, -1, 1, -1, 1}, {1, -1, -1, 1,
1, -1}, {1, -1, -1, 1, 1, 1}, {1, -1, 1, -1, -1, -1}, {1, -1, 1, -1, -1,
1}, {1, -1, 1, -1, 1, -1}, {1, -1, 1, -1, 1, 1}, {1, -1, 1, 1, -1, -1},
{1, -1, 1, 1, -1, 1}, {1, -1, 1, 1, 1, -1}, {1, -1, 1, 1, 1, 1}, {1, 1,
-1, -1, -1, -1}, {1, 1, -1, -1, -1, 1}, {1, 1, -1, -1, 1, -1}, {1, 1, -1,
-1, 1, 1}, {1, 1, -1, 1, -1, -1}, {1, 1, -1, 1, -1, 1}, {1, 1, -1, 1, 1,
-1}, {1, 1, -1, 1, 1, 1}, {1, 1, 1, -1, -1, -1}, {1, 1, 1, -1, -1, 1},
{1, 1, 1, -1, 1, -1}, {1, 1, 1, -1, 1, 1}, {1, 1, 1, 1, -1, -1}, {1, 1,
1, 1, -1, 1}, {1, 1, 1, 1, 1, -1}, {1, 1, 1, 1, 1, 1}};
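  // ncomb = 64 = 2^6 helicity combinations for the 6 external legs; tHel is
  // copied once into the __constant__ array cHel read by calculate_wavefunctions.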
cudaMemcpyToSymbol(cHel, tHel, ncomb * nexternal * sizeof(int));
// perm - nodim
// static int perm[nexternal] = {0, 1, 2, 3};
}
CPPProcess::~CPPProcess() {}
const std::vector<double> &CPPProcess::getMasses() const {return mME;}
//--------------------------------------------------------------------------
// Initialize process.
void CPPProcess::initProc(string param_card_name)
{
// Instantiate the model class and set parameters that stay fixed during run
pars = Parameters_sm::getInstance();
SLHAReader slha(param_card_name);
pars->setIndependentParameters(slha);
pars->setIndependentCouplings();
pars->printIndependentParameters();
pars->printIndependentCouplings();
pars->setDependentParameters();
pars->setDependentCouplings();
// Set external particle masses for this matrix element
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
mME.push_back(pars->mdl_MT);
mME.push_back(pars->mdl_MT);
mME.push_back(pars->ZERO);
mME.push_back(pars->ZERO);
static thrust::complex<double> tIPC[3] = {pars->GC_10, pars->GC_11,
pars->GC_12};
static double tIPD[2] = {pars->mdl_MT, pars->mdl_WT};
cudaMemcpyToSymbol(cIPC, tIPC, 3 * sizeof(thrust::complex<double> ));
cudaMemcpyToSymbol(cIPD, tIPD, 2 * sizeof(double));
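  // Note: cIPC is declared above as __constant__ double cIPC[6], so copying the
  // three thrust::complex<double> couplings (GC_10, GC_11, GC_12) stores their
  // interleaved (re, im) pairs; calculate_wavefunctions rebuilds them as
  // thrust::complex<double> (cIPC[2k], cIPC[2k+1]).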
}
//--------------------------------------------------------------------------
// Evaluate |M|^2, part independent of incoming flavour.
__global__ void sigmaKin(double * allmomenta, double * output)
{
// Set the parameters which change event by event
// Need to discuss this with Stefan
// pars->setDependentParameters();
// pars->setDependentCouplings();
// Reset color flows
// for (int xx = 0; xx < 384; ++xx) {
const int nprocesses = 1;
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// char *devPtr = (char *)tp.ptr;
// size_t dpt = tp.pitch;
// size_t slicePitch = dpt * 6;
// char *dps = devPtr + dim * slicePitch;
double matrix_element[nprocesses];
thrust::complex<double> amp[159];
double local_m[6][3];
int DIM = blockDim.x * gridDim.x;
// for (int i=0; i<20;i++){
// printf(" %f ", allmomenta[i]);
// }
// printf("\n");
// printf("DIM is %i/%i\n", tid, DIM);
for (int i = 0; i < 6; i++ )
{
for (int j = 0; j < 3; j++ )
{
local_m[i][j] = allmomenta[i * 3 * DIM + j * DIM + tid];
// printf(" %f ", local_m[i][j]);
}
// printf("\n");
}
// Local variables and constants
const int ncomb = 64;
// static bool goodhel[ncomb] = {ncomb * false};
// static int ntry = 0, sum_hel = 0, ngood = 0;
// static int igood[ncomb];
// static int jhel;
// std::complex<double> **wfs;
// double t[1];
// Helicities for the process
// static const int helicities[ncomb][nexternal] =
// {{-1,-1,-1,-1,-1,-1},{-1,-1,-1,-1,-1,1},{-1,-1,-1,-1,1,-1},{-1,-1,-1,-1,1,1
// },{-1,-1,-1,1,-1,-1},{-1,-1,-1,1,-1,1},{-1,-1,-1,1,1,-1},{-1,-1,-1,1,1,1},{
// -1,-1,1,-1,-1,-1},{-1,-1,1,-1,-1,1},{-1,-1,1,-1,1,-1},{-1,-1,1,-1,1,1},{-1,
// -1,1,1,-1,-1},{-1,-1,1,1,-1,1},{-1,-1,1,1,1,-1},{-1,-1,1,1,1,1},{-1,1,-1,-1
// ,-1,-1},{-1,1,-1,-1,-1,1},{-1,1,-1,-1,1,-1},{-1,1,-1,-1,1,1},{-1,1,-1,1,-1,
// -1},{-1,1,-1,1,-1,1},{-1,1,-1,1,1,-1},{-1,1,-1,1,1,1},{-1,1,1,-1,-1,-1},{-1
// ,1,1,-1,-1,1},{-1,1,1,-1,1,-1},{-1,1,1,-1,1,1},{-1,1,1,1,-1,-1},{-1,1,1,1,-
// 1,1},{-1,1,1,1,1,-1},{-1,1,1,1,1,1},{1,-1,-1,-1,-1,-1},{1,-1,-1,-1,-1,1},{1
// ,-1,-1,-1,1,-1},{1,-1,-1,-1,1,1},{1,-1,-1,1,-1,-1},{1,-1,-1,1,-1,1},{1,-1,-
// 1,1,1,-1},{1,-1,-1,1,1,1},{1,-1,1,-1,-1,-1},{1,-1,1,-1,-1,1},{1,-1,1,-1,1,-
// 1},{1,-1,1,-1,1,1},{1,-1,1,1,-1,-1},{1,-1,1,1,-1,1},{1,-1,1,1,1,-1},{1,-1,1
// ,1,1,1},{1,1,-1,-1,-1,-1},{1,1,-1,-1,-1,1},{1,1,-1,-1,1,-1},{1,1,-1,-1,1,1}
// ,{1,1,-1,1,-1,-1},{1,1,-1,1,-1,1},{1,1,-1,1,1,-1},{1,1,-1,1,1,1},{1,1,1,-1,
// -1,-1},{1,1,1,-1,-1,1},{1,1,1,-1,1,-1},{1,1,1,-1,1,1},{1,1,1,1,-1,-1},{1,1,
// 1,1,-1,1},{1,1,1,1,1,-1},{1,1,1,1,1,1}};
// Denominators: spins, colors and identical particles
const int denominators[1] = {512};
// Reset the matrix elements
for(int i = 0; i < nprocesses; i++ )
{
matrix_element[i] = 0.;
}
// Define permutation
// int perm[nexternal];
// for(int i = 0; i < nexternal; i++){
// perm[i]=i;
// }
for (int ihel = 0; ihel < ncomb; ihel++ )
{
calculate_wavefunctions(ihel, local_m, matrix_element[0]);
}
for (int i = 0; i < nprocesses; ++ i)
{
matrix_element[i] /= denominators[i];
}
for (int i = 0; i < nprocesses; ++ i)
{
output[i * nprocesses + tid] = matrix_element[i];
// printf("output %i %i %i %f", tid, i, i*nprocesses+tid,
// output[i*nprocesses+tid]);
}
}
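//--------------------------------------------------------------------------
// [Editorial sketch - not part of the generated file.]
// Minimal host-side driver for the sigmaKin kernel above. Every name here
// (nblocks, nthreads, hostOutput, devMomenta, devOutput) is hypothetical; the
// 6 * 3 * dim buffer size is read off the indexing used inside sigmaKin
// (allmomenta[i * 3 * DIM + j * DIM + tid]).
static void sketch_launch_sigmaKin(int nblocks, int nthreads, double * hostOutput)
{
  const int dim = nblocks * nthreads;
  double * devMomenta = 0;  // 6 particles x 3 momentum components per event (AOSOA)
  double * devOutput = 0;   // one |M|^2 value per event
  cudaMalloc((void**) &devMomenta, 6 * 3 * dim * sizeof(double));
  cudaMalloc((void**) &devOutput, dim * sizeof(double));
  // ... fill devMomenta with phase-space points (omitted) ...
  sigmaKin<<<nblocks, nthreads>>>(devMomenta, devOutput);
  cudaDeviceSynchronize();
  cudaMemcpy(hostOutput, devOutput, dim * sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(devMomenta);
  cudaFree(devOutput);
}
//--------------------------------------------------------------------------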
//==========================================================================
// Private class member functions
//--------------------------------------------------------------------------
|
63b46213acde23ae8c084363382a8503383c77ce.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "Fprop1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
const float *syn1 = NULL;
hipMalloc(&syn1, XSIZE*YSIZE);
float *layer1 = NULL;
hipMalloc(&layer1, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
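// Round the matrix dimensions up to the next multiple of the block size so the
// grid computed below covers every element.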
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( Fprop1), dim3(gridBlock),dim3(threadBlock), 0, 0, in,syn1,layer1);
hipDeviceSynchronize();
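// A few untimed warm-up launches so that one-time initialisation cost does not skew
// the measurement below.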
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( Fprop1), dim3(gridBlock),dim3(threadBlock), 0, 0, in,syn1,layer1);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( Fprop1), dim3(gridBlock),dim3(threadBlock), 0, 0, in,syn1,layer1);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 63b46213acde23ae8c084363382a8503383c77ce.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "Fprop1.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
const float *syn1 = NULL;
cudaMalloc(&syn1, XSIZE*YSIZE);
float *layer1 = NULL;
cudaMalloc(&layer1, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
Fprop1<<<gridBlock,threadBlock>>>(in,syn1,layer1);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
Fprop1<<<gridBlock,threadBlock>>>(in,syn1,layer1);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
Fprop1<<<gridBlock,threadBlock>>>(in,syn1,layer1);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
831c9803816a83761bb0d97617015735df9d51b2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/native/TensorTransformations.h>
#include <ATen/hip/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void kernel_pointwise_flip_apply2(
const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void flip_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t* flip_dims,
int64_t flip_dims_size,
int64_t* strides,
int64_t* strides_contiguous,
int64_t* shape,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = cuda::getApplyBlockSize();
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<scalar_t, int64_t>)
, dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
auto flip_dims_t = at::from_blob(
flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}, at::device(kCPU).dtype(kLong));
auto shape = in_tensor.sizes().vec();
auto shape_t = at::from_blob(
shape.data(), {static_cast<int64_t>(shape.size())}, at::device(kCPU).dtype(kLong));
auto strides = in_tensor.strides().vec();
auto strides_t = at::from_blob(
strides.data(), {static_cast<int64_t>(strides.size())}, at::device(kCPU).dtype(kLong));
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data_ptr<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
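  // e.g. a tensor of shape [2, 3, 4] yields contiguous strides {12, 4, 1}.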
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, in_tensor.scalar_type(), "flip_cuda", [&] {
hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
flip_dims_t.cuda().data_ptr<int64_t>(),
flip_dims_size,
strides_t.cuda().data_ptr<int64_t>(),
stride_contiguous.cuda().data_ptr<int64_t>(),
shape_t.cuda().data_ptr<int64_t>(),
total_dims);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
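// A flip on a CUDA tensor from Python (e.g. x.flip(0)) is routed to flip_cuda above
// through ATen's native-function dispatch.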
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void roll_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t roll_dim,
int64_t start,
int64_t size,
int64_t stride,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "roll_cuda", [&] {
hipLaunchKernelGGL(( roll_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
C10_HIP_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
}} // namespace at::native
| 831c9803816a83761bb0d97617015735df9d51b2.cu | #include <ATen/native/TensorTransformations.h>
#include <ATen/cuda/detail/IndexUtils.cuh>
#include <ATen/NativeFunctions.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <c10/macros/Macros.h>
#include <cstddef>
#include <vector>
namespace at {
namespace native {
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350 || defined __HIP_PLATFORM_HCC__
C10_LAUNCH_BOUNDS_2(cuda::getApplyBlockSize(), cuda::getApplyBlocksPerSM())
#endif
__global__ void kernel_pointwise_flip_apply2(
const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void flip_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t* flip_dims,
int64_t flip_dims_size,
int64_t* strides,
int64_t* strides_contiguous,
int64_t* shape,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntArrayRef dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
flip_check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = cuda::getApplyBlockSize();
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
auto flip_dims = dims.vec();
wrap_all_dims(flip_dims, total_dims);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (flip_dims[0] == 0 || flip_dims[0] == total_dims - 1)) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, in_tensor.scalar_type(), "flip_cuda", [&] {
auto in_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(flip_dims[0]);
out_tensor_info.collapseDims(flip_dims[0]);
kernel_pointwise_flip_apply2<scalar_t, int64_t>
<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
auto flip_dims_t = at::from_blob(
flip_dims.data(), {static_cast<int64_t>(flip_dims.size())}, at::device(kCPU).dtype(kLong));
auto shape = in_tensor.sizes().vec();
auto shape_t = at::from_blob(
shape.data(), {static_cast<int64_t>(shape.size())}, at::device(kCPU).dtype(kLong));
auto strides = in_tensor.strides().vec();
auto strides_t = at::from_blob(
strides.data(), {static_cast<int64_t>(strides.size())}, at::device(kCPU).dtype(kLong));
// stride_contiguous is the stride of non-contiguous tensor after calling contiguous(),
// it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data_ptr<int64_t>();
for (int64_t i = total_dims - 1; i >= 0; i--) {
if (i == total_dims - 1) {
stride_contiguous_d[i] = 1;
} else {
stride_contiguous_d[i] = std::max<int64_t>(shape[i+1], 1) * stride_contiguous_d[i + 1];
}
}
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, in_tensor.scalar_type(), "flip_cuda", [&] {
flip_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
flip_dims_t.cuda().data_ptr<int64_t>(),
flip_dims_size,
strides_t.cuda().data_ptr<int64_t>(),
stride_contiguous.cuda().data_ptr<int64_t>(),
shape_t.cuda().data_ptr<int64_t>(),
total_dims);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
template <typename scalar_t>
C10_LAUNCH_BOUNDS_1(cuda::getApplyBlockSize())
__global__ void roll_cuda_kernel(
scalar_t* in_tensor,
scalar_t* out_tensor,
int64_t N,
int64_t roll_dim,
int64_t start,
int64_t size,
int64_t stride,
int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
// roll dim idx is the index of linear_index along the rolling dimension.
int64_t roll_dim_idx = linear_index % (stride * size) / stride;
// index into the source data to find appropriate value.
int64_t source_idx = 0;
if( roll_dim_idx >= (size - start) ) {
source_idx = linear_index - ((size - start) * stride);
} else {
source_idx = linear_index + (start * stride);
}
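  // e.g. rolling [0, 1, 2, 3, 4] by 2 gives start = 3: output index 0 reads input
  // index 3, producing [3, 4, 0, 1, 2].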
out_tensor[linear_index] = in_tensor[source_idx];
}
// Roll a tensor along a dimension
Tensor roll_cuda(const Tensor& self, IntArrayRef shifts, IntArrayRef dims) {
if (dims.size() != 1 || shifts.size() != 1) {
return roll_common(self, shifts, dims);
}
auto in_tensor = self;
if(!self.is_contiguous()) {
in_tensor = self.contiguous();
}
auto out_tensor = at::empty_like(in_tensor, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
if (out_tensor.numel() == 0) {
return out_tensor;
}
const int64_t N = in_tensor.numel();
const int64_t dim = dims[0];
const int64_t size = in_tensor.size(dim);
int64_t start = (size - shifts[0]) % size;
// Behavior of % is different in C++ vs Python for negative numbers. This
// corrects the difference.
if( start < 0 ) start = start + size;
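  // e.g. size = 5, shifts[0] = 7: (5 - 7) % 5 is -2 in C++ (3 in Python), so adding
  // size yields the intended start of 3.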
dim3 dim_block = cuda::getApplyBlock();
dim3 dim_grid;
TORCH_CHECK(cuda::getApplyGrid(N, dim_grid, in_tensor.get_device()), "unable to get dim grid");
auto total_dims = in_tensor.dim();
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND2(at::ScalarType::Half, at::ScalarType::Bool, in_tensor.scalar_type(), "roll_cuda", [&] {
roll_cuda_kernel<<<dim_grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(
in_tensor.data_ptr<scalar_t>(), out_tensor.data_ptr<scalar_t>(), N,
dim, start,
size,
in_tensor.stride(dim),
total_dims);
C10_CUDA_KERNEL_LAUNCH_CHECK();
});
return out_tensor;
}
}} // namespace at::native
|
421b0d1919623ecd97693876052bbb9a6ec03526.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <chrono>
#include <math.h>
#include "cuda_2d.h"
using namespace std;
int main(void)
{
int N = 640 * 480;
uint16_t *x;
float *y;
// dim3 blockSize (16,1,1);
dim3 numBlocks (40, 30, 1);
int n = 40 * 30;
float normfactor = 1.0f / 256.0f;
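  // 640*480 inputs reduced to 40*30 outputs is 256 inputs per output, which presumably
  // is what the 1/256 normalisation factor below accounts for.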
auto start = std::chrono::system_clock::now ();
// Allocate Unified Memory accessible from CPU or GPU
hipMallocManaged(&x, N*sizeof(uint16_t));
hipMallocManaged(&y, n*sizeof(float));
std::cout << "reduce " << N << " elements " << " to " << n << ".\n";
auto stop = std::chrono::system_clock::now ();
chrono::duration< double > dur = stop - start;
std::cout << "alloc took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
}
for (int i = 0; i < n; i++) {
y[i] = 0.0f;
}
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "init took " << dur.count () << " s " << std::endl;
auto tstart = std::chrono::system_clock::now ();
start = std::chrono::system_clock::now ();
  // Run the reduction and normalisation kernels on the GPU
hipLaunchKernelGGL(( reduce), dim3(n), dim3(1), 0, 0, N, x, y);
hipLaunchKernelGGL(( mulscalar), dim3(1), dim3(256), 0, 0, n, normfactor, y);
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "reduce took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "sync took " << dur.count () << " s " << std::endl;
auto tstop = std::chrono::system_clock::now ();
dur = tstop - tstart;
std::cout << "total took " << dur.count () << " s " << std::endl;
  // Check for errors (all values should be 1.0f)
float maxError = 0.0f;
for (int i = 0; i < n; i++) {
maxError = fmax(maxError, fabs(y[i]-1.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 421b0d1919623ecd97693876052bbb9a6ec03526.cu | #include <iostream>
#include <chrono>
#include <math.h>
#include "cuda_2d.h"
using namespace std;
int main(void)
{
int N = 640 * 480;
uint16_t *x;
float *y;
// dim3 blockSize (16,1,1);
dim3 numBlocks (40, 30, 1);
int n = 40 * 30;
float normfactor = 1.0f / 256.0f;
auto start = std::chrono::system_clock::now ();
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&x, N*sizeof(uint16_t));
cudaMallocManaged(&y, n*sizeof(float));
std::cout << "reduce " << N << " elements " << " to " << n << ".\n";
auto stop = std::chrono::system_clock::now ();
chrono::duration< double > dur = stop - start;
std::cout << "alloc took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
}
for (int i = 0; i < n; i++) {
y[i] = 0.0f;
}
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "init took " << dur.count () << " s " << std::endl;
auto tstart = std::chrono::system_clock::now ();
start = std::chrono::system_clock::now ();
  // Run the reduction and normalisation kernels on the GPU
reduce<<<n, 1>>>(N, x, y);
mulscalar<<<1, 256>>> (n, normfactor, y);
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "reduce took " << dur.count () << " s " << std::endl;
start = std::chrono::system_clock::now ();
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
stop = std::chrono::system_clock::now ();
dur = stop - start;
std::cout << "sync took " << dur.count () << " s " << std::endl;
auto tstop = std::chrono::system_clock::now ();
dur = tstop - tstart;
std::cout << "total took " << dur.count () << " s " << std::endl;
  // Check for errors (all values should be 1.0f)
float maxError = 0.0f;
for (int i = 0; i < n; i++) {
maxError = fmax(maxError, fabs(y[i]-1.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
04_add.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1e8;
// Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
  // Run kernel on 100M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y);
// Wait for GPU to finish before accessing on host
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
| 04_add.cu | #include <iostream>
#include <math.h>
// CUDA Kernel function to add the elements of two arrays on the GPU
__global__
void add(int n, float *x, float *y)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
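  // Grid-stride loop: each thread handles elements index, index+stride, ..., so any
  // grid size covers all n elements.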
for (int i = index; i < n; i += stride)
y[i] = x[i] + y[i];
}
int main(void)
{
int N = 1e8;
// Allocate Unified Memory -- accessible from CPU or GPU
float *x, *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
  // Run kernel on 100M elements on the GPU
int blockSize = 256;
int numBlocks = (N + blockSize - 1) / blockSize;
add<<<numBlocks, blockSize>>>(N, x, y);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(y[i]-3.0f));
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
e2ed2c09b43ce67ee88c3c8085afa32a995dd756.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MapDropoutForward(const int nc, const int spatial_dim, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, nc) {
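    // One thread per (image, channel) pair: the whole spatial map is either kept and
    // rescaled by `scale` or zeroed, depending on that map's random mask value.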
const Dtype* in_s = in + index * spatial_dim;
Dtype* out_s = out + index * spatial_dim;
if (mask[index] > threshold) {
for (int i = 0; i < spatial_dim; ++i) {
out_s[i] = in_s[i] * scale;
}
} else {
for (int i = 0; i < spatial_dim; ++i) {
out_s[i] = Dtype(0.);
}
}
}
}
template <typename Dtype>
void MapDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int nc = num * channels;
const int spatial_dim = bottom[0]->count() / nc;
if (Caffe::phase() == Caffe::TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(nc, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MapDropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(nc)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nc, spatial_dim, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void MapDropoutBackward(const int nc, const int spatial_dim, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, nc) {
const Dtype* in_diff_s = in_diff + index * spatial_dim;
Dtype* out_diff_s = out_diff + index * spatial_dim;
if (mask[index] > threshold) {
for (int i = 0; i < spatial_dim; ++i) {
out_diff_s[i] = in_diff_s[i] * scale;
}
} else {
for (int i = 0; i < spatial_dim; ++i) {
out_diff_s[i] = Dtype(0.);
}
}
}
}
template <typename Dtype>
void MapDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int num = (*bottom)[0]->num();
const int channels = (*bottom)[0]->channels();
const int nc = num * channels;
const int spatial_dim = (*bottom)[0]->count() / nc;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MapDropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(nc)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
nc, spatial_dim, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_CLASS(MapDropoutLayer);
} // namespace caffe
| e2ed2c09b43ce67ee88c3c8085afa32a995dd756.cu | #include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MapDropoutForward(const int nc, const int spatial_dim, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, nc) {
const Dtype* in_s = in + index * spatial_dim;
Dtype* out_s = out + index * spatial_dim;
if (mask[index] > threshold) {
for (int i = 0; i < spatial_dim; ++i) {
out_s[i] = in_s[i] * scale;
}
} else {
for (int i = 0; i < spatial_dim; ++i) {
out_s[i] = Dtype(0.);
}
}
}
}
template <typename Dtype>
void MapDropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int num = bottom[0]->num();
const int channels = bottom[0]->channels();
const int nc = num * channels;
const int spatial_dim = bottom[0]->count() / nc;
if (Caffe::phase() == Caffe::TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(nc, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
MapDropoutForward<Dtype><<<CAFFE_GET_BLOCKS(nc), CAFFE_CUDA_NUM_THREADS>>>(
nc, spatial_dim, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(bottom[0]->count(), bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void MapDropoutBackward(const int nc, const int spatial_dim, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, nc) {
const Dtype* in_diff_s = in_diff + index * spatial_dim;
Dtype* out_diff_s = out_diff + index * spatial_dim;
if (mask[index] > threshold) {
for (int i = 0; i < spatial_dim; ++i) {
out_diff_s[i] = in_diff_s[i] * scale;
}
} else {
for (int i = 0; i < spatial_dim; ++i) {
out_diff_s[i] = Dtype(0.);
}
}
}
}
template <typename Dtype>
void MapDropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int num = (*bottom)[0]->num();
const int channels = (*bottom)[0]->channels();
const int nc = num * channels;
const int spatial_dim = (*bottom)[0]->count() / nc;
// NOLINT_NEXT_LINE(whitespace/operators)
MapDropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(nc), CAFFE_CUDA_NUM_THREADS>>>(
nc, spatial_dim, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_CLASS(MapDropoutLayer);
} // namespace caffe
|
f4a207a028cd8ad30a3de8ecd4599b6367ab0fa2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _IMAGESEG_KERNEL
#define _IMAGESEG_KERNEL
#include <helper_math.h>
#include <helper_functions.h>
#include "include/timer.h"
//**************** CUDA things *****************//
//Useful to read Error from CUDA Calls
#define CUDA_CALL(x) {if((x) != hipSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", hipGetErrorString(hipGetLastError())); \
exit(EXIT_FAILURE);}}
__global__ void resetClustersValues(int *d_sumRed,int *d_sumGreen,int *d_sumBlue, int* d_pixelClusterCounter, int* d_tempRedCentroid, int* d_tempGreenCentroid, int* d_tempBlueCentroid, int d_nCentroids ) {
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < d_nCentroids) {
// nCentroids long
d_sumRed[threadID] = 0;
d_sumGreen[threadID] = 0;
d_sumBlue[threadID] = 0;
d_pixelClusterCounter[threadID] = 0;
d_tempRedCentroid[threadID] = 0;
d_tempGreenCentroid[threadID] = 0;
d_tempBlueCentroid[threadID] = 0;
}
}
__global__ void resetLabelArray(int *d_labelArray, int d_size){
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
// labelArray is "size" long
if(threadID < d_size) {
d_labelArray[threadID] = 0;
}
}
__global__ void setPixelsLabel(int *d_red, int *d_green, int *d_blue, int *d_labelArray, int d_size, int d_nCentroids, int* d_redCentroid, int* d_greenCentroid, int* d_blueCentroid ) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
double distance_pixel, distance_ccCluster;
int ccCluster = 0;
distance_ccCluster = sqrtf(powf((d_red[threadID]-d_redCentroid[ccCluster]),2.0) + powf((d_green[threadID]-d_greenCentroid[ccCluster]),2.0) + powf((d_blue[threadID]-d_blueCentroid[ccCluster]),2.0));
if(threadID < d_size) {
for(int i = 0; i < d_nCentroids; ++i) {
distance_pixel = sqrtf(powf((d_red[threadID]-d_redCentroid[i]),2.0) + powf((d_green[threadID]-d_greenCentroid[i]),2.0) + powf((d_blue[threadID]-d_blueCentroid[i]),2.0));
if(distance_pixel < distance_ccCluster){
distance_ccCluster = distance_pixel;
ccCluster = i;
}
}
d_labelArray[threadID] = ccCluster;
}
}
__global__ void sumCluster(int *d_red, int *d_green, int *d_blue, int *d_sumRed,int *d_sumGreen, int *d_sumBlue, int *d_labelArray,int *d_pixelClusterCounter, int d_size) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if(threadID < d_size) {
int currentLabelArray = d_labelArray[threadID];
int currentRed = d_red[threadID];
int currentGreen = d_green[threadID];
int currentBlue = d_blue[threadID];
// Writing to global memory needs a serialization.
atomicAdd(&d_sumRed[currentLabelArray], currentRed);
atomicAdd(&d_sumGreen[currentLabelArray], currentGreen);
atomicAdd(&d_sumBlue[currentLabelArray], currentBlue);
atomicAdd(&d_pixelClusterCounter[currentLabelArray], 1);
}
}
__global__ void newCentroids(int *d_tempRedCentroid, int *d_tempGreenCentroid, int *d_tempBlueCentroid,int* d_sumRed, int *d_sumGreen,int *d_sumBlue, int* d_pixelClusterCounter, int d_nCentroids) {
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < d_nCentroids) {
int currentPixelCounter = d_pixelClusterCounter[threadID];
int sumRed = d_sumRed[threadID];
int sumGreen = d_sumGreen[threadID];
int sumBlue = d_sumBlue[threadID];
//new RGB Centroids' values written in global memory
d_tempRedCentroid[threadID] = (int)(sumRed/currentPixelCounter);
d_tempGreenCentroid[threadID] = (int)(sumGreen/currentPixelCounter);
d_tempBlueCentroid[threadID] = (int)(sumBlue/currentPixelCounter);
}
}
extern "C"
void executeKernel(double threshold,
int* h_redCentroid, int* h_greenCentroid, int* h_blueCentroid, int* d_redCentroid, int* d_greenCentroid, int* d_blueCentroid,
int* d_sumRed, int* d_sumGreen, int* d_sumBlue, int* d_pixelClusterCounter, int* d_tempRedCentroid, int* d_tempGreenCentroid, int* d_tempBlueCentroid,
int* d_red, int* d_green, int* d_blue, int* h_labelArray, int* d_labelArray, size_t sizePixels, size_t sizeClusters, int d_size, int d_nCentroids)
{
// Defining grid/block size
double centroidChange;
int threadsPerBlock_ = 16;
int gridSize = 256;
int block_x, block_y;
block_x = ceil((d_size + threadsPerBlock_-1)/threadsPerBlock_);
block_y = ceil((d_size + threadsPerBlock_-1)/threadsPerBlock_);
if (block_x > gridSize)
block_x = gridSize;
else if(block_y > gridSize)
block_y = gridSize;
dim3 dimGrid(block_x,block_y);
dim3 dimBlock(threadsPerBlock_,threadsPerBlock_);
printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock_);
GpuTimer timer;
timer.Start();
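	// Standard k-means iteration: reset the accumulators, assign each pixel to its
	// nearest centroid, accumulate per-cluster RGB sums, then recompute the centroids;
	// repeat until the centroids move by less than `threshold`.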
do
{
		// Set centroids as constant
CUDA_CALL(hipMemcpyToSymbol(d_redCentroid, h_redCentroid, sizeClusters));
CUDA_CALL(hipMemcpyToSymbol(d_greenCentroid, h_greenCentroid, sizeClusters));
CUDA_CALL(hipMemcpyToSymbol(d_blueCentroid, h_blueCentroid, sizeClusters));
//Reset values for new clusters
		hipLaunchKernelGGL(( resetClustersValues), dim3(1), dim3(dimBlock), 0, 0, d_sumRed, d_sumGreen, d_sumBlue, d_pixelClusterCounter, d_tempRedCentroid, d_tempGreenCentroid, d_tempBlueCentroid, d_nCentroids);
//Reset labelArray
hipLaunchKernelGGL(( resetLabelArray), dim3(dimGrid), dim3(dimBlock), 0, 0, d_labelArray, d_size);
		// Classify pixels and save each pixel's cluster label in labelArray
hipLaunchKernelGGL(( setPixelsLabel), dim3(dimGrid), dim3(dimBlock) , 0, 0, d_red, d_green, d_blue, d_labelArray, d_size, d_nCentroids, d_redCentroid, d_greenCentroid, d_blueCentroid);
		// Accumulate per-cluster RGB sums and pixel counts
hipLaunchKernelGGL(( sumCluster), dim3(dimGrid), dim3(dimBlock), 0, 0, d_red, d_green, d_blue, d_sumRed, d_sumGreen, d_sumBlue, d_labelArray, d_pixelClusterCounter, d_size);
//Finds new RGB Centroids' values
hipLaunchKernelGGL(( newCentroids), dim3(1),dim3(dimBlock), 0, 0, d_tempRedCentroid, d_tempGreenCentroid, d_tempBlueCentroid, d_sumRed, d_sumGreen, d_sumBlue, d_pixelClusterCounter, d_nCentroids);
CUDA_CALL(hipMemcpy(h_redCentroid, d_tempRedCentroid, sizeClusters,hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_greenCentroid, d_tempGreenCentroid, sizeClusters,hipMemcpyDeviceToHost));
CUDA_CALL(hipMemcpy(h_blueCentroid, d_tempBlueCentroid, sizeClusters, hipMemcpyDeviceToHost));
centroidChange = sqrtf(powf((d_redCentroid-h_redCentroid),2.0) + powf((d_greenCentroid-h_greenCentroid),2.0) + powf((d_blueCentroid-h_blueCentroid),2.0));
} while (centroidChange > threshold);
CUDA_CALL(hipMemcpy(h_labelArray, d_labelArray, sizePixels, hipMemcpyDeviceToHost));
timer.Stop();
}
#endif | f4a207a028cd8ad30a3de8ecd4599b6367ab0fa2.cu | #ifndef _IMAGESEG_KERNEL
#define _IMAGESEG_KERNEL
#include <helper_math.h>
#include <helper_functions.h>
#include "include/timer.h"
//**************** CUDA things *****************//
//Useful to read Error from CUDA Calls
#define CUDA_CALL(x) {if((x) != cudaSuccess){ \
printf("CUDA error at %s:%d\n",__FILE__,__LINE__); \
printf(" %s\n", cudaGetErrorString(cudaGetLastError())); \
exit(EXIT_FAILURE);}}
__global__ void resetClustersValues(int *d_sumRed,int *d_sumGreen,int *d_sumBlue, int* d_pixelClusterCounter, int* d_tempRedCentroid, int* d_tempGreenCentroid, int* d_tempBlueCentroid, int d_nCentroids ) {
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < d_nCentroids) {
// nCentroids long
d_sumRed[threadID] = 0;
d_sumGreen[threadID] = 0;
d_sumBlue[threadID] = 0;
d_pixelClusterCounter[threadID] = 0;
d_tempRedCentroid[threadID] = 0;
d_tempGreenCentroid[threadID] = 0;
d_tempBlueCentroid[threadID] = 0;
}
}
__global__ void resetLabelArray(int *d_labelArray, int d_size){
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
// labelArray is "size" long
if(threadID < d_size) {
d_labelArray[threadID] = 0;
}
}
__global__ void setPixelsLabel(int *d_red, int *d_green, int *d_blue, int *d_labelArray, int d_size, int d_nCentroids, int* d_redCentroid, int* d_greenCentroid, int* d_blueCentroid ) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
double distance_pixel, distance_ccCluster;
int ccCluster = 0;
distance_ccCluster = sqrtf(powf((d_red[threadID]-d_redCentroid[ccCluster]),2.0) + powf((d_green[threadID]-d_greenCentroid[ccCluster]),2.0) + powf((d_blue[threadID]-d_blueCentroid[ccCluster]),2.0));
if(threadID < d_size) {
for(int i = 0; i < d_nCentroids; ++i) {
distance_pixel = sqrtf(powf((d_red[threadID]-d_redCentroid[i]),2.0) + powf((d_green[threadID]-d_greenCentroid[i]),2.0) + powf((d_blue[threadID]-d_blueCentroid[i]),2.0));
if(distance_pixel < distance_ccCluster){
distance_ccCluster = distance_pixel;
ccCluster = i;
}
}
d_labelArray[threadID] = ccCluster;
}
}
__global__ void sumCluster(int *d_red, int *d_green, int *d_blue, int *d_sumRed,int *d_sumGreen, int *d_sumBlue, int *d_labelArray,int *d_pixelClusterCounter, int d_size) {
// Global thread index
int threadID = (threadIdx.x + blockIdx.x * blockDim.x) + (threadIdx.y + blockIdx.y * blockDim.y) * blockDim.x * gridDim.x;
if(threadID < d_size) {
int currentLabelArray = d_labelArray[threadID];
int currentRed = d_red[threadID];
int currentGreen = d_green[threadID];
int currentBlue = d_blue[threadID];
// Writing to global memory needs a serialization.
atomicAdd(&d_sumRed[currentLabelArray], currentRed);
atomicAdd(&d_sumGreen[currentLabelArray], currentGreen);
atomicAdd(&d_sumBlue[currentLabelArray], currentBlue);
atomicAdd(&d_pixelClusterCounter[currentLabelArray], 1);
}
}
__global__ void newCentroids(int *d_tempRedCentroid, int *d_tempGreenCentroid, int *d_tempBlueCentroid,int* d_sumRed, int *d_sumGreen,int *d_sumBlue, int* d_pixelClusterCounter, int d_nCentroids) {
int threadID = threadIdx.x + threadIdx.y * blockDim.x;
if(threadID < d_nCentroids) {
int currentPixelCounter = d_pixelClusterCounter[threadID];
int sumRed = d_sumRed[threadID];
int sumGreen = d_sumGreen[threadID];
int sumBlue = d_sumBlue[threadID];
//new RGB Centroids' values written in global memory
d_tempRedCentroid[threadID] = (int)(sumRed/currentPixelCounter);
d_tempGreenCentroid[threadID] = (int)(sumGreen/currentPixelCounter);
d_tempBlueCentroid[threadID] = (int)(sumBlue/currentPixelCounter);
}
}
extern "C"
void executeKernel(double threshold,
int* h_redCentroid, int* h_greenCentroid, int* h_blueCentroid, int* d_redCentroid, int* d_greenCentroid, int* d_blueCentroid,
int* d_sumRed, int* d_sumGreen, int* d_sumBlue, int* d_pixelClusterCounter, int* d_tempRedCentroid, int* d_tempGreenCentroid, int* d_tempBlueCentroid,
int* d_red, int* d_green, int* d_blue, int* h_labelArray, int* d_labelArray, size_t sizePixels, size_t sizeClusters, int d_size, int d_nCentroids)
{
// Defining grid/block size
double centroidChange;
int threadsPerBlock_ = 16;
int gridSize = 256;
int block_x, block_y;
block_x = ceil((d_size + threadsPerBlock_-1)/threadsPerBlock_);
block_y = ceil((d_size + threadsPerBlock_-1)/threadsPerBlock_);
if (block_x > gridSize)
block_x = gridSize;
else if(block_y > gridSize)
block_y = gridSize;
dim3 dimGrid(block_x,block_y);
dim3 dimBlock(threadsPerBlock_,threadsPerBlock_);
printf("CUDA kernel launch with %d blocks of %d threads\n", gridSize, threadsPerBlock_);
GpuTimer timer;
timer.Start();
do
{
		// Set centroids as constant
CUDA_CALL(cudaMemcpyToSymbol(d_redCentroid, h_redCentroid, sizeClusters));
CUDA_CALL(cudaMemcpyToSymbol(d_greenCentroid, h_greenCentroid, sizeClusters));
CUDA_CALL(cudaMemcpyToSymbol(d_blueCentroid, h_blueCentroid, sizeClusters));
//Reset values for new clusters
		resetClustersValues<<<1, dimBlock>>>(d_sumRed, d_sumGreen, d_sumBlue, d_pixelClusterCounter, d_tempRedCentroid, d_tempGreenCentroid, d_tempBlueCentroid, d_nCentroids);
//Reset labelArray
resetLabelArray<<<dimGrid, dimBlock>>>(d_labelArray, d_size);
		// Classify pixels and save each pixel's cluster label in labelArray
setPixelsLabel<<<dimGrid, dimBlock >>> (d_red, d_green, d_blue, d_labelArray, d_size, d_nCentroids, d_redCentroid, d_greenCentroid, d_blueCentroid);
		// Accumulate per-cluster RGB sums and pixel counts
sumCluster<<<dimGrid, dimBlock>>> (d_red, d_green, d_blue, d_sumRed, d_sumGreen, d_sumBlue, d_labelArray, d_pixelClusterCounter, d_size);
//Finds new RGB Centroids' values
newCentroids<<<1,dimBlock>>>(d_tempRedCentroid, d_tempGreenCentroid, d_tempBlueCentroid, d_sumRed, d_sumGreen, d_sumBlue, d_pixelClusterCounter, d_nCentroids);
CUDA_CALL(cudaMemcpy(h_redCentroid, d_tempRedCentroid, sizeClusters,cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_greenCentroid, d_tempGreenCentroid, sizeClusters,cudaMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy(h_blueCentroid, d_tempBlueCentroid, sizeClusters, cudaMemcpyDeviceToHost));
centroidChange = sqrtf(powf((d_redCentroid-h_redCentroid),2.0) + powf((d_greenCentroid-h_greenCentroid),2.0) + powf((d_blueCentroid-h_blueCentroid),2.0));
} while (centroidChange > threshold);
CUDA_CALL(cudaMemcpy(h_labelArray, d_labelArray, sizePixels, cudaMemcpyDeviceToHost));
timer.Stop();
}
#endif |
f293e5cca2c9b801a780ba22bd29312bea7f7e8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//
// kernel routine
//
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Guard against the extra threads in the last block when N is not a multiple of the block size.
if (i < N)
C[i] = A[i] + B[i];
}
//
// main code
//
int main(int argc, char **argv)
{
hipSetDevice(1);
// Input the vector length
int N = atoi(argv[1]);
// Number of bytes to allocate for N float
size_t bytes = N*sizeof(float);
// Generate randomly vectors A and B
float *A = (float *)malloc(bytes);
float *B = (float *)malloc(bytes);
float *C = (float *)malloc(bytes);
// Allocate memory for arrays d_A, d_B, and d_C on device
float *d_A, *d_B, *d_C;
hipMalloc(&d_A, bytes);
hipMalloc(&d_B, bytes);
hipMalloc(&d_C, bytes);
for (int i = 0; i < N; i++)
{
A[i] = rand()%100;
B[i] = rand()%100;
}
hipMemcpy(d_A, A, bytes, hipMemcpyHostToDevice);
hipMemcpy(d_B, B, bytes, hipMemcpyHostToDevice);
// Kernel invocation
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(( VecAdd), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, d_A, d_B, d_C, N);
// Copy data from device array d_C to host array C
hipMemcpy(C, d_C, bytes, hipMemcpyDeviceToHost);
int s = 0;
for (int j = 0; j < N; j++) s += C[j];
printf("\nGPU Vector Length: %d Sum: %d\n", N, s);
// Free CPU memory
free(A);
free(B);
free(C);
// Free GPU memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
// CUDA exit -- needed to flush printf write buffer
hipDeviceReset();
return 1;
} | f293e5cca2c9b801a780ba22bd29312bea7f7e8d.cu | //
// include files
//
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//
// kernel routine
//
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// Guard against the extra threads in the last block when N is not a multiple of the block size.
if (i < N)
C[i] = A[i] + B[i];
}
//
// main code
//
int main(int argc, char **argv)
{
cudaSetDevice(1);
// Input the vector length
int N = atoi(argv[1]);
// Number of bytes to allocate for N float
size_t bytes = N*sizeof(float);
// Generate randomly vectors A and B
float *A = (float *)malloc(bytes);
float *B = (float *)malloc(bytes);
float *C = (float *)malloc(bytes);
// Allocate memory for arrays d_A, d_B, and d_C on device
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, bytes);
cudaMalloc(&d_B, bytes);
cudaMalloc(&d_C, bytes);
for (int i = 0; i < N; i++)
{
A[i] = rand()%100;
B[i] = rand()%100;
}
cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
// Kernel invocation
int threadsPerBlock = 256;
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
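// Round up so the grid covers all N elements even when N is not a multiple of the block size.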
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
// Copy data from device array d_C to host array C
cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
int s = 0;
for (int j = 0; j < N; j++) s += C[j];
printf("\nGPU Vector Length: %d Sum: %d\n", N, s);
// Free CPU memory
free(A);
free(B);
free(C);
// Free GPU memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// CUDA exit -- needed to flush printf write buffer
cudaDeviceReset();
return 1;
} |
41b072033e0dfe6b8dd217fc85e530e782a61ac8.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <Windows.h>
#include <time.h>
#include <stdint.h>
#define CHECK(call)\
{\
const hipError_t error = call;\
if (error != hipSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, hipGetErrorString(error));\
exit(1);\
}\
}\
int gettimeofday(struct timeval * tp, struct timezone *tzp)
{
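	// Windows stand-in for POSIX gettimeofday(): FILETIME counts 100 ns ticks since
	// 1601-01-01, so subtract EPOCH (the 1601->1970 offset) and convert to seconds and
	// microseconds.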
static const uint64_t EPOCH = ((uint64_t) 116444736000000000ULL);
SYSTEMTIME system_time;
FILETIME file_time;
uint64_t time;
GetSystemTime(&system_time);
SystemTimeToFileTime(&system_time, &file_time);
time = ((uint64_t)file_time.dwLowDateTime);
time += ((uint64_t)file_time.dwHighDateTime) << 32;
tp->tv_sec = (long)((time - EPOCH) / 10000000L);
tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
return 0;
}
__global__ void sum_array_gpu(float *a, float *b, float *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("%d\t%d\t%d\n", blockDim.x, blockIdx.x, threadIdx.x);
c[i] = a[i] + b[i];
}
void sum_array_cpu(float *a, float *b, float *c, int n)
{
for (int i = 0; i < n; i++)
{
c[i] = a[i] + b[i];
}
}
void initData(float *data,int n)
{
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < n; i++)
data[i] = (float)(rand() & 0xFF) / 10.0f;
}
double cpu_sec()
{
timeval tp;
gettimeofday(&tp, NULL);\
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void check_sum(float *c, float *g, int n)
{
double epsilon = 1.0E-8;
int match = 1;
for (int i = 0; i < n; i++)
{
if (abs(c[i] - g[i]) > epsilon)
{
match = 0;
printf("Don't match!\n");
printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i);
break;
}
}
if (match)
printf("Array match\n\n");
return;
}
int main()
{
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
int nelem = 1 << 24;
printf("Vector size %d\n", nelem);
size_t nbytes = nelem * sizeof(float);
float *h_a, *h_b, *cpuref, *gpuref;
h_a = (float *)malloc(nbytes);
h_b = (float *)malloc(nbytes);
cpuref = (float *)malloc(nbytes);
gpuref = (float *)malloc(nbytes);
double istart, ielaps;
initData(h_a, nelem);
initData(h_b, nelem);
memset(cpuref, 0, nbytes);
memset(gpuref, 0, nbytes);
istart = cpu_sec();
sum_array_cpu(h_a, h_b, cpuref, nelem);
ielaps = cpu_sec() - istart;
printf("sum cpu time cost %f ms\n", ielaps*1000);
float *da, *db, *dc;
hipMalloc((float**)&da, nbytes);
hipMalloc((float**)&db, nbytes);
hipMalloc((float**)&dc, nbytes);
hipMemcpy(da, h_a, nbytes, hipMemcpyHostToDevice);
hipMemcpy(db, h_b, nbytes, hipMemcpyHostToDevice);
int len = 1024;
dim3 block(len);
dim3 grid((nelem+block.x-1)/block.x);
istart = cpu_sec();
hipLaunchKernelGGL(( sum_array_gpu), dim3(grid), dim3(block), 0, 0, da,db,dc);
hipDeviceSynchronize();
ielaps = cpu_sec() - istart;
printf("sum gpu <<<%d,%d>>> time cost %f ms\n", grid.x, block.x, ielaps*1000);
hipMemcpy(gpuref, dc, nbytes, hipMemcpyDeviceToHost);
check_sum(cpuref, gpuref, nelem);
hipFree(da);
hipFree(db);
hipFree(dc);
free(h_a);
free(h_b);
free(cpuref);
free(gpuref);
int c = getchar();
return 0;
}
| 41b072033e0dfe6b8dd217fc85e530e782a61ac8.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include <Windows.h>
#include <time.h>
#include <stdint.h>
#define CHECK(call)\
{\
const cudaError_t error = call;\
if (error != cudaSuccess)\
{\
printf("Error: %s:%d, ", __FILE__, __LINE__);\
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(1);\
}\
}\
int gettimeofday(struct timeval * tp, struct timezone *tzp)
{
static const uint64_t EPOCH = ((uint64_t) 116444736000000000ULL);
SYSTEMTIME system_time;
FILETIME file_time;
uint64_t time;
GetSystemTime(&system_time);
SystemTimeToFileTime(&system_time, &file_time);
time = ((uint64_t)file_time.dwLowDateTime);
time += ((uint64_t)file_time.dwHighDateTime) << 32;
tp->tv_sec = (long)((time - EPOCH) / 10000000L);
tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
return 0;
}
__global__ void sum_array_gpu(float *a, float *b, float *c)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("%d\t%d\t%d\n", blockDim.x, blockIdx.x, threadIdx.x);
c[i] = a[i] + b[i];
}
void sum_array_cpu(float *a, float *b, float *c, int n)
{
for (int i = 0; i < n; i++)
{
c[i] = a[i] + b[i];
}
}
void initData(float *data,int n)
{
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < n; i++)
data[i] = (float)(rand() & 0xFF) / 10.0f;
}
double cpu_sec()
{
timeval tp;
gettimeofday(&tp, NULL);\
return ((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}
void check_sum(float *c, float *g, int n)
{
double epsilon = 1.0E-8;
int match = 1;
for (int i = 0; i < n; i++)
{
if (abs(c[i] - g[i]) > epsilon)
{
match = 0;
printf("Don't match!\n");
printf("host %5.2f device %5.2f at current %d\n", c[i], g[i], i);
break;
}
}
if (match)
printf("Array match\n\n");
return;
}
int main()
{
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
int nelem = 1 << 24;
printf("Vector size %d\n", nelem);
size_t nbytes = nelem * sizeof(float);
float *h_a, *h_b, *cpuref, *gpuref;
h_a = (float *)malloc(nbytes);
h_b = (float *)malloc(nbytes);
cpuref = (float *)malloc(nbytes);
gpuref = (float *)malloc(nbytes);
double istart, ielaps;
initData(h_a, nelem);
initData(h_b, nelem);
memset(cpuref, 0, nbytes);
memset(gpuref, 0, nbytes);
istart = cpu_sec();
sum_array_cpu(h_a, h_b, cpuref, nelem);
ielaps = cpu_sec() - istart;
printf("sum cpu time cost %f ms\n", ielaps*1000);
float *da, *db, *dc;
cudaMalloc((float**)&da, nbytes);
cudaMalloc((float**)&db, nbytes);
cudaMalloc((float**)&dc, nbytes);
cudaMemcpy(da, h_a, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(db, h_b, nbytes, cudaMemcpyHostToDevice);
int len = 1024;
dim3 block(len);
dim3 grid((nelem+block.x-1)/block.x);
istart = cpu_sec();
sum_array_gpu<<<grid, block>>>(da,db,dc);
cudaDeviceSynchronize();
ielaps = cpu_sec() - istart;
printf("sum gpu <<<%d,%d>>> time cost %f ms\n", grid.x, block.x, ielaps*1000);
cudaMemcpy(gpuref, dc, nbytes, cudaMemcpyDeviceToHost);
check_sum(cpuref, gpuref, nelem);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(h_a);
free(h_b);
free(cpuref);
free(gpuref);
int c = getchar();
return 0;
}
|
e243fea9a38c8ca9da466cdaa41862f8bd78869a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "updateCenters.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *centers = NULL;
hipMalloc(¢ers, XSIZE*YSIZE);
float *images = NULL;
hipMalloc(&images, XSIZE*YSIZE);
int *updates = NULL;
hipMalloc(&updates, XSIZE*YSIZE);
int noClusters = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( updateCenters), dim3(gridBlock),dim3(threadBlock), 0, 0, centers,images,updates,noClusters);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( updateCenters), dim3(gridBlock),dim3(threadBlock), 0, 0, centers,images,updates,noClusters);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( updateCenters), dim3(gridBlock),dim3(threadBlock), 0, 0, centers,images,updates,noClusters);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | e243fea9a38c8ca9da466cdaa41862f8bd78869a.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "updateCenters.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *centers = NULL;
cudaMalloc(¢ers, XSIZE*YSIZE);
float *images = NULL;
cudaMalloc(&images, XSIZE*YSIZE);
int *updates = NULL;
cudaMalloc(&updates, XSIZE*YSIZE);
int noClusters = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
updateCenters<<<gridBlock,threadBlock>>>(centers,images,updates,noClusters);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
updateCenters<<<gridBlock,threadBlock>>>(centers,images,updates,noClusters);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
updateCenters<<<gridBlock,threadBlock>>>(centers,images,updates,noClusters);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
4e24d74aeae96965b3372d33548a7e4b4920d0dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <cutil.h>
#include <miscmath.h>
#include <amgx_cusparse.h>
#include <thrust/copy.h>
#include <solvers/multicolor_dilu_solver.h>
#include <solvers/block_common_solver.h>
#include <gaussian_elimination.h>
#include <basic_types.h>
#include <util.h>
#include <texture.h>
#include <ld_functions.h>
#include <matrix_io.h>
#include <thrust/logical.h>
#include <sm_utils.inl>
#include <amgx_types/util.h>
#include <algorithm>
#define AMGX_ILU_COLORING
namespace amgx
{
namespace multicolor_dilu_solver
{
enum { CTA_SIZE = 128, WARP_SIZE = 32 };
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, int NUM_WARP_ITERS_PER_BLOCK >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_setup_NxN_kernel_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_diag,
const Matrix_type *__restrict A_vals,
Matrix_type *__restrict Einv,
const int *sorted_rows_by_color,
const int *row_colors,
const int num_rows_per_color,
const int current_color )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Shared memory to broadcast column IDs.
__shared__ volatile int s_a_col_ids[CTA_SIZE];
__shared__ volatile int s_a_col_its[CTA_SIZE];
// Each thread keeps its own pointer.
volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id];
volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id];
// Shared memory to store the matrices.
__shared__ volatile Vector_type s_A_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
__shared__ volatile Vector_type s_B_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_A_mtx = &s_A_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
volatile Vector_type *my_s_B_mtx = &s_B_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
// Shared memory to store the index of the element Aji.
__shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA];
// Each thread keeps its own pointer.
volatile int *my_s_A_ji = &s_A_ji[warp_id];
// Precompute, for each warp iteration, the (row, column) coordinates of the block element handled by this lane.
int idx[NUM_WARP_ITERS_PER_BLOCK];
int idy[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
const int id = (WARP_SIZE * wb + lane_id) % NxN;
idx[wb] = id / N;
idy[wb] = id % N;
}
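// Example: for N = 8 (NxN = 64) the block is covered in two warp iterations;
// lane 7 maps to element (row 0, col 7) at wb = 0 and (row 4, col 7) at wb = 1.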
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = -1;
if ( a_row_it < num_rows_per_color )
{
a_row_id = sorted_rows_by_color[a_row_it];
}
// Load the diagonal.
Vector_type e_out[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
e_out[wb] = (Vector_type)0.0;
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ( a_row_id != -1 && (wb * WARP_SIZE + lane_id) < NxN)
{
e_out[wb] = A_vals[NxN * A_diag[a_row_id] + wb * WARP_SIZE + lane_id];
}
// Skip the 1st iteration of the outer-loop (that loop runs on the host).
if ( current_color != 0 )
{
// Ranges of the rows.
int a_col_begin(0), a_col_end(0);
if ( a_row_id != -1 )
{
a_col_begin = A_rows[a_row_id ];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the elements in the columns.
for ( ; a_col_begin < a_col_end ; a_col_begin += NxN )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id;
// The identifier of the column if the iterator is valid.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color )
{
a_col_id = a_col_tmp;
}
// When the diagonal is stored inside the matrix, we have to reject it. We
// could be using a template parameter but it's not needed since that
// rejection is really cheap (a couple of extra cycles -- CMP+MOV).
if ( a_col_id == a_row_id )
{
a_col_id = -1;
}
// We partition valid and invalid column ids. Valid ones come first.
int vote = utils::ballot( a_col_id != -1 );
int ones = __popc( vote );
int dest = __popc( vote & utils::lane_mask_lt() );
if ( a_col_id == -1 )
{
dest = ones + lane_id - dest;
}
my_s_a_col_ids[dest] = a_col_id;
my_s_a_col_its[dest] = a_col_it;
// Temporary storage with zeros for OOB
Vector_type my_A[NUM_WARP_ITERS_PER_BLOCK], my_B[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_A[wb] = (Vector_type)0.0;
my_B[wb] = (Vector_type)0.0;
}
// Threads collaborate to load the rows.
for ( int k = 0 ; k < WARP_SIZE ; ++k )
{
// Exchange column indices.
const int uniform_a_col_id = my_s_a_col_ids[k];
// Early exit.
if ( uniform_a_col_id == -1 )
{
break;
}
// Load the iterator.
const int uniform_a_col_it = my_s_a_col_its[k];
// Load the two matrices.
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ((wb * WARP_SIZE + lane_id) < NxN)
{
my_A[wb] = A_vals[NxN * uniform_a_col_it + wb * WARP_SIZE + lane_id];
my_B[wb] = Einv [NxN * uniform_a_col_id + wb * WARP_SIZE + lane_id];
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb];
my_s_B_mtx[lane_id + wb * WARP_SIZE] = my_B[wb];
}
// Compute the product of matrices.
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_A[wb] = (Vector_type)0.0;
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
my_A[wb] += my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]];
}
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ((wb * WARP_SIZE + lane_id) < NxN)
{
my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb];
}
// We're looking for columns in the two rows we're interested in.
int b_col_it = A_rows[uniform_a_col_id ];
int b_col_end = A_rows[uniform_a_col_id + 1];
// Init the marker to -1.
if ( lane_id == 0 )
{
*my_s_A_ji = -1;
}
// Run the loop.
b_col_it += lane_id;
int shared_found = utils::ballot( lane_id == 0 && uniform_a_col_id == -1 );
do
{
bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id;
if ( found )
{
*my_s_A_ji = b_col_it;
}
shared_found = shared_found | utils::ballot(found);
b_col_it += NxN;
}
while ( __popc( shared_found ) == 0 && utils::any( b_col_it < b_col_end ) );
// Load the blocks.
const int w_aji = *my_s_A_ji;
Vector_type my_C[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_C[wb] = (Vector_type)0.0;
if ( w_aji != -1 && (wb * WARP_SIZE + lane_id) < NxN)
{
my_C[wb] = A_vals[NxN * w_aji + wb * WARP_SIZE + lane_id];
}
my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_C[wb];
}
// Update e_out.
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
e_out[wb] -= my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]];
}
}
}
} // a_col_begin < a_col_end
} // current_color != 0
// Store e_out in A
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id] = e_out[wb];
}
// Invert the matrices.
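// This is an in-place Gauss-Jordan elimination distributed over the warp: each
// pass normalizes pivot row `row`, updates the remaining rows, and rewrites the
// pivot column, so the shared tile ends up holding the inverse of e_out.
// Near-zero pivots are replaced by epsilon(diag_tmp) to avoid division by zero.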
#pragma unroll
for ( int row = 0 ; row < N ; ++row )
{
Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row];
if ( isNotCloseToZero(diag_tmp) )
{
diag = Vector_type(1) / diag_tmp;
}
else
{
diag = Vector_type(1) / epsilon(diag_tmp);
}
if ( lane_id < N && lane_id != row)
{
my_s_A_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] * diag;
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ( idx[wb] != row && idy[wb] != row)
{
my_s_A_mtx[wb * WARP_SIZE + lane_id] = my_s_B_mtx[wb * WARP_SIZE + lane_id] - my_s_B_mtx[N * idx[wb] + row] * my_s_B_mtx[N * row + idy[wb]];
}
if ( lane_id < N )
{
Vector_type tmp = diag;
if ( lane_id != row )
{
tmp = -my_s_A_mtx[N * lane_id + row] * diag;
}
my_s_A_mtx[N * lane_id + row] = tmp;
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id];
}
}
// Store the results to Einv.
if ( a_row_id != -1 )
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if (wb * WARP_SIZE + lane_id < NxN)
{
Einv[NxN * a_row_id + wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id];
}
}
}
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_setup_NxN_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_diag,
const Matrix_type *__restrict A_vals,
Matrix_type *__restrict Einv,
const int *sorted_rows_by_color,
const int *row_colors,
const int num_rows_per_color,
const int current_color )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN;
// Upper-bound on the number of items per warp.
const int NUM_ITEMS_PER_WARP_CEIL = (WARP_SIZE + NxN - 1) / NxN;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
// Useful index to compute matrix products.
const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N;
const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N;
// We need NxN to compute a NxN block. Encode a mask for the first block.
int mask_tmp = utils::ballot( lane_id_div_NxN == 0 );
// Mask for ballots. We shift the mask with NxN active bits by the needed number of bits.
const int mask_NxN = mask_tmp << (lane_id_div_NxN * __popc(mask_tmp));
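// mask_tmp has one bit set for each lane of the first NxN-sized group; shifting it
// by lane_id_div_NxN * NxN gives each group its own ballot mask, so the __popc()
// counts below only consider votes coming from the lanes that share this block.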
// Shared memory to broadcast column IDs.
__shared__ volatile int s_a_col_ids[CTA_SIZE];
__shared__ volatile int s_a_col_its[CTA_SIZE];
// Each thread keeps its own pointer.
volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id_mod_NxN];
volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id_mod_NxN];
// Shared memory to store the matrices.
__shared__ volatile Vector_type s_A_mtx[CTA_SIZE];
__shared__ volatile Vector_type s_B_mtx[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_A_mtx = &s_A_mtx[threadIdx.x - lane_id_mod_NxN];
volatile Vector_type *my_s_B_mtx = &s_B_mtx[threadIdx.x - lane_id_mod_NxN];
// Shared memory to store the index of the element Aji.
__shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA * NUM_ITEMS_PER_WARP_CEIL];
// Each thread keeps its own pointer.
volatile int *my_s_A_ji = &s_A_ji[warp_id * NUM_ITEMS_PER_WARP_CEIL + lane_id_div_NxN];
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN;
// Iterate over the rows of the matrix. One warp per row.
for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID )
{
// Is the thread active? For example, for 5x5 only the first 25 threads are active per warp.
// At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals
// to false ; that's the common trick to determine if a number is a power of 2).
int is_active = true;
if ( NxN & (NxN - 1) )
{
is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP;
}
int a_row_id = -1;
if ( is_active && a_row_it < num_rows_per_color )
{
a_row_id = sorted_rows_by_color[a_row_it];
}
// Load the diagonal.
Vector_type e_out(0);
if ( a_row_id != -1 )
{
e_out = A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN];
}
// Skip the 1st iteration of the outer-loop (that loop runs on the host).
if ( current_color != 0 )
{
// Ranges of the rows.
int a_col_begin(0), a_col_end(0);
if ( a_row_id != -1 )
{
a_col_begin = A_rows[a_row_id ];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the elements in the columns.
for ( ; a_col_begin < a_col_end ; a_col_begin += NxN )
{
unsigned int active_mask = utils::activemask();
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_NxN;
// The identifier of the column if the iterator is valid.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color )
{
a_col_id = a_col_tmp;
}
// When the diagonal is stored inside the matrix, we have to reject it. We
// could be using a template parameter but it's not needed since that
// rejection is really cheap (a couple of extra cycles -- CMP+MOV).
if ( a_col_id == a_row_id )
{
a_col_id = -1;
}
// We partition valid and invalid column ids. Valid ones come first.
int vote = utils::ballot( a_col_id != -1, active_mask ) & mask_NxN;
int ones = __popc( vote );
int dest = __popc( vote & utils::lane_mask_lt() );
if ( a_col_id == -1 )
{
dest = ones + lane_id_mod_NxN - dest;
}
my_s_a_col_ids[dest] = a_col_id;
my_s_a_col_its[dest] = a_col_it;
// Threads collaborate to load the rows.
for ( int k = 0 ; k < NxN ; ++k )
{
// Exchange column indices.
const int uniform_a_col_id = my_s_a_col_ids[k];
// Early exit.
if ( utils::all( uniform_a_col_id == -1, active_mask ) )
{
break;
}
// Load the iterator.
const int uniform_a_col_it = my_s_a_col_its[k];
// Load the two matrices.
Vector_type my_A(0), my_B(0);
if ( uniform_a_col_id != -1 )
{
my_A = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
my_B = Einv [NxN * uniform_a_col_id + lane_id_mod_NxN];
}
my_s_A_mtx[lane_id_mod_NxN] = my_A;
my_s_B_mtx[lane_id_mod_NxN] = my_B;
utils::syncwarp(active_mask);
// Compute the product of matrices.
Vector_type tmp(0);
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
tmp += my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N];
}
my_s_A_mtx[lane_id_mod_NxN] = tmp;
// We're looking for columns in the two rows we're interested in.
int b_col_it(0), b_col_end(0);
if ( is_active && uniform_a_col_id != -1 )
{
b_col_it = A_rows[uniform_a_col_id ];
b_col_end = A_rows[uniform_a_col_id + 1];
}
// Init the marker to -1.
if ( lane_id_mod_NxN == 0 )
{
*my_s_A_ji = -1;
}
// Run the loop.
b_col_it += lane_id_mod_NxN;
int shared_found = utils::ballot( lane_id_mod_NxN == 0 && uniform_a_col_id == -1, active_mask );
do
{
bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id;
if ( found )
{
*my_s_A_ji = b_col_it;
}
shared_found = shared_found | utils::ballot(found, active_mask);
b_col_it += NxN;
}
while ( __popc( shared_found ) < NUM_ITEMS_PER_WARP && utils::any( b_col_it < b_col_end, active_mask ) );
// Load the blocks.
const int w_aji = *my_s_A_ji;
Vector_type my_C(0);
if ( w_aji != -1 )
{
my_C = A_vals[NxN * w_aji + lane_id_mod_NxN];
}
my_s_B_mtx[lane_id_mod_NxN] = my_C;
// Update e_out.
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
e_out -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N];
}
}
} // a_col_begin < a_col_end
} // current_color != 0
// Store e_out in A
my_s_A_mtx[lane_id_mod_NxN] = e_out;
// Invert the matrices.
#pragma unroll
for ( int row = 0 ; row < N ; ++row )
{
Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row];
if ( isNotCloseToZero(diag_tmp) )
{
diag = Vector_type(1) / diag_tmp;
}
else
{
diag = Vector_type(1) / epsilon(diag_tmp);
}
if ( is_active && lane_id_mod_NxN_div_N == 0 && lane_id_mod_NxN_mod_N != row )
{
my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N] *= diag;
}
if ( is_active && lane_id_mod_NxN_div_N != row && lane_id_mod_NxN_mod_N != row )
{
my_s_A_mtx[lane_id_mod_NxN] -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + row] * my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N];
}
if ( is_active && lane_id_mod_NxN_div_N == 0 )
{
Vector_type tmp = diag;
if ( lane_id_mod_NxN_mod_N != row )
{
tmp = -my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] * diag;
}
my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] = tmp;
}
}
// Store the results to Einv.
if ( a_row_id != -1 )
{
Einv[NxN * a_row_id + lane_id_mod_NxN] = my_s_A_mtx[lane_id_mod_NxN];
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_setup_1x1_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_diag,
const Matrix_type *__restrict A_vals,
Matrix_type *__restrict Einv,
const int *sorted_rows_by_color,
const int *row_colors,
const int num_rows_per_color,
const int current_color )
{
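// Scalar DILU recurrence: for row i of the current color,
// E_ii = a_ii - sum_{j colored earlier} a_ij * E_jj^{-1} * a_ji,
// and Einv[i] stores 1/E_ii (the warp accumulates the sum, lane 0 finalizes it).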
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Number of items per grid.
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NTPR = lane_id / NUM_THREADS_PER_ROW;
const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW;
// Shared memory to broadcast column IDs.
__shared__ int s_a_col_ids[CTA_SIZE];
// Each thread keeps its own pointer.
int *my_s_a_col_ids = &s_a_col_ids[warp_id * WARP_SIZE];
// Shared memory to store the matrices.
__shared__ int s_A_ji[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
int *my_s_A_ji = &s_A_ji[warp_id * WARP_SIZE];
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_WARPS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load the diagonal.
Vector_type e_out(0);
// Skip the 1st iteration of the outer-loop (that loop runs on the host).
if ( current_color != 0 )
{
// Ranges of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Iterate over the elements in the columns.
for ( ; a_col_begin < a_col_end ; a_col_begin += WARP_SIZE )
{
// Each thread loads a single element.
int a_col_it = a_col_begin + lane_id;
// The identifier of the column if the iterator is valid.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color )
{
a_col_id = a_col_tmp;
}
// When the diagonal is stored inside the matrix, we have to reject it. We
// could be using a template parameter but it's not needed since that
// rejection is really cheap (a couple of extra cycles -- CMP+MOV).
if ( a_col_id == a_row_id )
{
a_col_id = -1;
}
// We partition valid and invalid column ids. Valid ones come first.
int vote = utils::ballot( a_col_id != -1 );
int ones = __popc( vote );
int dest = __popc( vote & utils::lane_mask_lt() );
if ( a_col_id == -1 )
{
dest = ones + lane_id - dest;
}
my_s_a_col_ids[dest] = a_col_id;
// Reset A_jis.
my_s_A_ji[lane_id] = -1;
__syncwarp();
// Threads collaborate to load the rows.
for ( int k = 0 ; k < ones ; k += WARP_SIZE / NUM_THREADS_PER_ROW )
{
const int local_k = k + lane_id_div_NTPR;
// Exchange column indices.
int uniform_a_col_id = -1;
if ( local_k < ones )
{
uniform_a_col_id = my_s_a_col_ids[local_k];
}
// We look for columns in the rows we're interested in.
int b_col_it(0), b_col_end(0);
if ( uniform_a_col_id != -1 )
{
b_col_it = A_rows[uniform_a_col_id ];
b_col_end = A_rows[uniform_a_col_id + 1];
}
// Run the loop.
b_col_it += lane_id_mod_NTPR;
int shared_found = utils::ballot( lane_id_mod_NTPR == 0 && uniform_a_col_id == -1 );
do
{
bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id;
if ( found )
{
my_s_A_ji[local_k] = b_col_it;
}
shared_found = shared_found | utils::ballot(found);
b_col_it += NUM_THREADS_PER_ROW;
}
while ( __popc( shared_found ) < WARP_SIZE / NUM_THREADS_PER_ROW && utils::any( b_col_it < b_col_end ) );
}
__syncwarp();
// Where to get my A_ji from (if any).
int a_ji_it = my_s_A_ji[dest];
// Grab A_jis.
Matrix_type a_ji(0);
if ( a_ji_it != -1 )
{
a_ji = A_vals[a_ji_it];
}
// Update e_out.
if ( a_col_id != -1 )
{
e_out += a_ji * Einv[a_col_id] * A_vals[a_col_it];
}
} // a_col_begin < a_col_end
} // current_color != 0
// Reduce the e_outs in one value.
#pragma unroll
for ( int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1 )
{
e_out += utils::shfl_xor( e_out, mask );
}
// Store the result.
if ( lane_id == 0 )
{
Matrix_type res = A_vals[A_diag[a_row_id]] - e_out;
if ( res != Matrix_type(0) )
{
res = Matrix_type(1) / res;
}
Einv[a_row_id] = static_cast<Vector_type>(res);
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int N, bool ROW_MAJOR, int WARP_SIZE, typename Value_type >
static __device__ __forceinline__
Value_type reduce_distributed_vectors( Value_type x, int is_leader )
{
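// Each small vector of length N is spread over N lanes (stride 1 in the row-major
// layout, stride N otherwise). For non-power-of-two N, shfl_down folds the partial
// sums onto the leader lane only; for power-of-two N, the shfl_xor butterfly
// leaves every participating lane with the full sum.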
if ( N & (N - 1) )
{
#pragma unroll
for ( int i = 1 ; i < N ; ++i )
{
Value_type other_x = utils::shfl_down( x, ROW_MAJOR ? i : N * i );
if ( is_leader )
{
x += other_x;
}
}
}
else
{
#pragma unroll
for ( int i = 1 ; i < N ; i <<= 1 )
{
x += utils::shfl_xor( x, ROW_MAJOR ? i : N * i );
}
}
return x;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_NxN_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
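// Forward sweep of the multicolor DILU smoother (a sketch of the per-row math):
// for each row i of the current color this kernel accumulates
//   r_i = b_i - sum_j A_ij * (x_j + delta_j if color(j) < current color)
// and stores delta_i = Einv_i * r_i, where Einv_i is the dense NxN block
// produced by the setup kernels above.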
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
// Useful index to compute matrix products.
const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N;
const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N;
// Where to get my data from when I use SHFL.
const int shfl_offset = lane_id - lane_id_mod_NxN;
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN];
// Is the thread active? For example, for 5x5 only the first 25 threads are active per warp.
// At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals
// to false ; that's the common trick to determine if a number is a power of 2).
int is_active = true;
if ( NxN & (NxN - 1) )
{
is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP;
}
// Determine which NxN block the threads work with.
int a_row_it = num_rows_per_color;
if ( is_active )
{
a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN;
}
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id_mod_NxN_div_N]);
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
my_bmAx = b[N * a_row_id + lane_id_mod_NxN_mod_N];
}
}
// Don't do anything if X is zero.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Each warp load column indices of 32 nonzero blocks
for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += NxN )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_NxN;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Count the number of active columns.
// int vote = utils::ballot(aColId != -1);
// The number of iterations.
// int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
// Loop over columns. We compute N columns per iteration.
for ( int k = 0 ; k < NxN ; k += N )
{
int my_k = k + lane_id_mod_NxN_div_N;
// Load N blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid )
{
my_x += delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N];
}
my_s_mem[lane_id_mod_NxN] = my_x;
// Load N blocks of A.
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && is_active && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 )
{
my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
}
if ( ROW_MAJOR )
{
my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N];
}
else
{
my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N];
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN];
// Reduce bmAx terms.
int is_leader = lane_id_mod_NxN_div_N == 0;
if ( ROW_MAJOR )
{
is_leader = lane_id_mod_NxN_mod_N == 0;
}
my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_s_mem[lane_id_mod_NxN_div_N] = my_bmAx;
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
my_s_mem[lane_id_mod_NxN_mod_N] = my_bmAx;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N];
}
else
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_div_N];
}
// Reduce bmAx terms.
my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Store the results.
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
delta[N * a_row_id + lane_id_mod_NxN_div_N] = my_bmAx;
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
delta[N * a_row_id + lane_id_mod_NxN_mod_N] = my_bmAx;
}
}
}
}
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG, int NUM_WARP_ITERS_PER_BLOCK >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_NxN_kernel_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of rows computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA;
// Number of rows per grid.
const int NUM_ITEMS_PER_GRID = CTA_SIZE;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
// Useful index to compute matrix products.
const int lane_id_div_N = lane_id / N;
const int lane_id_mod_N = lane_id % N; // id of a lane inside the block
const int blocks_per_warp = WARP_SIZE / N; // we process this many column blocks per warp, per row
const int row_elems_per_warp = blocks_per_warp * N;
// Shared to store bmAx
__shared__ volatile Vector_type bmAx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
volatile Vector_type *my_bmAx_s = &bmAx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( lane_id < N )
{
my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id]);
}
#pragma unroll
for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++)
{
my_bmAx_s[WARP_SIZE * i + lane_id] = 0.0;
}
// Don't do anything if X is zero.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Each warp load column indices of 32 nonzero blocks
for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += WARP_SIZE ) // NxN
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Loop over columns. We compute blocks_per_warp columns per iteration.
for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp )
{
// id of the processed block by this thread
int my_k = k + lane_id_div_N;
// Load N blocks of X (if valid)
int uniform_a_col_id = utils::shfl( a_col_id, my_k );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp)
{
my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_N]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid && lane_id < row_elems_per_warp)
{
my_x += delta[N * uniform_a_col_id + lane_id_mod_N];
}
//my_s_mem[lane_id] = my_x;
#pragma unroll
for ( int i = 0 ; i < blocks_per_warp ; ++i )
{
// k-th batch of blocks, i-th block: each thread processes a column/row of a_it = uniform_a_col_tmp
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
// check if we are going out of bounds/color
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
// sweep over the block with the whole warp
if (uniform_a_col_it != -1)
{
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 && block_inside_id < NxN)
{
my_val = A_vals[NxN * uniform_a_col_it + block_inside_id];
}
my_bmAx_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); // MOD IS SLOW!
block_inside_id += WARP_SIZE;
}
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_Einv[j] = 0.0;
}
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
if ((WARP_SIZE * j + lane_id) < NxN)
{
my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id];
}
}
// Reduce bmAx terms.
{
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
if ( lane_id < N )
{
my_bmAx += my_bmAx_s[N * lane_id + i];
}
}
}
// Update the diagonal term.
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_bmAx_s[block_inside_id] = my_Einv[j] * utils::shfl(my_bmAx, block_inside_id % N);
block_inside_id += WARP_SIZE;
}
// Reduce bmAx terms.
{
my_bmAx = 0.0;
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
int idx = N * lane_id + i;
if ( lane_id < N )
{
my_bmAx += my_bmAx_s[idx];
}
}
}
// Store the results.
if ( lane_id < N )
{
delta[N * a_row_id + lane_id] = my_bmAx;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_4x4_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / 16;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_mod_16 = lane_id % 16;
// Useful index to compute matrix products.
const int lane_id_mod_16_div_4 = lane_id_mod_16 / 4;
const int lane_id_mod_16_mod_4 = lane_id_mod_16 % 4;
// Where to get my data from when I use SHFL.
const int shfl_offset = lane_id - lane_id_mod_16;
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_16];
// Determine which 16 block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + threadIdx.x / 16;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( ROW_MAJOR )
{
if ( lane_id_mod_16_mod_4 == 0 )
{
my_bmAx = __cachingLoad(&b[4 * a_row_id + lane_id_mod_16_div_4]);
}
}
else
{
if ( lane_id_mod_16_div_4 == 0 )
{
my_bmAx = b[4 * a_row_id + lane_id_mod_16_mod_4];
}
}
// Don't do anything if X is zero.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Each warp load column indices of 32 nonzero blocks
for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += 16 )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_16;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Count the number of active columns.
// int vote = utils::ballot(aColId != -1);
// The number of iterations.
// int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
// Loop over columns. We compute 8 columns per iteration.
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + lane_id_mod_16_div_4;
// Load N blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = __cachingLoad(&x[4 * uniform_a_col_id + lane_id_mod_16_mod_4]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid )
{
my_x += delta[4 * uniform_a_col_id + lane_id_mod_16_mod_4];
}
my_s_mem[lane_id_mod_16] = my_x;
// Load N blocks of A.
#pragma unroll
for ( int i = 0 ; i < 4 ; ++i )
{
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 )
{
my_val = A_vals[16 * uniform_a_col_it + lane_id_mod_16];
}
if ( ROW_MAJOR )
{
my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_mod_4];
}
else
{
my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_div_4];
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv = Einv[16 * a_row_id + lane_id_mod_16];
// Reduce bmAx terms.
int is_leader = lane_id_mod_16_div_4 == 0;
if ( ROW_MAJOR )
{
is_leader = lane_id_mod_16_mod_4 == 0;
}
my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( lane_id_mod_16_mod_4 == 0 )
{
my_s_mem[lane_id_mod_16_div_4] = my_bmAx;
}
}
else
{
if ( lane_id_mod_16_div_4 == 0 )
{
my_s_mem[lane_id_mod_16_mod_4] = my_bmAx;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_mod_4];
}
else
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_div_4];
}
// Reduce bmAx terms.
my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Store the results.
if ( ROW_MAJOR )
{
if ( lane_id_mod_16_mod_4 == 0 )
{
delta[4 * a_row_id + lane_id_mod_16_div_4] = my_bmAx;
}
}
else
{
if ( lane_id_mod_16_div_4 == 0 )
{
delta[4 * a_row_id + lane_id_mod_16_mod_4] = my_bmAx;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int CTA_SIZE, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_4x4_kernel_row_major_vec4( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of half warps per CTA.
const int NUM_HALF_WARPS = CTA_SIZE / 16;
// Coordinates of the thread.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Coordinates of the thread in the CTA.
const int thread_id_div_16 = threadIdx.x / 16;
const int thread_id_mod_16 = threadIdx.x % 16;
// Useful constants.
const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4;
const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4;
const int shfl_offset = 16 * (lane_id / 16);
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16];
// The iterator over rows.
int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS )
{
unsigned int active_mask = utils::activemask();
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( thread_id_mod_16_div_4 == 0 )
{
my_bmAx = __cachingLoad(&b[4 * a_row_id + thread_id_mod_16_mod_4]);
}
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If it has an external diagonal, we need one more item to put the diag.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Each warp load column indices of 32 nonzero blocks
for ( ; a_col_begin < a_col_max ; a_col_begin += 16 )
{
unsigned int active_mask_inner = utils::activemask();
int a_col_it = a_col_begin + thread_id_mod_16;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = __cachingLoad(&A_cols[a_col_it]);
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Loop over columns. We compute 8 columns per iteration.
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + thread_id_mod_16_div_4;
// Load 8 blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k, warpSize, active_mask_inner );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = __cachingLoad(&x[4 * uniform_a_col_id + thread_id_mod_16_mod_4]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid )
{
my_x += delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4];
}
my_s_mem[thread_id_mod_16] = my_x;
int uniform_a_col_tmp = a_col_begin + my_k, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
Matrix_type my_vals[4] = { Matrix_type(0) };
if ( uniform_a_col_it != -1 )
{
utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] );
}
my_bmAx -= my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0];
my_bmAx -= my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1];
my_bmAx -= my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2];
my_bmAx -= my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3];
}
}
// Load Einvs.
Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16];
// Reduce bmAx terms.
my_bmAx += utils::shfl_xor( my_bmAx, 4, warpSize, active_mask );
my_bmAx += utils::shfl_xor( my_bmAx, 8, warpSize, active_mask );
// Update the shared terms.
if ( thread_id_mod_16_div_4 == 0 )
{
my_s_mem[thread_id_mod_16_mod_4] = my_bmAx;
}
// Update the diagonal term.
my_bmAx = my_Einv * my_s_mem[thread_id_mod_16_mod_4];
// Reduce bmAx terms.
my_bmAx += utils::shfl_xor( my_bmAx, 1, warpSize, active_mask );
my_bmAx += utils::shfl_xor( my_bmAx, 2, warpSize, active_mask );
// Store the results.
if ( thread_id_mod_16_mod_4 == 0 )
{
delta[4 * a_row_id + thread_id_mod_16_div_4] = my_bmAx;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_1x1_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of items per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
// Number of items per grid.
const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW;
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW);
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx = amgx::types::util<Vector_type>::get_zero();
if ( lane_id_mod_NTPR == 0 )
{
my_bmAx = __cachingLoad(&b[a_row_id]);
}
// If it has an external diag.
if ( HAS_EXTERNAL_DIAG && lane_id_mod_NTPR == 0 )
{
my_bmAx -= A_vals[A_diag[a_row_id]] * x[a_row_id];
}
// Don't do anything if X is zero.
int a_col_it = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
//if( HAS_EXTERNAL_DIAG )
// ++a_col_end;
// Each warp load column indices of 32 nonzero blocks
for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW )
{
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
// Ignore the diagonal element since its color is smaller and it has been accounted for above
if (HAS_EXTERNAL_DIAG && a_col_id == a_row_id)
{
a_col_id = -1;
}
// Load x.
Vector_type my_x(0);
if ( a_col_id != -1 )
{
my_x = __cachingLoad(&x[a_col_id]);
}
// Is it really a valid column (due to coloring).
int valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
valid = a_col_id >= boundary_index;
}
else
{
valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
valid = row_colors[a_col_id] < current_color;
}
#endif
// Load my x value.
if ( valid )
{
my_x += delta[a_col_id];
}
// Load my item from A.
Matrix_type my_val(0);
if ( a_col_it < a_col_end )
{
my_val = A_vals[a_col_it];
}
// Update bmAx.
my_bmAx -= my_val * my_x;
}
// Reduce bmAx terms.
#pragma unroll
for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 )
{
my_bmAx += utils::shfl_xor( my_bmAx, mask );
}
// Store the results.
if ( lane_id_mod_NTPR == 0 )
{
delta[a_row_id] = Einv[a_row_id] * my_bmAx;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_backward_NxN_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
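// Backward sweep (sketch): for each row i of the current color this kernel forms
//   Delta_i = delta_i - Einv_i * sum_{color(j) > current color} A_ij * Delta_j
// and applies the damped update x_i += weight * Delta_i.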
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
// Useful index to compute matrix products.
const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N;
const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N;
// Where to get my data from when I use SHFL.
const int shfl_offset = lane_id - lane_id_mod_NxN;
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN];
// Is the thread active? For example, for 5x5 only the first 25 threads are active per warp.
// At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals
// to false ; that's the common trick to determine if a number is a power of 2).
int is_active = true;
if ( NxN & (NxN - 1) )
{
is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP;
}
// Determine which NxN block the threads work with.
int a_row_it = num_rows_per_color;
if ( is_active )
{
a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN;
}
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_delta(0);
// Don't do anything if X is zero.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Each warp load column indices of 32 nonzero blocks
for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += NxN )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_NxN;
// Get the ID of the column.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
// Make sure the column is interesting.
#ifdef AMGX_ILU_COLORING
int valid = false;
if ( a_col_tmp != -1 && current_color != 0 )
{
if ( boundary_coloring == LAST )
{
valid = a_col_tmp >= boundary_index;
}
else
{
valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color;
}
}
#else
int valid = false;
if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color )
{
valid = true;
}
#endif
// Set the column id.
if ( valid )
{
a_col_id = a_col_tmp;
}
// Count the number of active columns.
// int vote = utils::ballot(aColId != -1);
// The number of iterations.
// int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
// Loop over columns. We compute N columns per iteration.
for ( int k = 0 ; k < NxN ; k += N )
{
int my_k = k + lane_id_mod_NxN_div_N;
// Load N blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = Delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N];
}
my_s_mem[lane_id_mod_NxN] = my_x;
// Load N blocks of A.
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
//if( uniform_a_col_id == -1 )
// break;
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 )
{
my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
}
if ( ROW_MAJOR )
{
my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N];
}
else
{
my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N];
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Matrix_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN];
// Reduce bmAx terms.
int is_leader = lane_id_mod_NxN_div_N == 0;
if ( ROW_MAJOR )
{
is_leader = lane_id_mod_NxN_mod_N == 0;
}
my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader );
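// Note: reduce_distributed_vectors (not shown in this excerpt) is assumed to sum the per-lane partial
// products across the N lanes that share a block row, so that the leader lane of each group ends up
// holding the complete sum for its row of the block.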
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_s_mem[lane_id_mod_NxN_div_N] = my_delta;
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
my_s_mem[lane_id_mod_NxN_mod_N] = my_delta;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N];
}
else
{
my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_div_N];
}
// Reduce bmAx terms.
my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader );
// Store the results.
if ( ROW_MAJOR )
{
const int offset = N * a_row_id + lane_id_mod_NxN_div_N;
Vector_type my_b(0), my_x(0);
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( lane_id_mod_NxN_mod_N == 0 )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
else
{
const int offset = N * a_row_id + lane_id_mod_NxN_mod_N;
Vector_type my_b(0), my_x(0);
if ( lane_id_mod_NxN_div_N == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( lane_id_mod_NxN_div_N == 0 )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, int NUM_WARP_ITERS_PER_BLOCK >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_backward_NxN_kernel_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_N = lane_id / N;
const int lane_id_mod_N = lane_id % N; // id of a lane inside the block
const int blocks_per_warp = WARP_SIZE / N; // we process this many columns per warp per row
const int row_elems_per_warp = blocks_per_warp * N;
// Shared memory to store the delta terms.
__shared__ volatile Vector_type delta_s[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
volatile Vector_type *my_delta_s = &delta_s[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
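// Layout note: each warp owns NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE consecutive slots, enough to hold one
// full NxN block even when NxN exceeds the warp size (e.g. 8x8 = 64 or 10x10 = 100 values).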
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Accumulator
Vector_type my_delta(0);
//Vector_type mAx[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++)
{
my_delta_s[WARP_SIZE * i + lane_id] = 0.0;
}
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Each warp loads the column indices of 32 nonzero blocks at a time
for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += WARP_SIZE )
{
// Each thread loads a single column index.
int a_col_it = a_col_begin + lane_id;
// Get the ID of the column.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
// Make sure the column is interesting.
#ifdef AMGX_ILU_COLORING
int valid = false;
if ( a_col_tmp != -1 && current_color != 0 )
{
if ( boundary_coloring == LAST )
{
valid = a_col_tmp >= boundary_index;
}
else
{
valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color;
}
}
#else
int valid = false;
if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color )
{
valid = true;
}
#endif
// Set the column id.
if ( valid )
{
a_col_id = a_col_tmp;
}
// Loop over columns. We compute blocks_per_warp columns per iteration.
for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp )
{
// id of the processed block by this thread
int my_k = k + lane_id_div_N;
// Load N blocks of X (if valid)
int uniform_a_col_id = utils::shfl( a_col_id, my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp)
{
my_x = Delta[N * uniform_a_col_id + lane_id_mod_N];
}
// Load blocks of A.
// for each block in a batch
#pragma unroll
for ( int i = 0 ; i < blocks_per_warp ; ++i )
{
// k-th batch of blocks, i-th block. Each thread processes a column/row of a_it = uniform_a_col_tmp.
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
// check if we are going out of bounds/color
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
// sweep with the whole warp
if (uniform_a_col_it != -1)
{
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 && block_inside_id < NxN)
{
my_val = A_vals[NxN * uniform_a_col_it + block_inside_id];
}
my_delta_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); //my_s_mem[N*i + block_inside_id % N]; // MOD IS SLOW!
block_inside_id += WARP_SIZE;
}
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_Einv[j] = 0.0;
}
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
if ((WARP_SIZE * j + lane_id) < NxN)
{
my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id];
}
}
// Reduce bmAx terms.
{
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
if ( lane_id < N )
{
my_delta += my_delta_s[N * lane_id + i];
}
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_delta_s[block_inside_id] = my_Einv[j] * utils::shfl(my_delta, block_inside_id % N);
block_inside_id += WARP_SIZE;
}
}
// Reduce bmAx terms.
{
my_delta = 0.0;
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
if ( lane_id < N )
{
my_delta += my_delta_s[N * lane_id + i];
}
}
}
// Store the results.
if ( ROW_MAJOR )
{
const int offset = N * a_row_id + lane_id;
Vector_type my_b(0), my_x(0);
if ( lane_id < N )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( lane_id < N )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename IndexType, typename ValueTypeA, typename ValueTypeB, typename WeightType, int CTA_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_backward_4x4_kernel( const IndexType *row_offsets,
const IndexType *column_indices,
const ValueTypeA *nonzero_values,
ValueTypeB *x,
const WeightType weight,
const int *sorted_rows_by_color,
const int *__restrict row_colors,
const ValueTypeA *Einv,
const ValueTypeB *delta,
ValueTypeB *Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const IndexType boundary_index)
{
const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA.
const int laneId = utils::lane_id();
const int halfWarpId = threadIdx.x / 16;
const int halfLaneId = threadIdx.x % 16;
const int halfLaneId_div_4 = halfLaneId / 4;
const int halfLaneId_mod_4 = halfLaneId % 4;
const int upperHalf = 16 * (laneId / 16);
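// Mapping: each half warp of 16 threads owns one 4x4 block; halfLaneId_div_4 / halfLaneId_mod_4 give the
// thread's row / column inside that block for the row-major path (the roles swap in the column-major branches).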
// Shared memory needed to exchange X and delta.
__shared__ volatile ValueTypeB s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId];
// Iterate over the rows of the matrix. One warp per two rows.
for ( int aRowIt = blockIdx.x * nHalfWarps + halfWarpId ; aRowIt < num_rows_per_color ; aRowIt += gridDim.x * nHalfWarps )
{
int aRowId = sorted_rows_by_color[aRowIt];
// Accumulator for this row of the update.
ValueTypeB my_delta(0);
// The range of the rows.
int aColBegin = row_offsets[aRowId ];
int aColEnd = row_offsets[aRowId + 1];
// Each half warp loads the column indices of 16 nonzero blocks at a time
for ( ; aColBegin < aColEnd ; aColBegin += 16 )
{
int aColIt = aColBegin + halfLaneId;
// Get the ID of the column.
int aColTmp = -1, aColId = -1;
if ( aColIt < aColEnd )
{
aColTmp = column_indices[aColIt];
}
#ifdef AMGX_ILU_COLORING
bool valid = (((aColTmp < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[aColTmp] > current_color)) || (aColTmp >= boundary_index && boundary_coloring == LAST));
if ( aColTmp != -1 && valid )
{
aColId = aColTmp;
}
#else
if ( aColTmp != -1 && row_colors[aColTmp] > current_color )
{
aColId = aColTmp;
}
#endif
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + halfLaneId_div_4;
// Exchange column indices.
int waColId = utils::shfl( aColId, upperHalf + my_k );
// Load 8 blocks of X if needed.
ValueTypeB my_x(0);
if ( waColId != -1 )
{
my_x = Delta[4 * waColId + halfLaneId_mod_4];
}
my_s_mem[halfLaneId] = my_x;
// Load 8 blocks of A.
#pragma unroll
for ( int i = 0 ; i < 4 ; ++i )
{
const int k_i = k + i;
int w_aColTmp = aColBegin + k_i, w_aColIt = -1;
if ( utils::shfl( aColId, upperHalf + k_i ) != -1 && w_aColTmp < aColEnd )
w_aColIt = w_aColTmp;
ValueTypeA my_val(0);
if ( w_aColIt != -1 )
{
my_val = nonzero_values[16 * w_aColIt + halfLaneId];
}
if ( ROW_MAJOR )
{
my_delta += my_val * my_s_mem[4 * i + halfLaneId_mod_4];
}
else
{
my_delta += my_val * my_s_mem[4 * i + halfLaneId_div_4];
}
}
} // Loop over k
} // Loop over aColIt
// Load EINV values.
ValueTypeA my_Einv = Einv[16 * aRowId + halfLaneId];
// Reduce delta terms.
if ( ROW_MAJOR )
{
my_delta += utils::shfl_xor( my_delta, 1 );
my_delta += utils::shfl_xor( my_delta, 2 );
}
else
{
my_delta += utils::shfl_xor( my_delta, 4 );
my_delta += utils::shfl_xor( my_delta, 8 );
}
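// The two xor-shuffles above form a butterfly reduction that sums the four per-column contributions of
// each block row; only the shuffle strides (1,2 vs. 4,8) change with the block storage order.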
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( halfLaneId_mod_4 == 0 )
{
my_s_mem[halfLaneId_div_4] = my_delta;
}
}
else
{
if ( halfLaneId_div_4 == 0 )
{
my_s_mem[halfLaneId_mod_4] = my_delta;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_delta = my_Einv * my_s_mem[halfLaneId_mod_4];
}
else
{
my_delta = my_Einv * my_s_mem[halfLaneId_div_4];
}
// Regroup results.
if ( ROW_MAJOR )
{
my_delta += utils::shfl_xor( my_delta, 1 );
my_delta += utils::shfl_xor( my_delta, 2 );
}
else
{
my_delta += utils::shfl_xor( my_delta, 4 );
my_delta += utils::shfl_xor( my_delta, 8 );
}
// Store the results.
if ( ROW_MAJOR )
{
int offset = 4 * aRowId + halfLaneId_div_4;
ValueTypeB my_b(0), my_x(0);
if ( halfLaneId_mod_4 == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x[offset];
}
my_delta = my_b - my_delta;
if ( halfLaneId_mod_4 == 0 )
{
x[offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
else
{
int offset = 4 * aRowId + halfLaneId_mod_4;
ValueTypeB my_b(0), my_x(0);
if ( halfLaneId_div_4 == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x[offset];
}
my_delta = my_b - my_delta;
if ( halfLaneId_div_4 == 0 )
{
x[offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int CTA_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_backward_4x4_kernel_row_major_vec4( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of half warps per CTA.
const int NUM_HALF_WARPS = CTA_SIZE / 16;
// Coordinates of the thread.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Coordinates of the thread in the CTA.
const int thread_id_div_16 = threadIdx.x / 16;
const int thread_id_mod_16 = threadIdx.x % 16;
// Useful constants.
const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4;
const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4;
const int shfl_offset = 16 * (lane_id / 16);
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16];
// The iterator over rows.
int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16;
// Iterate over the rows of the matrix. One half warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS )
{
unsigned int active_mask = utils::activemask();
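// Capture the currently active threads so the shuffles below can pass an explicit mask; this matters on
// architectures with independent thread scheduling (Volta and newer), where implicit warp synchrony is not guaranteed.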
int a_row_id = sorted_rows_by_color[a_row_it];
// Accumulator for this row of the update.
Vector_type my_delta(0);
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Each half warp loads the column indices of 16 nonzero blocks at a time
for ( ; a_col_begin < a_col_end ; a_col_begin += 16 )
{
unsigned int active_mask_inner = utils::activemask();
int a_col_it = a_col_begin + thread_id_mod_16;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = __cachingLoad(&A_cols[a_col_it]);
}
#ifdef AMGX_ILU_COLORING
int valid = false;
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == LAST )
{
valid = a_col_id >= boundary_index;
}
else
{
valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) > current_color;
}
}
#else
int valid = false;
if ( a_col_id != -1 && row_colors[a_col_id] > current_color )
{
valid = true;
}
#endif
// Set the column id.
if ( !valid )
{
a_col_id = -1;
}
// Loop over columns. We compute 8 columns per iteration.
#pragma unroll 2
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + thread_id_mod_16_div_4;
// Load 8 blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner );
Vector_type my_Delta(0);
if ( uniform_a_col_id != -1 )
{
my_Delta = Delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4];
}
my_s_mem[thread_id_mod_16] = my_Delta;
int uniform_a_col_it = a_col_begin + my_k;
if ( uniform_a_col_id == -1 || uniform_a_col_it >= a_col_end )
{
uniform_a_col_it = -1;
}
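// Each thread fetches one full row (row-major layout) of the 4x4 block with a single vectorized load
// below; avoiding four scalar loads is the point of this _vec4 specialization.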
Matrix_type my_vals[4] = { Matrix_type(0) };
if ( uniform_a_col_it != -1 )
{
utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] );
}
my_delta += my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0];
my_delta += my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1];
my_delta += my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2];
my_delta += my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3];
} // Loop over k
} // Loop over aColIt
// Load EINV values.
Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16];
// Reduce delta terms.
my_delta += utils::shfl_xor( my_delta, 4, warpSize, active_mask );
my_delta += utils::shfl_xor( my_delta, 8, warpSize, active_mask );
// Update the shared terms.
if ( thread_id_mod_16_div_4 == 0 )
{
my_s_mem[thread_id_mod_16_mod_4] = my_delta;
}
// Update the diagonal term.
my_delta = my_Einv * my_s_mem[thread_id_mod_16_mod_4];
// Regroup results.
my_delta += utils::shfl_xor( my_delta, 1, warpSize, active_mask );
my_delta += utils::shfl_xor( my_delta, 2, warpSize, active_mask );
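// After these xor-shuffles each group of four threads holds the same reduced value: one component of
// Einv * (accumulated delta), which is the update applied to this block row below.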
// Store the results.
int offset = 4 * a_row_id + thread_id_mod_16_div_4;
Vector_type my_b(0), my_x(0);
if ( thread_id_mod_16_mod_4 == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( thread_id_mod_16_mod_4 == 0 )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_backward_1x1_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of items per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
// Number of items per grid.
const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW;
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW);
// Iterate over the rows of the matrix. One group of NUM_THREADS_PER_ROW threads per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Accumulator for this row of the update.
Vector_type my_delta(0);
// The range of the row.
int a_col_it = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Each group of NUM_THREADS_PER_ROW threads loads that many column indices at a time
for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW )
{
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
// Is it really a valid column (due to coloring).
int valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
//if( boundary_coloring == LAST )
// valid = a_col_id >= boundary_index;
//else
// valid = a_col_id < boundary_index && row_colors[a_col_id] > current_color;
valid = (((a_col_id < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[a_col_id] > current_color)) || (a_col_id >= boundary_index && boundary_coloring == LAST));
}
#else
//if( a_col_id != -1 && current_color != 0 )
if ( a_col_id != -1 )
{
valid = row_colors[a_col_id] > current_color;
}
#endif
// Load my Delta value.
Vector_type my_Delta(0);
if ( valid )
{
my_Delta = Delta[a_col_id];
}
// Load my item from A.
Matrix_type my_val(0);
if ( valid )
{
my_val = A_vals[a_col_it];
}
// Update bmAx.
my_delta += my_val * my_Delta;
}
// Reduce bmAx terms.
#pragma unroll
for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 )
{
my_delta += utils::shfl_xor( my_delta, mask );
}
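// The loop above is a standard xor-shuffle tree reduction: after log2(NUM_THREADS_PER_ROW) steps every
// thread of the group holds the full dot product of its row with Delta.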
// Store the results.
if ( lane_id_mod_NTPR == 0 )
{
Vector_type my_x = __cachingLoad(&delta[a_row_id]) - Einv[a_row_id] * my_delta;
x [a_row_id] += weight * my_x;
Delta[a_row_id] = my_x;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_backward_NxN_kernel_skip( Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color )
{
const int NUM_ITEMS_PER_CTA = CTA_SIZE / N; // Number of updated block items per CTA
const int ITEM_ID = threadIdx.x / N;
const int ITEM_BLOCK_OFFSET = threadIdx.x % N;
const int is_active = ITEM_ID < NUM_ITEMS_PER_CTA;
// The first row.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + ITEM_ID;
// Iterate over the rows of the matrix. One group of N threads per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_ITEMS_PER_CTA )
{
if ( is_active )
{
int a_row_id = sorted_rows_by_color[a_row_it];
const int idx = N * a_row_id + ITEM_BLOCK_OFFSET;
Vector_type my_b = __cachingLoad(&delta[idx]);
Vector_type my_x = x[idx];
x[idx] = my_x + weight * my_b;
Delta[idx] = my_b;
}
}
}
// ----------
// Methods
// ----------
template< typename Matrix_type, typename Vector_type, int N >
void DILU_forward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major,
const int has_external_diag )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ROWS_PER_WARP = ::max(WARP_SIZE / NxN, 1);
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA;
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
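// The grid is the ceiling of rows / rows-per-CTA, capped at 4096 CTAs; rows beyond that are handled by
// the grid-stride loop inside the kernels.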
// Branch to the correct kernel call.
int code = 2 * (row_major ? 1 : 0) + (has_external_diag ? 1 : 0);
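// Encoding: bit 1 selects row-major block storage, bit 0 selects an external (separately stored) diagonal.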
switch ( code )
{
case 0: // Column-major, no external diagonal.
hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 1: // Column-major, external diagonal.
hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 2: // Row-major, no external diagonal.
hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 3: // Row-major, external diagonal.
hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
template< typename Matrix_type, typename Vector_type, int N >
void DILU_forward_NxN_dispatch_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major,
const int has_external_diag )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA;
// Each warp is going to sweep through the block this many times
const int NUM_WARP_ITERS_PER_BLOCK = ((NxN - 1) / WARP_SIZE) + 1;
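// i.e. ceil(NxN / WARP_SIZE): the number of warp-wide sweeps needed to cover one block (2 for 8x8, 4 for 10x10).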
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
// Branch to the correct kernel call.
if (!row_major)
{
FatalError("COL MAJOR is not supported for this large block_size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
switch ( has_external_diag )
{
case 0: // Row-major, no external diagonal.
hipLaunchKernelGGL(( DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, NUM_WARP_ITERS_PER_BLOCK>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 1: // Row-major, external diagonal.
hipLaunchKernelGGL(( DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type >
void DILU_forward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index,
const int block_size,
const int row_major,
const int has_external_diag )
{
switch ( block_size )
{
case 1:
{
const int NUM_THREADS_PER_ROW = 8;
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
if ( has_external_diag )
{
hipLaunchKernelGGL(( DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
}
else
{
hipLaunchKernelGGL(( DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
}
cudaCheckError();
}
break;
case 2:
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 2>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 3:
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 3>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 4:
if ( row_major )
{
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / 16;
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
if ( has_external_diag )
//DILU_forward_4x4_kernel<Matrix_type, Vector_type, CTA_SIZE, WARP_SIZE, true, true><<<grid_size, CTA_SIZE>>>(
hipLaunchKernelGGL(( DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
else
hipLaunchKernelGGL(( DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
cudaCheckError();
}
else
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 4>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 5:
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 5>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 8:
DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 8>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 10:
DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 10>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N >
void DILU_backward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ROWS_PER_WARP = ::max(WARP_SIZE / NxN, 1);
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA;
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
// Branch to the correct kernel call.
if ( row_major )
{
hipLaunchKernelGGL(( DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
}
else
{
hipLaunchKernelGGL(( DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
}
cudaCheckError();
}
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int NUM_WARP_ITERS_PER_BLOCK >
void DILU_backward_NxN_dispatch_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA;
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
// Branch to the correct kernel call.
if ( row_major )
{
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_large<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
}
else
{
FatalError("col major is not supported for this blocksize in multicolor DILU solver", AMGX_ERR_NOT_IMPLEMENTED);
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType>
void DILU_backward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index,
const int block_size,
const int row_major )
{
switch ( block_size )
{
case 1:
{
const int NUM_THREADS_PER_ROW = 8;
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
hipLaunchKernelGGL(( DILU_backward_1x1_kernel<Matrix_type, Vector_type, WeightType, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
cudaCheckError();
}
break;
case 2:
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 2>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 3:
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 3>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 4:
//if( false )
if ( row_major )
{
// Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / 16;
// The number of CTAs to launch.
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
//DILU_backward_NxN_kernel<Matrix_type, Vector_type, 4, CTA_SIZE, WARP_SIZE, true><<<grid_size, CTA_SIZE>>>(
hipLaunchKernelGGL(( DILU_backward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, WeightType, CTA_SIZE>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0,
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
cudaCheckError();
}
else
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 4>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 5:
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 5>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 8:
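// The last template argument is NUM_WARP_ITERS_PER_BLOCK, i.e. ceil(N*N / WARP_SIZE): 2 for 8x8 blocks
// here and 4 for 10x10 blocks below.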
DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 8, 2>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 10:
DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 10, 4>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< class T_Config >
MulticolorDILUSolver_Base<T_Config>::MulticolorDILUSolver_Base( AMG_Config &cfg,
const std::string &cfg_scope,
ThreadManager *tmng ) :
Solver<T_Config>( cfg, cfg_scope, tmng )
{
this->weight = cfg.AMG_Config::getParameter<double>("relaxation_factor", cfg_scope);
this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::getParameter<int>("reorder_cols_by_color", cfg_scope) != 0);
this->m_insert_diagonal_desired = (cfg.AMG_Config::getParameter<int>("insert_diag_while_reordering", cfg_scope) != 0);
this->m_boundary_coloring = cfg.AMG_Config::getParameter<ColoringType>("boundary_coloring", cfg_scope);
this->always_obey_coloring = 0;
if (weight == 0)
{
weight = 1.;
amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Multicolor DILU smoother\n");
}
}
// Destructor
template<class T_Config>
MulticolorDILUSolver_Base<T_Config>::~MulticolorDILUSolver_Base()
{
Einv.clear();
Einv.shrink_to_fit();
}
template<class T_Config>
void MulticolorDILUSolver_Base<T_Config>::computeEinv(Matrix<T_Config> &A)
{
ViewType oldView = A.currentView();
A.setView(this->m_explicit_A->getViewExterior());
if ( A.get_block_dimx() != A.get_block_dimy() )
{
FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
if ( A.get_block_dimx() > 32) // actually much less than 32 due to register file limitations, but...
{
FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
computeEinv_NxN( A, A.get_block_dimx() );
A.setView(oldView);
}
template< class T_Config >
void
MulticolorDILUSolver_Base<T_Config>::printSolverParameters() const
{
std::cout << "relaxation_factor = " << this->weight << std::endl;
}
// Solver setup
template< class T_Config >
void
MulticolorDILUSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
m_explicit_A = dynamic_cast<Matrix<T_Config>*>(this->m_A);
if (!this->m_explicit_A)
{
FatalError("MulticolorDILUSolver only works with explicit matrices", AMGX_ERR_INTERNAL);
}
int N = this->m_explicit_A->get_num_cols() * this->m_explicit_A->get_block_dimy();
if (this->m_explicit_A->getColoringLevel() < 1)
{
FatalError("Matrix must be colored to use multicolor dilu solver. Try setting: coloring_level=1 in the configuration file", AMGX_ERR_NOT_IMPLEMENTED);
}
m_delta.resize(N);
m_Delta.resize(N);
m_delta.set_block_dimy(this->m_explicit_A->get_block_dimy());
m_Delta.set_block_dimy(this->m_explicit_A->get_block_dimy());
m_delta.set_block_dimx(1);
m_Delta.set_block_dimx(1);
if ( this->m_explicit_A->getBlockFormat() != ROW_MAJOR )
{
FatalError("Multicolor DILU solver only supports row major format for the blocks", AMGX_ERR_CONFIGURATION);
}
computeEinv( *this->m_explicit_A );
}
//
template< class T_Config >
void
MulticolorDILUSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
}
// Solve one iteration
template<class T_Config>
bool
MulticolorDILUSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
if ( this->m_explicit_A->get_block_dimx() != this->m_explicit_A->get_block_dimy() )
{
FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
if ( this->m_explicit_A->get_block_dimx() > 32) // actually much less than 32 due to register file limitations, but...
{
FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
if (xIsZero)
{
x.dirtybit = 0;
}
if (!this->m_explicit_A->is_matrix_singleGPU())
{
this->m_explicit_A->manager->exchange_halo_async(x, x.tag);
this->m_explicit_A->manager->exchange_halo_async(b, b.tag);
}
if (this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior())
{
if (!this->m_explicit_A->is_matrix_singleGPU())
{
this->m_explicit_A->manager->exchange_halo_wait(x, x.tag);
this->m_explicit_A->manager->exchange_halo_wait(b, b.tag);
}
}
ViewType oldView = this->m_explicit_A->currentView();
ViewType flags;
bool latencyHiding = true;
if (this->m_explicit_A->is_matrix_singleGPU() || (x.dirtybit == 0 && b.dirtybit == 0))
{
latencyHiding = false;
this->m_explicit_A->setViewExterior();
flags = (ViewType)(this->m_explicit_A->getViewExterior());
}
else
{
flags = (ViewType)(this->m_explicit_A->getViewInterior());
this->m_explicit_A->setViewInterior();
}
if (xIsZero)
{
thrust::fill(x.begin(), x.end(), types::util<ValueTypeB>::get_zero());
cudaCheckError();
}
this->smooth_NxN(*this->m_explicit_A, b, x, flags);
if (latencyHiding)
{
if (!this->m_explicit_A->is_matrix_singleGPU())
{
this->m_explicit_A->manager->exchange_halo_wait(x, x.tag);
this->m_explicit_A->manager->exchange_halo_wait(b, b.tag);
}
this->m_explicit_A->setViewExterior();
flags = (ViewType)(~(this->m_explicit_A->getViewInterior()) & this->m_explicit_A->getViewExterior());
if (flags != 0)
{
this->smooth_NxN(*this->m_explicit_A, b, x, flags);
}
}
x.dirtybit = 1;
this->m_explicit_A->setView(oldView);
return (this->converged(b, x));
}
template<class T_Config>
void
MulticolorDILUSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x )
{}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::computeEinv_NxN(const Matrix_h &A, const int bsize)
{
FatalError("Multicolor DILU smoother not implemented for host format, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::smooth_NxN( const Matrix_h &A, VVector &b, VVector &x, ViewType separation_flag )
{
FatalError("Haven't implemented Multicolor DILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::MulticolorDILUSolver(
AMG_Config &cfg,
const std::string &cfg_scope,
ThreadManager *tmng ) :
MulticolorDILUSolver_Base<TemplateConfig<AMGX_device, V, M, I> >( cfg, cfg_scope, tmng )
{
int device = 0;
hipGetDevice( &device );
hipDeviceProp_t properties;
hipGetDeviceProperties( &properties, device );
m_is_kepler = properties.major >= 3;
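// Kepler (compute capability >= 3.0) or newer; presumably used elsewhere in the solver to gate the
// shuffle-based code paths.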
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::computeEinv_NxN(const Matrix_d &A, const int bsize)
{
const int bsize_sq = bsize * bsize;
this->Einv.resize( A.get_num_cols()*bsize_sq, 0.0 );
// sol::prof_start();
for ( int i = 0, num_colors = A.getMatrixColoring().getNumColors() ; i < num_colors ; ++i )
{
const int color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i];
const int num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset;
if ( num_rows_per_color == 0 )
{
continue;
}
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
int ROWS_PER_WARP = 1;
if ( bsize_sq > 1 && bsize_sq < 6 )
{
ROWS_PER_WARP = 32 / bsize_sq;
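// Only 2x2 blocks (bsize_sq == 4) satisfy the test above, so their setup launch assumes 8 rows per warp;
// every other block size is launched with one row per warp.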
}
const int ROWS_PER_CTA = ROWS_PER_WARP * NUM_WARPS_PER_CTA;
const int GRID_SIZE = ::min( 4096, (num_rows_per_color + ROWS_PER_CTA - 1) / ROWS_PER_CTA );
hipStream_t stream = thrust::global_thread_handle::get_stream();
switch ( bsize )
{
case 1:
hipLaunchKernelGGL(( DILU_setup_1x1_kernel<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream,
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 2:
hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 2, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream,
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 3:
hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 3, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream,
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 4:
hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 4, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream,
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 5:
hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 5, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream,
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 8:
hipLaunchKernelGGL(( DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32, 2>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream,
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 10:
hipLaunchKernelGGL(( DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 10, CTA_SIZE, 32, 4>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream,
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
default:
FatalError( "Multicolor-DILU Setup: block size was not enabled in the code, contact AMGX developers.", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
}
cudaCheckError();
}
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::smooth_NxN( const Matrix_d &A, VVector &b, VVector &x, ViewType separation_flag )
{
AMGX_CPU_PROFILER( "MulticolorDILUSolver::smooth_NxN " );
int offset = 0, separation = 0;
A.getOffsetAndSizeForView(INTERIOR, &offset, &separation);
// Only have separation=num interior rows if we are only working on the interior
// and the boundary coloring is FIRST or LAST, otherwise set separation offset to
// total number of rows
if ( separation_flag != this->m_explicit_A->getViewInterior() ||
this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior() ||
this->m_boundary_coloring != LAST && this->m_boundary_coloring != FIRST )
{
separation = A.row_offsets.size() - 1;
}
else
{
amgx_printf("separation active\n");
}
// --------------------
// Forward Sweep
// --------------------
const int num_colors = this->m_explicit_A->getMatrixColoring().getNumColors();
for ( int i = 0 ; i < num_colors ; ++i )
{
int color_offset(0);
if ( separation_flag & INTERIOR )
{
color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i];
}
else
{
color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
int num_rows_per_color(0);
if ( separation_flag == this->m_explicit_A->getViewInterior() )
{
num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
else
{
num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1];
}
num_rows_per_color -= color_offset;
if ( num_rows_per_color == 0 )
{
continue;
}
int boundary_index = separation;
if ( this->m_boundary_coloring == SYNC_COLORS )
{
boundary_index = A.get_num_rows();
}
DILU_forward_NxN_dispatch(
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
A.diag.raw(),
x.raw(),
b.raw(),
this->m_delta.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
num_rows_per_color,
i,
A.getMatrixColoring().getRowColors().raw(),
this->Einv.raw(),
this->m_boundary_coloring,
boundary_index,
A.get_block_dimy(),
A.getBlockFormat() == ROW_MAJOR,
A.hasProps(DIAG) );
cudaCheckError();
}
// --------------------
// Backward Sweep
// --------------------
for ( int i = num_colors - 1 ; i >= 0 ; --i )
{
int color_offset(0);
if ( separation_flag & INTERIOR )
{
color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i];
}
else
{
color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
int num_rows_per_color(0);
if ( separation_flag == this->m_explicit_A->getViewInterior() )
{
num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
else
{
num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1];
}
num_rows_per_color -= color_offset;
if ( num_rows_per_color == 0 )
{
continue;
}
if ( i == num_colors - 1 )
{
const int NUM_ROWS_PER_CTA = CTA_SIZE / A.get_block_dimy();
const int GRID_SIZE = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
switch ( A.get_block_dimy() )
{
case 1:
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 1, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 2:
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 2, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 3:
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 3, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 4:
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 4, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 5:
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 5, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 8:
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 8, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 10:
hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 10, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0,
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
}
cudaCheckError();
}
else
{
DILU_backward_NxN_dispatch(
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
this->Einv.raw(),
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color,
i,
this->m_boundary_coloring,
separation,
A.get_block_dimy(),
A.getBlockFormat() == ROW_MAJOR );
cudaCheckError();
}
}
}
/****************************************
* Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
// AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
// AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace multicolor_dilu_solver
} // namespace amgx
| 4e24d74aeae96965b3372d33548a7e4b4920d0dd.cu | /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <string.h>
#include <cutil.h>
#include <miscmath.h>
#include <amgx_cusparse.h>
#include <thrust/copy.h>
#include <solvers/multicolor_dilu_solver.h>
#include <solvers/block_common_solver.h>
#include <gaussian_elimination.h>
#include <basic_types.h>
#include <util.h>
#include <texture.h>
#include <ld_functions.h>
#include <matrix_io.h>
#include <thrust/logical.h>
#include <sm_utils.inl>
#include <amgx_types/util.h>
#include <algorithm>
#define AMGX_ILU_COLORING
namespace amgx
{
namespace multicolor_dilu_solver
{
enum { CTA_SIZE = 128, WARP_SIZE = 32 };
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, int NUM_WARP_ITERS_PER_BLOCK >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_setup_NxN_kernel_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_diag,
const Matrix_type *__restrict A_vals,
Matrix_type *__restrict Einv,
const int *sorted_rows_by_color,
const int *row_colors,
const int num_rows_per_color,
const int current_color )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Shared memory to broadcast column IDs.
__shared__ volatile int s_a_col_ids[CTA_SIZE];
__shared__ volatile int s_a_col_its[CTA_SIZE];
// Each thread keeps its own pointer.
volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id];
volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id];
// Shared memory to store the matrices.
__shared__ volatile Vector_type s_A_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
__shared__ volatile Vector_type s_B_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_A_mtx = &s_A_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
volatile Vector_type *my_s_B_mtx = &s_B_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
// Shared memory to store the index of the element Aji.
__shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA];
// Each thread keeps its own pointer.
volatile int *my_s_A_ji = &s_A_ji[warp_id];
// Precomputing some stuff
int idx[NUM_WARP_ITERS_PER_BLOCK];
int idy[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
const int id = (WARP_SIZE * wb + lane_id) % NxN;
idx[wb] = id / N;
idy[wb] = id % N;
}
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = -1;
if ( a_row_it < num_rows_per_color )
{
a_row_id = sorted_rows_by_color[a_row_it];
}
// Load the diagonal.
Vector_type e_out[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
e_out[wb] = (Vector_type)0.0;
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ( a_row_id != -1 && (wb * WARP_SIZE + lane_id) < NxN)
{
e_out[wb] = A_vals[NxN * A_diag[a_row_id] + wb * WARP_SIZE + lane_id];
}
// Skip the 1st iteration of the outer-loop (that loop runs on the host).
if ( current_color != 0 )
{
// Ranges of the rows.
int a_col_begin(0), a_col_end(0);
if ( a_row_id != -1 )
{
a_col_begin = A_rows[a_row_id ];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the elements in the columns.
for ( ; a_col_begin < a_col_end ; a_col_begin += NxN )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id;
// The identifier of the column if the iterator is valid.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color )
{
a_col_id = a_col_tmp;
}
// When the diagonal is stored inside the matrix, we have to reject it. We
// could be using a template parameter but it's not needed since that
// rejection is really cheap (a couple of extra cycles -- CMP+MOV).
if ( a_col_id == a_row_id )
{
a_col_id = -1;
}
// We partition valid and invalid column ids. Valid ones come first.
int vote = utils::ballot( a_col_id != -1 );
int ones = __popc( vote );
int dest = __popc( vote & utils::lane_mask_lt() );
if ( a_col_id == -1 )
{
dest = ones + lane_id - dest;
}
my_s_a_col_ids[dest] = a_col_id;
my_s_a_col_its[dest] = a_col_it;
// Temporary storage with zeros for OOB
Vector_type my_A[NUM_WARP_ITERS_PER_BLOCK], my_B[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_A[wb] = (Vector_type)0.0;
my_B[wb] = (Vector_type)0.0;
}
// Threads collaborate to load the rows.
for ( int k = 0 ; k < WARP_SIZE ; ++k )
{
// Exchange column indices.
const int uniform_a_col_id = my_s_a_col_ids[k];
// Early exit.
if ( uniform_a_col_id == -1 )
{
break;
}
// Load the iterator.
const int uniform_a_col_it = my_s_a_col_its[k];
// Load the two matrices.
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ((wb * WARP_SIZE + lane_id) < NxN)
{
my_A[wb] = A_vals[NxN * uniform_a_col_it + wb * WARP_SIZE + lane_id];
my_B[wb] = Einv [NxN * uniform_a_col_id + wb * WARP_SIZE + lane_id];
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb];
my_s_B_mtx[lane_id + wb * WARP_SIZE] = my_B[wb];
}
// Compute the product of matrices.
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_A[wb] = (Vector_type)0.0;
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
my_A[wb] += my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]];
}
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ((wb * WARP_SIZE + lane_id) < NxN)
{
my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb];
}
// We're looking for columns in the two rows we're interested in.
int b_col_it = A_rows[uniform_a_col_id ];
int b_col_end = A_rows[uniform_a_col_id + 1];
// Init the marker to -1.
if ( lane_id == 0 )
{
*my_s_A_ji = -1;
}
// Run the loop.
b_col_it += lane_id;
int shared_found = utils::ballot( lane_id == 0 && uniform_a_col_id == -1 );
do
{
bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id;
if ( found )
{
*my_s_A_ji = b_col_it;
}
shared_found = shared_found | utils::ballot(found);
b_col_it += NxN;
}
while ( __popc( shared_found ) == 0 && utils::any( b_col_it < b_col_end ) );
// Load the blocks.
const int w_aji = *my_s_A_ji;
Vector_type my_C[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_C[wb] = (Vector_type)0.0;
if ( w_aji != -1 && (wb * WARP_SIZE + lane_id) < NxN)
{
my_C[wb] = A_vals[NxN * w_aji + wb * WARP_SIZE + lane_id];
}
my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_C[wb];
}
// Update e_out.
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
e_out[wb] -= my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]];
}
}
}
} // a_col_begin < a_col_end
} // current_color != 0
// Store e_out in shared memory (the A staging buffer).
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id] = e_out[wb];
}
// Invert the matrices.
#pragma unroll
for ( int row = 0 ; row < N ; ++row )
{
Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row];
if ( isNotCloseToZero(diag_tmp) )
{
diag = Vector_type(1) / diag_tmp;
}
else
{
diag = Vector_type(1) / epsilon(diag_tmp);
}
if ( lane_id < N && lane_id != row)
{
my_s_A_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] * diag;
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if ( idx[wb] != row && idy[wb] != row)
{
my_s_A_mtx[wb * WARP_SIZE + lane_id] = my_s_B_mtx[wb * WARP_SIZE + lane_id] - my_s_B_mtx[N * idx[wb] + row] * my_s_B_mtx[N * row + idy[wb]];
}
if ( lane_id < N )
{
Vector_type tmp = diag;
if ( lane_id != row )
{
tmp = -my_s_A_mtx[N * lane_id + row] * diag;
}
my_s_A_mtx[N * lane_id + row] = tmp;
}
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
{
my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id];
}
}
// Store the results to Einv.
if ( a_row_id != -1 )
#pragma unroll
for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++)
if (wb * WARP_SIZE + lane_id < NxN)
{
Einv[NxN * a_row_id + wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id];
}
}
}
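// Illustrative only: a serial, single-threaded sketch of the in-place Gauss-Jordan inversion
// (no pivoting) that the warp performs cooperatively in shared memory above on each NxN block.
// The real kernel additionally guards the pivot with isNotCloseToZero()/epsilon(); that guard
// is omitted here, and the helper name is hypothetical (it is not used by the solver).
template< int N, typename T >
inline void example_gauss_jordan_inverse_in_place( T (&A)[N][N] )
{
    for ( int row = 0 ; row < N ; ++row )
    {
        T diag = T(1) / A[row][row];
        // Scale the pivot row, except the pivot itself.
        for ( int col = 0 ; col < N ; ++col )
        {
            if ( col != row ) { A[row][col] *= diag; }
        }
        // Eliminate the pivot column from every other row, using the scaled pivot row.
        for ( int i = 0 ; i < N ; ++i )
        {
            if ( i == row ) { continue; }
            for ( int j = 0 ; j < N ; ++j )
            {
                if ( j != row ) { A[i][j] -= A[i][row] * A[row][j]; }
            }
        }
        // Overwrite the pivot column with the corresponding column of the inverse.
        for ( int i = 0 ; i < N ; ++i )
        {
            A[i][row] = ( i == row ) ? diag : -A[i][row] * diag;
        }
    }
}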
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_setup_NxN_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_diag,
const Matrix_type *__restrict A_vals,
Matrix_type *__restrict Einv,
const int *sorted_rows_by_color,
const int *row_colors,
const int num_rows_per_color,
const int current_color )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN;
// Upper-bound on the number of items per warp.
const int NUM_ITEMS_PER_WARP_CEIL = (WARP_SIZE + NxN - 1) / NxN;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
// Useful index to compute matrix products.
const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N;
const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N;
// We need NxN to compute a NxN block. Encode a mask for the first block.
int mask_tmp = utils::ballot( lane_id_div_NxN == 0 );
// Mask for ballots. We shift the mask with NxN active bits by the needed number of bits.
const int mask_NxN = mask_tmp << (lane_id_div_NxN * __popc(mask_tmp));
// Shared memory to broadcast column IDs.
__shared__ volatile int s_a_col_ids[CTA_SIZE];
__shared__ volatile int s_a_col_its[CTA_SIZE];
// Each thread keeps its own pointer.
volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id_mod_NxN];
volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id_mod_NxN];
// Shared memory to store the matrices.
__shared__ volatile Vector_type s_A_mtx[CTA_SIZE];
__shared__ volatile Vector_type s_B_mtx[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_A_mtx = &s_A_mtx[threadIdx.x - lane_id_mod_NxN];
volatile Vector_type *my_s_B_mtx = &s_B_mtx[threadIdx.x - lane_id_mod_NxN];
// Shared memory to store the index of the element Aji.
__shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA * NUM_ITEMS_PER_WARP_CEIL];
// Each thread keeps its own pointer.
volatile int *my_s_A_ji = &s_A_ji[warp_id * NUM_ITEMS_PER_WARP_CEIL + lane_id_div_NxN];
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN;
// Iterate over the rows of the matrix. One warp per row.
for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID )
{
// Is the thread active? For example, for 5x5 only the first 25 threads are active per warp.
// At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals
// to false ; that's the common trick to determine if a number is a power of 2).
int is_active = true;
if ( NxN & (NxN - 1) )
{
is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP;
}
int a_row_id = -1;
if ( is_active && a_row_it < num_rows_per_color )
{
a_row_id = sorted_rows_by_color[a_row_it];
}
// Load the diagonal.
Vector_type e_out(0);
if ( a_row_id != -1 )
{
e_out = A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN];
}
// Skip the 1st iteration of the outer-loop (that loop runs on the host).
if ( current_color != 0 )
{
// Ranges of the rows.
int a_col_begin(0), a_col_end(0);
if ( a_row_id != -1 )
{
a_col_begin = A_rows[a_row_id ];
a_col_end = A_rows[a_row_id + 1];
}
// Iterate over the elements in the columns.
for ( ; a_col_begin < a_col_end ; a_col_begin += NxN )
{
unsigned int active_mask = utils::activemask();
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_NxN;
// The identifier of the column if the iterator is valid.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color )
{
a_col_id = a_col_tmp;
}
// When the diagonal is stored inside the matrix, we have to reject it. We
// could be using a template parameter but it's not needed since that
// rejection is really cheap (a couple of extra cycles -- CMP+MOV).
if ( a_col_id == a_row_id )
{
a_col_id = -1;
}
// We partition valid and invalid column ids. Valid ones come first.
int vote = utils::ballot( a_col_id != -1, active_mask ) & mask_NxN;
int ones = __popc( vote );
int dest = __popc( vote & utils::lane_mask_lt() );
if ( a_col_id == -1 )
{
dest = ones + lane_id_mod_NxN - dest;
}
my_s_a_col_ids[dest] = a_col_id;
my_s_a_col_its[dest] = a_col_it;
// Threads collaborate to load the rows.
for ( int k = 0 ; k < NxN ; ++k )
{
// Exchange column indices.
const int uniform_a_col_id = my_s_a_col_ids[k];
// Early exit.
if ( utils::all( uniform_a_col_id == -1, active_mask ) )
{
break;
}
// Load the iterator.
const int uniform_a_col_it = my_s_a_col_its[k];
// Load the two matrices.
Vector_type my_A(0), my_B(0);
if ( uniform_a_col_id != -1 )
{
my_A = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
my_B = Einv [NxN * uniform_a_col_id + lane_id_mod_NxN];
}
my_s_A_mtx[lane_id_mod_NxN] = my_A;
my_s_B_mtx[lane_id_mod_NxN] = my_B;
utils::syncwarp(active_mask);
// Compute the product of matrices.
Vector_type tmp(0);
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
tmp += my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N];
}
my_s_A_mtx[lane_id_mod_NxN] = tmp;
// We're looking for columns in the two rows we're interested in.
int b_col_it(0), b_col_end(0);
if ( is_active && uniform_a_col_id != -1 )
{
b_col_it = A_rows[uniform_a_col_id ];
b_col_end = A_rows[uniform_a_col_id + 1];
}
// Init the marker to -1.
if ( lane_id_mod_NxN == 0 )
{
*my_s_A_ji = -1;
}
// Run the loop.
b_col_it += lane_id_mod_NxN;
int shared_found = utils::ballot( lane_id_mod_NxN == 0 && uniform_a_col_id == -1, active_mask );
do
{
bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id;
if ( found )
{
*my_s_A_ji = b_col_it;
}
shared_found = shared_found | utils::ballot(found, active_mask);
b_col_it += NxN;
}
while ( __popc( shared_found ) < NUM_ITEMS_PER_WARP && utils::any( b_col_it < b_col_end, active_mask ) );
// Load the blocks.
const int w_aji = *my_s_A_ji;
Vector_type my_C(0);
if ( w_aji != -1 )
{
my_C = A_vals[NxN * w_aji + lane_id_mod_NxN];
}
my_s_B_mtx[lane_id_mod_NxN] = my_C;
// Update e_out.
#pragma unroll
for ( int m = 0 ; m < N ; ++m )
{
e_out -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N];
}
}
} // a_col_begin < a_col_end
} // current_color != 0
// Store e_out in shared memory (the A staging buffer).
my_s_A_mtx[lane_id_mod_NxN] = e_out;
// Invert the matrices.
#pragma unroll
for ( int row = 0 ; row < N ; ++row )
{
Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row];
if ( isNotCloseToZero(diag_tmp) )
{
diag = Vector_type(1) / diag_tmp;
}
else
{
diag = Vector_type(1) / epsilon(diag_tmp);
}
if ( is_active && lane_id_mod_NxN_div_N == 0 && lane_id_mod_NxN_mod_N != row )
{
my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N] *= diag;
}
if ( is_active && lane_id_mod_NxN_div_N != row && lane_id_mod_NxN_mod_N != row )
{
my_s_A_mtx[lane_id_mod_NxN] -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + row] * my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N];
}
if ( is_active && lane_id_mod_NxN_div_N == 0 )
{
Vector_type tmp = diag;
if ( lane_id_mod_NxN_mod_N != row )
{
tmp = -my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] * diag;
}
my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] = tmp;
}
}
// Store the results to Einv.
if ( a_row_id != -1 )
{
Einv[NxN * a_row_id + lane_id_mod_NxN] = my_s_A_mtx[lane_id_mod_NxN];
}
}
}
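// Illustrative only: a standalone sketch of the ballot/popc compaction the setup kernels above
// use to pack a warp's valid column ids in front of the invalid ones before broadcasting them
// through shared memory. The helper name is hypothetical and a full-warp mask is assumed.
__device__ __forceinline__
int example_compaction_slot( bool is_valid, int lane_id )
{
    unsigned int vote = __ballot_sync( 0xffffffffu, is_valid );
    int ones = __popc( vote );                                // how many lanes hold a valid id
    int dest = __popc( vote & ( ( 1u << lane_id ) - 1u ) );   // my rank among the valid lanes
    if ( !is_valid )
    {
        dest = ones + lane_id - dest;                         // my rank among the invalid lanes
    }
    return dest;                                              // slot this lane writes to
}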
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_setup_1x1_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const int *__restrict A_diag,
const Matrix_type *__restrict A_vals,
Matrix_type *__restrict Einv,
const int *sorted_rows_by_color,
const int *row_colors,
const int num_rows_per_color,
const int current_color )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Number of items per grid.
const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NTPR = lane_id / NUM_THREADS_PER_ROW;
const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW;
// Shared memory to broadcast column IDs.
__shared__ int s_a_col_ids[CTA_SIZE];
// Each thread keeps its own pointer.
int *my_s_a_col_ids = &s_a_col_ids[warp_id * WARP_SIZE];
// Shared memory to store the matrices.
__shared__ int s_A_ji[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
int *my_s_A_ji = &s_A_ji[warp_id * WARP_SIZE];
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_WARPS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_WARPS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load the diagonal.
Vector_type e_out(0);
// Skip the 1st iteration of the outer-loop (that loop runs on the host).
if ( current_color != 0 )
{
// Ranges of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Iterate over the elements in the columns.
for ( ; a_col_begin < a_col_end ; a_col_begin += WARP_SIZE )
{
// Each thread loads a single element.
int a_col_it = a_col_begin + lane_id;
// The identifier of the column if the iterator is valid.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color )
{
a_col_id = a_col_tmp;
}
// When the diagonal is stored inside the matrix, we have to reject it. We
// could be using a template parameter but it's not needed since that
// rejection is really cheap (a couple of extra cycles -- CMP+MOV).
if ( a_col_id == a_row_id )
{
a_col_id = -1;
}
// We partition valid and invalid column ids. Valid ones come first.
int vote = utils::ballot( a_col_id != -1 );
int ones = __popc( vote );
int dest = __popc( vote & utils::lane_mask_lt() );
if ( a_col_id == -1 )
{
dest = ones + lane_id - dest;
}
my_s_a_col_ids[dest] = a_col_id;
// Reset A_jis.
my_s_A_ji[lane_id] = -1;
__syncwarp();
// Threads collaborate to load the rows.
for ( int k = 0 ; k < ones ; k += WARP_SIZE / NUM_THREADS_PER_ROW )
{
const int local_k = k + lane_id_div_NTPR;
// Exchange column indices.
int uniform_a_col_id = -1;
if ( local_k < ones )
{
uniform_a_col_id = my_s_a_col_ids[local_k];
}
// We look for columns in the rows we're interested in.
int b_col_it(0), b_col_end(0);
if ( uniform_a_col_id != -1 )
{
b_col_it = A_rows[uniform_a_col_id ];
b_col_end = A_rows[uniform_a_col_id + 1];
}
// Run the loop.
b_col_it += lane_id_mod_NTPR;
int shared_found = utils::ballot( lane_id_mod_NTPR == 0 && uniform_a_col_id == -1 );
do
{
bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id;
if ( found )
{
my_s_A_ji[local_k] = b_col_it;
}
shared_found = shared_found | utils::ballot(found);
b_col_it += NUM_THREADS_PER_ROW;
}
while ( __popc( shared_found ) < WARP_SIZE / NUM_THREADS_PER_ROW && utils::any( b_col_it < b_col_end ) );
}
__syncwarp();
// Where to get my A_ji from (if any).
int a_ji_it = my_s_A_ji[dest];
// Grab A_jis.
Matrix_type a_ji(0);
if ( a_ji_it != -1 )
{
a_ji = A_vals[a_ji_it];
}
// Update e_out.
if ( a_col_id != -1 )
{
e_out += a_ji * Einv[a_col_id] * A_vals[a_col_it];
}
} // a_col_begin < a_col_end
} // current_color != 0
// Reduce the e_outs in one value.
#pragma unroll
for ( int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1 )
{
e_out += utils::shfl_xor( e_out, mask );
}
// Store the result.
if ( lane_id == 0 )
{
Matrix_type res = A_vals[A_diag[a_row_id]] - e_out;
if ( res != Matrix_type(0) )
{
res = Matrix_type(1) / res;
}
Einv[a_row_id] = static_cast<Vector_type>(res);
}
}
}
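// Illustrative only: a serial scalar sketch of the recurrence evaluated by the kernel above for
// one color, assuming rows of smaller colors already hold their Einv from previous passes:
// Einv_i = 1 / ( A_ii - sum_j A_ij * Einv_j * A_ji ), the sum running over neighbours j whose
// color is smaller than the current one. Names are hypothetical; the epsilon handling of the
// real kernel is reduced to a plain zero check.
inline void example_dilu_setup_1x1( const int *A_rows, const int *A_cols, const int *A_diag,
                                    const double *A_vals, double *Einv,
                                    const int *sorted_rows_by_color, const int *row_colors,
                                    int num_rows_per_color, int current_color )
{
    for ( int it = 0 ; it < num_rows_per_color ; ++it )
    {
        int i = sorted_rows_by_color[it];
        double e = 0.0;
        for ( int k = A_rows[i] ; k < A_rows[i + 1] ; ++k )
        {
            int j = A_cols[k];
            if ( j == i || row_colors[j] >= current_color ) { continue; }
            // Find A_ji, the symmetric position of A_ij, in row j.
            double a_ji = 0.0;
            for ( int m = A_rows[j] ; m < A_rows[j + 1] ; ++m )
            {
                if ( A_cols[m] == i ) { a_ji = A_vals[m]; break; }
            }
            e += a_ji * Einv[j] * A_vals[k];
        }
        double res = A_vals[A_diag[i]] - e;
        Einv[i] = ( res != 0.0 ) ? 1.0 / res : res;
    }
}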
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< int N, bool ROW_MAJOR, int WARP_SIZE, typename Value_type >
static __device__ __forceinline__
Value_type reduce_distributed_vectors( Value_type x, int is_leader )
{
if ( N & (N - 1) )
{
#pragma unroll
for ( int i = 1 ; i < N ; ++i )
{
Value_type other_x = utils::shfl_down( x, ROW_MAJOR ? i : N * i );
if ( is_leader )
{
x += other_x;
}
}
}
else
{
#pragma unroll
for ( int i = 1 ; i < N ; i <<= 1 )
{
x += utils::shfl_xor( x, ROW_MAJOR ? i : N * i );
}
}
return x;
}
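// Illustrative only: a minimal standalone sketch of the power-of-two, row-major branch above.
// Each aligned group of N consecutive lanes holds one partial value per lane; after log2(N)
// xor-shuffles every lane of the group holds the group total. The helper name is hypothetical
// and a full-warp mask is assumed.
template< int N >
__device__ __forceinline__
double example_group_sum( double x )
{
    #pragma unroll
    for ( int i = 1 ; i < N ; i <<= 1 )
    {
        x += __shfl_xor_sync( 0xffffffffu, x, i );
    }
    return x;
}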
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_NxN_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
// Useful index to compute matrix products.
const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N;
const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N;
// Where to get my data from when I use SHFL.
const int shfl_offset = lane_id - lane_id_mod_NxN;
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN];
// Is the thread active? For example, for 5x5 only the first 25 threads are active per warp.
// At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals
// to false ; that's the common trick to determine if a number is a power of 2).
int is_active = true;
if ( NxN & (NxN - 1) )
{
is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP;
}
// Determine which NxN block the threads work with.
int a_row_it = num_rows_per_color;
if ( is_active )
{
a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN;
}
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id_mod_NxN_div_N]);
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
my_bmAx = b[N * a_row_id + lane_id_mod_NxN_mod_N];
}
}
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Loop over the nonzero blocks of the row, one batch of column indices at a time.
for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += NxN )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_NxN;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Count the number of active columns.
// int vote = utils::ballot(aColId != -1);
// The number of iterations.
// int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
// Loop over columns. We compute N columns per iteration.
for ( int k = 0 ; k < NxN ; k += N )
{
int my_k = k + lane_id_mod_NxN_div_N;
// Load N blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid )
{
my_x += delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N];
}
my_s_mem[lane_id_mod_NxN] = my_x;
// Load N blocks of A.
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && is_active && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 )
{
my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
}
if ( ROW_MAJOR )
{
my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N];
}
else
{
my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N];
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN];
// Reduce bmAx terms.
int is_leader = lane_id_mod_NxN_div_N == 0;
if ( ROW_MAJOR )
{
is_leader = lane_id_mod_NxN_mod_N == 0;
}
my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_s_mem[lane_id_mod_NxN_div_N] = my_bmAx;
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
my_s_mem[lane_id_mod_NxN_mod_N] = my_bmAx;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N];
}
else
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_div_N];
}
// Reduce bmAx terms.
my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Store the results.
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
delta[N * a_row_id + lane_id_mod_NxN_div_N] = my_bmAx;
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
delta[N * a_row_id + lane_id_mod_NxN_mod_N] = my_bmAx;
}
}
}
}
template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG, int NUM_WARP_ITERS_PER_BLOCK >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_NxN_kernel_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of rows computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA;
// Number of rows per grid.
const int NUM_ITEMS_PER_GRID = CTA_SIZE;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
// Useful index to compute matrix products.
const int lane_id_div_N = lane_id / N;
const int lane_id_mod_N = lane_id % N; // id of a lane inside the block
const int blocks_per_warp = WARP_SIZE / N; // we process this many column blocks per warp per row
const int row_elems_per_warp = blocks_per_warp * N;
// Shared memory to store bmAx
__shared__ volatile Vector_type bmAx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
volatile Vector_type *my_bmAx_s = &bmAx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( lane_id < N )
{
my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id]);
}
#pragma unroll
for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++)
{
my_bmAx_s[WARP_SIZE * i + lane_id] = 0.0;
}
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Loop over the nonzero blocks of the row, one batch of column indices at a time.
for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += WARP_SIZE ) // NxN
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Loop over columns. We compute blocks_per_warp columns per iteration.
for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp )
{
// id of the processed block by this thread
int my_k = k + lane_id_div_N;
// Load N blocks of X (if valid)
int uniform_a_col_id = utils::shfl( a_col_id, my_k );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp)
{
my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_N]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid && lane_id < row_elems_per_warp)
{
my_x += delta[N * uniform_a_col_id + lane_id_mod_N];
}
//my_s_mem[lane_id] = my_x;
#pragma unroll
for ( int i = 0 ; i < blocks_per_warp ; ++i )
{
// k-th batch of blocks, i-th block. Each thread processes a column/row of a_it = uniform_a_col_tmp
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
// check if we are going out of bounds/color
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
// sweep with the whole warp
if (uniform_a_col_it != -1)
{
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 && block_inside_id < NxN)
{
my_val = A_vals[NxN * uniform_a_col_it + block_inside_id];
}
my_bmAx_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); // MOD IS SLOW!
block_inside_id += WARP_SIZE;
}
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_Einv[j] = 0.0;
}
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
if ((WARP_SIZE * j + lane_id) < NxN)
{
my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id];
}
}
// Reduce bmAx terms.
{
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
if ( lane_id < N )
{
my_bmAx += my_bmAx_s[N * lane_id + i];
}
}
}
// Update the diagonal term.
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_bmAx_s[block_inside_id] = my_Einv[j] * utils::shfl(my_bmAx, block_inside_id % N);
block_inside_id += WARP_SIZE;
}
// Reduce bmAx terms.
{
my_bmAx = 0.0;
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
int idx = N * lane_id + i;
if ( lane_id < N )
{
my_bmAx += my_bmAx_s[idx];
}
}
}
// Store the results.
if ( lane_id < N )
{
delta[N * a_row_id + lane_id] = my_bmAx;
}
}
}
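// Illustrative only: in the inner loop of the kernel above, lane l owns entries
// e = l, l + WARP_SIZE, ... of a row-major NxN block and needs x_c with c = e % N; that value
// sits in the x fragment of lane N*i + c, where i is the block's position in the current batch,
// and is fetched with a shuffle. The helper name is hypothetical and a full-warp mask is assumed.
template< int N >
__device__ __forceinline__
double example_fetch_x_for_block_entry( double my_x_fragment, int block_in_batch, int entry_index )
{
    return __shfl_sync( 0xffffffffu, my_x_fragment, N * block_in_batch + entry_index % N );
}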
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_4x4_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / 16;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_mod_16 = lane_id % 16;
// Useful index to compute matrix products.
const int lane_id_mod_16_div_4 = lane_id_mod_16 / 4;
const int lane_id_mod_16_mod_4 = lane_id_mod_16 % 4;
// Where to get my data from when I use SHFL.
const int shfl_offset = lane_id - lane_id_mod_16;
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_16];
// Determine which 4x4 block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + threadIdx.x / 16;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( ROW_MAJOR )
{
if ( lane_id_mod_16_mod_4 == 0 )
{
my_bmAx = __cachingLoad(&b[4 * a_row_id + lane_id_mod_16_div_4]);
}
}
else
{
if ( lane_id_mod_16_div_4 == 0 )
{
my_bmAx = b[4 * a_row_id + lane_id_mod_16_mod_4];
}
}
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Loop over the nonzero blocks of the row, one batch of column indices at a time.
for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += 16 )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_16;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Count the number of active columns.
// int vote = utils::ballot(aColId != -1);
// The number of iterations.
// int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
// Loop over columns. We compute 4 columns per iteration.
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + lane_id_mod_16_div_4;
// Load N blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = __cachingLoad(&x[4 * uniform_a_col_id + lane_id_mod_16_mod_4]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid )
{
my_x += delta[4 * uniform_a_col_id + lane_id_mod_16_mod_4];
}
my_s_mem[lane_id_mod_16] = my_x;
// Load N blocks of A.
#pragma unroll
for ( int i = 0 ; i < 4 ; ++i )
{
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 )
{
my_val = A_vals[16 * uniform_a_col_it + lane_id_mod_16];
}
if ( ROW_MAJOR )
{
my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_mod_4];
}
else
{
my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_div_4];
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv = Einv[16 * a_row_id + lane_id_mod_16];
// Reduce bmAx terms.
int is_leader = lane_id_mod_16_div_4 == 0;
if ( ROW_MAJOR )
{
is_leader = lane_id_mod_16_mod_4 == 0;
}
my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( lane_id_mod_16_mod_4 == 0 )
{
my_s_mem[lane_id_mod_16_div_4] = my_bmAx;
}
}
else
{
if ( lane_id_mod_16_div_4 == 0 )
{
my_s_mem[lane_id_mod_16_mod_4] = my_bmAx;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_mod_4];
}
else
{
my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_div_4];
}
// Reduce bmAx terms.
my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader );
// Store the results.
if ( ROW_MAJOR )
{
if ( lane_id_mod_16_mod_4 == 0 )
{
delta[4 * a_row_id + lane_id_mod_16_div_4] = my_bmAx;
}
}
else
{
if ( lane_id_mod_16_div_4 == 0 )
{
delta[4 * a_row_id + lane_id_mod_16_mod_4] = my_bmAx;
}
}
}
}
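// Illustrative only: the shfl( a_col_id, shfl_offset + my_k ) calls in the kernel above broadcast
// the column id held by one lane of a 16-lane group to the four lanes that will process that
// block. The helper name is hypothetical and a full-warp mask is assumed.
__device__ __forceinline__
int example_broadcast_from_group_lane( int value, int group_base_lane, int source_in_group )
{
    // Every participating lane receives the value held by lane (group_base_lane + source_in_group).
    return __shfl_sync( 0xffffffffu, value, group_base_lane + source_in_group );
}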
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int CTA_SIZE, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_4x4_kernel_row_major_vec4( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of half warps per CTA.
const int NUM_HALF_WARPS = CTA_SIZE / 16;
// Coordinates of the thread.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Coordinates of the thread in the CTA.
const int thread_id_div_16 = threadIdx.x / 16;
const int thread_id_mod_16 = threadIdx.x % 16;
// Useful constants.
const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4;
const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4;
const int shfl_offset = 16 * (lane_id / 16);
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16];
// The iterator over rows.
int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS )
{
unsigned int active_mask = utils::activemask();
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx(0);
if ( thread_id_mod_16_div_4 == 0 )
{
my_bmAx = __cachingLoad(&b[4 * a_row_id + thread_id_mod_16_mod_4]);
}
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If it has an external diagonal, we need one more item to put the diag.
int a_col_max = a_col_end;
if ( HAS_EXTERNAL_DIAG )
{
++a_col_max;
}
// Loop over the nonzero blocks of the row, one batch of column indices at a time.
for ( ; a_col_begin < a_col_max ; a_col_begin += 16 )
{
unsigned int active_mask_inner = utils::activemask();
int a_col_it = a_col_begin + thread_id_mod_16;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = __cachingLoad(&A_cols[a_col_it]);
}
if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end )
{
a_col_id = a_row_id;
}
// Determine if the color is valid.
int a_col_is_valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
a_col_is_valid = a_col_id >= boundary_index;
}
else
{
a_col_is_valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
a_col_is_valid = row_colors[a_col_id] < current_color;
}
#endif
// Loop over columns. We compute 4 columns per iteration.
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + thread_id_mod_16_div_4;
// Load 4 blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner );
int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k, warpSize, active_mask_inner );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = __cachingLoad(&x[4 * uniform_a_col_id + thread_id_mod_16_mod_4]);
}
if ( uniform_a_col_id != -1 && uniform_a_col_is_valid )
{
my_x += delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4];
}
my_s_mem[thread_id_mod_16] = my_x;
int uniform_a_col_tmp = a_col_begin + my_k, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end )
{
uniform_a_col_it = A_diag[a_row_id];
}
Matrix_type my_vals[4] = { Matrix_type(0) };
if ( uniform_a_col_it != -1 )
{
utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] );
}
my_bmAx -= my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0];
my_bmAx -= my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1];
my_bmAx -= my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2];
my_bmAx -= my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3];
}
}
// Load Einvs.
Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16];
// Reduce bmAx terms.
my_bmAx += utils::shfl_xor( my_bmAx, 4, warpSize, active_mask );
my_bmAx += utils::shfl_xor( my_bmAx, 8, warpSize, active_mask );
// Update the shared terms.
if ( thread_id_mod_16_div_4 == 0 )
{
my_s_mem[thread_id_mod_16_mod_4] = my_bmAx;
}
// Update the diagonal term.
my_bmAx = my_Einv * my_s_mem[thread_id_mod_16_mod_4];
// Reduce bmAx terms.
my_bmAx += utils::shfl_xor( my_bmAx, 1, warpSize, active_mask );
my_bmAx += utils::shfl_xor( my_bmAx, 2, warpSize, active_mask );
// Store the results.
if ( thread_id_mod_16_mod_4 == 0 )
{
delta[4 * a_row_id + thread_id_mod_16_div_4] = my_bmAx;
}
}
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_forward_1x1_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of items per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
// Number of items per grid.
const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW;
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW);
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_bmAx = amgx::types::util<Vector_type>::get_zero();
if ( lane_id_mod_NTPR == 0 )
{
my_bmAx = __cachingLoad(&b[a_row_id]);
}
// If it has an external diag.
if ( HAS_EXTERNAL_DIAG && lane_id_mod_NTPR == 0 )
{
my_bmAx -= A_vals[A_diag[a_row_id]] * x[a_row_id];
}
// The range of the row.
int a_col_it = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// If the diagonal is stored separately, we have a special treatment.
//if( HAS_EXTERNAL_DIAG )
// ++a_col_end;
// Loop over the nonzeros of the row, one batch of column indices at a time.
for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW )
{
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
// Ignore the diagonal element since it has already been accounted for above
if (HAS_EXTERNAL_DIAG && a_col_id == a_row_id)
{
a_col_id = -1;
}
// Load x.
Vector_type my_x(0);
if ( a_col_id != -1 )
{
my_x = __cachingLoad(&x[a_col_id]);
}
// Is it really a valid column (due to coloring).
int valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == FIRST )
{
valid = a_col_id >= boundary_index;
}
else
{
valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color;
}
}
#else
if ( a_col_id != -1 && current_color != 0 )
{
valid = row_colors[a_col_id] < current_color;
}
#endif
// Load my x value.
if ( valid )
{
my_x += delta[a_col_id];
}
// Load my item from A.
Matrix_type my_val(0);
if ( a_col_it < a_col_end )
{
my_val = A_vals[a_col_it];
}
// Update bmAx.
my_bmAx -= my_val * my_x;
}
// Reduce bmAx terms.
#pragma unroll
for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 )
{
my_bmAx += utils::shfl_xor( my_bmAx, mask );
}
// Store the results.
if ( lane_id_mod_NTPR == 0 )
{
delta[a_row_id] = Einv[a_row_id] * my_bmAx;
}
}
}
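// Illustrative only: a serial scalar sketch of the forward sweep performed by the kernel above
// for one color, assuming the diagonal is stored inside the matrix and ignoring the boundary
// coloring handled by the real kernel:
// delta_i = Einv_i * ( b_i - sum_j A_ij * ( x_j + delta_j ) ), where delta_j is added only for
// neighbours j whose color is smaller than the current one. Names are hypothetical.
inline void example_dilu_forward_sweep_1x1( const int *A_rows, const int *A_cols, const double *A_vals,
                                            const double *Einv, const double *x, const double *b,
                                            double *delta, const int *sorted_rows_by_color,
                                            const int *row_colors, int num_rows_per_color,
                                            int current_color )
{
    for ( int it = 0 ; it < num_rows_per_color ; ++it )
    {
        int i = sorted_rows_by_color[it];
        double bmAx = b[i];
        for ( int k = A_rows[i] ; k < A_rows[i + 1] ; ++k )
        {
            int j = A_cols[k];
            double xj = x[j];
            if ( j != i && row_colors[j] < current_color )
            {
                xj += delta[j];   // rows of smaller colors already hold their delta
            }
            bmAx -= A_vals[k] * xj;
        }
        delta[i] = Einv[i] * bmAx;
    }
}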
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_backward_NxN_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_NxN = lane_id / NxN;
const int lane_id_mod_NxN = lane_id % NxN;
// Useful index to compute matrix products.
const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N;
const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N;
// Where to get my data from when I use SHFL.
const int shfl_offset = lane_id - lane_id_mod_NxN;
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN];
// Is the thread active? For example, for 5x5 only the first 25 threads are active per warp.
// At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals
// to false ; that's the common trick to determine if a number is a power of 2).
int is_active = true;
if ( NxN & (NxN - 1) )
{
is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP;
}
// Determine which NxN block the threads work with.
int a_row_it = num_rows_per_color;
if ( is_active )
{
a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN;
}
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_delta(0);
// The range of the row.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Loop over the nonzero blocks of the row, one batch of column indices at a time.
for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += NxN )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id_mod_NxN;
// Get the ID of the column.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
// Make sure the column is interesting.
#ifdef AMGX_ILU_COLORING
int valid = false;
if ( a_col_tmp != -1 && current_color != 0 )
{
if ( boundary_coloring == LAST )
{
valid = a_col_tmp >= boundary_index;
}
else
{
valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color;
}
}
#else
int valid = false;
if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color )
{
valid = true;
}
#endif
// Set the column id.
if ( valid )
{
a_col_id = a_col_tmp;
}
// Count the number of active columns.
// int vote = utils::ballot(aColId != -1);
// The number of iterations.
// int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) );
// Loop over columns. We compute N columns per iteration.
for ( int k = 0 ; k < NxN ; k += N )
{
int my_k = k + lane_id_mod_NxN_div_N;
// Load N blocks of X.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 )
{
my_x = Delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N];
}
my_s_mem[lane_id_mod_NxN] = my_x;
// Load N blocks of A.
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
//if( uniform_a_col_id == -1 )
// break;
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 )
{
my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN];
}
if ( ROW_MAJOR )
{
my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N];
}
else
{
my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N];
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Matrix_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN];
// Reduce bmAx terms.
int is_leader = lane_id_mod_NxN_div_N == 0;
if ( ROW_MAJOR )
{
is_leader = lane_id_mod_NxN_mod_N == 0;
}
my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader );
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_s_mem[lane_id_mod_NxN_div_N] = my_delta;
}
}
else
{
if ( lane_id_mod_NxN_div_N == 0 )
{
my_s_mem[lane_id_mod_NxN_mod_N] = my_delta;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N];
}
else
{
my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_div_N];
}
// Reduce bmAx terms.
my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader );
// Store the results.
if ( ROW_MAJOR )
{
const int offset = N * a_row_id + lane_id_mod_NxN_div_N;
Vector_type my_b(0), my_x(0);
if ( lane_id_mod_NxN_mod_N == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( lane_id_mod_NxN_mod_N == 0 )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
else
{
const int offset = N * a_row_id + lane_id_mod_NxN_mod_N;
Vector_type my_b(0), my_x(0);
if ( lane_id_mod_NxN_div_N == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( lane_id_mod_NxN_div_N == 0 )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
}
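// Illustrative only: a serial scalar sketch of the backward sweep implemented by the kernels
// above and below, ignoring the boundary coloring. For each row of the current color,
// Delta_i = delta_i - Einv_i * sum_j A_ij * Delta_j, the sum running over neighbours j whose
// color is larger than the current one (those rows already hold their Delta), and the solution
// is relaxed as x_i += weight * Delta_i. Names are hypothetical.
inline void example_dilu_backward_sweep_1x1( const int *A_rows, const int *A_cols, const double *A_vals,
                                             const double *Einv, const double *delta, double *Delta,
                                             double *x, double weight, const int *sorted_rows_by_color,
                                             const int *row_colors, int num_rows_per_color,
                                             int current_color )
{
    for ( int it = 0 ; it < num_rows_per_color ; ++it )
    {
        int i = sorted_rows_by_color[it];
        double acc = 0.0;
        for ( int k = A_rows[i] ; k < A_rows[i + 1] ; ++k )
        {
            int j = A_cols[k];
            if ( row_colors[j] > current_color )
            {
                acc += A_vals[k] * Delta[j];
            }
        }
        double d = delta[i] - Einv[i] * acc;
        x[i]    += weight * d;
        Delta[i] = d;
    }
}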
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, int NUM_WARP_ITERS_PER_BLOCK >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_backward_NxN_kernel_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items computed per CTA.
const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA;
// Number of items per grid.
const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_div_N = lane_id / N;
const int lane_id_mod_N = lane_id % N; // id of a lane inside the block
const int blocks_per_warp = WARP_SIZE / N; // we process this many column blocks per warp per row
const int row_elems_per_warp = blocks_per_warp * N;
// Shared memory to store the delta terms
__shared__ volatile Vector_type delta_s[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK];
volatile Vector_type *my_delta_s = &delta_s[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE];
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id;
// Iterate over the rows of the matrix. One warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Accumulator
Vector_type my_delta(0);
//Vector_type mAx[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++)
{
my_delta_s[WARP_SIZE * i + lane_id] = 0.0;
}
// Don't do anything if X is zero.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
// Loop over the nonzero blocks of the row, one batch of column indices at a time.
for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += WARP_SIZE )
{
// Each thread loads a single element. If !is_active, a_col_end == 0.
int a_col_it = a_col_begin + lane_id;
// Get the ID of the column.
int a_col_tmp = -1, a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_tmp = A_cols[a_col_it];
}
// Make sure the column is interesting.
#ifdef AMGX_ILU_COLORING
int valid = false;
if ( a_col_tmp != -1 && current_color != 0 )
{
if ( boundary_coloring == LAST )
{
valid = a_col_tmp >= boundary_index;
}
else
{
valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color;
}
}
#else
int valid = false;
if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color )
{
valid = true;
}
#endif
// Set the column id.
if ( valid )
{
a_col_id = a_col_tmp;
}
// Loop over columns. We compute blocks_per_warp columns per iteration.
for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp )
{
                // id of the block processed by this thread
int my_k = k + lane_id_div_N;
                // Load N blocks of Delta (if valid)
int uniform_a_col_id = utils::shfl( a_col_id, my_k );
Vector_type my_x(0);
if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp)
{
my_x = Delta[N * uniform_a_col_id + lane_id_mod_N];
}
// Load blocks of A.
// for each block in a batch
#pragma unroll
for ( int i = 0 ; i < blocks_per_warp ; ++i )
{
                    // k-th batch of blocks, i-th block. Each thread processes a column/row of a_it = uniform_a_col_tmp
int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1;
// check if we are going out of bounds/color
if ( uniform_a_col_tmp < a_col_end )
{
uniform_a_col_it = uniform_a_col_tmp;
}
                    // sweep with the whole warp
if (uniform_a_col_it != -1)
{
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
Matrix_type my_val(0);
if ( uniform_a_col_it != -1 && block_inside_id < NxN)
{
my_val = A_vals[NxN * uniform_a_col_it + block_inside_id];
}
my_delta_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); //my_s_mem[N*i + block_inside_id % N]; // MOD IS SLOW!
block_inside_id += WARP_SIZE;
}
}
}
} // Loop over k
} // Loop over aColIt
// Load Einvs.
Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK];
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_Einv[j] = 0.0;
}
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
if ((WARP_SIZE * j + lane_id) < NxN)
{
my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id];
}
}
// Reduce bmAx terms.
{
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
if ( lane_id < N )
{
my_delta += my_delta_s[N * lane_id + i];
}
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
int block_inside_id = lane_id;
#pragma unroll
for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++)
{
my_delta_s[block_inside_id] = my_Einv[j] * utils::shfl(my_delta, block_inside_id % N);
block_inside_id += WARP_SIZE;
}
}
// Reduce bmAx terms.
{
my_delta = 0.0;
#pragma unroll
for ( int i = 0 ; i < N ; ++i )
{
if ( lane_id < N )
{
my_delta += my_delta_s[N * lane_id + i];
}
}
}
// Store the results.
if ( ROW_MAJOR )
{
const int offset = N * a_row_id + lane_id;
Vector_type my_b(0), my_x(0);
if ( lane_id < N )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( lane_id < N )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
}
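// Illustrative note (added for documentation, not part of the original source): the "_large" kernels sweep each NxN
// block with a full warp in several passes, NUM_WARP_ITERS_PER_BLOCK = (N*N - 1) / WARP_SIZE + 1. A compile-time
// sketch of that arithmetic (the helper name is hypothetical and is not used by the solver):
static constexpr int dilu_warp_iters_per_block_sketch( int n, int warp_size )
{
    return ( n * n - 1 ) / warp_size + 1;
}
static_assert( dilu_warp_iters_per_block_sketch( 8, 32 ) == 2 && dilu_warp_iters_per_block_sketch( 10, 32 ) == 4,
               "matches the <8, 2> and <10, 4> instantiations used by the dispatch functions below" );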
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename IndexType, typename ValueTypeA, typename ValueTypeB, typename WeightType, int CTA_SIZE, bool ROW_MAJOR >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_backward_4x4_kernel( const IndexType *row_offsets,
const IndexType *column_indices,
const ValueTypeA *nonzero_values,
ValueTypeB *x,
const WeightType weight,
const int *sorted_rows_by_color,
const int *__restrict row_colors,
const ValueTypeA *Einv,
const ValueTypeB *delta,
ValueTypeB *Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const IndexType boundary_index)
{
const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA.
const int laneId = utils::lane_id();
const int halfWarpId = threadIdx.x / 16;
const int halfLaneId = threadIdx.x % 16;
const int halfLaneId_div_4 = halfLaneId / 4;
const int halfLaneId_mod_4 = halfLaneId % 4;
const int upperHalf = 16 * (laneId / 16);
// Shared memory needed to exchange X and delta.
__shared__ volatile ValueTypeB s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId];
// Iterate over the rows of the matrix. One warp per two rows.
for ( int aRowIt = blockIdx.x * nHalfWarps + halfWarpId ; aRowIt < num_rows_per_color ; aRowIt += gridDim.x * nHalfWarps )
{
int aRowId = sorted_rows_by_color[aRowIt];
// Load one block of B.
ValueTypeB my_delta(0);
// The range of the rows.
int aColBegin = row_offsets[aRowId ];
int aColEnd = row_offsets[aRowId + 1];
        // Each warp loads the column indices of 16 nonzero blocks
for ( ; aColBegin < aColEnd ; aColBegin += 16 )
{
int aColIt = aColBegin + halfLaneId;
// Get the ID of the column.
int aColTmp = -1, aColId = -1;
if ( aColIt < aColEnd )
{
aColTmp = column_indices[aColIt];
}
#ifdef AMGX_ILU_COLORING
bool valid = (((aColTmp < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[aColTmp] > current_color)) || (aColTmp >= boundary_index && boundary_coloring == LAST));
if ( aColTmp != -1 && valid )
{
aColId = aColTmp;
}
#else
if ( aColTmp != -1 && row_colors[aColTmp] > current_color )
{
aColId = aColTmp;
}
#endif
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + halfLaneId_div_4;
// Exchange column indices.
int waColId = utils::shfl( aColId, upperHalf + my_k );
                // Load 8 blocks of Delta if needed.
ValueTypeB my_x(0);
if ( waColId != -1 )
{
my_x = Delta[4 * waColId + halfLaneId_mod_4];
}
my_s_mem[halfLaneId] = my_x;
// Load 8 blocks of A.
#pragma unroll
for ( int i = 0 ; i < 4 ; ++i )
{
const int k_i = k + i;
int w_aColTmp = aColBegin + k_i, w_aColIt = -1;
if ( utils::shfl( aColId, upperHalf + k_i ) != -1 && w_aColTmp < aColEnd )
w_aColIt = w_aColTmp;
ValueTypeA my_val(0);
if ( w_aColIt != -1 )
{
my_val = nonzero_values[16 * w_aColIt + halfLaneId];
}
if ( ROW_MAJOR )
{
my_delta += my_val * my_s_mem[4 * i + halfLaneId_mod_4];
}
else
{
my_delta += my_val * my_s_mem[4 * i + halfLaneId_div_4];
}
}
} // Loop over k
} // Loop over aColIt
// Load EINV values.
ValueTypeA my_Einv = Einv[16 * aRowId + halfLaneId];
// Reduce delta terms.
if ( ROW_MAJOR )
{
my_delta += utils::shfl_xor( my_delta, 1 );
my_delta += utils::shfl_xor( my_delta, 2 );
}
else
{
my_delta += utils::shfl_xor( my_delta, 4 );
my_delta += utils::shfl_xor( my_delta, 8 );
}
// Update the shared terms.
if ( ROW_MAJOR )
{
if ( halfLaneId_mod_4 == 0 )
{
my_s_mem[halfLaneId_div_4] = my_delta;
}
}
else
{
if ( halfLaneId_div_4 == 0 )
{
my_s_mem[halfLaneId_mod_4] = my_delta;
}
}
// Update the diagonal term.
if ( ROW_MAJOR )
{
my_delta = my_Einv * my_s_mem[halfLaneId_mod_4];
}
else
{
my_delta = my_Einv * my_s_mem[halfLaneId_div_4];
}
// Regroup results.
if ( ROW_MAJOR )
{
my_delta += utils::shfl_xor( my_delta, 1 );
my_delta += utils::shfl_xor( my_delta, 2 );
}
else
{
my_delta += utils::shfl_xor( my_delta, 4 );
my_delta += utils::shfl_xor( my_delta, 8 );
}
// Store the results.
if ( ROW_MAJOR )
{
int offset = 4 * aRowId + halfLaneId_div_4;
ValueTypeB my_b(0), my_x(0);
if ( halfLaneId_mod_4 == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x[offset];
}
my_delta = my_b - my_delta;
if ( halfLaneId_mod_4 == 0 )
{
x[offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
else
{
int offset = 4 * aRowId + halfLaneId_mod_4;
ValueTypeB my_b(0), my_x(0);
if ( halfLaneId_div_4 == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x[offset];
}
my_delta = my_b - my_delta;
if ( halfLaneId_div_4 == 0 )
{
x[offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
}
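// Illustrative helper (added for documentation; the name is hypothetical and the function is not used by the kernels
// above): in the ROW_MAJOR case of DILU_backward_4x4_kernel the four partial products of one block row sit in four
// consecutive lanes, so they are reduced with a two-step xor butterfly.
template< typename Value_type >
static __device__ __forceinline__ Value_type dilu_butterfly_reduce_4_sketch( Value_type v )
{
    v += utils::shfl_xor( v, 1 ); // exchange within lane pairs (0,1), (2,3), ...
    v += utils::shfl_xor( v, 2 ); // exchange across pairs (0,2), (1,3), ...
    return v;                     // every lane of the group of 4 now holds the full sum
}
// The COL_MAJOR branch uses strides 4 and 8 instead, because the partial sums of one output row are then 4 lanes apart.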
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int CTA_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_backward_4x4_kernel_row_major_vec4( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of half warps per CTA.
const int NUM_HALF_WARPS = CTA_SIZE / 16;
// Coordinates of the thread.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Coordinates of the thread in the CTA.
const int thread_id_div_16 = threadIdx.x / 16;
const int thread_id_mod_16 = threadIdx.x % 16;
// Useful constants.
const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4;
const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4;
const int shfl_offset = 16 * (lane_id / 16);
// Shared memory needed to exchange X and delta.
__shared__ volatile Vector_type s_mem[CTA_SIZE];
// Each thread keeps its own pointer to shared memory to avoid some extra computations.
volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16];
// The iterator over rows.
int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16;
    // Iterate over the rows of the matrix. One half-warp per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS )
{
unsigned int active_mask = utils::activemask();
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_delta(0);
// Don't do anything if X is zero.
int a_col_begin = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
        // Each warp loads the column indices of 32 nonzero blocks
for ( ; a_col_begin < a_col_end ; a_col_begin += 16 )
{
unsigned int active_mask_inner = utils::activemask();
int a_col_it = a_col_begin + thread_id_mod_16;
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = __cachingLoad(&A_cols[a_col_it]);
}
#ifdef AMGX_ILU_COLORING
int valid = false;
if ( a_col_id != -1 && current_color != 0 )
{
if ( boundary_coloring == LAST )
{
valid = a_col_id >= boundary_index;
}
else
{
valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) > current_color;
}
}
#else
int valid = false;
if ( a_col_id != -1 && row_colors[a_col_id] > current_color )
{
valid = true;
}
#endif
// Set the column id.
if ( !valid )
{
a_col_id = -1;
}
// Loop over columns. We compute 8 columns per iteration.
#pragma unroll 2
for ( int k = 0 ; k < 16 ; k += 4 )
{
int my_k = k + thread_id_mod_16_div_4;
                // Load 8 blocks of Delta.
int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner );
Vector_type my_Delta(0);
if ( uniform_a_col_id != -1 )
{
my_Delta = Delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4];
}
my_s_mem[thread_id_mod_16] = my_Delta;
int uniform_a_col_it = a_col_begin + my_k;
if ( uniform_a_col_id == -1 || uniform_a_col_it >= a_col_end )
{
uniform_a_col_it = -1;
}
Matrix_type my_vals[4] = { Matrix_type(0) };
if ( uniform_a_col_it != -1 )
{
utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] );
}
my_delta += my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0];
my_delta += my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1];
my_delta += my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2];
my_delta += my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3];
} // Loop over k
} // Loop over aColIt
// Load EINV values.
Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16];
// Reduce delta terms.
my_delta += utils::shfl_xor( my_delta, 4, warpSize, active_mask );
my_delta += utils::shfl_xor( my_delta, 8, warpSize, active_mask );
// Update the shared terms.
if ( thread_id_mod_16_div_4 == 0 )
{
my_s_mem[thread_id_mod_16_mod_4] = my_delta;
}
// Update the diagonal term.
my_delta = my_Einv * my_s_mem[thread_id_mod_16_mod_4];
// Regroup results.
my_delta += utils::shfl_xor( my_delta, 1, warpSize, active_mask );
my_delta += utils::shfl_xor( my_delta, 2, warpSize, active_mask );
// Store the results.
int offset = 4 * a_row_id + thread_id_mod_16_div_4;
Vector_type my_b(0), my_x(0);
if ( thread_id_mod_16_mod_4 == 0 )
{
my_b = __cachingLoad(&delta[offset]);
my_x = x [offset];
}
my_delta = my_b - my_delta;
if ( thread_id_mod_16_mod_4 == 0 )
{
x [offset] = my_x + weight * my_delta;
Delta[offset] = my_delta;
}
}
}
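// Illustrative note (added for documentation, not part of the original source): the _row_major_vec4 kernel above
// replaces 16 scalar loads per block with one 4-wide vector load per thread; the thread with thread_id_mod_16_mod_4 == q
// reads A_vals[16 * block + 4*q .. 16 * block + 4*q + 3], i.e. row q of the 4x4 block, and multiplies it against the
// four Delta entries staged in shared memory. This relies on the block values being stored contiguously in row-major
// order, which is why the kernel is only dispatched when the matrix uses ROW_MAJOR blocks.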
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_backward_1x1_kernel( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index )
{
// Number of items per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
// Number of items per grid.
const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA;
// The coordinates of the thread inside the CTA/warp.
const int warp_id = utils::warp_id();
const int lane_id = utils::lane_id();
// Constants.
const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW;
// Determine which NxN block the threads work with.
int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW);
    // Iterate over the rows of the matrix. One group of NUM_THREADS_PER_ROW threads per row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID )
{
int a_row_id = sorted_rows_by_color[a_row_it];
// Load one block of B.
Vector_type my_delta(0);
// Don't do anything if X is zero.
int a_col_it = A_rows[a_row_id ];
int a_col_end = A_rows[a_row_id + 1];
        // Each group of NUM_THREADS_PER_ROW threads loads the column indices of its nonzero entries
for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW )
{
// Get the ID of the column.
int a_col_id = -1;
if ( a_col_it < a_col_end )
{
a_col_id = A_cols[a_col_it];
}
// Is it really a valid column (due to coloring).
int valid = false;
#ifdef AMGX_ILU_COLORING
if ( a_col_id != -1 && current_color != 0 )
{
//if( boundary_coloring == LAST )
// valid = a_col_id >= boundary_index;
//else
// valid = a_col_id < boundary_index && row_colors[a_col_id] > current_color;
valid = (((a_col_id < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[a_col_id] > current_color)) || (a_col_id >= boundary_index && boundary_coloring == LAST));
}
#else
//if( a_col_id != -1 && current_color != 0 )
if ( a_col_id != -1 )
{
valid = row_colors[a_col_id] > current_color;
}
#endif
// Load my Delta value.
Vector_type my_Delta(0);
if ( valid )
{
my_Delta = Delta[a_col_id];
}
// Load my item from A.
Matrix_type my_val(0);
if ( valid )
{
my_val = A_vals[a_col_it];
}
// Update bmAx.
my_delta += my_val * my_Delta;
}
// Reduce bmAx terms.
#pragma unroll
for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 )
{
my_delta += utils::shfl_xor( my_delta, mask );
}
// Store the results.
if ( lane_id_mod_NTPR == 0 )
{
Vector_type my_x = __cachingLoad(&delta[a_row_id]) - Einv[a_row_id] * my_delta;
x [a_row_id] += weight * my_x;
Delta[a_row_id] = my_x;
}
}
}
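// Illustrative note (added for documentation, not part of the original source): with NUM_THREADS_PER_ROW = 8 the
// reduction loop above is a standard xor-shuffle tree,
//     my_delta += utils::shfl_xor( my_delta, 4 );
//     my_delta += utils::shfl_xor( my_delta, 2 );
//     my_delta += utils::shfl_xor( my_delta, 1 );
// so after log2(8) = 3 exchanges every lane assigned to the row holds the complete dot product and lane 0 performs
// the update x[row] += weight * ( delta[row] - Einv[row] * my_delta ).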
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 16 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 16 )
#endif
void DILU_backward_NxN_kernel_skip( Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color )
{
const int NUM_ITEMS_PER_CTA = CTA_SIZE / N; // Number of updated block items per CTA
const int ITEM_ID = threadIdx.x / N;
const int ITEM_BLOCK_OFFSET = threadIdx.x % N;
const int is_active = ITEM_ID < NUM_ITEMS_PER_CTA;
// The first row.
int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + ITEM_ID;
    // Iterate over the rows of the matrix. One group of N threads per block row.
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_ITEMS_PER_CTA )
{
if ( is_active )
{
int a_row_id = sorted_rows_by_color[a_row_it];
const int idx = N * a_row_id + ITEM_BLOCK_OFFSET;
Vector_type my_b = __cachingLoad(&delta[idx]);
Vector_type my_x = x[idx];
x[idx] = my_x + weight * my_b;
Delta[idx] = my_b;
}
}
}
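// Illustrative note (added for documentation, not part of the original source): for the last color there is no column
// with row_colors[c] > current_color, so the correction term of the backward sweep vanishes and the update reduces to
//     Delta[r] = delta[r],   x[r] += weight * delta[r].
// DILU_backward_NxN_kernel_skip implements exactly this shortcut; smooth_NxN below launches it instead of the full
// backward kernels when processing color num_colors - 1.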
// ----------
// Methods
// ----------
template< typename Matrix_type, typename Vector_type, int N >
void DILU_forward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major,
const int has_external_diag )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ROWS_PER_WARP = std::max(WARP_SIZE / NxN, 1);
    // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA;
    // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
// Branch to the correct kernel call.
int code = 2 * (row_major ? 1 : 0) + (has_external_diag ? 1 : 0);
switch ( code )
{
case 0: // Column-major, no external diagonal.
DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, false> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 1: // Column-major, external diagonal.
DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, true> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 2: // Row-major, no external diagonal.
DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, false> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 3: // Row-major, external diagonal.
DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, true> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
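// Illustrative note (added for documentation, not part of the original source): the switch above packs the two boolean
// options into a single integer, code = 2 * row_major + has_external_diag, so that
//     0 -> column-major, no external diagonal      1 -> column-major, external diagonal
//     2 -> row-major,    no external diagonal      3 -> row-major,    external diagonal
// which keeps the four template instantiations of DILU_forward_NxN_kernel in one flat switch.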
template< typename Matrix_type, typename Vector_type, int N >
void DILU_forward_NxN_dispatch_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major,
const int has_external_diag )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
    // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA;
    // Each warp is going to sweep through the block this many times
const int NUM_WARP_ITERS_PER_BLOCK = ((NxN - 1) / WARP_SIZE) + 1;
    // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
// Branch to the correct kernel call.
if (!row_major)
{
FatalError("COL MAJOR is not supported for this large block_size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
switch ( has_external_diag )
{
case 0: // Row-major, no external diagonal.
DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, NUM_WARP_ITERS_PER_BLOCK> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
case 1: // Row-major, external diagonal.
DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type >
void DILU_forward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
const int *__restrict A_diag,
const Vector_type *x,
const Vector_type *b,
Vector_type *__restrict delta,
const int *__restrict sorted_rows_by_color,
const int num_rows_per_color,
const int current_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const ColoringType boundary_coloring,
const int boundary_index,
const int block_size,
const int row_major,
const int has_external_diag )
{
switch ( block_size )
{
case 1:
{
const int NUM_THREADS_PER_ROW = 8;
            // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
            // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
if ( has_external_diag )
{
DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, true> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
}
else
{
DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, false> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
}
cudaCheckError();
}
break;
case 2:
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 2>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 3:
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 3>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 4:
if ( row_major )
{
            // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / 16;
            // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
if ( has_external_diag )
//DILU_forward_4x4_kernel<Matrix_type, Vector_type, CTA_SIZE, WARP_SIZE, true, true><<<grid_size, CTA_SIZE>>>(
DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, true> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
else
DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, false> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index );
cudaCheckError();
}
else
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 4>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 5:
DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 5>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 8:
DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 8>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
case 10:
DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 10>(
A_rows,
A_cols,
A_vals,
A_diag,
x,
b,
delta,
sorted_rows_by_color,
num_rows_per_color,
current_color,
row_colors,
Einv,
boundary_coloring,
boundary_index,
row_major,
has_external_diag );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType, int N >
void DILU_backward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
// Squared N.
const int NxN = N * N;
// Number of items per warp.
const int NUM_ROWS_PER_WARP = std::max(WARP_SIZE / NxN, 1);
    // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA;
    // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
// Branch to the correct kernel call.
if ( row_major )
{
DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
}
else
{
DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, false> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
}
cudaCheckError();
}
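// Illustrative note (added for documentation, not part of the original source): the generic NxN kernels pack as many
// block rows per warp as fit, NUM_ROWS_PER_WARP = max(WARP_SIZE / (N*N), 1); with WARP_SIZE = 32 this gives
//     N = 2 -> 8 rows per warp,  N = 3 -> 3,  N = 4 -> 2,  N = 5 -> 1,
// and the grid is capped at 4096 CTAs, so larger colors are covered by the grid-stride loop inside the kernels.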
template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int NUM_WARP_ITERS_PER_BLOCK >
void DILU_backward_NxN_dispatch_large( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index,
const int row_major )
{
const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
    // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA;
    // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
// Branch to the correct kernel call.
if ( row_major )
{
DILU_backward_NxN_kernel_large<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
}
else
{
FatalError("col major is not supported for this blocksize in multicolor DILU solver", AMGX_ERR_NOT_IMPLEMENTED);
}
cudaCheckError();
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< typename Matrix_type, typename Vector_type, typename WeightType>
void DILU_backward_NxN_dispatch( const int *__restrict A_rows,
const int *__restrict A_cols,
const Matrix_type *__restrict A_vals,
Vector_type *__restrict x,
const WeightType weight,
const int *__restrict sorted_rows_by_color,
const int *__restrict row_colors,
const Matrix_type *__restrict Einv,
const Vector_type *delta,
Vector_type *__restrict Delta,
const int num_rows_per_color,
const int current_color,
const ColoringType boundary_coloring,
const int boundary_index,
const int block_size,
const int row_major )
{
switch ( block_size )
{
case 1:
{
const int NUM_THREADS_PER_ROW = 8;
            // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW;
            // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
DILU_backward_1x1_kernel<Matrix_type, Vector_type, WeightType, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
cudaCheckError();
}
break;
case 2:
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 2>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 3:
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 3>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 4:
//if( false )
if ( row_major )
{
            // Number of items computed per CTA.
const int NUM_ROWS_PER_CTA = CTA_SIZE / 16;
            // The number of CTAs (thread blocks) to launch.
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
//DILU_backward_NxN_kernel<Matrix_type, Vector_type, 4, CTA_SIZE, WARP_SIZE, true><<<grid_size, CTA_SIZE>>>(
DILU_backward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, WeightType, CTA_SIZE> <<< grid_size, CTA_SIZE>>>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index );
cudaCheckError();
}
else
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 4>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 5:
DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 5>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 8:
DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 8, 2>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
case 10:
DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 10, 4>(
A_rows,
A_cols,
A_vals,
x,
weight,
sorted_rows_by_color,
row_colors,
Einv,
delta,
Delta,
num_rows_per_color,
current_color,
boundary_coloring,
boundary_index,
row_major );
break;
default:
FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED );
}
cudaCheckError();
}
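// Illustrative note (added for documentation, not part of the original source): the forward and backward dispatchers
// support block sizes 1, 2, 3, 4, 5, 8 and 10. Size 1 routes to the scalar kernels, size 4 to the vectorized row-major
// 4x4 kernels when the blocks are stored row-major, sizes 8 and 10 to the "_large" kernels (their 64 or 100 block
// values no longer fit in a single warp), and any other size falls through to FatalError.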
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< class T_Config >
MulticolorDILUSolver_Base<T_Config>::MulticolorDILUSolver_Base( AMG_Config &cfg,
const std::string &cfg_scope,
ThreadManager *tmng ) :
Solver<T_Config>( cfg, cfg_scope, tmng )
{
this->weight = cfg.AMG_Config::getParameter<double>("relaxation_factor", cfg_scope);
this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::getParameter<int>("reorder_cols_by_color", cfg_scope) != 0);
this->m_insert_diagonal_desired = (cfg.AMG_Config::getParameter<int>("insert_diag_while_reordering", cfg_scope) != 0);
this->m_boundary_coloring = cfg.AMG_Config::getParameter<ColoringType>("boundary_coloring", cfg_scope);
this->always_obey_coloring = 0;
if (weight == 0)
{
weight = 1.;
amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Multicolor DILU smoother\n");
}
}
// Destructor
template<class T_Config>
MulticolorDILUSolver_Base<T_Config>::~MulticolorDILUSolver_Base()
{
Einv.clear();
Einv.shrink_to_fit();
}
template<class T_Config>
void MulticolorDILUSolver_Base<T_Config>::computeEinv(Matrix<T_Config> &A)
{
ViewType oldView = A.currentView();
A.setView(this->m_explicit_A->getViewExterior());
if ( A.get_block_dimx() != A.get_block_dimy() )
{
FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
    if ( A.get_block_dimx() > 32) // actually much less than 32 due to register file limitations, but...
{
FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
computeEinv_NxN( A, A.get_block_dimx() );
A.setView(oldView);
}
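// Illustrative note (added for documentation, not part of the original source): Einv holds, for every row of A, the
// inverse of the bsize x bsize DILU diagonal block stored contiguously, so block r occupies
// Einv[bsize*bsize*r .. bsize*bsize*(r+1) - 1] and the whole vector is sized num_cols * bsize * bsize in
// computeEinv_NxN below.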
template< class T_Config >
void
MulticolorDILUSolver_Base<T_Config>::printSolverParameters() const
{
std::cout << "relaxation_factor = " << this->weight << std::endl;
}
// Solver setup
template< class T_Config >
void
MulticolorDILUSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
m_explicit_A = dynamic_cast<Matrix<T_Config>*>(this->m_A);
if (!this->m_explicit_A)
{
FatalError("MulticolorDILUSolver only works with explicit matrices", AMGX_ERR_INTERNAL);
}
int N = this->m_explicit_A->get_num_cols() * this->m_explicit_A->get_block_dimy();
if (this->m_explicit_A->getColoringLevel() < 1)
{
FatalError("Matrix must be colored to use multicolor dilu solver. Try setting: coloring_level=1 in the configuration file", AMGX_ERR_NOT_IMPLEMENTED);
}
m_delta.resize(N);
m_Delta.resize(N);
m_delta.set_block_dimy(this->m_explicit_A->get_block_dimy());
m_Delta.set_block_dimy(this->m_explicit_A->get_block_dimy());
m_delta.set_block_dimx(1);
m_Delta.set_block_dimx(1);
if ( this->m_explicit_A->getBlockFormat() != ROW_MAJOR )
{
FatalError("Multicolor DILU solver only supports row major format for the blocks", AMGX_ERR_CONFIGURATION);
}
computeEinv( *this->m_explicit_A );
}
//
template< class T_Config >
void
MulticolorDILUSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
}
// Solve one iteration
template<class T_Config>
bool
MulticolorDILUSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
if ( this->m_explicit_A->get_block_dimx() != this->m_explicit_A->get_block_dimy() )
{
FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
    if ( this->m_explicit_A->get_block_dimx() > 32) // actually much less than 32 due to register file limitations, but...
{
FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
if (xIsZero)
{
x.dirtybit = 0;
}
if (!this->m_explicit_A->is_matrix_singleGPU())
{
this->m_explicit_A->manager->exchange_halo_async(x, x.tag);
this->m_explicit_A->manager->exchange_halo_async(b, b.tag);
}
if (this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior())
{
if (!this->m_explicit_A->is_matrix_singleGPU())
{
this->m_explicit_A->manager->exchange_halo_wait(x, x.tag);
this->m_explicit_A->manager->exchange_halo_wait(b, b.tag);
}
}
ViewType oldView = this->m_explicit_A->currentView();
ViewType flags;
bool latencyHiding = true;
if (this->m_explicit_A->is_matrix_singleGPU() || (x.dirtybit == 0 && b.dirtybit == 0))
{
latencyHiding = false;
this->m_explicit_A->setViewExterior();
flags = (ViewType)(this->m_explicit_A->getViewExterior());
}
else
{
flags = (ViewType)(this->m_explicit_A->getViewInterior());
this->m_explicit_A->setViewInterior();
}
if (xIsZero)
{
thrust::fill(x.begin(), x.end(), types::util<ValueTypeB>::get_zero());
cudaCheckError();
}
this->smooth_NxN(*this->m_explicit_A, b, x, flags);
if (latencyHiding)
{
if (!this->m_explicit_A->is_matrix_singleGPU())
{
this->m_explicit_A->manager->exchange_halo_wait(x, x.tag);
this->m_explicit_A->manager->exchange_halo_wait(b, b.tag);
}
this->m_explicit_A->setViewExterior();
flags = (ViewType)(~(this->m_explicit_A->getViewInterior()) & this->m_explicit_A->getViewExterior());
if (flags != 0)
{
this->smooth_NxN(*this->m_explicit_A, b, x, flags);
}
}
x.dirtybit = 1;
this->m_explicit_A->setView(oldView);
return (this->converged(b, x));
}
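// Illustrative note (added for documentation, not part of the original source): solve_iteration above overlaps
// communication with computation. It starts the asynchronous halo exchanges, smooths the interior view while they are
// in flight (latencyHiding == true), then waits for the halos and smooths the remaining boundary view; on a single GPU,
// or when neither x nor b is dirty, it smooths the exterior view in a single pass instead.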
template<class T_Config>
void
MulticolorDILUSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x )
{}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::computeEinv_NxN(const Matrix_h &A, const int bsize)
{
FatalError("Multicolor DILU smoother not implemented for host format, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::smooth_NxN( const Matrix_h &A, VVector &b, VVector &x, ViewType separation_flag )
{
FatalError("Haven't implemented Multicolor DILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::MulticolorDILUSolver(
AMG_Config &cfg,
const std::string &cfg_scope,
ThreadManager *tmng ) :
MulticolorDILUSolver_Base<TemplateConfig<AMGX_device, V, M, I> >( cfg, cfg_scope, tmng )
{
int device = 0;
cudaGetDevice( &device );
cudaDeviceProp properties;
cudaGetDeviceProperties( &properties, device );
m_is_kepler = properties.major >= 3;
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::computeEinv_NxN(const Matrix_d &A, const int bsize)
{
const int bsize_sq = bsize * bsize;
this->Einv.resize( A.get_num_cols()*bsize_sq, 0.0 );
// sol::prof_start();
for ( int i = 0, num_colors = A.getMatrixColoring().getNumColors() ; i < num_colors ; ++i )
{
const int color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i];
const int num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset;
if ( num_rows_per_color == 0 )
{
continue;
}
const int CTA_SIZE = 128;
const int NUM_WARPS_PER_CTA = CTA_SIZE / 32;
int ROWS_PER_WARP = 1;
if ( bsize_sq > 1 && bsize_sq < 6 )
{
ROWS_PER_WARP = 32 / bsize_sq;
}
const int ROWS_PER_CTA = ROWS_PER_WARP * NUM_WARPS_PER_CTA;
const int GRID_SIZE = std::min( 4096, (num_rows_per_color + ROWS_PER_CTA - 1) / ROWS_PER_CTA );
cudaStream_t stream = thrust::global_thread_handle::get_stream();
switch ( bsize )
{
case 1:
DILU_setup_1x1_kernel<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 2:
DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 2, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 3:
DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 3, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 4:
DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 4, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 5:
DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 5, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 8:
DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32, 2> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
case 10:
DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 10, CTA_SIZE, 32, 4> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>(
A.row_offsets.raw(),
A.col_indices.raw(),
A.diag.raw(),
A.values.raw(),
this->Einv.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
num_rows_per_color,
i );
break;
default:
FatalError( "Multicolor-DILU Setup: block size was not enabled in the code, contact AMGX developers.", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE );
}
cudaCheckError();
}
}
template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I >
void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::smooth_NxN( const Matrix_d &A, VVector &b, VVector &x, ViewType separation_flag )
{
AMGX_CPU_PROFILER( "MulticolorDILUSolver::smooth_NxN " );
int offset = 0, separation = 0;
A.getOffsetAndSizeForView(INTERIOR, &offset, &separation);
// Only have separation=num interior rows if we are only working on the interior
// and the boundary coloring is FIRST or LAST, otherwise set separation offset to
// total number of rows
if ( separation_flag != this->m_explicit_A->getViewInterior() ||
this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior() ||
this->m_boundary_coloring != LAST && this->m_boundary_coloring != FIRST )
{
separation = A.row_offsets.size() - 1;
}
else
{
amgx_printf("separation active\n");
}
// --------------------
// Forward Sweep
// --------------------
const int num_colors = this->m_explicit_A->getMatrixColoring().getNumColors();
for ( int i = 0 ; i < num_colors ; ++i )
{
int color_offset(0);
if ( separation_flag & INTERIOR )
{
color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i];
}
else
{
color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
int num_rows_per_color(0);
if ( separation_flag == this->m_explicit_A->getViewInterior() )
{
num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
else
{
num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1];
}
num_rows_per_color -= color_offset;
if ( num_rows_per_color == 0 )
{
continue;
}
int boundary_index = separation;
if ( this->m_boundary_coloring == SYNC_COLORS )
{
boundary_index = A.get_num_rows();
}
DILU_forward_NxN_dispatch(
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
A.diag.raw(),
x.raw(),
b.raw(),
this->m_delta.raw(),
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
num_rows_per_color,
i,
A.getMatrixColoring().getRowColors().raw(),
this->Einv.raw(),
this->m_boundary_coloring,
boundary_index,
A.get_block_dimy(),
A.getBlockFormat() == ROW_MAJOR,
A.hasProps(DIAG) );
cudaCheckError();
}
// --------------------
// Backward Sweep
// --------------------
for ( int i = num_colors - 1 ; i >= 0 ; --i )
{
int color_offset(0);
if ( separation_flag & INTERIOR )
{
color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i];
}
else
{
color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
int num_rows_per_color(0);
if ( separation_flag == this->m_explicit_A->getViewInterior() )
{
num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i];
}
else
{
num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1];
}
num_rows_per_color -= color_offset;
if ( num_rows_per_color == 0 )
{
continue;
}
if ( i == num_colors - 1 )
{
const int NUM_ROWS_PER_CTA = CTA_SIZE / A.get_block_dimy();
const int GRID_SIZE = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA );
switch ( A.get_block_dimy() )
{
case 1:
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 1, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>(
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 2:
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 2, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>(
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 3:
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 3, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>(
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 4:
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 4, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>(
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 5:
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 5, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>(
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 8:
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 8, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>(
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
case 10:
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 10, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>(
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color );
break;
}
cudaCheckError();
}
else
{
DILU_backward_NxN_dispatch(
A.row_offsets.raw(),
A.col_indices.raw(),
A.values.raw(),
x.raw(),
this->weight,
A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset,
A.getMatrixColoring().getRowColors().raw(),
this->Einv.raw(),
this->m_delta.raw(),
this->m_Delta.raw(),
num_rows_per_color,
i,
this->m_boundary_coloring,
separation,
A.get_block_dimy(),
A.getBlockFormat() == ROW_MAJOR );
cudaCheckError();
}
}
}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
// AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
// AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
}
} // namespace amgx
|
07ada58355a5e0d971b032a05d67c655c7fe1178.hip | // !!! This is a file automatically generated by hipify!!!
// modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_sgd_kernel.cu
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/Exceptions.h>
#include "multi_tensor_apply.cuh"
#include "compat.h"
#include <assert.h>
#include <hip/hip_runtime.h>
#define BLOCK_SIZE 512
#define ILP 4
/**
* Perform fused SGD on multiple buffers
* N: number of tensors
* tl[0] : gradients
* tl[1] : weights
* tl[2] : momentum buffers
* tl[3] : fp16 weights (if appropriate)
* wd : weight_decay (scalar)
* momentum : momentum (scalar)
* dampening : momentum dampening (scalar)
* lr : learning rate (scalar)
* nesterov : enable nesterov (bool)
* first run : necessary for proper momentum handling & init
* wd_after_momentum : apply weight decay _after_ momentum instead of before
**/
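// Illustrative scalar reference of the update performed by SGDFunctor below (a documentation sketch, not part of the
// original apex source; the function name and the plain-float signature are hypothetical and the helper is unused):
static inline void sgd_reference_step(float& w, float& m, float g,
                                      float wd, float momentum, float dampening,
                                      float lr, bool nesterov, bool first_run,
                                      bool wd_after_momentum, float scale)
{
    g *= scale;                                      // undo loss scaling
    if (wd != 0.f && !wd_after_momentum)
        g += wd * w;                                 // weight decay before momentum
    if (momentum != 0.f)
    {
        m = first_run ? g : m * momentum + (1.f - dampening) * g;
        g = nesterov ? g + momentum * m : m;
    }
    if (wd != 0.f && wd_after_momentum)
        g += wd * w;                                 // or weight decay after momentum
    w -= lr * g;                                     // weight update
}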
template <int N, typename T_grad, typename T_weight>
struct SGDFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int *noop_gmem,
TensorListMetadata<N> &tl,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
// Early exit if we don't need to do anything
if (*noop_gmem)
return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T_grad *grad_in = (T_grad *)tl.addresses[0][tensor_loc];
grad_in += chunk_idx * chunk_size;
T_weight *weight_in = (T_weight *)tl.addresses[1][tensor_loc];
weight_in += chunk_idx * chunk_size;
T_weight *mom_in = (T_weight *)tl.addresses[2][tensor_loc];
mom_in += chunk_idx * chunk_size;
at::Half *model_weights_out = nullptr;
if (N == 4)
{
model_weights_out = (at::Half *)tl.addresses[3][tensor_loc];
model_weights_out += chunk_idx * chunk_size;
}
n -= chunk_idx * chunk_size;
// Non-divergent exit condition for the __syncthreads
float incoming_grads[ILP];
float incoming_weights[ILP];
float incoming_moms[ILP];
for (int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x * ILP)
{
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
incoming_grads[ii] = 0;
incoming_weights[ii] = 0;
incoming_moms[ii] = 0;
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
incoming_grads[ii] = static_cast<float>(grad_in[i]) * scale;
incoming_weights[ii] = static_cast<float>(weight_in[i]);
incoming_moms[ii] = static_cast<float>(mom_in[i]);
}
}
// note for clarification to future michael:
// From a pure memory dependency perspective, there's likely no point unrolling
// the write loop, since writes just fire off once their LDGs arrive.
// Put another way, the STGs are dependent on the LDGs, but not on each other.
// There is still compute ILP benefit from unrolling the loop though.
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
// apply weight decay before momentum if necessary
if (wd != 0.f && !wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
if (momentum != 0.f)
{
if (!first_run)
incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii];
else // initialize momentums to current incoming grads
incoming_moms[ii] = incoming_grads[ii];
if (nesterov)
incoming_grads[ii] += momentum * incoming_moms[ii];
else
incoming_grads[ii] = incoming_moms[ii];
}
// Apply WD after momentum if desired
if (wd != 0.f && wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
// adjust the weight and write out
weight_in[i] += (-lr * incoming_grads[ii]);
// if necessary, write out an fp16 copy of the weights
if (N == 4)
model_weights_out[i] = static_cast<at::Half>(weight_in[i]);
// also write out the new momentum
if (momentum != 0.f)
mom_in[i] = incoming_moms[ii];
}
}
}
}
};
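// Illustrative note (added for documentation, not part of the original apex source): multi_tensor_apply records one
// (tensor, chunk) pair per CUDA block in TensorListMetadata; the functor recovers that pair through block_to_tensor /
// block_to_chunk, and each block then walks its chunk in strides of blockDim.x * ILP = 512 * 4 = 2048 elements, every
// thread handling up to ILP = 4 elements per pass.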
void multi_tensor_sgd_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
auto num_tensors = tensor_lists.size();
auto grad_type = tensor_lists[0][0].scalar_type();
auto weight_type = tensor_lists[1][0].scalar_type();
if (num_tensors == 4)
for (int i = 0; i < tensor_lists[3].size(); i++)
TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half,
"Additional output tensors should always be fp16.");
TORCH_CHECK(noop_flag.device() == tensor_lists[0][0].device(), "expected noop flag to be on the same device as tensors");
// We have 3 possibilities to handle here, in terms of
// grad_type, param_type, momentum_type, requires_fp16_copy
// 1. fp16, fp16, fp16, No
// 2. fp32, fp32, fp32, No
// 3. fp16, fp32, fp32, Yes
// 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case
// It's easier to hardcode these possibilities than to use
// switches etc. to handle the cross-product of cases where
// we don't want the majority of them.
// Case 1. fp16, fp16, fp16, No
if (grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Half &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, at::Half, at::Half>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 2. fp16, fp32, fp32, No
// else if (grad_type == at::ScalarType::Half &&
// weight_type == at::ScalarType::Float &&
// num_tensors == 3) {
// multi_tensor_apply<3>(
// BLOCK_SIZE,
// chunk_size,
// noop_flag,
// tensor_lists,
// SGDFunctor<3, at::Half, float>(),
// wd,
// momentum,
// dampening,
// lr,
// nesterov,
// first_run,
// wd_after_momentum);
// }
// Case 2. fp32, fp32, fp32, No
else if (grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 3. fp16, fp32, fp32, Yes
else if (grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, at::Half, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 4. fp32, fp32, fp32, Yes
else if (grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
else
{
AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ",
"gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors);
}
AT_CUDA_CHECK(hipGetLastError());
} | 07ada58355a5e0d971b032a05d67c655c7fe1178.cu | // modified from https://github.com/NVIDIA/apex/blob/master/csrc/multi_tensor_sgd_kernel.cu
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include "multi_tensor_apply.cuh"
#include "compat.h"
#include <assert.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 512
#define ILP 4
/**
* Perform fused SGD on multiple buffers
* N: number of tensors
* tl[0] : gradients
* tl[1] : weights
* tl[2] : momentum buffers
* tl[3] : fp16 weights (if appropriate)
* wd : weight_decay (scalar)
* momentum : momentum (scalar)
* dampening : momentum dampening (scalar)
* lr : learning rate (scalar)
* nesterov : enable nesterov (bool)
* first run : necessary for proper momentum handling & init
* wd_after_momentum : apply weight decay _after_ momentum instead of before
**/
template <int N, typename T_grad, typename T_weight>
struct SGDFunctor
{
__device__ __forceinline__ void operator()(
int chunk_size,
volatile int *noop_gmem,
TensorListMetadata<N> &tl,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
// Early exit if we don't need to do anything
if (*noop_gmem)
return;
int tensor_loc = tl.block_to_tensor[blockIdx.x];
int chunk_idx = tl.block_to_chunk[blockIdx.x];
int n = tl.sizes[tensor_loc];
T_grad *grad_in = (T_grad *)tl.addresses[0][tensor_loc];
grad_in += chunk_idx * chunk_size;
T_weight *weight_in = (T_weight *)tl.addresses[1][tensor_loc];
weight_in += chunk_idx * chunk_size;
T_weight *mom_in = (T_weight *)tl.addresses[2][tensor_loc];
mom_in += chunk_idx * chunk_size;
at::Half *model_weights_out = nullptr;
if (N == 4)
{
model_weights_out = (at::Half *)tl.addresses[3][tensor_loc];
model_weights_out += chunk_idx * chunk_size;
}
n -= chunk_idx * chunk_size;
// Non-divergent exit condition for the __syncthreads
float incoming_grads[ILP];
float incoming_weights[ILP];
float incoming_moms[ILP];
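        // Each thread stages ILP elements in registers per pass to expose instruction-level parallelism.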
for (int i_start = 0;
i_start < n && i_start < chunk_size;
i_start += blockDim.x * ILP)
{
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
incoming_grads[ii] = 0;
incoming_weights[ii] = 0;
incoming_moms[ii] = 0;
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
incoming_grads[ii] = static_cast<float>(grad_in[i]) * scale;
incoming_weights[ii] = static_cast<float>(weight_in[i]);
incoming_moms[ii] = static_cast<float>(mom_in[i]);
}
}
// note for clarification to future michael:
// From a pure memory dependency perspective, there's likely no point unrolling
// the write loop, since writes just fire off once their LDGs arrive.
// Put another way, the STGs are dependent on the LDGs, but not on each other.
// There is still compute ILP benefit from unrolling the loop though.
#pragma unroll
for (int ii = 0; ii < ILP; ii++)
{
int i = i_start + threadIdx.x + ii * blockDim.x;
if (i < n && i < chunk_size)
{
// apply weight decay before momentum if necessary
if (wd != 0.f && !wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
if (momentum != 0.f)
{
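            // v = momentum * v + (1 - dampening) * g, then g <- v (or g + momentum * v when nesterov is set).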
if (!first_run)
incoming_moms[ii] = incoming_moms[ii] * momentum + (1.f - dampening) * incoming_grads[ii];
else // initialize momentums to current incoming grads
incoming_moms[ii] = incoming_grads[ii];
if (nesterov)
incoming_grads[ii] += momentum * incoming_moms[ii];
else
incoming_grads[ii] = incoming_moms[ii];
}
// Apply WD after momentum if desired
if (wd != 0.f && wd_after_momentum)
incoming_grads[ii] += wd * incoming_weights[ii];
// adjust the weight and write out
weight_in[i] += (-lr * incoming_grads[ii]);
// if necessary, write out an fp16 copy of the weights
if (N == 4)
model_weights_out[i] = static_cast<at::Half>(weight_in[i]);
// also write out the new momentum
if (momentum != 0.f)
mom_in[i] = incoming_moms[ii];
}
}
}
}
};
void multi_tensor_sgd_cuda(
int chunk_size,
at::Tensor noop_flag,
std::vector<std::vector<at::Tensor>> tensor_lists,
float wd,
float momentum,
float dampening,
float lr,
bool nesterov,
bool first_run,
bool wd_after_momentum,
float scale)
{
auto num_tensors = tensor_lists.size();
auto grad_type = tensor_lists[0][0].scalar_type();
auto weight_type = tensor_lists[1][0].scalar_type();
if (num_tensors == 4)
for (int i = 0; i < tensor_lists[3].size(); i++)
TORCH_CHECK(tensor_lists[3][i].scalar_type() == at::ScalarType::Half,
"Additional output tensors should always be fp16.");
TORCH_CHECK(noop_flag.device() == tensor_lists[0][0].device(), "expected noop flag to be on the same device as tensors");
  // We have 4 possibilities to handle here, in terms of
// grad_type, param_type, momentum_type, requires_fp16_copy
// 1. fp16, fp16, fp16, No
// 2. fp32, fp32, fp32, No
// 3. fp16, fp32, fp32, Yes
// 4. fp32, fp32, fp32, Yes // this is the materialize_master_grads=True case
// It's easier to hardcode these possibilities than to use
// switches etc. to handle the cross-product of cases where
// we don't want the majority of them.
// Case 1. fp16, fp16, fp16, No
if (grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Half &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, at::Half, at::Half>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
  // Disabled: fp16, fp32, fp32, No (not in the supported cases above)
// else if (grad_type == at::ScalarType::Half &&
// weight_type == at::ScalarType::Float &&
// num_tensors == 3) {
// multi_tensor_apply<3>(
// BLOCK_SIZE,
// chunk_size,
// noop_flag,
// tensor_lists,
// SGDFunctor<3, at::Half, float>(),
// wd,
// momentum,
// dampening,
// lr,
// nesterov,
// first_run,
// wd_after_momentum);
// }
// Case 2. fp32, fp32, fp32, No
else if (grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 3)
{
multi_tensor_apply<3>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<3, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 3. fp16, fp32, fp32, Yes
else if (grad_type == at::ScalarType::Half &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, at::Half, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
// Case 4. fp32, fp32, fp32, Yes
else if (grad_type == at::ScalarType::Float &&
weight_type == at::ScalarType::Float &&
num_tensors == 4)
{
multi_tensor_apply<4>(
BLOCK_SIZE,
chunk_size,
noop_flag,
tensor_lists,
SGDFunctor<4, float, float>(),
wd,
momentum,
dampening,
lr,
nesterov,
first_run,
wd_after_momentum,
scale);
}
else
{
AT_ERROR("multi_tensor_sgd only supports some combinations of gradient & weight types. Given: ",
"gradient: ", grad_type, ", weight: ", weight_type, ", num_lists: ", num_tensors);
}
AT_CUDA_CHECK(cudaGetLastError());
} |
b2ab7d91202c665f38afb8c347e782fe8bf5dc49.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <GL/glew.h>
#include <cuda_gl_interop.h>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <vector>
#include "Box.hh"
#include "Geometry.hh"
#include "Mat.hh"
#include "Material.hh"
#include "Sphere.hh"
#include "Tri.hh"
#include "UniformGrid.hh"
#include "Vec3.hh"
#include "cuda_render.cuh"
#include "raytracing.hh"
#include "transform.hh"
#include "util.hh"
/**
* @brief Initializes CUDA resources.
 * @details Called once upon program start. Registers the GL texture and both
 * GL buffers for CUDA/GL interop; creates a stream and maps the resources to it.
 *
 * @param texture_id ID of the GL texture
 * @param buffer_id ID of the GL buffer
 * @param display_buffer_id ID of the GL display buffer
*/
void cuda_init(GLuint texture_id, GLuint buffer_id, GLuint display_buffer_id) {
// register GL buffer and texture as CUDA resources
hipGraphicsGLRegisterBuffer(&cuda_buffer, buffer_id,
hipGraphicsRegisterFlagsNone);
hipGraphicsGLRegisterBuffer(&cuda_display_buffer, display_buffer_id,
hipGraphicsRegisterFlagsNone);
hipGraphicsGLRegisterImage(&cuda_texture, texture_id, GL_TEXTURE_2D,
hipGraphicsRegisterFlagsNone);
// create CUDA stream
hipStreamCreate(&cuda_stream);
// map resources
hipGraphicsMapResources(1, &cuda_buffer, cuda_stream);
hipGraphicsMapResources(1, &cuda_display_buffer, cuda_stream);
hipGraphicsMapResources(1, &cuda_texture, cuda_stream);
}
void cuda_render(size_t w, size_t h, const Mat4f &camera, Geometry *geom,
size_t geom_len, unsigned iteration, bool accel) {
using namespace std;
const size_t size_pixels = w * h;
float *buf_ptr;
float *display_buf_ptr;
size_t size_mapped;
hipGraphicsResourceGetMappedPointer((void **)&buf_ptr, &size_mapped,
cuda_buffer);
hipGraphicsResourceGetMappedPointer((void **)&display_buf_ptr,
&size_mapped, cuda_display_buffer);
// assert(size_mapped == size_pixels * 4 * sizeof(float)); // RGBA32F
// construct uniform grid
AABB bounds = geometry_bounds(geom, geom + geom_len);
Int3 res = UniformGrid::resolution(bounds, geom_len);
size_t n_data = UniformGrid::data_size(res);
size_t n_pairs =
UniformGrid::count_pairs(res, bounds, geom, geom + geom_len);
ugrid_data_t *grid_data;
ugrid_pair_t *grid_pairs;
hipMallocManaged(&grid_data, n_data * sizeof(ugrid_data_t));
hipMallocManaged(&grid_pairs, n_pairs * sizeof(ugrid_pair_t));
hipDeviceSynchronize();
UniformGrid grid(res, bounds, grid_data, grid_pairs, n_pairs, geom,
geom + geom_len);
// run kernel
CUDAKernelArgs args = {w, h, camera, bounds, grid,
accel, iteration, buf_ptr, display_buf_ptr};
const int num_blocks = (size_pixels + BLOCK_SIZE - 1) / BLOCK_SIZE;
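    // One thread per pixel: round the pixel count up to a whole number of blocks.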
hipLaunchKernelGGL(( cuda_render_kernel), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, args);
hipLaunchKernelGGL(( cuda_tonemap_kernel), dim3(num_blocks), dim3(BLOCK_SIZE), 0, 0, args);
hipDeviceSynchronize();
hipFree(grid_data);
hipFree(grid_pairs);
}
/**
* @brief Destroy resource
*
*/
void cuda_destroy() {
// unmap resources
hipGraphicsUnmapResources(1, &cuda_buffer, cuda_stream);
hipGraphicsUnmapResources(1, &cuda_texture, cuda_stream);
hipStreamDestroy(cuda_stream);
}
/**
* @brief Path tracing kernel
* @param args current state
*/
__global__ void cuda_render_kernel(CUDAKernelArgs args) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
float inv_w = 1 / float(args.w);
float inv_h = 1 / float(args.h);
float fov = 30;
float aspect_ratio = float(args.w) / float(args.h);
float angle = tan(0.5 * M_PI * fov / 180.0);
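    // Tangent of half the field of view; used to map pixel coordinates into camera space.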
Mat4f dir_camera = transform_clear_translate(args.camera);
Float3 origin = args.camera * Float3();
const size_t len = args.w * args.h;
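    // Grid-stride loop: each thread shades every (blockDim.x * gridDim.x)-th pixel.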
for (size_t i = index; i < len; i += stride) {
const size_t idx = i * 4;
const size_t x = i % args.w;
const size_t y = i / args.w;
Float3 color;
        for (size_t s = 0; s < PRIMARY_RAYS; ++s) {
float v_x = (2 * ((x + util::randf(0, 1)) * inv_w) - 1) * angle *
aspect_ratio;
float v_y = (1 - 2 * ((y + util::randf(0, 1)) * inv_h)) * angle;
Float3 ray_dir = dir_camera * Float3(v_x, v_y, -1);
ray_dir.normalize();
color += raytracing::trace(origin, ray_dir, args.bounds, args.grid,
args.accel, 8);
}
color *= 1.f / PRIMARY_RAYS;
// compute all-time average color
Float3 dst = Float3(args.pixels[idx], args.pixels[idx + 1],
args.pixels[idx + 2]);
float f = 1;
if (args.iteration > 0)
f = 1.f / args.iteration;
Float3 blended = color * f + dst * (1 - f);
// write color
args.pixels[idx] = blended.x;
args.pixels[idx + 1] = blended.y;
args.pixels[idx + 2] = blended.z;
args.pixels[idx + 3] = 1; // alpha
}
}
/**
* @brief Tone mapping kernel
* @param args current state
*/
__global__ void cuda_tonemap_kernel(CUDAKernelArgs args) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
const size_t len = args.w * args.h;
for (size_t i = index; i < len; i += stride) {
const size_t idx = i * 4;
Float3 hdr = Float3(args.pixels[idx], args.pixels[idx + 1],
args.pixels[idx + 2]);
Float3 ldr = raytracing::tonemap(hdr);
args.display_pixels[idx] = ldr.x;
args.display_pixels[idx + 1] = ldr.y;
args.display_pixels[idx + 2] = ldr.z;
}
} | b2ab7d91202c665f38afb8c347e782fe8bf5dc49.cu | #include <GL/glew.h>
#include <cuda_gl_interop.h>
#include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <memory>
#include <stdexcept>
#include <vector>
#include "Box.hh"
#include "Geometry.hh"
#include "Mat.hh"
#include "Material.hh"
#include "Sphere.hh"
#include "Tri.hh"
#include "UniformGrid.hh"
#include "Vec3.hh"
#include "cuda_render.cuh"
#include "raytracing.hh"
#include "transform.hh"
#include "util.hh"
/**
* @brief Initializes CUDA resources.
 * @details Called once upon program start. Registers the GL texture and both
 * GL buffers for CUDA/GL interop; creates a stream and maps the resources to it.
 *
 * @param texture_id ID of the GL texture
 * @param buffer_id ID of the GL buffer
 * @param display_buffer_id ID of the GL display buffer
*/
void cuda_init(GLuint texture_id, GLuint buffer_id, GLuint display_buffer_id) {
// register GL buffer and texture as CUDA resources
cudaGraphicsGLRegisterBuffer(&cuda_buffer, buffer_id,
cudaGraphicsRegisterFlagsNone);
cudaGraphicsGLRegisterBuffer(&cuda_display_buffer, display_buffer_id,
cudaGraphicsRegisterFlagsNone);
cudaGraphicsGLRegisterImage(&cuda_texture, texture_id, GL_TEXTURE_2D,
cudaGraphicsRegisterFlagsNone);
// create CUDA stream
cudaStreamCreate(&cuda_stream);
// map resources
cudaGraphicsMapResources(1, &cuda_buffer, cuda_stream);
cudaGraphicsMapResources(1, &cuda_display_buffer, cuda_stream);
cudaGraphicsMapResources(1, &cuda_texture, cuda_stream);
}
void cuda_render(size_t w, size_t h, const Mat4f &camera, Geometry *geom,
size_t geom_len, unsigned iteration, bool accel) {
using namespace std;
const size_t size_pixels = w * h;
float *buf_ptr;
float *display_buf_ptr;
size_t size_mapped;
cudaGraphicsResourceGetMappedPointer((void **)&buf_ptr, &size_mapped,
cuda_buffer);
cudaGraphicsResourceGetMappedPointer((void **)&display_buf_ptr,
&size_mapped, cuda_display_buffer);
// assert(size_mapped == size_pixels * 4 * sizeof(float)); // RGBA32F
// construct uniform grid
AABB bounds = geometry_bounds(geom, geom + geom_len);
Int3 res = UniformGrid::resolution(bounds, geom_len);
size_t n_data = UniformGrid::data_size(res);
size_t n_pairs =
UniformGrid::count_pairs(res, bounds, geom, geom + geom_len);
ugrid_data_t *grid_data;
ugrid_pair_t *grid_pairs;
cudaMallocManaged(&grid_data, n_data * sizeof(ugrid_data_t));
cudaMallocManaged(&grid_pairs, n_pairs * sizeof(ugrid_pair_t));
cudaDeviceSynchronize();
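    // Managed allocations: the uniform grid built on the host below is directly visible to the render kernel.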
UniformGrid grid(res, bounds, grid_data, grid_pairs, n_pairs, geom,
geom + geom_len);
// run kernel
CUDAKernelArgs args = {w, h, camera, bounds, grid,
accel, iteration, buf_ptr, display_buf_ptr};
const int num_blocks = (size_pixels + BLOCK_SIZE - 1) / BLOCK_SIZE;
cuda_render_kernel<<<num_blocks, BLOCK_SIZE>>>(args);
cuda_tonemap_kernel<<<num_blocks, BLOCK_SIZE>>>(args);
cudaDeviceSynchronize();
cudaFree(grid_data);
cudaFree(grid_pairs);
}
/**
* @brief Destroy resource
*
*/
void cuda_destroy() {
// unmap resources
cudaGraphicsUnmapResources(1, &cuda_buffer, cuda_stream);
cudaGraphicsUnmapResources(1, &cuda_texture, cuda_stream);
cudaStreamDestroy(cuda_stream);
}
/**
* @brief Path tracing kernel
* @param args current state
*/
__global__ void cuda_render_kernel(CUDAKernelArgs args) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
float inv_w = 1 / float(args.w);
float inv_h = 1 / float(args.h);
float fov = 30;
float aspect_ratio = float(args.w) / float(args.h);
float angle = tan(0.5 * M_PI * fov / 180.0);
Mat4f dir_camera = transform_clear_translate(args.camera);
Float3 origin = args.camera * Float3();
const size_t len = args.w * args.h;
for (size_t i = index; i < len; i += stride) {
const size_t idx = i * 4;
const size_t x = i % args.w;
const size_t y = i / args.w;
Float3 color;
        for (size_t s = 0; s < PRIMARY_RAYS; ++s) {
float v_x = (2 * ((x + util::randf(0, 1)) * inv_w) - 1) * angle *
aspect_ratio;
float v_y = (1 - 2 * ((y + util::randf(0, 1)) * inv_h)) * angle;
Float3 ray_dir = dir_camera * Float3(v_x, v_y, -1);
ray_dir.normalize();
color += raytracing::trace(origin, ray_dir, args.bounds, args.grid,
args.accel, 8);
}
color *= 1.f / PRIMARY_RAYS;
// compute all-time average color
Float3 dst = Float3(args.pixels[idx], args.pixels[idx + 1],
args.pixels[idx + 2]);
float f = 1;
if (args.iteration > 0)
f = 1.f / args.iteration;
Float3 blended = color * f + dst * (1 - f);
// write color
args.pixels[idx] = blended.x;
args.pixels[idx + 1] = blended.y;
args.pixels[idx + 2] = blended.z;
args.pixels[idx + 3] = 1; // alpha
}
}
/**
* @brief Tone mapping kernel
* @param args current state
*/
__global__ void cuda_tonemap_kernel(CUDAKernelArgs args) {
const size_t index = blockIdx.x * blockDim.x + threadIdx.x;
const size_t stride = blockDim.x * gridDim.x;
const size_t len = args.w * args.h;
for (size_t i = index; i < len; i += stride) {
const size_t idx = i * 4;
Float3 hdr = Float3(args.pixels[idx], args.pixels[idx + 1],
args.pixels[idx + 2]);
Float3 ldr = raytracing::tonemap(hdr);
args.display_pixels[idx] = ldr.x;
args.display_pixels[idx + 1] = ldr.y;
args.display_pixels[idx + 2] = ldr.z;
}
} |
dddf9e1fb070a4ba565b74317c78398b6cd6d3cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// Do not time memory allocation
int* dev_odata;
hipMalloc((void**)&dev_odata, n * sizeof(int));
int* dev_idata;
hipMalloc((void**)&dev_idata, n * sizeof(int));
hipMemcpy(dev_idata, idata, sizeof(int) * n, hipMemcpyHostToDevice);
thrust::device_ptr<int> dv_out(dev_odata);
thrust::device_ptr<int> dv_in(dev_idata);
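            // Wrapping the raw device pointers lets Thrust treat them as device iterators.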
// Time everything else
timer().startGpuTimer();
            // Exclusive prefix sum on the device via thrust::exclusive_scan
            // (equivalently, for device_vectors dv_in and dv_out:
            //  thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin())).
thrust::exclusive_scan(dv_in, dv_in + n, dv_out);
timer().endGpuTimer();
// Get the return value off of the device and free memory.
hipMemcpy(odata, dev_odata, sizeof(int) * n, hipMemcpyDeviceToHost);
hipFree(dev_odata);
hipFree(dev_idata);
}
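        // Example: int in[4] = {1, 2, 3, 4}; int out[4]; scan(4, out, in); // out = {0, 1, 3, 6}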
}
}
| dddf9e1fb070a4ba565b74317c78398b6cd6d3cf.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// Do not time memory allocation
int* dev_odata;
cudaMalloc((void**)&dev_odata, n * sizeof(int));
int* dev_idata;
cudaMalloc((void**)&dev_idata, n * sizeof(int));
cudaMemcpy(dev_idata, idata, sizeof(int) * n, cudaMemcpyHostToDevice);
thrust::device_ptr<int> dv_out(dev_odata);
thrust::device_ptr<int> dv_in(dev_idata);
// Time everything else
timer().startGpuTimer();
            // Exclusive prefix sum on the device via thrust::exclusive_scan
            // (equivalently, for device_vectors dv_in and dv_out:
            //  thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin())).
thrust::exclusive_scan(dv_in, dv_in + n, dv_out);
timer().endGpuTimer();
// Get the return value off of the device and free memory.
cudaMemcpy(odata, dev_odata, sizeof(int) * n, cudaMemcpyDeviceToHost);
cudaFree(dev_odata);
cudaFree(dev_idata);
}
}
}
|
7e7ea2e123d60a83d14b248607c6531293b6eef6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
const int M,
const float* bounds,
const float* X,
int32_t* out) {
CUDA_1D_KERNEL_LOOP(i, N) {
int32_t low = -1, high = M;
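    // Binary search: high converges to the index of the first boundary >= X[i], i.e. the bucket index.
    // Example: bounds = {1, 3, 5}, X[i] = 4 -> out[i] = 2.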
while (high - low > 1) {
int32_t median = (high + low) / 2;
if (bounds[median] < X[i]) {
low = median;
} else {
high = median;
}
}
out[i] = high;
}
}
template <>
bool BucketizeOp<CUDAContext>::RunOnDevice() {
auto& input = Input(X);
CAFFE_ENFORCE_GE(input.dim(), 1);
auto N = input.numel();
auto* output = Output(INDICES, input.sizes(), at::dtype<int32_t>());
const auto* input_data = input.template data<float>();
auto* output_data = output->template mutable_data<int32_t>();
hipLaunchKernelGGL(( BucketizeOpKernel),
dim3(CAFFE_GET_BLOCKS(N)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
N,
boundaries_device_.numel(),
boundaries_device_.data<float>(),
input_data,
output_data);
return true;
};
REGISTER_CUDA_OPERATOR(Bucketize, BucketizeOp<CUDAContext>);
} // namespace caffe2
| 7e7ea2e123d60a83d14b248607c6531293b6eef6.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/bucketize_op.h"
#include <thrust/binary_search.h>
#include <thrust/device_vector.h>
namespace caffe2 {
__global__ void BucketizeOpKernel(
const int N,
const int M,
const float* bounds,
const float* X,
int32_t* out) {
CUDA_1D_KERNEL_LOOP(i, N) {
int32_t low = -1, high = M;
while (high - low > 1) {
int32_t median = (high + low) / 2;
if (bounds[median] < X[i]) {
low = median;
} else {
high = median;
}
}
out[i] = high;
}
}
template <>
bool BucketizeOp<CUDAContext>::RunOnDevice() {
auto& input = Input(X);
CAFFE_ENFORCE_GE(input.dim(), 1);
auto N = input.numel();
auto* output = Output(INDICES, input.sizes(), at::dtype<int32_t>());
const auto* input_data = input.template data<float>();
auto* output_data = output->template mutable_data<int32_t>();
BucketizeOpKernel<<<
CAFFE_GET_BLOCKS(N),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
N,
boundaries_device_.numel(),
boundaries_device_.data<float>(),
input_data,
output_data);
return true;
};
REGISTER_CUDA_OPERATOR(Bucketize, BucketizeOp<CUDAContext>);
} // namespace caffe2
|
8f7908a3eaace35d084e9b9cc10b933af2ec6af7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <gtest/gtest.h>
#include <mtensor.hpp>
using namespace matazure;
using namespace testing;
__global__ void test_kernel() {}
TEST(CudaExecutionTest, ExecutionPolicy) {
{
cuda::execution_policy policy;
cuda::configure_grid(policy, test_kernel);
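        // configure_grid fills in the policy's launch configuration (grid/block dims, shared memory) for test_kernel.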
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
}
TEST(CudaExecutionTest, DefaultExecutionPolicy) {
{
cuda::default_execution_policy policy;
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
}
TEST(CudaExecutionTest, ForIndexExecutionPolicy) {
{
cuda::for_index_execution_policy policy;
policy.total_size(64);
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
{
cuda::for_index_execution_policy policy;
policy.total_size(128);
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
{
cuda::for_index_execution_policy policy;
policy.total_size(0);
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
}
| 8f7908a3eaace35d084e9b9cc10b933af2ec6af7.cu | #include <gtest/gtest.h>
#include <mtensor.hpp>
using namespace matazure;
using namespace testing;
__global__ void test_kernel() {}
TEST(CudaExecutionTest, ExecutionPolicy) {
{
cuda::execution_policy policy;
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
}
TEST(CudaExecutionTest, DefaultExecutionPolicy) {
{
cuda::default_execution_policy policy;
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
}
TEST(CudaExecutionTest, ForIndexExecutionPolicy) {
{
cuda::for_index_execution_policy policy;
policy.total_size(64);
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
{
cuda::for_index_execution_policy policy;
policy.total_size(128);
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
{
cuda::for_index_execution_policy policy;
policy.total_size(0);
cuda::configure_grid(policy, test_kernel);
std::cout << "grid dim " << policy.grid_dim() << std::endl;
std::cout << "block dim " << policy.block_dim() << std::endl;
std::cout << "shared memory bytes " << policy.shared_mem_bytes() << std::endl;
}
}
|
90e8c36474d41ba50462a289c31053aa800f3b51.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ipsec.h"
#include <stdlib.h>
#include <time.h>
#define SHA 1
#define AES_ASSIGN 1
#define BODY 1
#define EIHDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr))
__device__ void sha1_kernel_global(unsigned char *data, sha1_gpu_context *ctx, uint32_t *extended, int len)
{
#if 1
/* Initialization vector for SHA-1 */
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xC3D2E1F0;
#endif
uint32_t temp, t;
/*
 * Extend the 16-word (64-byte) block into the 80-word message schedule.
 */
//sh_kim 20.03.11 : when the data length is 20 bytes, we need padding
if(len == 20)
{
memset(data + len - 1, 0, 44);
}
GET_UINT32_BE( extended[0], data, 0 );
GET_UINT32_BE( extended[1], data, 4 );
GET_UINT32_BE( extended[2], data, 8 );
GET_UINT32_BE( extended[3], data, 12 );
GET_UINT32_BE( extended[4], data, 16 );
GET_UINT32_BE( extended[5], data, 20 );
GET_UINT32_BE( extended[6], data, 24 );
GET_UINT32_BE( extended[7], data, 28 );
GET_UINT32_BE( extended[8], data, 32 );
GET_UINT32_BE( extended[9], data, 36 );
GET_UINT32_BE( extended[10], data, 40 );
GET_UINT32_BE( extended[11], data, 44 );
GET_UINT32_BE( extended[12], data, 48 );
GET_UINT32_BE( extended[13], data, 52 );
GET_UINT32_BE( extended[14], data, 56 );
GET_UINT32_BE( extended[15], data, 60 );
// Same as "blk(i)" macro in openssl source.
for (t = 16; t < 80; t++) {
temp = extended[t - 3] ^ extended[t - 8] ^ extended[t - 14] ^ extended[t - 16];
extended[t] = S(temp,1);
}
sha1_gpu_process(ctx, extended);
}
// CKJUNG, 18.10.26 [NF#2:IPSec]-------------------------------------
__global__ void ipsec(struct mempool** mempool, uint32_t* pkt_cnt, unsigned char* d_nounce, unsigned int* d_key, unsigned char* d_sbox, unsigned char* d_GF2, unsigned int* seq)
{
// 1 ThreadBlock ver.
// <<< 1, 512 >>> threads.
// 1 thread for 1 pkt. (60B pkt)
// 512 / 1 = 512, 1TB has 512 threads each and manages 512 pkts.
unsigned char IV[16] = {0};
sha1_gpu_context octx;
sha1_gpu_context ictx;
uint32_t extended[80];
int ctr = 0;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int pktid = tid / THD_PER_PKT;
int dataid = tid % THD_PER_PKT;
unsigned int sha_count = 0;
struct mempool* mini_mempool = NULL;
if(pktid < 512)
mini_mempool = mempool[pktid];
struct pkt_buf* buf = NULL;
__shared__ struct pkt_buf* buf_pool[512];
// 1 ThreadBlock ver.
// IV : 512 * 16 = 8,192
// aes_tmp : 512 * (64 - 16) = 24,576
// octx : 20 * 512 = 10,240
// pkt_len : 4 * 128 = 512
//-------------------------- Total __shared__ mem Usage : 43,012 + 512
if(tid == 0){
for(int i = 0; i < 512; i++)
buf_pool[i] = NULL;
}
#if 0
if(threadIdx.x == TOTAL_T_NUM - 1){
START_RED
printf("[%s] threadIdx.x %d is alive!\n", __FUNCTION__, threadIdx.x);
END
}
#endif
__syncthreads();
while(true){ // Persistent Kernel (for every threads)
__syncthreads();
if(pktid < 512){
__syncthreads();
if(dataid == 0)
buf_pool[pktid] = pkt_buf_extract(mini_mempool, 1);
__syncthreads();
buf = buf_pool[pktid];
__syncthreads();
if(buf != NULL){
#if BODY
__syncthreads();
sha_count = PKT_DATA_SIZE / 64 + ((PKT_DATA_SIZE % 64) != 0);
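                // Number of 64-byte SHA-1 blocks needed to cover the payload (rounded up).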
__syncthreads();
if(dataid == 0){
buf->data[PKT_DATA_SIZE] = 0; // padlen
                buf->data[PKT_DATA_SIZE + 1] = IPPROTO_IPIP; // next-hdr (meaning "IP within IP")
/* For Reference...
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IPIP = 4
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPPROTO_ESP = 50
*/
ctr++; // same "ctr" value for grouped 3-threads. (counter) AES-CTR Mode
IV[15] = ctr & 0xFF;
                IV[14] = (ctr >> 8) & 0xFF; // CKJUNG, shift in steps of 8 bits (1 byte) to pick out each byte of the counter
IV[13] = (ctr >> 16) & 0xFF;
IV[12] = (ctr >> 24) & 0xFF;
for(int i = 0; i < 12; i++)
IV[i] = 0;
// Copy our state into private memory
unsigned char temp, temp2;
unsigned char overflow = 0;
char tmp[16];
for(int i = 15; i != -1; i--) {
temp = d_nounce[i];
temp2 = IV[i];
IV[i] += temp + overflow;
overflow = ((int)temp2 + (int)temp + (int)overflow > 255);
}
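                // IV now holds (nonce + per-packet counter); the AES rounds below encrypt it to produce the CTR keystream block.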
AddRoundKey(IV, &d_key[0]);
for(int i = 1; i < 10; i++)
{
SubBytes(IV, d_sbox);
ShiftRows(IV);
MixColumns(IV, d_GF2, tmp);
AddRoundKey(IV, &d_key[4 * i]);
}
SubBytes(IV, d_sbox);
ShiftRows(IV);
AddRoundKey(IV, &d_key[4 * 10]);
////////////////// Locating AES Encrypted parts into a pkt ///////////////////////////////
unsigned char temphdr[34] = { 0 };
//printf("[tid : %d] data : %ld\n", threadIdx.x, (uint64_t)(buf->data));
memcpy(temphdr, buf->data, EIHDR_SIZE);
memcpy(buf->data - sizeof(struct iphdr) - 8, temphdr, EIHDR_SIZE);
}
__syncthreads();
for(int i = 0; i < DATA_PER_THD; i++){
buf->data[sizeof(struct ethhdr) + dataid*DATA_PER_THD + i] ^= IV[i & 15];
}
__syncthreads();
if(dataid == 0){
//////////// Proto_type = ESP set! ///////////
buf->data[6] = IPPROTO_ESP; // IPPROTO_ESP = 50
//buf->data[sizeof(struct ethhdr) + 9 - sizeof(struct iphdr) - 8] = IPPROTO_ESP; // IPPROTO_ESP = 50
struct esphdr* esph;
esph = (struct esphdr *)((uint32_t *)&(buf->data[6]));
// SPI (Security Parameter Index)
uint32_t spi = 1085899777;
HTONS32(spi);
////////// Set ESP header SPI value ///////////////////
memcpy(&esph->spi, &spi, 4);
atomicAdd(seq, 1);
//////////// Set ESP header SEQ value //////////
memcpy(&esph->seq, seq, 4);
#if SHA
// CKJUNG, HMAC-SHA1 From here! /////////////////////////////
// RFC 2104, H(K XOR opad, H(K XOR ipad, text))
/**** Inner Digest ****/
// H(K XOR ipad, text) : 64 Bytes
int e_index = 0;
while(e_index < sha_count){
sha1_kernel_global(&buf->data[6 + e_index*64], &ictx, extended, 64);
e_index++;
}
/**** Outer Digest ****/
// H(K XOR opad, H(K XOR ipad, text)) : 20 Bytes
sha1_kernel_global(&(ictx.c_state[0]), &octx, extended, 20);
memcpy(&buf->data[PKT_DATA_SIZE + 2], &(octx.c_state[0]), 20);
#endif
#endif
buf->app_idx = 2;
buf->paylen += 50;
buf = NULL;
buf_pool[pktid] = NULL;
}
}
}
}
}
__device__ void AddRoundKey(unsigned char *state, unsigned *w)
{
int i;
for(i = 0; i < BLOCK_SIZE; i++) { // column
state[i * 4 + 0] = state[i * 4 + 0] ^ ((w[i] >> (8 * 3)) & 0xFF);
state[i * 4 + 1] = state[i * 4 + 1] ^ ((w[i] >> (8 * 2)) & 0xFF);
state[i * 4 + 2] = state[i * 4 + 2] ^ ((w[i] >> (8 * 1)) & 0xFF);
state[i * 4 + 3] = state[i * 4 + 3] ^ ((w[i] >> (8 * 0)) & 0xFF);
}
}
__device__ void SubBytes(unsigned char *state, unsigned char* sbox) //state = 16 chars
{
int i;
for(i = 0; i < 4 * BLOCK_SIZE; i++) {
state[i] = sbox[state[i]];
}
}
__device__ void ShiftRows(unsigned char *state)
{
// NOTE: For whatever reason the standard uses column-major ordering ?
// 0 1 2 3 --> 0 1 2 3 | 0 4 8 12 --> 0 4 8 12
// 0 1 2 3 --> 1 2 3 0 | 1 5 9 13 --> 5 9 13 1
// 0 1 2 3 --> 2 3 0 1 | 2 6 10 14 --> 10 14 2 6
// 0 1 2 3 --> 3 0 1 2 | 3 7 11 15 --> 15 3 7 11
unsigned char temp = state[1];
state[1] = state[5];
state[5] = state[9];
state[9] = state[13];
state[13] = temp;
temp = state[2];
state[2] = state[10];
state[10] = temp;
temp = state[6];
state[6] = state[14];
state[14] = temp;
temp = state[3];
state[3] = state[15];
state[15] = state[11];
state[11] = state[7];
state[7] = temp;
}
// See "Efficient Software Implementation of AES on 32-bit platforms"
__device__ void MixColumns(unsigned char *state, unsigned char* GF_2, char* s)
{
//[TODO] malloc!!!!!! is the criminal!!! CKJUNG, 18.10.26
memcpy(s, state, 4 * BLOCK_SIZE);
int i;
#if 1
for(i = 0; i < BLOCK_SIZE; i++) { // column
unsigned char * x = (unsigned char*)&s[i*4];
unsigned char * y = (unsigned char*)&state[i*4];
y[0] = x[1] ^ x[2] ^ x[3];
y[1] = x[0] ^ x[2] ^ x[3];
y[2] = x[0] ^ x[1] ^ x[3];
y[3] = x[0] ^ x[1] ^ x[2];
x[0] = GF_2[x[0]];
x[1] = GF_2[x[1]];
x[2] = GF_2[x[2]];
x[3] = GF_2[x[3]];
y[0] ^= x[0] ^ x[1];
y[1] ^= x[1] ^ x[2];
y[2] ^= x[2] ^ x[3];
y[3] ^= x[3] ^ x[0];
}
#endif
}
/**
* Initialize new context
*
* @param context SHA1-Context
*/
/*
* Process extended block.
*/
__device__ void sha1_gpu_process (sha1_gpu_context *ctx, uint32_t W[80])
{
uint32_t A, B, C, D, E;
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
#define P(a,b,c,d,e,x)\
{\
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30);\
}
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, W[16] );
P( D, E, A, B, C, W[17] );
P( C, D, E, A, B, W[18] );
P( B, C, D, E, A, W[19] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, W[20] );
P( E, A, B, C, D, W[21] );
P( D, E, A, B, C, W[22] );
P( C, D, E, A, B, W[23] );
P( B, C, D, E, A, W[24] );
P( A, B, C, D, E, W[25] ); // w[25] is the problem.
P( E, A, B, C, D, W[26] );
P( D, E, A, B, C, W[27] );
P( C, D, E, A, B, W[28] );
P( B, C, D, E, A, W[29] );
P( A, B, C, D, E, W[30] );
P( E, A, B, C, D, W[31] );
P( D, E, A, B, C, W[32] );
P( C, D, E, A, B, W[33] );
P( B, C, D, E, A, W[34] );
P( A, B, C, D, E, W[35] );
P( E, A, B, C, D, W[36] );
P( D, E, A, B, C, W[37] );
P( C, D, E, A, B, W[38] );
P( B, C, D, E, A, W[39] );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, W[40] );
P( E, A, B, C, D, W[41] );
P( D, E, A, B, C, W[42] );
P( C, D, E, A, B, W[43] );
P( B, C, D, E, A, W[44] );
P( A, B, C, D, E, W[45] );
P( E, A, B, C, D, W[46] );
P( D, E, A, B, C, W[47] );
P( C, D, E, A, B, W[48] );
P( B, C, D, E, A, W[49] );
P( A, B, C, D, E, W[50] );
P( E, A, B, C, D, W[51] );
P( D, E, A, B, C, W[52] );
P( C, D, E, A, B, W[53] );
P( B, C, D, E, A, W[54] );
P( A, B, C, D, E, W[55] );
P( E, A, B, C, D, W[56] );
P( D, E, A, B, C, W[57] );
P( C, D, E, A, B, W[58] );
P( B, C, D, E, A, W[59] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, W[60] );
P( E, A, B, C, D, W[61] );
P( D, E, A, B, C, W[62] );
P( C, D, E, A, B, W[63] );
P( B, C, D, E, A, W[64] );
P( A, B, C, D, E, W[65] );
P( E, A, B, C, D, W[66] );
P( D, E, A, B, C, W[67] );
P( C, D, E, A, B, W[68] );
P( B, C, D, E, A, W[69] );
P( A, B, C, D, E, W[70] );
P( E, A, B, C, D, W[71] );
P( D, E, A, B, C, W[72] );
P( C, D, E, A, B, W[73] );
P( B, C, D, E, A, W[74] );
P( A, B, C, D, E, W[75] );
P( E, A, B, C, D, W[76] );
P( D, E, A, B, C, W[77] );
P( C, D, E, A, B, W[78] );
P( B, C, D, E, A, W[79] );
#undef K
#undef F
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
unsigned int SubWord(unsigned int w) {
unsigned int i = (sbox[(w >> 24) & 0xFF] << 24) | (sbox[(w >> 16) & 0xFF] << 16);
i |= (sbox[(w >> 8) & 0xFF] << 8) | sbox[w & 0xFF];
return i;
}
unsigned int RotWord(unsigned int w) {
unsigned char temp = (w >> 24) & 0xFF;
return ((w << 8) | temp);
}
void KeyExpansion(unsigned char* key, unsigned int* w) {
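    // AES key schedule: expands the cipher key into BLOCK_SIZE * (NUM_ROUNDS + 1) 32-bit round-key words.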
unsigned int temp;
int i = 0;
for(i = 0; i < KEY_SIZE; i++) {
w[i] = (key[4*i] << 24) | (key[4*i + 1] << 16) | (key[4*i + 2] << 8) | key[4*i + 3];
}
for(; i < BLOCK_SIZE * (NUM_ROUNDS + 1); i++) {
temp = w[i - 1];
if(i % KEY_SIZE == 0) {
temp = SubWord(RotWord(temp)) ^ Rcon[i / KEY_SIZE];
}
w[i] = w[i - KEY_SIZE] ^ temp;
}
}
extern "C"
void initialize_ipsec(struct mempool **mempool, uint32_t *pkt_cnt)
{
// CKJUNG, 18.10.25 [NF #2: IPSec] Setting initial_counter, key /////////////////////////
unsigned char nounce[16];
FILE* fnounce = fopen("./apps/lib/test.ctr", "rb");
fread(&nounce, 1, 16, fnounce);
fclose(fnounce);
int num_keys = BLOCK_SIZE * (NUM_ROUNDS + 1);
unsigned char key[16];
unsigned int* expanded_key = (unsigned int*)malloc(num_keys * sizeof(int));
FILE* fkey = fopen("./apps/lib/test.key", "rb");
fread(&key, 1, 16, fkey);
fclose(fkey);
KeyExpansion(key, expanded_key);
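    // Expand the AES key on the host; the round keys are copied to the GPU below.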
unsigned char *d_nounce;
unsigned int *d_key;
unsigned char *d_sbox;
unsigned char *d_GF2;
unsigned int *d_seq; // 20.02.02. CKJUNG
printf("____[Initialize]__NF #2__IPSec__\n");
ASSERTRT(hipMalloc((void**)&d_nounce, 16*sizeof(unsigned char)));
ASSERTRT(hipMemset(d_nounce, 0, 16*sizeof(unsigned char)));
ASSERTRT(hipMalloc((void**)&d_key, num_keys*sizeof(unsigned int)));
ASSERTRT(hipMemset(d_key, 0, num_keys*sizeof(unsigned int)));
ASSERTRT(hipMalloc((void**)&d_sbox, 256*sizeof(unsigned char)));
ASSERTRT(hipMemset(d_sbox, 0, 256*sizeof(unsigned char)));
ASSERTRT(hipMalloc((void**)&d_GF2, 256*sizeof(unsigned char)));
ASSERTRT(hipMemset(d_GF2, 0, 256*sizeof(unsigned char)));
ASSERTRT(hipMalloc((void**)&d_seq, sizeof(unsigned int)));
ASSERTRT(hipMemset(d_seq, 0, sizeof(unsigned int)));
hipError_t nounce_err = hipMemcpy(d_nounce, nounce, 16*sizeof(unsigned char), hipMemcpyHostToDevice);
hipError_t key_err = hipMemcpy(d_key, expanded_key, num_keys*sizeof(unsigned int), hipMemcpyHostToDevice);
hipError_t sbox_err = hipMemcpy(d_sbox, sbox, 256*sizeof(unsigned char), hipMemcpyHostToDevice);
hipError_t GF2_err = hipMemcpy(d_GF2, GF_2, 256*sizeof(unsigned char), hipMemcpyHostToDevice);
if(nounce_err != hipSuccess || key_err != hipSuccess || sbox_err != hipSuccess || GF2_err != hipSuccess)
{
START_RED
printf("[Error] hipMemcpy for \"nounce\" or \"key\" or \"sbox\" or \"GF2\" has failed.\n");
END
}else{
START_GRN
printf("[IPSec] Nounce, Expanded keys, SBOX, and GF2 are ready.\n");
END
}
hipStream_t cuda_stream3;
ASSERT_CUDA(hipStreamCreateWithFlags(&cuda_stream3,hipStreamNonBlocking));
printf("NF#2: IPsec\n");
START_BLU
printf("[IPSEC] # of Thread Blocks : %d, # of Threads : %d\n", NF_TB_NUM, NF_T_NUM);
END
/*
* ipsec for 64B pkt
* 1 pkt needs 1 GPU threads.
* 512 x 1 = 512 threads. (OK)
* 384 threads per TB; 512 = 1 * 512; each TB manages 512 pkts; 128 * 1 = 512 Desc
*/
hipLaunchKernelGGL(( ipsec), dim3(NF_TB_NUM), dim3(NF_T_NUM), 0, cuda_stream3 , mempool, pkt_cnt, d_nounce, d_key, d_sbox, d_GF2, d_seq);
START_GRN
printf("[Done]____[Initialize]__NF #2__IPSec__\n");
printf("[IPSEC] %s\n", hipGetErrorName(hipGetLastError()));
END
free(expanded_key);
// ~ CKJUNG /////////////////////////////////////////////////////////////////////////////
}
| 90e8c36474d41ba50462a289c31053aa800f3b51.cu | #include "ipsec.h"
#include <stdlib.h>
#include <time.h>
#define SHA 1
#define AES_ASSIGN 1
#define BODY 1
#define EIHDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr))
__device__ void sha1_kernel_global(unsigned char *data, sha1_gpu_context *ctx, uint32_t *extended, int len)
{
#if 1
/* Initialization vector for SHA-1 */
ctx->state[0] = 0x67452301;
ctx->state[1] = 0xEFCDAB89;
ctx->state[2] = 0x98BADCFE;
ctx->state[3] = 0x10325476;
ctx->state[4] = 0xC3D2E1F0;
#endif
uint32_t temp, t;
/*
 * Extend the 16-word (64-byte) block into the 80-word message schedule.
 */
//sh_kim 20.03.11 : when the data length is 20 bytes, we need padding
if(len == 20)
{
memset(data + len - 1, 0, 44);
}
GET_UINT32_BE( extended[0], data, 0 );
GET_UINT32_BE( extended[1], data, 4 );
GET_UINT32_BE( extended[2], data, 8 );
GET_UINT32_BE( extended[3], data, 12 );
GET_UINT32_BE( extended[4], data, 16 );
GET_UINT32_BE( extended[5], data, 20 );
GET_UINT32_BE( extended[6], data, 24 );
GET_UINT32_BE( extended[7], data, 28 );
GET_UINT32_BE( extended[8], data, 32 );
GET_UINT32_BE( extended[9], data, 36 );
GET_UINT32_BE( extended[10], data, 40 );
GET_UINT32_BE( extended[11], data, 44 );
GET_UINT32_BE( extended[12], data, 48 );
GET_UINT32_BE( extended[13], data, 52 );
GET_UINT32_BE( extended[14], data, 56 );
GET_UINT32_BE( extended[15], data, 60 );
// Same as "blk(i)" macro in openssl source.
for (t = 16; t < 80; t++) {
temp = extended[t - 3] ^ extended[t - 8] ^ extended[t - 14] ^ extended[t - 16];
extended[t] = S(temp,1);
}
sha1_gpu_process(ctx, extended);
}
// CKJUNG, 18.10.26 [NF#2:IPSec]-------------------------------------
__global__ void ipsec(struct mempool** mempool, uint32_t* pkt_cnt, unsigned char* d_nounce, unsigned int* d_key, unsigned char* d_sbox, unsigned char* d_GF2, unsigned int* seq)
{
// 1 ThreadBlock ver.
// <<< 1, 512 >>> threads.
// 1 thread for 1 pkt. (60B pkt)
// 512 / 1 = 512, 1TB has 512 threads each and manages 512 pkts.
unsigned char IV[16] = {0};
sha1_gpu_context octx;
sha1_gpu_context ictx;
uint32_t extended[80];
int ctr = 0;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int pktid = tid / THD_PER_PKT;
int dataid = tid % THD_PER_PKT;
unsigned int sha_count = 0;
struct mempool* mini_mempool = NULL;
if(pktid < 512)
mini_mempool = mempool[pktid];
struct pkt_buf* buf = NULL;
__shared__ struct pkt_buf* buf_pool[512];
// 1 ThreadBlock ver.
// IV : 512 * 16 = 8,192
// aes_tmp : 512 * (64 - 16) = 24,576
// octx : 20 * 512 = 10,240
// pkt_len : 4 * 128 = 512
//-------------------------- Total __shared__ mem Usage : 43,012 + 512
if(tid == 0){
for(int i = 0; i < 512; i++)
buf_pool[i] = NULL;
}
#if 0
if(threadIdx.x == TOTAL_T_NUM - 1){
START_RED
printf("[%s] threadIdx.x %d is alive!\n", __FUNCTION__, threadIdx.x);
END
}
#endif
__syncthreads();
while(true){ // Persistent Kernel (for every threads)
__syncthreads();
if(pktid < 512){
__syncthreads();
if(dataid == 0)
buf_pool[pktid] = pkt_buf_extract(mini_mempool, 1);
__syncthreads();
buf = buf_pool[pktid];
__syncthreads();
if(buf != NULL){
#if BODY
__syncthreads();
sha_count = PKT_DATA_SIZE / 64 + ((PKT_DATA_SIZE % 64) != 0);
__syncthreads();
if(dataid == 0){
buf->data[PKT_DATA_SIZE] = 0; // padlen
                buf->data[PKT_DATA_SIZE + 1] = IPPROTO_IPIP; // next-hdr (meaning "IP within IP")
/* For Reference...
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_IPIP = 4
IPPROTO_TCP = 6
IPPROTO_UDP = 17
IPPROTO_ESP = 50
*/
ctr++; // same "ctr" value for grouped 3-threads. (counter) AES-CTR Mode
IV[15] = ctr & 0xFF;
                IV[14] = (ctr >> 8) & 0xFF; // CKJUNG, shift in steps of 8 bits (1 byte) to pick out each byte of the counter
IV[13] = (ctr >> 16) & 0xFF;
IV[12] = (ctr >> 24) & 0xFF;
for(int i = 0; i < 12; i++)
IV[i] = 0;
// Copy our state into private memory
unsigned char temp, temp2;
unsigned char overflow = 0;
char tmp[16];
for(int i = 15; i != -1; i--) {
temp = d_nounce[i];
temp2 = IV[i];
IV[i] += temp + overflow;
overflow = ((int)temp2 + (int)temp + (int)overflow > 255);
}
AddRoundKey(IV, &d_key[0]);
for(int i = 1; i < 10; i++)
{
SubBytes(IV, d_sbox);
ShiftRows(IV);
MixColumns(IV, d_GF2, tmp);
AddRoundKey(IV, &d_key[4 * i]);
}
SubBytes(IV, d_sbox);
ShiftRows(IV);
AddRoundKey(IV, &d_key[4 * 10]);
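                // IV is now the AES-encrypted counter block (the CTR keystream); it is XORed with the payload below.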
////////////////// Locating AES Encrypted parts into a pkt ///////////////////////////////
unsigned char temphdr[34] = { 0 };
//printf("[tid : %d] data : %ld\n", threadIdx.x, (uint64_t)(buf->data));
memcpy(temphdr, buf->data, EIHDR_SIZE);
memcpy(buf->data - sizeof(struct iphdr) - 8, temphdr, EIHDR_SIZE);
}
__syncthreads();
for(int i = 0; i < DATA_PER_THD; i++){
buf->data[sizeof(struct ethhdr) + dataid*DATA_PER_THD + i] ^= IV[i & 15];
}
__syncthreads();
if(dataid == 0){
//////////// Proto_type = ESP set! ///////////
buf->data[6] = IPPROTO_ESP; // IPPROTO_ESP = 50
//buf->data[sizeof(struct ethhdr) + 9 - sizeof(struct iphdr) - 8] = IPPROTO_ESP; // IPPROTO_ESP = 50
struct esphdr* esph;
esph = (struct esphdr *)((uint32_t *)&(buf->data[6]));
// SPI (Security Parameter Index)
uint32_t spi = 1085899777;
HTONS32(spi);
////////// Set ESP header SPI value ///////////////////
memcpy(&esph->spi, &spi, 4);
atomicAdd(seq, 1);
//////////// Set ESP header SEQ value //////////
memcpy(&esph->seq, seq, 4);
#if SHA
// CKJUNG, HMAC-SHA1 From here! /////////////////////////////
// RFC 2104, H(K XOR opad, H(K XOR ipad, text))
/**** Inner Digest ****/
// H(K XOR ipad, text) : 64 Bytes
int e_index = 0;
while(e_index < sha_count){
sha1_kernel_global(&buf->data[6 + e_index*64], &ictx, extended, 64);
e_index++;
}
/**** Outer Digest ****/
// H(K XOR opad, H(K XOR ipad, text)) : 20 Bytes
sha1_kernel_global(&(ictx.c_state[0]), &octx, extended, 20);
memcpy(&buf->data[PKT_DATA_SIZE + 2], &(octx.c_state[0]), 20);
#endif
#endif
buf->app_idx = 2;
buf->paylen += 50;
buf = NULL;
buf_pool[pktid] = NULL;
}
}
}
}
}
__device__ void AddRoundKey(unsigned char *state, unsigned *w)
{
int i;
for(i = 0; i < BLOCK_SIZE; i++) { // column
state[i * 4 + 0] = state[i * 4 + 0] ^ ((w[i] >> (8 * 3)) & 0xFF);
state[i * 4 + 1] = state[i * 4 + 1] ^ ((w[i] >> (8 * 2)) & 0xFF);
state[i * 4 + 2] = state[i * 4 + 2] ^ ((w[i] >> (8 * 1)) & 0xFF);
state[i * 4 + 3] = state[i * 4 + 3] ^ ((w[i] >> (8 * 0)) & 0xFF);
}
}
__device__ void SubBytes(unsigned char *state, unsigned char* sbox) //state = 16 chars
{
int i;
for(i = 0; i < 4 * BLOCK_SIZE; i++) {
state[i] = sbox[state[i]];
}
}
__device__ void ShiftRows(unsigned char *state)
{
// NOTE: For whatever reason the standard uses column-major ordering ?
// 0 1 2 3 --> 0 1 2 3 | 0 4 8 12 --> 0 4 8 12
// 0 1 2 3 --> 1 2 3 0 | 1 5 9 13 --> 5 9 13 1
// 0 1 2 3 --> 2 3 0 1 | 2 6 10 14 --> 10 14 2 6
// 0 1 2 3 --> 3 0 1 2 | 3 7 11 15 --> 15 3 7 11
unsigned char temp = state[1];
state[1] = state[5];
state[5] = state[9];
state[9] = state[13];
state[13] = temp;
temp = state[2];
state[2] = state[10];
state[10] = temp;
temp = state[6];
state[6] = state[14];
state[14] = temp;
temp = state[3];
state[3] = state[15];
state[15] = state[11];
state[11] = state[7];
state[7] = temp;
}
// See "Efficient Software Implementation of AES on 32-bit platforms"
__device__ void MixColumns(unsigned char *state, unsigned char* GF_2, char* s)
{
//[TODO] malloc!!!!!! is the criminal!!! CKJUNG, 18.10.26
memcpy(s, state, 4 * BLOCK_SIZE);
int i;
#if 1
for(i = 0; i < BLOCK_SIZE; i++) { // column
unsigned char * x = (unsigned char*)&s[i*4];
unsigned char * y = (unsigned char*)&state[i*4];
y[0] = x[1] ^ x[2] ^ x[3];
y[1] = x[0] ^ x[2] ^ x[3];
y[2] = x[0] ^ x[1] ^ x[3];
y[3] = x[0] ^ x[1] ^ x[2];
x[0] = GF_2[x[0]];
x[1] = GF_2[x[1]];
x[2] = GF_2[x[2]];
x[3] = GF_2[x[3]];
y[0] ^= x[0] ^ x[1];
y[1] ^= x[1] ^ x[2];
y[2] ^= x[2] ^ x[3];
y[3] ^= x[3] ^ x[0];
}
#endif
}
/**
* Initialize new context
*
* @param context SHA1-Context
*/
/*
* Process extended block.
*/
__device__ void sha1_gpu_process (sha1_gpu_context *ctx, uint32_t W[80])
{
uint32_t A, B, C, D, E;
A = ctx->state[0];
B = ctx->state[1];
C = ctx->state[2];
D = ctx->state[3];
E = ctx->state[4];
#define P(a,b,c,d,e,x)\
{\
e += S(a,5) + F(b,c,d) + K + x; b = S(b,30);\
}
#define F(x,y,z) (z ^ (x & (y ^ z)))
#define K 0x5A827999
P( A, B, C, D, E, W[0] );
P( E, A, B, C, D, W[1] );
P( D, E, A, B, C, W[2] );
P( C, D, E, A, B, W[3] );
P( B, C, D, E, A, W[4] );
P( A, B, C, D, E, W[5] );
P( E, A, B, C, D, W[6] );
P( D, E, A, B, C, W[7] );
P( C, D, E, A, B, W[8] );
P( B, C, D, E, A, W[9] );
P( A, B, C, D, E, W[10] );
P( E, A, B, C, D, W[11] );
P( D, E, A, B, C, W[12] );
P( C, D, E, A, B, W[13] );
P( B, C, D, E, A, W[14] );
P( A, B, C, D, E, W[15] );
P( E, A, B, C, D, W[16] );
P( D, E, A, B, C, W[17] );
P( C, D, E, A, B, W[18] );
P( B, C, D, E, A, W[19] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0x6ED9EBA1
P( A, B, C, D, E, W[20] );
P( E, A, B, C, D, W[21] );
P( D, E, A, B, C, W[22] );
P( C, D, E, A, B, W[23] );
P( B, C, D, E, A, W[24] );
P( A, B, C, D, E, W[25] ); // w[25] is the problem.
P( E, A, B, C, D, W[26] );
P( D, E, A, B, C, W[27] );
P( C, D, E, A, B, W[28] );
P( B, C, D, E, A, W[29] );
P( A, B, C, D, E, W[30] );
P( E, A, B, C, D, W[31] );
P( D, E, A, B, C, W[32] );
P( C, D, E, A, B, W[33] );
P( B, C, D, E, A, W[34] );
P( A, B, C, D, E, W[35] );
P( E, A, B, C, D, W[36] );
P( D, E, A, B, C, W[37] );
P( C, D, E, A, B, W[38] );
P( B, C, D, E, A, W[39] );
#undef K
#undef F
#define F(x,y,z) ((x & y) | (z & (x | y)))
#define K 0x8F1BBCDC
P( A, B, C, D, E, W[40] );
P( E, A, B, C, D, W[41] );
P( D, E, A, B, C, W[42] );
P( C, D, E, A, B, W[43] );
P( B, C, D, E, A, W[44] );
P( A, B, C, D, E, W[45] );
P( E, A, B, C, D, W[46] );
P( D, E, A, B, C, W[47] );
P( C, D, E, A, B, W[48] );
P( B, C, D, E, A, W[49] );
P( A, B, C, D, E, W[50] );
P( E, A, B, C, D, W[51] );
P( D, E, A, B, C, W[52] );
P( C, D, E, A, B, W[53] );
P( B, C, D, E, A, W[54] );
P( A, B, C, D, E, W[55] );
P( E, A, B, C, D, W[56] );
P( D, E, A, B, C, W[57] );
P( C, D, E, A, B, W[58] );
P( B, C, D, E, A, W[59] );
#undef K
#undef F
#define F(x,y,z) (x ^ y ^ z)
#define K 0xCA62C1D6
P( A, B, C, D, E, W[60] );
P( E, A, B, C, D, W[61] );
P( D, E, A, B, C, W[62] );
P( C, D, E, A, B, W[63] );
P( B, C, D, E, A, W[64] );
P( A, B, C, D, E, W[65] );
P( E, A, B, C, D, W[66] );
P( D, E, A, B, C, W[67] );
P( C, D, E, A, B, W[68] );
P( B, C, D, E, A, W[69] );
P( A, B, C, D, E, W[70] );
P( E, A, B, C, D, W[71] );
P( D, E, A, B, C, W[72] );
P( C, D, E, A, B, W[73] );
P( B, C, D, E, A, W[74] );
P( A, B, C, D, E, W[75] );
P( E, A, B, C, D, W[76] );
P( D, E, A, B, C, W[77] );
P( C, D, E, A, B, W[78] );
P( B, C, D, E, A, W[79] );
#undef K
#undef F
ctx->state[0] += A;
ctx->state[1] += B;
ctx->state[2] += C;
ctx->state[3] += D;
ctx->state[4] += E;
}
unsigned int SubWord(unsigned int w) {
unsigned int i = (sbox[(w >> 24) & 0xFF] << 24) | (sbox[(w >> 16) & 0xFF] << 16);
i |= (sbox[(w >> 8) & 0xFF] << 8) | sbox[w & 0xFF];
return i;
}
unsigned int RotWord(unsigned int w) {
unsigned char temp = (w >> 24) & 0xFF;
return ((w << 8) | temp);
}
void KeyExpansion(unsigned char* key, unsigned int* w) {
unsigned int temp;
int i = 0;
for(i = 0; i < KEY_SIZE; i++) {
w[i] = (key[4*i] << 24) | (key[4*i + 1] << 16) | (key[4*i + 2] << 8) | key[4*i + 3];
}
for(; i < BLOCK_SIZE * (NUM_ROUNDS + 1); i++) {
temp = w[i - 1];
if(i % KEY_SIZE == 0) {
temp = SubWord(RotWord(temp)) ^ Rcon[i / KEY_SIZE];
}
w[i] = w[i - KEY_SIZE] ^ temp;
}
}
extern "C"
void initialize_ipsec(struct mempool **mempool, uint32_t *pkt_cnt)
{
// CKJUNG, 18.10.25 [NF #2: IPSec] Setting initial_counter, key /////////////////////////
unsigned char nounce[16];
FILE* fnounce = fopen("./apps/lib/test.ctr", "rb");
fread(&nounce, 1, 16, fnounce);
fclose(fnounce);
int num_keys = BLOCK_SIZE * (NUM_ROUNDS + 1);
unsigned char key[16];
unsigned int* expanded_key = (unsigned int*)malloc(num_keys * sizeof(int));
FILE* fkey = fopen("./apps/lib/test.key", "rb");
fread(&key, 1, 16, fkey);
fclose(fkey);
KeyExpansion(key, expanded_key);
unsigned char *d_nounce;
unsigned int *d_key;
unsigned char *d_sbox;
unsigned char *d_GF2;
unsigned int *d_seq; // 20.02.02. CKJUNG
printf("____[Initialize]__NF #2__IPSec__\n");
ASSERTRT(cudaMalloc((void**)&d_nounce, 16*sizeof(unsigned char)));
ASSERTRT(cudaMemset(d_nounce, 0, 16*sizeof(unsigned char)));
ASSERTRT(cudaMalloc((void**)&d_key, num_keys*sizeof(unsigned int)));
ASSERTRT(cudaMemset(d_key, 0, num_keys*sizeof(unsigned int)));
ASSERTRT(cudaMalloc((void**)&d_sbox, 256*sizeof(unsigned char)));
ASSERTRT(cudaMemset(d_sbox, 0, 256*sizeof(unsigned char)));
ASSERTRT(cudaMalloc((void**)&d_GF2, 256*sizeof(unsigned char)));
ASSERTRT(cudaMemset(d_GF2, 0, 256*sizeof(unsigned char)));
ASSERTRT(cudaMalloc((void**)&d_seq, sizeof(unsigned int)));
ASSERTRT(cudaMemset(d_seq, 0, sizeof(unsigned int)));
cudaError_t nounce_err = cudaMemcpy(d_nounce, nounce, 16*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaError_t key_err = cudaMemcpy(d_key, expanded_key, num_keys*sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaError_t sbox_err = cudaMemcpy(d_sbox, sbox, 256*sizeof(unsigned char), cudaMemcpyHostToDevice);
cudaError_t GF2_err = cudaMemcpy(d_GF2, GF_2, 256*sizeof(unsigned char), cudaMemcpyHostToDevice);
if(nounce_err != cudaSuccess || key_err != cudaSuccess || sbox_err != cudaSuccess || GF2_err != cudaSuccess)
{
START_RED
printf("[Error] cudaMemcpy for \"nounce\" or \"key\" or \"sbox\" or \"GF2\" has failed.\n");
END
}else{
START_GRN
printf("[IPSec] Nounce, Expanded keys, SBOX, and GF2 are ready.\n");
END
}
cudaStream_t cuda_stream3;
ASSERT_CUDA(cudaStreamCreateWithFlags(&cuda_stream3,cudaStreamNonBlocking));
printf("NF#2: IPsec\n");
START_BLU
printf("[IPSEC] # of Thread Blocks : %d, # of Threads : %d\n", NF_TB_NUM, NF_T_NUM);
END
/*
* ipsec for 64B pkt
* 1 pkt needs 1 GPU threads.
* 512 x 1 = 512 threads. (OK)
* 384 threads per TB; 512 = 1 * 512; each TB manages 512 pkts; 128 * 1 = 512 Desc
*/
ipsec<<< NF_TB_NUM, NF_T_NUM, 0, cuda_stream3 >>> (mempool, pkt_cnt, d_nounce, d_key, d_sbox, d_GF2, d_seq);
START_GRN
printf("[Done]____[Initialize]__NF #2__IPSec__\n");
printf("[IPSEC] %s\n", cudaGetErrorName(cudaGetLastError()));
END
free(expanded_key);
// ~ CKJUNG /////////////////////////////////////////////////////////////////////////////
}
|
eb4e7745a7b5793d9622608c864dff97705983a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "graph.h"
#include <stdio.h>
Graph::Graph() {
h_nnodes = 0;
h_nedges = 0;
h_edges = nullptr;
h_offset = nullptr;
h_weights = nullptr;
d_edges = nullptr;
d_offset = nullptr;
d_weights = nullptr;
}
void Graph::read(std::string filename) {
std::ifstream input(filename.c_str());
if(!input.is_open()) {
std::cerr << "Could not open the file \"" << filename << "\"" << std::endl;
exit(1);
}
// file is found
input >> h_nnodes >> h_nedges;
/******************************************************************/
// Allocation starts
unsigned numbytes_offset = sizeof(uint64_t) * (h_nnodes+1);
unsigned numbytes_edges = sizeof(uint64_t) * h_nedges;
unsigned numbytes_weights = sizeof(unsigned) * h_nedges;
/***************************************************/
// on host
h_offset = (uint64_t*)malloc(numbytes_offset);
//if(h_offset == NULL)
//{
// printf("Memory allocation failed");
// return;
//}
h_edges = (uint64_t*)malloc(numbytes_edges);
h_weights = (unsigned*)malloc(numbytes_weights);
memset(h_offset, 0, numbytes_offset);
memset(h_edges, 0, numbytes_edges);
memset(h_weights, 0, numbytes_weights);
/***************************************************/
#if 1
// getCSR()
// generating the CSR representation and populating the h_offset and h_edges array as the deliverable
// Assumption:
    // 1. There is an edge list representation of the graph available, sorted by source vertex ids.
    // 2. The node ids always start from 0.
    // There are h_nedges edge lines left in the file, since the header line has already been read.
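    // CSR layout: h_offset[v] .. h_offset[v+1]-1 index into h_edges/h_weights for v's outgoing edges.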
uint64_t srcPrev, srcCurr; // storing the ids of the previous and the current vertices
uint64_t offset = 0; // the offset in the h_edges array
uint64_t index = 0; // the index of the h_offset array to which the value of offset has to be written
input >> srcPrev >> h_edges[0] >> h_weights[0]; // reading the src and dest of the first edge
h_offset[index] = offset;
for (int i=1; i<h_nedges; i++) {
input >> srcCurr >> h_edges[i] >> h_weights[i];
// if(srcCurr == srcPrev) { // we are in the middle of the edge list of the same source vertex
// ++offset;
// }
++offset;
if(srcPrev != srcCurr) { // srcCurr has a new source id
// ++offset;
uint64_t diff = srcCurr - srcPrev;
while(diff-- /*&& (index <= h_nnodes)*/ ) { // to account for the values of offset for the vertices that do not have any neighbors
++index;
h_offset[index] = offset;
}
}
srcPrev = srcCurr; // making the current node as the previous node, for the next run
}
// putting the offset to 'h_nedges' for the last nodes that do not have any outgoing edges.
for(int i=index+1; i<=h_nnodes; i++)
h_offset[i] = h_nedges;
#endif
/***************************************************/
// on device
gpuErrchk(hipMalloc(&d_offset, numbytes_offset));
gpuErrchk(hipMalloc(&d_edges, numbytes_edges));
gpuErrchk(hipMalloc(&d_weights, numbytes_weights));
//gpuErrchk(hipMalloc(&d_nnodes, numbytes_edges));
//gpuErrchk(hipMalloc(&d_nedges, numbytes_edges));
/***************************************************/
// copying to device
gpuErrchk(hipMemcpy(d_offset, h_offset, numbytes_offset, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_edges, h_edges, numbytes_edges, hipMemcpyHostToDevice));
gpuErrchk(hipMemcpy(d_weights, h_weights, numbytes_weights, hipMemcpyHostToDevice));
}
void Graph::printGraph() {
std::cout << "offset array: " << std::endl;
for(int i=0; i<h_nnodes+1; i++)
std::cout << h_offset[i] << std::endl;
std::cout << "edges array: " << std::endl;
for(int i=0; i<h_nedges; i++)
std::cout << h_edges[i] << std::endl;
}
__device__ unsigned Graph::getDegree(uint64_t node) {
return (d_offset[node+1] - d_offset[node]);
}
__device__ uint64_t Graph::getDest(uint64_t node, unsigned edgeId) {
unsigned id = d_offset[node] + edgeId;
return d_edges[id];
}
__device__ unsigned Graph::getWt(uint64_t node, unsigned edgeId) {
unsigned id = d_offset[node] + edgeId;
return d_weights[id];
}
| eb4e7745a7b5793d9622608c864dff97705983a2.cu | #include "graph.h"
#include <stdio.h>
Graph::Graph() {
h_nnodes = 0;
h_nedges = 0;
h_edges = nullptr;
h_offset = nullptr;
h_weights = nullptr;
d_edges = nullptr;
d_offset = nullptr;
d_weights = nullptr;
}
void Graph::read(std::string filename) {
std::ifstream input(filename.c_str());
if(!input.is_open()) {
std::cerr << "Could not open the file \"" << filename << "\"" << std::endl;
exit(1);
}
// file is found
input >> h_nnodes >> h_nedges;
/******************************************************************/
// Allocation starts
unsigned numbytes_offset = sizeof(uint64_t) * (h_nnodes+1);
unsigned numbytes_edges = sizeof(uint64_t) * h_nedges;
unsigned numbytes_weights = sizeof(unsigned) * h_nedges;
/***************************************************/
// on host
h_offset = (uint64_t*)malloc(numbytes_offset);
//if(h_offset == NULL)
//{
// printf("Memory allocation failed");
// return;
//}
h_edges = (uint64_t*)malloc(numbytes_edges);
h_weights = (unsigned*)malloc(numbytes_weights);
memset(h_offset, 0, numbytes_offset);
memset(h_edges, 0, numbytes_edges);
memset(h_weights, 0, numbytes_weights);
/***************************************************/
#if 1
// getCSR()
// generating the CSR representation and populating the h_offset and h_edges arrays as the deliverable
// Assumptions:
// 1. There is an edge list representation of the graph available, sorted by source vertex ids.
// 2. The node ids always start from 0.
// there are h_nedges edge lines left in the file, since the header (nnodes, nedges) has already been read
uint64_t srcPrev, srcCurr; // storing the ids of the previous and the current vertices
uint64_t offset = 0; // the offset in the h_edges array
uint64_t index = 0; // the index of the h_offset array to which the value of offset has to be written
input >> srcPrev >> h_edges[0] >> h_weights[0]; // reading the src and dest of the first edge
h_offset[index] = offset;
for (int i=1; i<h_nedges; i++) {
input >> srcCurr >> h_edges[i] >> h_weights[i];
// if(srcCurr == srcPrev) { // we are in the middle of the edge list of the same source vertex
// ++offset;
// }
++offset;
if(srcPrev != srcCurr) { // srcCurr has a new source id
// ++offset;
uint64_t diff = srcCurr - srcPrev;
while(diff-- /*&& (index <= h_nnodes)*/ ) { // to account for the values of offset for the vertices that do not have any neighbors
++index;
h_offset[index] = offset;
}
}
srcPrev = srcCurr; // making the current node as the previous node, for the next run
}
// setting the offset to 'h_nedges' for the trailing nodes that do not have any outgoing edges.
for(int i=index+1; i<=h_nnodes; i++)
h_offset[i] = h_nedges;
#endif
/***************************************************/
// on device
gpuErrchk(cudaMalloc(&d_offset, numbytes_offset));
gpuErrchk(cudaMalloc(&d_edges, numbytes_edges));
gpuErrchk(cudaMalloc(&d_weights, numbytes_weights));
//gpuErrchk(cudaMalloc(&d_nnodes, numbytes_edges));
//gpuErrchk(cudaMalloc(&d_nedges, numbytes_edges));
/***************************************************/
// copying to device
gpuErrchk(cudaMemcpy(d_offset, h_offset, numbytes_offset, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_edges, h_edges, numbytes_edges, cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_weights, h_weights, numbytes_weights, cudaMemcpyHostToDevice));
}
void Graph::printGraph() {
std::cout << "offset array: " << std::endl;
for(int i=0; i<h_nnodes+1; i++)
std::cout << h_offset[i] << std::endl;
std::cout << "edges array: " << std::endl;
for(int i=0; i<h_nedges; i++)
std::cout << h_edges[i] << std::endl;
}
__device__ unsigned Graph::getDegree(uint64_t node) {
return (d_offset[node+1] - d_offset[node]);
}
__device__ uint64_t Graph::getDest(uint64_t node, unsigned edgeId) {
unsigned id = d_offset[node] + edgeId;
return d_edges[id];
}
__device__ unsigned Graph::getWt(uint64_t node, unsigned edgeId) {
unsigned id = d_offset[node] + edgeId;
return d_weights[id];
}
|
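Graph::read() above builds the CSR arrays on the host and mirrors them to the device, and the __device__ accessors show the slice arithmetic. As a hedged illustration of how that layout is typically consumed on the GPU, here is a minimal kernel sketch; the kernel name, the `out` buffer and the weight-summing purpose are assumptions, not part of the original file.
// Illustrative kernel (not part of graph.cu): sum each node's outgoing edge
// weights by walking its CSR slice. It takes the same device arrays that
// Graph::read() fills in; the kernel name and the `out` buffer are assumptions.
__global__ void sumOutgoingWeights(const uint64_t* d_offset, const uint64_t* d_edges,
                                   const unsigned* d_weights, uint64_t nnodes,
                                   unsigned* out) {
    uint64_t node = blockIdx.x * (uint64_t)blockDim.x + threadIdx.x;
    if (node >= nnodes) return;
    unsigned total = 0;
    // same slice arithmetic as getDegree()/getDest()/getWt() above
    for (uint64_t e = d_offset[node]; e < d_offset[node + 1]; ++e) {
        total += d_weights[e];   // d_edges[e] would give the neighbour id
    }
    out[node] = total;
}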
4dcc4c19dab72b5326f066775d20b7cba48e44fb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define THREADS _THREADS_
__global__ void box(const int n,
const int imsize,
float *rnd,
int *ind,
const float *s,
const float *mid,
const int grains){
const int i = blockIdx.x*THREADS + threadIdx.x;
if (i >= n){
return;
}
const int ii = 2*i;
const int k = 2*(int)floor((float)i/(float)grains);
const float x = (1.0 - 2.0*rnd[ii]) * s[0] + mid[k];
const float y = (1.0 - 2.0*rnd[ii+1]) * s[1] + mid[k+1];
if (x < 0.0f || x >= 1.0f || y < 0.0f || y >= 1.0f){
ind[i] = -1;
return;
}
ind[i] = (int)(x*(float)imsize) + (int)(y*(float)imsize) * imsize;
}
| 4dcc4c19dab72b5326f066775d20b7cba48e44fb.cu | #define THREADS _THREADS_
__global__ void box(const int n,
const int imsize,
float *rnd,
int *ind,
const float *s,
const float *mid,
const int grains){
const int i = blockIdx.x*THREADS + threadIdx.x;
if (i >= n){
return;
}
const int ii = 2*i;
const int k = 2*(int)floor((float)i/(float)grains);
const float x = (1.0 - 2.0*rnd[ii]) * s[0] + mid[k];
const float y = (1.0 - 2.0*rnd[ii+1]) * s[1] + mid[k+1];
if (x < 0.0f || x >= 1.0f || y < 0.0f || y >= 1.0f){
ind[i] = -1;
return;
}
ind[i] = (int)(x*(float)imsize) + (int)(y*(float)imsize) * imsize;
}
|
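The box kernel maps sample i into a grain-local box centred at (mid[2k], mid[2k+1]) with half-extents s[0], s[1], then flattens the point to a row-major pixel index, or -1 if it falls outside the unit square. A small CPU reference of that mapping, handy for spot-checking the device output (the function name is an assumption; results can differ from the GPU in the last bit because of float rounding in the i/grains division):
// CPU reference for the same mapping (sketch only).
static int box_index_cpu(int i, int imsize, const float* rnd,
                         const float* s, const float* mid, int grains) {
    const int ii = 2 * i;
    const int k = 2 * (i / grains);                     // grain group of point i
    const float x = (1.0f - 2.0f * rnd[ii])     * s[0] + mid[k];
    const float y = (1.0f - 2.0f * rnd[ii + 1]) * s[1] + mid[k + 1];
    if (x < 0.0f || x >= 1.0f || y < 0.0f || y >= 1.0f) return -1;  // left the unit square
    return (int)(x * (float)imsize) + (int)(y * (float)imsize) * imsize;  // row-major pixel
}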
a95908d2f97d6f19787fd11bdb0a09327873a708.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel4_minus_4_a [3][2];
static int dims_update_halo_kernel4_minus_4_a_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel4_minus_4_a_gpu(ACC<double> &vol_flux_y,
ACC<double> &mass_flux_y,
const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = -(vol_flux_y(0,4,0));
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = -(mass_flux_y(0,4,0));
}
__global__ void ops_update_halo_kernel4_minus_4_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_minus_4_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_minus_4_a[0][0] * dims_update_halo_kernel4_minus_4_a[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_minus_4_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_minus_4_a[1][0] * dims_update_halo_kernel4_minus_4_a[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel4_minus_4_a[0][0], dims_update_halo_kernel4_minus_4_a[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel4_minus_4_a[1][0], dims_update_halo_kernel4_minus_4_a[1][1], arg1);
update_halo_kernel4_minus_4_a_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_minus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_minus_4_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,72)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(72,"update_halo_kernel4_minus_4_a");
OPS_kernels[72].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel4_minus_4_a_h[0][0] || ydim0 != dims_update_halo_kernel4_minus_4_a_h[0][1] || xdim1 != dims_update_halo_kernel4_minus_4_a_h[1][0] || ydim1 != dims_update_halo_kernel4_minus_4_a_h[1][1]) {
dims_update_halo_kernel4_minus_4_a_h[0][0] = xdim0;
dims_update_halo_kernel4_minus_4_a_h[0][1] = ydim0;
dims_update_halo_kernel4_minus_4_a_h[1][0] = xdim1;
dims_update_halo_kernel4_minus_4_a_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel4_minus_4_a, dims_update_halo_kernel4_minus_4_a_h, sizeof(dims_update_halo_kernel4_minus_4_a)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[72].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel4_minus_4_a), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[72].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[72].mpi_time += t2-t1;
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_minus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 72;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 72;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_minus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(72,"update_halo_kernel4_minus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
| a95908d2f97d6f19787fd11bdb0a09327873a708.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel4_minus_4_a [3][2];
static int dims_update_halo_kernel4_minus_4_a_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel4_minus_4_a_gpu(ACC<double> &vol_flux_y,
ACC<double> &mass_flux_y,
const int* fields) {
if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y(0,0,0) = -(vol_flux_y(0,4,0));
if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y(0,0,0) = -(mass_flux_y(0,4,0));
}
__global__ void ops_update_halo_kernel4_minus_4_a(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_minus_4_a[0][0] + idx_z * 1*1 * dims_update_halo_kernel4_minus_4_a[0][0] * dims_update_halo_kernel4_minus_4_a[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel4_minus_4_a[1][0] + idx_z * 1*1 * dims_update_halo_kernel4_minus_4_a[1][0] * dims_update_halo_kernel4_minus_4_a[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel4_minus_4_a[0][0], dims_update_halo_kernel4_minus_4_a[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel4_minus_4_a[1][0], dims_update_halo_kernel4_minus_4_a[1][1], arg1);
update_halo_kernel4_minus_4_a_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel4_minus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel4_minus_4_a_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,72)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(72,"update_halo_kernel4_minus_4_a");
OPS_kernels[72].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel4_minus_4_a_h[0][0] || ydim0 != dims_update_halo_kernel4_minus_4_a_h[0][1] || xdim1 != dims_update_halo_kernel4_minus_4_a_h[1][0] || ydim1 != dims_update_halo_kernel4_minus_4_a_h[1][1]) {
dims_update_halo_kernel4_minus_4_a_h[0][0] = xdim0;
dims_update_halo_kernel4_minus_4_a_h[0][1] = ydim0;
dims_update_halo_kernel4_minus_4_a_h[1][0] = xdim1;
dims_update_halo_kernel4_minus_4_a_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel4_minus_4_a, dims_update_halo_kernel4_minus_4_a_h, sizeof(dims_update_halo_kernel4_minus_4_a)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[72].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel4_minus_4_a<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[72].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[72].mpi_time += t2-t1;
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[72].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel4_minus_4_a(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 72;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 72;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel4_minus_4_a_execute;
if (OPS_diags > 1) {
ops_timing_realloc(72,"update_halo_kernel4_minus_4_a");
}
ops_enqueue_kernel(desc);
}
#endif
|
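The generated kernel above addresses each dat with flattened 3-D arithmetic: the host stub folds the loop's start indices into a base offset, and every thread then advances by idx_x + idx_y*xdim + idx_z*xdim*ydim elements before the pointer is wrapped in an ACC<double> accessor. A standalone sketch of that addressing for a 1-component double field (the helper name is an assumption, not part of the OPS API):
// Illustrative only: the flattened addressing the generated kernel applies on
// top of the host-computed base offset, for a field stored x-fastest with
// padded extents xdim and ydim.
__device__ __forceinline__ double* elem3d(double* base, int xdim, int ydim,
                                          int idx_x, int idx_y, int idx_z) {
    return base + idx_x
                + (size_t)idx_y * xdim
                + (size_t)idx_z * xdim * ydim;
}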
adf984d60ab947a15035b0d2dcf098f128b4b199.hip | // !!! This is a file automatically generated by hipify!!!
// This file defines a CUDA mandelbrot set generator, with configurable
// parameters.
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "benchmark_gpu_utilities.h"
#include "library_interface.h"
// The default number of iterations used when determining if a point escapes
// the mandelbrot set. Optionally, the number of iterations can be specified
// using the additional_info field.
#define DEFAULT_MAX_ITERATIONS (1000)
// Holds the boundaries and sizes of the fractal, in pixels and in complex-plane coordinates
typedef struct {
// The width and height of the image in pixels.
int w;
int h;
// The boundaries of the fractal.
double min_real;
double min_imag;
double max_real;
double max_imag;
// The distance between pixels in the real and imaginary axes.
double delta_real;
double delta_imag;
} FractalDimensions;
// Holds the state of a single instance of this benchmark.
typedef struct {
// The CUDA stream with which all operations will be associated.
hipStream_t stream;
// This will be 0 if the stream hasn't been created yet. This value exists
// in order to avoid calling hipStreamDestroy when the stream hasn't been
// created.
int stream_created;
// Holds the host and device copies of the mandelbrot set. Each value in the
// buffers will be either 0 (in the set) or 1 (escaped).
uint8_t *host_points;
uint8_t *device_points;
// The maximum number of iterations used when drawing the set.
uint64_t max_iterations;
// The dimensions of the complex plane used when drawing the mandelbrot set.
FractalDimensions dimensions;
// Holds 2 64-bit elements: the start and stop times of the kernel, as
// measured on the device.
uint64_t *device_kernel_times;
// Holds a start and stop time for each block, as measured on the device.
uint64_t *device_block_times;
// Holds the ID of the SM for each block, checked once the kernel executes.
uint32_t *device_block_smids;
// The grid dimensions for the CUDA program, set during Initialize to a value
// based on the thread_count specified by the caller. The caller-specified
// block_count is ignored--instead the number of needed blocks is decided by
// the data_size field, which determines the size of the image.
int block_count;
int thread_count;
// Holds host-side times that are shared with the calling process.
KernelTimes mandelbrot_kernel_times;
} ThreadInformation;
// Implements the Cleanup() function required by the library interface.
static void Cleanup(void *data) {
ThreadInformation *info = (ThreadInformation *) data;
KernelTimes *host_times = &info->mandelbrot_kernel_times;
// Device memory
if (info->device_points) hipFree(info->device_points);
if (info->device_block_times) hipFree(info->device_block_times);
if (info->device_block_smids) hipFree(info->device_block_smids);
if (info->device_kernel_times) hipFree(info->device_kernel_times);
// Host memory
if (info->host_points) hipHostFree(info->host_points);
if (host_times->kernel_times) hipHostFree(host_times->kernel_times);
if (host_times->block_times) hipHostFree(host_times->block_times);
if (host_times->block_smids) hipHostFree(host_times->block_smids);
if (info->stream_created) {
// Call CheckCUDAError here to print a message, even though we won't check
// the return value.
CheckCUDAError(hipStreamDestroy(info->stream));
}
memset(info, 0, sizeof(*info));
free(info);
}
// Allocates GPU and CPU memory. Returns 0 on error, 1 otherwise.
static int AllocateMemory(ThreadInformation *info) {
uint64_t buffer_size = info->dimensions.w * info->dimensions.h;
uint64_t block_times_size = info->block_count * sizeof(uint64_t) * 2;
uint64_t block_smids_size = info->block_count * sizeof(uint32_t);
KernelTimes *mandelbrot_kernel_times = &info->mandelbrot_kernel_times;
// Allocate device memory
if (!CheckCUDAError(hipMalloc(&info->device_points, buffer_size))) {
return 0;
}
if (!CheckCUDAError(hipMalloc(&info->device_kernel_times,
2 * sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(hipMalloc(&info->device_block_times,
block_times_size))) {
return 0;
}
if (!CheckCUDAError(hipMalloc(&info->device_block_smids,
block_smids_size))) {
return 0;
}
// Allocate host memory
if (!CheckCUDAError(hipHostMalloc(&info->host_points, buffer_size))) {
return 0;
}
if (!CheckCUDAError(hipHostMalloc(&mandelbrot_kernel_times->kernel_times,
2 * sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(hipHostMalloc(&mandelbrot_kernel_times->block_times,
block_times_size))) {
return 0;
}
if (!CheckCUDAError(hipHostMalloc(&mandelbrot_kernel_times->block_smids,
block_smids_size))) {
return 0;
}
return 1;
}
// Checks the additional_info argument to see if it's non-empty and non-NULL,
// in which case it can override the default max iterations if it's parsed into
// a valid base-10 integer.
static int SetMaxIterations(const char *arg, ThreadInformation *info) {
int64_t parsed_value;
if (!arg || (strlen(arg) == 0)) {
info->max_iterations = DEFAULT_MAX_ITERATIONS;
return 1;
}
char *end = NULL;
parsed_value = strtoll(arg, &end, 10);
if ((*end != 0) || (parsed_value < 0)) {
printf("Invalid max iterations: %s\n", arg);
return 0;
}
info->max_iterations = (uint64_t) parsed_value;
return 1;
}
// Implements the Initialize() function required by the library interface.
static void* Initialize(InitializationParameters *params) {
ThreadInformation *info = NULL;
FractalDimensions *dimensions = NULL;
info = (ThreadInformation *) malloc(sizeof(*info));
if (!info) {
printf("Failed allocating library state variables.\n");
return NULL;
}
memset(info, 0, sizeof(*info));
if (!CheckCUDAError(hipSetDevice(params->cuda_device))) return NULL;
info->thread_count = params->thread_count;
// Fill in the dimensions and parameters of the complex plane region we'll
// draw.
dimensions = &(info->dimensions);
dimensions->w = (int) sqrt(params->data_size);
dimensions->h = dimensions->w;
dimensions->min_real = -2.0;
dimensions->max_real = 2.0;
dimensions->min_imag = -2.0;
dimensions->max_imag = 2.0;
dimensions->delta_real = 4.0 / dimensions->w;
dimensions->delta_imag = 4.0 / dimensions->h;
// Set the block count based on thread_count and the image dimensions.
info->block_count = (dimensions->w * dimensions->h) / params->thread_count;
// In case the image isn't evenly divisible by the thread_count...
if (((dimensions->w * dimensions->h) % params->thread_count) != 0) {
info->block_count++;
}
if (!SetMaxIterations(params->additional_info, info)) {
Cleanup(info);
return NULL;
}
// Allocate both host and device memory.
if (!AllocateMemory(info)) {
Cleanup(info);
return NULL;
}
if (!CheckCUDAError(CreateCUDAStreamWithPriority(params->stream_priority,
&(info->stream)))) {
Cleanup(info);
return NULL;
}
info->stream_created = 1;
return info;
}
// Nothing needs to be copied in, so this function does nothing.
static int CopyIn(void *data) {
return 1;
}
// A basic mandelbrot set calculator which sets each element in data to 1 if
// the point escapes within the given number of iterations.
static __global__ void BasicMandelbrot(uint8_t *data, uint64_t iterations,
FractalDimensions dimensions, uint64_t *kernel_times,
uint64_t *block_times, uint32_t *block_smids) {
uint64_t start_time = GlobalTimer64();
if (threadIdx.x == 0) {
if (blockIdx.x == 0) kernel_times[0] = start_time;
block_times[blockIdx.x * 2] = start_time;
block_smids[blockIdx.x] = GetSMID();
}
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int row = index / dimensions.w;
int col = index % dimensions.w;
// This may cause some threads to diverge on the last block only
if (row >= dimensions.h) {
kernel_times[1] = GlobalTimer64();
return;
}
__syncthreads();
double start_real = dimensions.min_real + dimensions.delta_real * col;
double start_imag = dimensions.min_imag + dimensions.delta_imag * row;
double current_real = start_real;
double current_imag = start_imag;
double magnitude_squared = (start_real * start_real) + (start_imag *
start_imag);
uint8_t escaped = 0;
double tmp;
uint64_t i;
for (i = 0; i < iterations; i++) {
if (magnitude_squared < 4) {
tmp = (current_real * current_real) - (current_imag * current_imag) +
start_real;
current_imag = 2 * current_imag * current_real + start_imag;
current_real = tmp;
magnitude_squared = (current_real * current_real) + (current_imag *
current_imag);
} else {
escaped = 1;
}
}
data[row * dimensions.w + col] = escaped;
__syncthreads();
// Record the block end time.
if (threadIdx.x == 0) {
block_times[blockIdx.x * 2 + 1] = GlobalTimer64();
}
kernel_times[1] = GlobalTimer64();
}
static int Execute(void *data) {
ThreadInformation *info = (ThreadInformation *) data;
hipLaunchKernelGGL(( BasicMandelbrot), dim3(info->block_count), dim3(info->thread_count), 0, info->stream,
info->device_points, info->max_iterations, info->dimensions,
info->device_kernel_times, info->device_block_times, info->device_block_smids);
if (!CheckCUDAError(hipStreamSynchronize(info->stream))) return 0;
return 1;
}
static int CopyOut(void *data, TimingInformation *times) {
ThreadInformation *info = (ThreadInformation *) data;
KernelTimes *host_times = &info->mandelbrot_kernel_times;
uint64_t block_times_count = info->block_count * 2;
uint64_t block_smids_count = info->block_count;
uint64_t points_size = info->dimensions.w * info->dimensions.h;
memset(times, 0, sizeof(*times));
host_times->block_count = info->block_count;
host_times->thread_count = info->thread_count;
host_times->kernel_name = "BasicMandelbrot";
if (!CheckCUDAError(hipMemcpyAsync(host_times->kernel_times,
info->device_kernel_times, 2 * sizeof(uint64_t),
hipMemcpyDeviceToHost, info->stream))) {
return 0;
}
if (!CheckCUDAError(hipMemcpyAsync(host_times->block_times,
info->device_block_times, block_times_count * sizeof(uint64_t),
hipMemcpyDeviceToHost, info->stream))) {
return 0;
}
if (!CheckCUDAError(hipMemcpyAsync(host_times->block_smids,
info->device_block_smids, block_smids_count * sizeof(uint32_t),
hipMemcpyDeviceToHost, info->stream))) {
return 0;
}
if (!CheckCUDAError(hipMemcpyAsync(info->host_points, info->device_points,
points_size, hipMemcpyDeviceToHost, info->stream))) {
return 0;
}
times->kernel_count = 1;
times->kernel_info = host_times;
times->resulting_data_size = points_size;
times->resulting_data = info->host_points;
if (!CheckCUDAError(hipStreamSynchronize(info->stream))) return 0;
return 1;
}
static const char* GetName(void) {
return "Mandelbrot Set";
}
int RegisterFunctions(BenchmarkLibraryFunctions *functions) {
functions->initialize = Initialize;
functions->copy_in = CopyIn;
functions->execute = Execute;
functions->copy_out = CopyOut;
functions->cleanup = Cleanup;
functions->get_name = GetName;
return 1;
}
| adf984d60ab947a15035b0d2dcf098f128b4b199.cu | // This file defines a CUDA mandelbrot set generator, with configurable
// parameters.
#include <cuda_runtime.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "benchmark_gpu_utilities.h"
#include "library_interface.h"
// The default number of iterations used when determining if a point escapes
// the mandelbrot set. Optionally, the number of iterations can be specified
// using the additional_info field.
#define DEFAULT_MAX_ITERATIONS (1000)
// Holds the boundaries and sizes of the fractal, in pixels and in complex-plane coordinates
typedef struct {
// The width and height of the image in pixels.
int w;
int h;
// The boundaries of the fractal.
double min_real;
double min_imag;
double max_real;
double max_imag;
// The distance between pixels in the real and imaginary axes.
double delta_real;
double delta_imag;
} FractalDimensions;
// Holds the state of a single instance of this benchmark.
typedef struct {
// The CUDA stream with which all operations will be associated.
cudaStream_t stream;
// This will be 0 if the stream hasn't been created yet. This value exists
// in order to avoid calling cudaStreamDestroy when the stream hasn't been
// created.
int stream_created;
// Holds the host and device copies of the mandelbrot set. Each value in the
// buffers will be either 0 (in the set) or 1 (escaped).
uint8_t *host_points;
uint8_t *device_points;
// The maximum number of iterations used when drawing the set.
uint64_t max_iterations;
// The dimensions of the complex plane used when drawing the mandelbrot set.
FractalDimensions dimensions;
// Holds 2 64-bit elements: the start and stop times of the kernel, as
// measured on the device.
uint64_t *device_kernel_times;
// Holds a start and stop time for each block, as measured on the device.
uint64_t *device_block_times;
// Holds the ID of the SM for each block, checked once the kernel executes.
uint32_t *device_block_smids;
// The grid dimensions for the CUDA program, set during Initialize to a value
// based on the thread_count specified by the caller. The caller-specified
// block_count is ignored--instead the number of needed blocks is decided by
// the data_size field, which determines the size of the image.
int block_count;
int thread_count;
// Holds host-side times that are shared with the calling process.
KernelTimes mandelbrot_kernel_times;
} ThreadInformation;
// Implements the Cleanup() function required by the library interface.
static void Cleanup(void *data) {
ThreadInformation *info = (ThreadInformation *) data;
KernelTimes *host_times = &info->mandelbrot_kernel_times;
// Device memory
if (info->device_points) cudaFree(info->device_points);
if (info->device_block_times) cudaFree(info->device_block_times);
if (info->device_block_smids) cudaFree(info->device_block_smids);
if (info->device_kernel_times) cudaFree(info->device_kernel_times);
// Host memory
if (info->host_points) cudaFreeHost(info->host_points);
if (host_times->kernel_times) cudaFreeHost(host_times->kernel_times);
if (host_times->block_times) cudaFreeHost(host_times->block_times);
if (host_times->block_smids) cudaFreeHost(host_times->block_smids);
if (info->stream_created) {
// Call CheckCUDAError here to print a message, even though we won't check
// the return value.
CheckCUDAError(cudaStreamDestroy(info->stream));
}
memset(info, 0, sizeof(*info));
free(info);
}
// Allocates GPU and CPU memory. Returns 0 on error, 1 otherwise.
static int AllocateMemory(ThreadInformation *info) {
uint64_t buffer_size = info->dimensions.w * info->dimensions.h;
uint64_t block_times_size = info->block_count * sizeof(uint64_t) * 2;
uint64_t block_smids_size = info->block_count * sizeof(uint32_t);
KernelTimes *mandelbrot_kernel_times = &info->mandelbrot_kernel_times;
// Allocate device memory
if (!CheckCUDAError(cudaMalloc(&info->device_points, buffer_size))) {
return 0;
}
if (!CheckCUDAError(cudaMalloc(&info->device_kernel_times,
2 * sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(cudaMalloc(&info->device_block_times,
block_times_size))) {
return 0;
}
if (!CheckCUDAError(cudaMalloc(&info->device_block_smids,
block_smids_size))) {
return 0;
}
// Allocate host memory
if (!CheckCUDAError(cudaMallocHost(&info->host_points, buffer_size))) {
return 0;
}
if (!CheckCUDAError(cudaMallocHost(&mandelbrot_kernel_times->kernel_times,
2 * sizeof(uint64_t)))) {
return 0;
}
if (!CheckCUDAError(cudaMallocHost(&mandelbrot_kernel_times->block_times,
block_times_size))) {
return 0;
}
if (!CheckCUDAError(cudaMallocHost(&mandelbrot_kernel_times->block_smids,
block_smids_size))) {
return 0;
}
return 1;
}
// Checks the additional_info argument to see if it's non-empty and non-NULL,
// in which case it can override the default max iterations if it's parsed into
// a valid base-10 integer.
static int SetMaxIterations(const char *arg, ThreadInformation *info) {
int64_t parsed_value;
if (!arg || (strlen(arg) == 0)) {
info->max_iterations = DEFAULT_MAX_ITERATIONS;
return 1;
}
char *end = NULL;
parsed_value = strtoll(arg, &end, 10);
if ((*end != 0) || (parsed_value < 0)) {
printf("Invalid max iterations: %s\n", arg);
return 0;
}
info->max_iterations = (uint64_t) parsed_value;
return 1;
}
// Implements the Initialize() function required by the library interface.
static void* Initialize(InitializationParameters *params) {
ThreadInformation *info = NULL;
FractalDimensions *dimensions = NULL;
info = (ThreadInformation *) malloc(sizeof(*info));
if (!info) {
printf("Failed allocating library state variables.\n");
return NULL;
}
memset(info, 0, sizeof(*info));
if (!CheckCUDAError(cudaSetDevice(params->cuda_device))) return NULL;
info->thread_count = params->thread_count;
// Fill in the dimensions and parameters of the complex plane region we'll
// draw.
dimensions = &(info->dimensions);
dimensions->w = (int) sqrt(params->data_size);
dimensions->h = dimensions->w;
dimensions->min_real = -2.0;
dimensions->max_real = 2.0;
dimensions->min_imag = -2.0;
dimensions->max_imag = 2.0;
dimensions->delta_real = 4.0 / dimensions->w;
dimensions->delta_imag = 4.0 / dimensions->h;
// Set the block count based on thread_count and the image dimensions.
info->block_count = (dimensions->w * dimensions->h) / params->thread_count;
// In case the image isn't evenly divisible by the thread_count...
if (((dimensions->w * dimensions->h) % params->thread_count) != 0) {
info->block_count++;
}
if (!SetMaxIterations(params->additional_info, info)) {
Cleanup(info);
return NULL;
}
// Allocate both host and device memory.
if (!AllocateMemory(info)) {
Cleanup(info);
return NULL;
}
if (!CheckCUDAError(CreateCUDAStreamWithPriority(params->stream_priority,
&(info->stream)))) {
Cleanup(info);
return NULL;
}
info->stream_created = 1;
return info;
}
// Nothing needs to be copied in, so this function does nothing.
static int CopyIn(void *data) {
return 1;
}
// A basic mandelbrot set calculator which sets each element in data to 1 if
// the point escapes within the given number of iterations.
static __global__ void BasicMandelbrot(uint8_t *data, uint64_t iterations,
FractalDimensions dimensions, uint64_t *kernel_times,
uint64_t *block_times, uint32_t *block_smids) {
uint64_t start_time = GlobalTimer64();
if (threadIdx.x == 0) {
if (blockIdx.x == 0) kernel_times[0] = start_time;
block_times[blockIdx.x * 2] = start_time;
block_smids[blockIdx.x] = GetSMID();
}
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
int row = index / dimensions.w;
int col = index % dimensions.w;
// This may cause some threads to diverge on the last block only
if (row >= dimensions.h) {
kernel_times[1] = GlobalTimer64();
return;
}
__syncthreads();
double start_real = dimensions.min_real + dimensions.delta_real * col;
double start_imag = dimensions.min_imag + dimensions.delta_imag * row;
double current_real = start_real;
double current_imag = start_imag;
double magnitude_squared = (start_real * start_real) + (start_imag *
start_imag);
uint8_t escaped = 0;
double tmp;
uint64_t i;
for (i = 0; i < iterations; i++) {
if (magnitude_squared < 4) {
tmp = (current_real * current_real) - (current_imag * current_imag) +
start_real;
current_imag = 2 * current_imag * current_real + start_imag;
current_real = tmp;
magnitude_squared = (current_real * current_real) + (current_imag *
current_imag);
} else {
escaped = 1;
}
}
data[row * dimensions.w + col] = escaped;
__syncthreads();
// Record the block end time.
if (threadIdx.x == 0) {
block_times[blockIdx.x * 2 + 1] = GlobalTimer64();
}
kernel_times[1] = GlobalTimer64();
}
static int Execute(void *data) {
ThreadInformation *info = (ThreadInformation *) data;
BasicMandelbrot<<<info->block_count, info->thread_count, 0, info->stream>>>(
info->device_points, info->max_iterations, info->dimensions,
info->device_kernel_times, info->device_block_times, info->device_block_smids);
if (!CheckCUDAError(cudaStreamSynchronize(info->stream))) return 0;
return 1;
}
static int CopyOut(void *data, TimingInformation *times) {
ThreadInformation *info = (ThreadInformation *) data;
KernelTimes *host_times = &info->mandelbrot_kernel_times;
uint64_t block_times_count = info->block_count * 2;
uint64_t block_smids_count = info->block_count;
uint64_t points_size = info->dimensions.w * info->dimensions.h;
memset(times, 0, sizeof(*times));
host_times->block_count = info->block_count;
host_times->thread_count = info->thread_count;
host_times->kernel_name = "BasicMandelbrot";
if (!CheckCUDAError(cudaMemcpyAsync(host_times->kernel_times,
info->device_kernel_times, 2 * sizeof(uint64_t),
cudaMemcpyDeviceToHost, info->stream))) {
return 0;
}
if (!CheckCUDAError(cudaMemcpyAsync(host_times->block_times,
info->device_block_times, block_times_count * sizeof(uint64_t),
cudaMemcpyDeviceToHost, info->stream))) {
return 0;
}
if (!CheckCUDAError(cudaMemcpyAsync(host_times->block_smids,
info->device_block_smids, block_smids_count * sizeof(uint32_t),
cudaMemcpyDeviceToHost, info->stream))) {
return 0;
}
if (!CheckCUDAError(cudaMemcpyAsync(info->host_points, info->device_points,
points_size, cudaMemcpyDeviceToHost, info->stream))) {
return 0;
}
times->kernel_count = 1;
times->kernel_info = host_times;
times->resulting_data_size = points_size;
times->resulting_data = info->host_points;
if (!CheckCUDAError(cudaStreamSynchronize(info->stream))) return 0;
return 1;
}
static const char* GetName(void) {
return "Mandelbrot Set";
}
int RegisterFunctions(BenchmarkLibraryFunctions *functions) {
functions->initialize = Initialize;
functions->copy_in = CopyIn;
functions->execute = Execute;
functions->copy_out = CopyOut;
functions->cleanup = Cleanup;
functions->get_name = GetName;
return 1;
}
|
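BasicMandelbrot iterates z <- z^2 + c until |z|^2 reaches 4 or the iteration budget is exhausted, and records a 1 for points that escape. A single-point CPU sketch of the same escape test (assuming <stdint.h>; unlike the kernel, it returns as soon as the point escapes):
// CPU sketch of the escape test for a single point c = (re, im).
static uint8_t escapes(double re, double im, uint64_t iterations) {
    double zr = re, zi = im;
    for (uint64_t i = 0; i < iterations; i++) {
        if ((zr * zr + zi * zi) >= 4.0) return 1;   // |z|^2 >= 4 -> escaped
        double tmp = zr * zr - zi * zi + re;        // z = z^2 + c
        zi = 2.0 * zr * zi + im;
        zr = tmp;
    }
    return 0;                                       // still bounded after the budget
}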
4a7d92326c850ad8507a1537114f8669e838f65f.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_math.h> //defines operator overriding
#include "cusolverDn.h"
#define EPS 1.0e-8 // A constant small number (used for avoiding zero-division)
__global__ void update_W_kernel(float* W, const int M, const float* e){
const int tx = threadIdx.x, bx = blockIdx.x;
const int diagIdx = bx * blockDim.x + tx;
float ftemp;
if(e[diagIdx] < 0){
ftemp = sqrt(-e[diagIdx]);
}else if(e[diagIdx] > 0){
ftemp = sqrt(e[diagIdx]);
}else{
ftemp = EPS;
}
W[diagIdx * (M + 1)] = 1.0f / fmax(ftemp, EPS);
}
void update_W(float* W, const int M, const float* e){
dim3 blockPerGrid(32, 1);
dim3 threadsPerBlock(M / 32, 1);
hipLaunchKernelGGL(( update_W_kernel) , dim3(blockPerGrid), dim3(threadsPerBlock) , 0, 0, W, M, e);
}
| 4a7d92326c850ad8507a1537114f8669e838f65f.cu | #include <cuda_runtime.h>
#include <helper_cuda.h>
#include <helper_functions.h>
#include <helper_math.h> //defines operator overriding
#include "cusolverDn.h"
#define EPS 1.0e-8 // A constant small number (used for avoiding zero-division)
__global__ void update_W_kernel(float* W, const int M, const float* e){
const int tx = threadIdx.x, bx = blockIdx.x;
const int diagIdx = bx * blockDim.x + tx;
float ftemp;
if(e[diagIdx] < 0){
ftemp = sqrt(-e[diagIdx]);
}else if(e[diagIdx] > 0){
ftemp = sqrt(e[diagIdx]);
}else{
ftemp = EPS;
}
W[diagIdx * (M + 1)] = 1.0f / fmax(ftemp, EPS);
}
void update_W(float* W, const int M, const float* e){
dim3 blockPerGrid(32, 1);
dim3 threadsPerBlock(M / 32, 1);
update_W_kernel <<< blockPerGrid, threadsPerBlock >>>(W, M, e);
}
|
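update_W_kernel writes 1 / max(sqrt(|e[i]|), EPS) onto the diagonal of a densely stored M x M matrix (element i*(M+1) is W[i][i]); the <<<32, M/32>>> launch gives exactly one thread per diagonal entry when M is a multiple of 32. A hedged CPU reference of the same update (assuming <math.h> and the EPS macro above; the function name is an assumption):
// CPU reference for the diagonal update performed by update_W_kernel; W is a
// densely stored M x M matrix and e holds M (possibly negative) eigenvalues.
static void update_W_cpu(float* W, int M, const float* e) {
    for (int i = 0; i < M; i++) {
        float ftemp = (e[i] != 0.0f) ? sqrtf(fabsf(e[i])) : (float)EPS;
        W[i * (M + 1)] = 1.0f / fmaxf(ftemp, (float)EPS);   // W[i][i]
    }
}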
046f63cfb510183a03d186f2bbf5058a072b3d90.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdint>
#include <thrust/reduce.h>
#include <hip/hip_runtime.h>
using namespace std;
__device__ int binarySearch(int* arr, int l, int r, int x)
{
while (l <= r)
{
int m = (l+r)/2;
if (arr[m] == x)
return m;
if (arr[m] < x)
l = m + 1;
else
r = m - 1;
}
return -1;
}
__global__ void Tricount(int* beginposition , int* adjlist ,int* d_counts,int* adjver ,int vertices , int entries)
{
int adjindex = blockIdx.x * blockDim.x + threadIdx.x;
int vertex =0 ;
// IDENTIFY WHICH VERTEX THIS THREAD HANDLES
if( adjindex < entries )
{
vertex = adjver[adjindex];
int initial_find = 0;
//FIND ITSELF IN ADJLIST
for(int a = vertex + 1 ; a < vertices ; a++)
{
int sizeofarray1 = beginposition[a+1]-beginposition[a];
if( a+1 == vertices)
sizeofarray1 = entries-beginposition[a];
initial_find = binarySearch(adjlist , beginposition[a] , beginposition[a] + sizeofarray1 -1 , adjlist[adjindex]);
if(initial_find != -1)// IF FOUND, FIND VERTEX IN VERTEX2 ADJ
{
int vertex2 = adjver[initial_find];
int sizeofarray = beginposition[vertex2+1]-beginposition[vertex2];
if(vertex2+1 == vertices)
sizeofarray = entries-beginposition[vertex2];
int last_connection = binarySearch(adjlist,beginposition[vertex2],beginposition[vertex2] + sizeofarray -1,vertex);
if(last_connection != -1)//FOUND TRIANGLE
{
//atomicAdd(&d_counts[0],1);
//printf(" %d ",d_counts[0]);
d_counts[adjindex] = d_counts[adjindex] + 1;
}
}
}
}
}
int mmioread(int* adjlist , int* beginposition) {
string line;
ifstream myfile ("email-EuAll_adj.tsv");
long linecount =0;
// 0 - adjlist 1 - vertex 2 - N/A
beginposition[0] = 0;
long adjlistpos = 0;
long beginlistpos = 1;
long prevnum = 0;
if (myfile.is_open())
{
while ( getline (myfile,line) )
{
istringstream buf(line);
long type =0;
for(string word; buf >> word; )
{
if( type == 0 ) // add adjlist
{
adjlist[adjlistpos] = stoi(word);
adjlistpos++;
type++;
}
else if( type == 1 ) // add begin pos
{
if(prevnum != stoi(word) )
{
if (prevnum+1 != stoi(word) )
{
//printf("now is %d but before was %d\n",stoi(word),prevnum );
for(int a = 0 ; a <stoi(word)-prevnum-1 ; a++)
{
beginposition[beginlistpos] = adjlistpos-1;
//printf("IN \n" );
//printf("putting %d at beginpos %d\n",int(adjlistpos-1),int(beginlistpos));
beginlistpos++;
}
}
beginposition[beginlistpos] = adjlistpos-1;
beginlistpos++;
prevnum = stoi(word);
}
type++;
}
else if (type == 2)
type++;
//forcount++;
}
linecount++;
}
myfile.close();
}
else cout << "Unable to open file";
return 1;
};
int main(){
int vertices = 265215;
int entries = 728962;
int* h_beginposition= new int[vertices];
int* h_adjlist= new int[entries];
int* h_adjvertex= new int[entries];
int* h_count = new int [entries];
//h_count=(int *) malloc(1*sizeof(int));
int* d_begin;
int* d_adj;
int* d_counts;
int* d_adjvertex;
cout <<"Converting MMIO to array form..." <<endl;
clock_t startTime = clock();
mmioread(h_adjlist,h_beginposition);
int pos =0;
for(int x = 1 ; x < vertices ; x++)
{
int size = h_beginposition[x+1] - h_beginposition[x];
//printf("%d \n ",size);
if(x+1 == vertices)
size = entries-h_beginposition[x];
for(int y = 0 ; y < size ; y++)
{
h_adjvertex[pos] = x;
pos++;
}
}
//printf("pos is %d is %d \n",h_adjlist[718264] ,h_adjvertex[718264]);
//printf("last is %d \n", h_beginposition[4]);
/*
printf("adjlist consist of");
for(int a = 0 ; a < entries ; a++)
printf(" %d ", h_adjlist[a]);
printf("\n");
printf("bp consist of");
for(int a = 0 ; a < vertices ; a++)
printf(" %d ", h_beginposition[a]);
printf("\n");*/
double secondsPassed = (clock() - startTime) / CLOCKS_PER_SEC;
cout <<"Transform complete : "<< secondsPassed << " seconds have passed" << endl;
cout <<"Allocating space on GPU and transfer data..."<< endl;
hipMalloc(&d_begin, vertices*sizeof(int));
hipMalloc(&d_adj, entries*sizeof(int));
hipMalloc(&d_adjvertex, entries*sizeof(int));
hipMalloc((void**)&d_counts, entries*sizeof(int));
//hipMemset((void*)d_counts,0,10*sizeof(int));
hipMemcpy(d_begin, h_beginposition, vertices*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_adj, h_adjlist, entries*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_adjvertex, h_adjvertex, entries*sizeof(int), hipMemcpyHostToDevice);
int blocks = (entries/1024)+1;
cout << "Now counting Triangles" <<endl;
hipLaunchKernelGGL(Tricount, dim3(blocks), dim3(1024), 0, 0, d_begin,d_adj,d_counts,d_adjvertex,vertices,entries);
cout << "Done..." <<endl;
hipMemcpy(h_count,d_counts,entries*sizeof(int),hipMemcpyDeviceToHost);
cout << "Done with MEMCOPY...Now counting" <<endl;
int result = thrust::reduce(h_count, h_count+ entries);
printf("answer : %d \n",result/3);
hipFree(d_begin);
hipFree(d_adj);
hipFree(d_counts);
//hipDeviceReset();
//3686467
}
| 046f63cfb510183a03d186f2bbf5058a072b3d90.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdint>
#include <thrust/reduce.h>
#include <cuda.h>
using namespace std;
__device__ int binarySearch(int* arr, int l, int r, int x)
{
while (l <= r)
{
int m = (l+r)/2;
if (arr[m] == x)
return m;
if (arr[m] < x)
l = m + 1;
else
r = m - 1;
}
return -1;
}
__global__ void Tricount(int* beginposition , int* adjlist ,int* d_counts,int* adjver ,int vertices , int entries)
{
int adjindex = blockIdx.x * blockDim.x + threadIdx.x;
int vertex =0 ;
// IDENTIFY WHICH VERTEX THIS THREAD HANDLES
if( adjindex < entries )
{
vertex = adjver[adjindex];
int initial_find = 0;
//FIND ITSELF IN ADJLIST
for(int a = vertex + 1 ; a < vertices ; a++)
{
int sizeofarray1 = beginposition[a+1]-beginposition[a];
if( a+1 == vertices)
sizeofarray1 = entries-beginposition[a];
initial_find = binarySearch(adjlist , beginposition[a] , beginposition[a] + sizeofarray1 -1 , adjlist[adjindex]);
if(initial_find != -1)// IF FOUND, FIND VERTEX IN VERTEX2 ADJ
{
int vertex2 = adjver[initial_find];
int sizeofarray = beginposition[vertex2+1]-beginposition[vertex2];
if(vertex2+1 == vertices)
sizeofarray = entries-beginposition[vertex2];
int last_connection = binarySearch(adjlist,beginposition[vertex2],beginposition[vertex2] + sizeofarray -1,vertex);
if(last_connection != -1)//FOUND TRIANGLE
{
//atomicAdd(&d_counts[0],1);
//printf(" %d ",d_counts[0]);
d_counts[adjindex] = d_counts[adjindex] + 1;
}
}
}
}
}
int mmioread(int* adjlist , int* beginposition) {
string line;
ifstream myfile ("email-EuAll_adj.tsv");
long linecount =0;
// 0 - adjlist 1 - vertex 2 - N/A
beginposition[0] = 0;
long adjlistpos = 0;
long beginlistpos = 1;
long prevnum = 0;
if (myfile.is_open())
{
while ( getline (myfile,line) )
{
istringstream buf(line);
long type =0;
for(string word; buf >> word; )
{
if( type == 0 ) // add adjlist
{
adjlist[adjlistpos] = stoi(word);
adjlistpos++;
type++;
}
else if( type == 1 ) // add begin pos
{
if(prevnum != stoi(word) )
{
if (prevnum+1 != stoi(word) )
{
//printf("now is %d but before was %d\n",stoi(word),prevnum );
for(int a = 0 ; a <stoi(word)-prevnum-1 ; a++)
{
beginposition[beginlistpos] = adjlistpos-1;
//printf("IN \n" );
//printf("putting %d at beginpos %d\n",int(adjlistpos-1),int(beginlistpos));
beginlistpos++;
}
}
beginposition[beginlistpos] = adjlistpos-1;
beginlistpos++;
prevnum = stoi(word);
}
type++;
}
else if (type == 2)
type++;
//forcount++;
}
linecount++;
}
myfile.close();
}
else cout << "Unable to open file";
return 1;
};
int main(){
int vertices = 265215;
int entries = 728962;
int* h_beginposition= new int[vertices];
int* h_adjlist= new int[entries];
int* h_adjvertex= new int[entries];
int* h_count = new int [entries];
//h_count=(int *) malloc(1*sizeof(int));
int* d_begin;
int* d_adj;
int* d_counts;
int* d_adjvertex;
cout <<"Converting MMIO to array form..." <<endl;
clock_t startTime = clock();
mmioread(h_adjlist,h_beginposition);
int pos =0;
for(int x = 1 ; x < vertices ; x++)
{
int size = h_beginposition[x+1] - h_beginposition[x];
//printf("%d \n ",size);
if(x+1 == vertices)
size = entries-h_beginposition[x];
for(int y = 0 ; y < size ; y++)
{
h_adjvertex[pos] = x;
pos++;
}
}
//printf("pos is %d is %d \n",h_adjlist[718264] ,h_adjvertex[718264]);
//printf("last is %d \n", h_beginposition[4]);
/*
printf("adjlist consist of");
for(int a = 0 ; a < entries ; a++)
printf(" %d ", h_adjlist[a]);
printf("\n");
printf("bp consist of");
for(int a = 0 ; a < vertices ; a++)
printf(" %d ", h_beginposition[a]);
printf("\n");*/
double secondsPassed = (clock() - startTime) / CLOCKS_PER_SEC;
cout <<"Transform complete : "<< secondsPassed << " seconds have passed" << endl;
cout <<"Allocating space on GPU and transfer data..."<< endl;
cudaMalloc(&d_begin, vertices*sizeof(int));
cudaMalloc(&d_adj, entries*sizeof(int));
cudaMalloc(&d_adjvertex, entries*sizeof(int));
cudaMalloc((void**)&d_counts, entries*sizeof(int));
//cudaMemset((void*)d_counts,0,10*sizeof(int));
cudaMemcpy(d_begin, h_beginposition, vertices*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_adj, h_adjlist, entries*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_adjvertex, h_adjvertex, entries*sizeof(int), cudaMemcpyHostToDevice);
int blocks = (entries/1024)+1;
cout << "Now counting Triangles" <<endl;
Tricount<<<blocks, 1024>>>(d_begin,d_adj,d_counts,d_adjvertex,vertices,entries);
cout << "Done..." <<endl;
cudaMemcpy(h_count,d_counts,entries*sizeof(int),cudaMemcpyDeviceToHost);
cout << "Done with MEMCOPY...Now counting" <<endl;
int result = thrust::reduce(h_count, h_count+ entries);
printf("answer : %d \n",result/3);
cudaFree(d_begin);
cudaFree(d_adj);
cudaFree(d_counts);
//cudaDeviceReset();
//3686467
}
|
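Tricount depends on each vertex's adjacency slice adjlist[beginposition[v] .. beginposition[v+1]-1] being sorted, so that membership can be tested with binarySearch. A small device helper sketch that packages exactly that test, including the last-vertex boundary case the kernel handles explicitly (the helper name is an assumption, not part of the original file):
// Illustrative device helper: does the sorted adjacency slice of vertex v
// contain x? Mirrors the slice bounds computed inside Tricount.
__device__ bool hasNeighbor(const int* beginposition, const int* adjlist,
                            int vertices, int entries, int v, int x) {
    int lo = beginposition[v];
    int hi = (v + 1 == vertices) ? entries - 1 : beginposition[v + 1] - 1;
    while (lo <= hi) {                 // binary search on adjlist[lo..hi]
        int m = (lo + hi) / 2;
        if (adjlist[m] == x) return true;
        if (adjlist[m] < x) lo = m + 1;
        else hi = m - 1;
    }
    return false;
}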
b1c9060b6ec3aea1f6d0b9dc13d3954e15de5d8c.hip | // !!! This is a file automatically generated by hipify!!!
#include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <cutil.h>
#include <cmeansMPI.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <cmeansMPI_kernel.cu>
#include "MDL.h"
void printCudaError() {
hipError_t error = hipGetLastError();
if(error != hipSuccess) {
printf("%s\n",hipGetErrorString(error));
}
}
typedef struct {
hipEvent_t start;
hipEvent_t stop;
float* et;
} cudaTimer_t;
void createTimer(cudaTimer_t* timer) {
#pragma omp critical (create_timer)
{
hipEventCreate(&(timer->start));
hipEventCreate(&(timer->stop));
timer->et = (float*) malloc(sizeof(float));
*(timer->et) = 0.0f;
}
}
void deleteTimer(cudaTimer_t timer) {
#pragma omp critical (delete_timer)
{
hipEventDestroy(timer.start);
hipEventDestroy(timer.stop);
free(timer.et);
}
}
void startTimer(cudaTimer_t timer) {
hipEventRecord(timer.start,0);
}
void stopTimer(cudaTimer_t timer) {
hipEventRecord(timer.stop,0);
hipEventSynchronize(timer.stop);
float tmp;
hipEventElapsedTime(&tmp,timer.start,timer.stop);
*(timer.et) += tmp;
}
float getTimerValue(cudaTimer_t timer) {
return *(timer.et);
}
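/* Illustrative usage of the timer helpers above -- a sketch only, not called
 * anywhere in this file:
 *
 *   cudaTimer_t t;
 *   createTimer(&t);
 *   startTimer(t);
 *   // ... enqueue kernels / memcpys on the current stream ...
 *   stopTimer(t);                          // waits on the stop event
 *   printf("elapsed: %f ms\n", getTimerValue(t));
 *   deleteTimer(t);
 */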
/************************************************************************/
/* C-means Main */
/************************************************************************/
int main(int argc, char* argv[])
{
int rank, num_nodes, len, provided;
char name[MPI_MAX_PROCESSOR_NAME];
MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE,&provided);
MPI_Comm_size(MPI_COMM_WORLD,&num_nodes);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Get_processor_name(name, &len);
printf("Hello world from node %d of %d on %s\n",rank,num_nodes,name);
unsigned int timer_io; // Timer for I/O, such as reading FCS file and outputting result files
unsigned int timer_total; // Total time
unsigned int timer_main_cpu; // Total time
cutCreateTimer(&timer_io);
cutCreateTimer(&timer_total);
cutCreateTimer(&timer_main_cpu);
// determine the number of CUDA capable GPUs
int num_gpus = 0; // number of CUDA GPUs
hipGetDeviceCount(&num_gpus);
if(num_gpus < 1)
{
printf("no CUDA capable devices were detected\n");
return 1;
}
// display CPU and GPU configuration
printf("number of host CPUs:\t%d\n", omp_get_num_procs());
printf("number of CUDA devices:\t%d\n", num_gpus);
for(int i = 0; i < num_gpus; i++)
{
hipDeviceProp_t dprop;
hipGetDeviceProperties(&dprop, i);
printf(" %d: %s\n", i, dprop.name);
}
printf("---------------------------\n");
int total_num_gpus = num_gpus * num_nodes;
cutStartTimer(timer_total);
// [program name] [data file]
if(argc != 2){
printf("Usage Error: must supply data file. e.g. programe_name @opt(flags) file.in\n");
return 1;
}
cutStartTimer(timer_io);
float* myEvents;
int elements_per_node, elements_being_sent;
elements_per_node = NUM_EVENTS / total_num_gpus * num_gpus * NUM_DIMENSIONS;
// Root reads input from file and distributes to each node
if(rank == 0) {
myEvents = ParseSampleInput(argv[1]);
MPI_Request* requests = (MPI_Request*) malloc(sizeof(MPI_Request)*num_nodes);
MPI_Status s;
// Send everything asynchronously
for(int i=1; i < num_nodes; i++) {
elements_being_sent = elements_per_node;
if(i == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Isend(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&requests[i]);
//MPI_Send(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD);
}
// Wait for the Isends to complete
for(int i=1; i < num_nodes; i++) {
MPI_Wait(&requests[i],&s);
}
free(requests);
elements_being_sent = elements_per_node; // so that its set properly for the root
} else {
myEvents = (float*) malloc(sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS);
elements_being_sent = elements_per_node;
if(rank == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Status s;
MPI_Recv(&(myEvents[elements_per_node*rank]),elements_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD,&s);
}
MPI_Barrier(MPI_COMM_WORLD);
cutStopTimer(timer_io);
cutStartTimer(timer_main_cpu);
//srand((unsigned)(time(0)));
srand(42);
// Allocate arrays for the cluster centers
float* myClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
float* newClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
// Select random cluster centers
double t1,t2;
generateInitialClusters(myClusters, myEvents);
// Create an array of arrays for temporary cluster centers from each GPU
float** tempClusters = (float**) malloc(sizeof(float*)*num_gpus);
float** tempDenominators = (float**) malloc(sizeof(float*)*num_gpus);
for(int i=0; i < num_gpus; i++) {
tempClusters[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
tempDenominators[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS);
memcpy(tempClusters[i],myClusters,sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
}
// Create an array of arrays for temporary Q matrix pieces from each GPU
float** q_matrices = (float**) malloc(sizeof(float*)*num_gpus);
// Create an array for the final Q matrix
float* q_matrix = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_CLUSTERS);
float diff; // used to track difference in cluster centers between iterations
// Transpose the events matrix
float* transposedEvents = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
for(int i=0; i<NUM_EVENTS; i++) {
for(int j=0; j<NUM_DIMENSIONS; j++) {
transposedEvents[j*NUM_EVENTS+i] = myEvents[i*NUM_DIMENSIONS+j];
}
}
float* memberships = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_EVENTS);
int* finalClusterConfig;
cutStopTimer(timer_main_cpu);
////////////////////////////////////////////////////////////////
// run as many CPU threads as there are CUDA devices
//num_gpus = 1;
//omp_set_num_threads(num_gpus); // create as many CPU threads as there are CUDA devices
#pragma omp parallel shared(myClusters,diff,tempClusters,tempDenominators,memberships,finalClusterConfig)
{
cudaTimer_t timer_memcpy; // Timer for GPU <---> CPU memory copying
cudaTimer_t timer_cpu; // Timer for processing on CPU
cudaTimer_t timer_gpu; // Timer for kernels on the GPU
cudaTimer_t timer_mpi; // Timer for MPI
unsigned int tid = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
int gpu_num = rank*num_gpus+tid;
printf("hello from thread %d of %d\n",tid,num_cpu_threads);
// set and check the CUDA device for this CPU thread
int gpu_id = -1;
hipSetDevice(tid % num_gpus); // "% num_gpus" allows more CPU threads than GPU devices
hipGetDevice(&gpu_id);
#pragma omp barrier
createTimer(&timer_memcpy);
createTimer(&timer_cpu);
createTimer(&timer_gpu);
createTimer(&timer_mpi);
printf("CPU thread %d (of %d) uses CUDA device %d\n", tid, num_cpu_threads, gpu_id);
// Compute starting/finishing indexes for the events for each gpu
int events_per_gpu = NUM_EVENTS / total_num_gpus;
int my_num_events = events_per_gpu;
if(gpu_num == (total_num_gpus-1)) {
my_num_events += NUM_EVENTS % total_num_gpus;
}
startTimer(timer_memcpy);
float* d_distanceMatrix;
CUDA_SAFE_CALL(hipMalloc((void**)&d_distanceMatrix, sizeof(float)*my_num_events*NUM_CLUSTERS));
#if !LINEAR
float* d_memberships;
CUDA_SAFE_CALL(hipMalloc((void**)&d_memberships, sizeof(float)*my_num_events*NUM_CLUSTERS));
#endif
float* d_E;
CUDA_SAFE_CALL(hipMalloc((void**)&d_E, sizeof(float)*my_num_events*NUM_DIMENSIONS));
float* d_C;
CUDA_SAFE_CALL(hipMalloc((void**)&d_C, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_nC;
CUDA_SAFE_CALL(hipMalloc((void**)&d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_denoms;
CUDA_SAFE_CALL(hipMalloc((void**)&d_denoms, sizeof(float)*NUM_CLUSTERS));
int size = sizeof(float)*NUM_DIMENSIONS*my_num_events;
// Copying the transposed data is trickier since it's not all contiguous for the relevant events
float* temp_fcs_data = (float*) malloc(size);
for(int d=0; d < NUM_DIMENSIONS; d++) {
memcpy(&temp_fcs_data[d*my_num_events],&transposedEvents[d*NUM_EVENTS + gpu_num*events_per_gpu],sizeof(float)*my_num_events);
}
CUDA_SAFE_CALL(hipMemcpy( d_E, temp_fcs_data, size,hipMemcpyHostToDevice) );
hipDeviceSynchronize();
free(temp_fcs_data);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
stopTimer(timer_memcpy);
printf("Starting C-means\n");
int iterations = 0;
int num_blocks_distance = my_num_events / NUM_THREADS_DISTANCE;
if(my_num_events % NUM_THREADS_DISTANCE) {
num_blocks_distance++;
}
int num_blocks_membership = my_num_events / NUM_THREADS_MEMBERSHIP;
if(my_num_events % NUM_THREADS_MEMBERSHIP) {
num_blocks_membership++;
}
int num_blocks_update = NUM_CLUSTERS / NUM_CLUSTERS_PER_BLOCK;
if(NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_blocks_update++;
}
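// Main c-means loop: each GPU computes distances/memberships for its slice of events, the partial
// numerators and denominators are summed across GPUs (OpenMP master) and across nodes (MPI_Reduce),
// and the updated cluster centers are broadcast to all ranks.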
do{
cudaTimer_t timer;
createTimer(&timer);
startTimer(timer);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
// Copy the cluster centers to the GPU
startTimer(timer_memcpy);
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
stopTimer(timer_memcpy);
startTimer(timer_gpu);
DEBUG("Launching ComputeDistanceMatrix kernel\n");
hipLaunchKernelGGL(( ComputeDistanceMatrix), dim3(dim3(num_blocks_distance,NUM_CLUSTERS)), dim3(NUM_THREADS_DISTANCE) , 0, 0, d_C, d_E, d_distanceMatrix, my_num_events);
#if LINEAR
// O(M) membership kernel
DEBUG("Launching ComputeMembershipMatrixLinear kernel\n");
hipLaunchKernelGGL(( ComputeMembershipMatrixLinear), dim3(num_blocks_membership), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
hipLaunchKernelGGL(( UpdateClusterCentersGPU3), dim3(dim3(NUM_DIMENSIONS,num_blocks_update)), dim3(NUM_THREADS_UPDATE) , 0, 0, d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_distanceMatrix, d_denoms, my_num_events);
#else
// O(M^2) membership kernel
DEBUG("Launching ComputeMembershipMatrix kernel\n");
hipLaunchKernelGGL(( ComputeMembershipMatrix), dim3(dim3(num_blocks_membership,NUM_CLUSTERS)), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, d_memberships, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, my_num_events);
hipLaunchKernelGGL(( UpdateClusterCentersGPU3), dim3(dim3(NUM_DIMENSIONS,num_blocks_update)), dim3(NUM_THREADS_UPDATE) , 0, 0, d_C, d_E, d_nC, d_memberships, my_num_events);
hipLaunchKernelGGL(( ComputeClusterSizes), dim3(NUM_CLUSTERS), dim3(512) , 0, 0, d_memberships, d_denoms, my_num_events );
#endif
hipDeviceSynchronize();
printCudaError();
stopTimer(timer_gpu);
// Copy partial centers and denominators to host
startTimer(timer_memcpy);
hipMemcpy(tempClusters[tid], d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS, hipMemcpyDeviceToHost);
hipMemcpy(tempDenominators[tid], d_denoms, sizeof(float)*NUM_CLUSTERS, hipMemcpyDeviceToHost);
printCudaError();
stopTimer(timer_memcpy);
stopTimer(timer);
float thisTime = getTimerValue(timer);
DEBUG("Processing time for GPU %d: %f (ms) \n", tid, thisTime);
deleteTimer(timer);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// Sum up the partial cluster centers (numerators)
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] += tempClusters[i][c*NUM_DIMENSIONS+d];
}
}
}
// Sum up the denominator for each cluster
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
tempDenominators[0][c] += tempDenominators[i][c];
}
}
stopTimer(timer_cpu);
DEBUG("Reducing cluster values\n");
startTimer(timer_mpi);
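// MPI_IN_PLACE lets rank 0 accumulate the global sums directly into its own buffers; non-root ranks only contribute their partial sums.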
if(rank == 0) {
MPI_Reduce(MPI_IN_PLACE,tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempClusters[0],tempClusters[1],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempClusters[0],tempClusters[1],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
MPI_Reduce(MPI_IN_PLACE,tempDenominators[0],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempDenominators[0],tempDenominators[1],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempDenominators[0],tempDenominators[1],sizeof(float)*NUM_CLUSTERS);
} else {
MPI_Reduce(tempClusters[0],0,NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce(tempDenominators[0],0,NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD); // not sure if necessary...
stopTimer(timer_mpi);
startTimer(timer_cpu);
// Divide to get the final clusters
if(rank == 0) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] /= tempDenominators[0][c];
}
}
}
stopTimer(timer_cpu);
startTimer(timer_mpi);
DEBUG("Broadcasting Cluster Values\n");
MPI_Bcast(tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,0,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
startTimer(timer_cpu);
diff = 0.0;
for(int i=0; i < NUM_CLUSTERS; i++){
DEBUG("GPU %d, Cluster %d: ",tid,i);
for(int k = 0; k < NUM_DIMENSIONS; k++){
DEBUG("%f ",tempClusters[tid][i*NUM_DIMENSIONS + k]);
diff += fabs(myClusters[i*NUM_DIMENSIONS + k] - tempClusters[tid][i*NUM_DIMENSIONS + k]);
}
DEBUG("\n");
}
memcpy(myClusters,tempClusters[tid],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
DEBUG("Diff = %f\n", diff);
DEBUG("Done with iteration #%d\n", iterations);
stopTimer(timer_cpu);
}
#pragma omp barrier
iterations++;
DEBUG("\n");
} while(iterations < MIN_ITERS || (fabs(diff) > THRESHOLD && iterations < MAX_ITERS));
#pragma omp master
{
if(rank == 0) {
printf("Iterations: %d\n",iterations);
}
}
#if ENABLE_OUTPUT
// Compute final membership values
startTimer(timer_gpu);
#if LINEAR
// O(M)
hipLaunchKernelGGL(( ComputeDistanceMatrix), dim3(dim3(num_blocks_distance,NUM_CLUSTERS)), dim3(NUM_THREADS_DISTANCE) , 0, 0, d_C, d_E, d_distanceMatrix, my_num_events);
hipLaunchKernelGGL(( ComputeNormalizedMembershipMatrixLinear), dim3(num_blocks_membership), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix,my_num_events);
#else
// O(M^2)
hipLaunchKernelGGL(( ComputeNormalizedMembershipMatrix), dim3(dim3(num_blocks_membership,NUM_CLUSTERS)), dim3(NUM_THREADS_MEMBERSHIP) , 0, 0, d_distanceMatrix, d_memberships, my_num_events);
#endif
stopTimer(timer_gpu);
// Copy memberships from the GPU
float* temp_memberships = (float*) malloc(sizeof(float)*my_num_events*NUM_CLUSTERS);
startTimer(timer_memcpy);
#if LINEAR
hipMemcpy(temp_memberships,d_distanceMatrix,sizeof(float)*my_num_events*NUM_CLUSTERS,hipMemcpyDeviceToHost);
#else
hipMemcpy(temp_memberships,d_memberships,sizeof(float)*my_num_events*NUM_CLUSTERS,hipMemcpyDeviceToHost);
#endif
stopTimer(timer_memcpy);
startTimer(timer_cpu);
for(int c=0; c < NUM_CLUSTERS; c++) {
memcpy(&(memberships[c*NUM_EVENTS+gpu_num*events_per_gpu]),&(temp_memberships[c*my_num_events]),sizeof(float)*my_num_events);
}
stopTimer(timer_cpu);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// First transpose the memberships, makes it easier to gather the results between nodes
float* temp = (float*) malloc(sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
temp[e*NUM_CLUSTERS+c] = memberships[c*NUM_EVENTS+e];
}
}
memcpy(memberships,temp,sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
stopTimer(timer_cpu);
// Gather memberships on root
startTimer(timer_mpi);
int memberships_being_sent, memberships_per_node;
memberships_per_node = events_per_gpu*num_gpus*NUM_CLUSTERS;
if(rank == 0) {
for(int i=1; i < num_nodes; i++) {
memberships_being_sent = memberships_per_node;
if(i == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Status s;
MPI_Recv(&(temp[memberships_per_node*i]),memberships_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&s);
}
} else {
memberships_being_sent = memberships_per_node;
if(rank == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Send(&(memberships[memberships_per_node*rank]),memberships_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
// Transpose the memberships again to get original ordering
startTimer(timer_cpu);
if(rank == 0) {
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c<NUM_CLUSTERS; c++) {
memberships[c*NUM_EVENTS+e] = temp[e*NUM_CLUSTERS+c];
}
}
}
free(temp);
stopTimer(timer_cpu);
}
#pragma omp barrier
free(temp_memberships);
#endif // #if ENABLE_OUTPUT
if(tid == 0) {
if(fabs(diff) > THRESHOLD){
PRINT("Warning: c-means did not converge to the %f threshold provided\n", THRESHOLD);
}
PRINT("C-means complete\n");
}
#pragma omp barrier // sync threads
#if !ENABLE_MDL
if(tid == 0) {
// Don't attempt MDL, save all clusters
finalClusterConfig = (int*) malloc(sizeof(int)*NUM_CLUSTERS);
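// memset writes the byte 0x01 into every byte of each int; that is enough here because the flags are only ever tested for non-zero.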
memset(finalClusterConfig,1,sizeof(int)*NUM_CLUSTERS);
}
#else
PRINT("Calculating Q Matrix Section %d\n",tid);
// Copy the latest clusters to the device
// (the current ones on the device are 1 iteration old)
startTimer(timer_memcpy);
CUDA_SAFE_CALL(hipMemcpy(d_C, myClusters, size, hipMemcpyHostToDevice));
stopTimer(timer_memcpy);
// Build Q matrix, each gpu handles NUM_DIMENSIONS/num_gpus rows of the matrix
q_matrices[tid] = BuildQGPU(d_E, d_C, d_distanceMatrix, &mdlTime, tid, num_gpus, my_num_events);
#pragma omp barrier // sync threads
if(tid == 0) {
// Combine the partial matrices
int num_matrix_elements = NUM_CLUSTERS*(NUM_CLUSTERS/num_gpus);
for(int i=0; i < num_gpus; i++) {
float* q_matrix_ptr = (float*) q_matrix+i*num_matrix_elements;
float* q_matrices_ptr = (float*) q_matrices[i]+i*num_matrix_elements;
memcpy(q_matrix_ptr,q_matrices_ptr,sizeof(float)*num_matrix_elements);
free(q_matrices[i]);
}
startTimer(timer_cpu);
DEBUG("Searching for optimal configuration...\n");
finalClusterConfig = TabuSearch(q_matrix, argv[1]);
stopTimer(timer_cpu);
DEBUG("Q Matrix:\n");
for(int row=0; row < NUM_CLUSTERS; row++) {
for(int col=0; col < NUM_CLUSTERS; col++) {
DEBUG("%.2e ",q_matrix[row*NUM_CLUSTERS+col]);
}
DEBUG("\n");
}
free(q_matrix);
}
mdlTime /= 1000.0; // CUDA timer returns time in milliseconds, normalize to seconds
#endif
fflush(stdout);
#pragma omp barrier
#pragma omp master
{
printf("\n\n");
printf("Node %d: Thread %d: GPU memcpy Time (ms): %f\n",rank,tid,getTimerValue(timer_memcpy));
printf("Node %d: Thread %d: CPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_cpu));
printf("Node %d: Thread %d: GPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_gpu));
printf("Node %d: Thread %d: MPI Time (ms): %f\n",rank,tid,getTimerValue(timer_mpi));
}
#if !CPU_ONLY
CUDA_SAFE_CALL(hipFree(d_E));
CUDA_SAFE_CALL(hipFree(d_C));
CUDA_SAFE_CALL(hipFree(d_nC));
#endif
#pragma omp barrier
DEBUG("Thread %d done.\n",tid);
} // end of omp_parallel block
cutStartTimer(timer_io);
if(rank == 0) {
PRINT("Final Clusters are:\n");
int newCount = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
if(finalClusterConfig[i]){
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[newCount * NUM_DIMENSIONS + j] = myClusters[i*NUM_DIMENSIONS + j];
PRINT("%.3f\t", myClusters[i*NUM_DIMENSIONS + j]);
}
newCount++;
PRINT("\n");
}
}
#if ENABLE_OUTPUT
ReportSummary(newClusters, newCount, argv[1]);
ReportResults(myEvents, memberships, newCount, argv[1]);
#endif
}
cutStopTimer(timer_io);
cutStopTimer(timer_total);
if(rank == 0) {
printf("Total Time (ms): %f\n",cutGetTimerValue(timer_total));
printf("I/O Time (ms): %f\n",cutGetTimerValue(timer_io));
printf("Main Thread CPU Time (ms): %f\n",cutGetTimerValue(timer_main_cpu));
printf("\n\n");
}
free(newClusters);
free(myClusters);
free(myEvents);
free(transposedEvents);
MPI_Finalize();
return 0;
}
void generateInitialClusters(float* clusters, float* events){
int seed;
for(int i = 0; i < NUM_CLUSTERS; i++){
//seed = i * NUM_EVENTS / NUM_CLUSTERS;
seed = rand() % NUM_EVENTS;
for(int j = 0; j < NUM_DIMENSIONS; j++){
clusters[i*NUM_DIMENSIONS + j] = events[seed*NUM_DIMENSIONS + j];
}
}
}
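// Binary file layout: two 4-byte ints (number of events, number of dimensions) followed by nevents*ndims floats in row-major order.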
float* readBIN(char* f) {
FILE* fin = fopen(f,"rb");
int nevents,ndims;
fread(&nevents,4,1,fin);
fread(&ndims,4,1,fin);
int num_elements = (ndims)*(nevents);
printf("Number of rows: %d\n",nevents);
printf("Number of cols: %d\n",ndims);
float* data = (float*) malloc(sizeof(float)*num_elements);
fread(data,sizeof(float),num_elements,fin);
fclose(fin);
return data;
}
float* readCSV(char* filename) {
FILE* myfile = fopen(filename, "r");
if(myfile == NULL){
printf("Error: File DNE\n");
return NULL;
}
char myline[1024];
float* retVal = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
#if LINE_LABELS
//fgets(myline, 1024, myfile);
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 1024, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#else
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 1024, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#endif
fclose(myfile);
return retVal;
}
float* ParseSampleInput(char* f){
int length = strlen(f);
printf("File Extension: %s\n",f+length-3);
if(strcmp(f+length-3,"bin") == 0) {
return readBIN(f);
} else {
return readCSV(f);
}
}
void FreeMatrix(float* d_matrix){
CUDA_SAFE_CALL(hipFree(d_matrix));
}
float* BuildQGPU(float* d_events, float* d_clusters, float* distanceMatrix, float* mdlTime, int gpu_id, int num_gpus, int my_num_events){
float* d_matrix;
int size = sizeof(float) * NUM_CLUSTERS*NUM_CLUSTERS;
cudaTimer_t timer_gpu;
cudaTimer_t timer_memcpy;
createTimer(&timer_gpu);
createTimer(&timer_memcpy);
startTimer(timer_memcpy);
hipMalloc((void**)&d_matrix, size);
printCudaError();
stopTimer(timer_memcpy);
startTimer(timer_gpu);
dim3 grid(NUM_CLUSTERS / num_gpus, NUM_CLUSTERS);
int start_row = gpu_id*(NUM_CLUSTERS/num_gpus);
printf("GPU %d: Starting row for Q Matrix: %d\n",gpu_id,start_row);
printf("Launching Q Matrix Kernel\n");
hipLaunchKernelGGL(( CalculateQMatrixGPUUpgrade), dim3(grid), dim3(Q_THREADS), 0, 0, d_events, d_clusters, d_matrix, distanceMatrix, start_row, my_num_events);
hipDeviceSynchronize();
printCudaError();
stopTimer(timer_gpu);
startTimer(timer_memcpy);
float* matrix = (float*)malloc(size);
printf("Copying results to CPU\n");
hipError_t error = hipMemcpy(matrix, d_matrix, size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
printf("%s\n", hipGetErrorString(hipGetLastError()));
stopTimer(timer_memcpy);
stopTimer(timer_gpu);
*mdlTime = getTimerValue(timer_gpu);
printf("Processing time for MDL GPU: %f (ms) \n", *mdlTime);
printf("Memcpy time for MDL GPU: %f (ms) \n", getTimerValue(timer_memcpy));
deleteTimer(timer_gpu);
deleteTimer(timer_memcpy);
printCudaError();
FreeMatrix(d_matrix);
return matrix;
}
| b1c9060b6ec3aea1f6d0b9dc13d3954e15de5d8c.cu | #include <mpi.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cutil.h>
#include <cmeansMPI.h>
#include <math.h>
#include <time.h>
#include <string.h>
#include <float.h>
#include <cmeansMPI_kernel.cu>
#include "MDL.h"
void printCudaError() {
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
printf("%s\n",cudaGetErrorString(error));
}
}
typedef struct {
cudaEvent_t start;
cudaEvent_t stop;
float* et;
} cudaTimer_t;
void createTimer(cudaTimer_t* timer) {
#pragma omp critical (create_timer)
{
cudaEventCreate(&(timer->start));
cudaEventCreate(&(timer->stop));
timer->et = (float*) malloc(sizeof(float));
*(timer->et) = 0.0f;
}
}
void deleteTimer(cudaTimer_t timer) {
#pragma omp critical (delete_timer)
{
cudaEventDestroy(timer.start);
cudaEventDestroy(timer.stop);
free(timer.et);
}
}
void startTimer(cudaTimer_t timer) {
cudaEventRecord(timer.start,0);
}
void stopTimer(cudaTimer_t timer) {
cudaEventRecord(timer.stop,0);
cudaEventSynchronize(timer.stop);
float tmp;
cudaEventElapsedTime(&tmp,timer.start,timer.stop);
*(timer.et) += tmp;
}
float getTimerValue(cudaTimer_t timer) {
return *(timer.et);
}
/************************************************************************/
/* C-means Main */
/************************************************************************/
int main(int argc, char* argv[])
{
int rank, num_nodes, len, provided;
char name[MPI_MAX_PROCESSOR_NAME];
MPI_Init_thread(&argc,&argv,MPI_THREAD_MULTIPLE,&provided);
MPI_Comm_size(MPI_COMM_WORLD,&num_nodes);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
MPI_Get_processor_name(name, &len);
printf("Hello world from node %d of %d on %s\n",rank,num_nodes,name);
unsigned int timer_io; // Timer for I/O, such as reading FCS file and outputting result files
unsigned int timer_total; // Total time
unsigned int timer_main_cpu; // Main-thread CPU time
cutCreateTimer(&timer_io);
cutCreateTimer(&timer_total);
cutCreateTimer(&timer_main_cpu);
// determine the number of CUDA capable GPUs
int num_gpus = 0; // number of CUDA GPUs
cudaGetDeviceCount(&num_gpus);
if(num_gpus < 1)
{
printf("no CUDA capable devices were detected\n");
return 1;
}
// display CPU and GPU configuration
printf("number of host CPUs:\t%d\n", omp_get_num_procs());
printf("number of CUDA devices:\t%d\n", num_gpus);
for(int i = 0; i < num_gpus; i++)
{
cudaDeviceProp dprop;
cudaGetDeviceProperties(&dprop, i);
printf(" %d: %s\n", i, dprop.name);
}
printf("---------------------------\n");
int total_num_gpus = num_gpus * num_nodes;
cutStartTimer(timer_total);
// [program name] [data file]
if(argc != 2){
printf("Usage Error: must supply data file. e.g. programe_name @opt(flags) file.in\n");
return 1;
}
cutStartTimer(timer_io);
float* myEvents;
int elements_per_node, elements_being_sent;
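// Each node is assigned (NUM_EVENTS / total_num_gpus) events per GPU; elements_per_node is that count
// in floats (events * NUM_DIMENSIONS). The remainder events are handled by the last node/GPU.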
elements_per_node = NUM_EVENTS / total_num_gpus * num_gpus * NUM_DIMENSIONS;
// Root reads input from file and distributes to each node
if(rank == 0) {
myEvents = ParseSampleInput(argv[1]);
MPI_Request* requests = (MPI_Request*) malloc(sizeof(MPI_Request)*num_nodes);
MPI_Status s;
// Send everything asynchronously
for(int i=1; i < num_nodes; i++) {
elements_being_sent = elements_per_node;
if(i == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Isend(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&requests[i]);
//MPI_Send(&(myEvents[elements_per_node*i]),elements_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD);
}
// Wait for the Isends to complete
for(int i=1; i < num_nodes; i++) {
MPI_Wait(&requests[i],&s);
}
free(requests);
elements_being_sent = elements_per_node; // so that it's set properly for the root
} else {
myEvents = (float*) malloc(sizeof(float)*NUM_DIMENSIONS*NUM_EVENTS);
elements_being_sent = elements_per_node;
if(rank == num_nodes-1) { // boundary condition
elements_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_DIMENSIONS;
}
MPI_Status s;
MPI_Recv(&(myEvents[elements_per_node*rank]),elements_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD,&s);
}
MPI_Barrier(MPI_COMM_WORLD);
cutStopTimer(timer_io);
cutStartTimer(timer_main_cpu);
//srand((unsigned)(time(0)));
srand(42);
// Allocate arrays for the cluster centers
float* myClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
float* newClusters = (float*)malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
// Select random cluster centers
double t1,t2;
generateInitialClusters(myClusters, myEvents);
// Create an array of arrays for temporary cluster centers from each GPU
float** tempClusters = (float**) malloc(sizeof(float*)*num_gpus);
float** tempDenominators = (float**) malloc(sizeof(float*)*num_gpus);
for(int i=0; i < num_gpus; i++) {
tempClusters[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
tempDenominators[i] = (float*) malloc(sizeof(float)*NUM_CLUSTERS);
memcpy(tempClusters[i],myClusters,sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS);
}
// Create an array of arrays for temporary Q matrix pieces from each GPU
float** q_matrices = (float**) malloc(sizeof(float*)*num_gpus);
// Create an array for the final Q matrix
float* q_matrix = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_CLUSTERS);
float diff; // used to track difference in cluster centers between iterations
// Transpose the events matrix
float* transposedEvents = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
for(int i=0; i<NUM_EVENTS; i++) {
for(int j=0; j<NUM_DIMENSIONS; j++) {
transposedEvents[j*NUM_EVENTS+i] = myEvents[i*NUM_DIMENSIONS+j];
}
}
float* memberships = (float*) malloc(sizeof(float)*NUM_CLUSTERS*NUM_EVENTS);
int* finalClusterConfig;
cutStopTimer(timer_main_cpu);
////////////////////////////////////////////////////////////////
// run as many CPU threads as there are CUDA devices
//num_gpus = 1;
//omp_set_num_threads(num_gpus); // create as many CPU threads as there are CUDA devices
#pragma omp parallel shared(myClusters,diff,tempClusters,tempDenominators,memberships,finalClusterConfig)
{
cudaTimer_t timer_memcpy; // Timer for GPU <---> CPU memory copying
cudaTimer_t timer_cpu; // Timer for processing on CPU
cudaTimer_t timer_gpu; // Timer for kernels on the GPU
cudaTimer_t timer_mpi; // Timer for MPI
unsigned int tid = omp_get_thread_num();
unsigned int num_cpu_threads = omp_get_num_threads();
int gpu_num = rank*num_gpus+tid;
printf("hello from thread %d of %d\n",tid,num_cpu_threads);
// set and check the CUDA device for this CPU thread
int gpu_id = -1;
cudaSetDevice(tid % num_gpus); // "% num_gpus" allows more CPU threads than GPU devices
cudaGetDevice(&gpu_id);
#pragma omp barrier
createTimer(&timer_memcpy);
createTimer(&timer_cpu);
createTimer(&timer_gpu);
createTimer(&timer_mpi);
printf("CPU thread %d (of %d) uses CUDA device %d\n", tid, num_cpu_threads, gpu_id);
// Compute starting/finishing indexes for the events for each gpu
int events_per_gpu = NUM_EVENTS / total_num_gpus;
int my_num_events = events_per_gpu;
if(gpu_num == (total_num_gpus-1)) {
my_num_events += NUM_EVENTS % total_num_gpus;
}
startTimer(timer_memcpy);
float* d_distanceMatrix;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_distanceMatrix, sizeof(float)*my_num_events*NUM_CLUSTERS));
#if !LINEAR
float* d_memberships;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_memberships, sizeof(float)*my_num_events*NUM_CLUSTERS));
#endif
float* d_E;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_E, sizeof(float)*my_num_events*NUM_DIMENSIONS));
float* d_C;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_C, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_nC;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS));
float* d_denoms;
CUDA_SAFE_CALL(cudaMalloc((void**)&d_denoms, sizeof(float)*NUM_CLUSTERS));
int size = sizeof(float)*NUM_DIMENSIONS*my_num_events;
// Copying the transposed data is trickier since it's not all contiguous for the relevant events
float* temp_fcs_data = (float*) malloc(size);
for(int d=0; d < NUM_DIMENSIONS; d++) {
memcpy(&temp_fcs_data[d*my_num_events],&transposedEvents[d*NUM_EVENTS + gpu_num*events_per_gpu],sizeof(float)*my_num_events);
}
CUDA_SAFE_CALL(cudaMemcpy( d_E, temp_fcs_data, size,cudaMemcpyHostToDevice) );
cudaThreadSynchronize();
free(temp_fcs_data);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
stopTimer(timer_memcpy);
printf("Starting C-means\n");
int iterations = 0;
int num_blocks_distance = my_num_events / NUM_THREADS_DISTANCE;
if(my_num_events % NUM_THREADS_DISTANCE) {
num_blocks_distance++;
}
int num_blocks_membership = my_num_events / NUM_THREADS_MEMBERSHIP;
if(my_num_events % NUM_THREADS_MEMBERSHIP) {
num_blocks_membership++;
}
int num_blocks_update = NUM_CLUSTERS / NUM_CLUSTERS_PER_BLOCK;
if(NUM_CLUSTERS % NUM_CLUSTERS_PER_BLOCK) {
num_blocks_update++;
}
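// Main c-means loop: each GPU computes distances/memberships for its slice of events, the partial
// numerators and denominators are summed across GPUs (OpenMP master) and across nodes (MPI_Reduce),
// and the updated cluster centers are broadcast to all ranks.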
do{
cudaTimer_t timer;
createTimer(&timer);
startTimer(timer);
size = sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS;
// Copy the cluster centers to the GPU
startTimer(timer_memcpy);
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
stopTimer(timer_memcpy);
startTimer(timer_gpu);
DEBUG("Launching ComputeDistanceMatrix kernel\n");
ComputeDistanceMatrix<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix, my_num_events);
#if LINEAR
// O(M) membership kernel
DEBUG("Launching ComputeMembershipMatrixLinear kernel\n");
ComputeMembershipMatrixLinear<<< num_blocks_membership, NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
UpdateClusterCentersGPU3<<< dim3(NUM_DIMENSIONS,num_blocks_update), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_distanceMatrix, my_num_events);
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_distanceMatrix, d_denoms, my_num_events);
#else
// O(M^2) membership kernel
DEBUG("Launching ComputeMembershipMatrix kernel\n");
ComputeMembershipMatrix<<< dim3(num_blocks_membership,NUM_CLUSTERS), NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, d_memberships, my_num_events);
DEBUG("Launching UpdateClusterCentersGPU kernel\n");
//UpdateClusterCentersGPU<<< dim3(NUM_CLUSTERS,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, d_denoms, my_num_events);
//UpdateClusterCentersGPU2<<< dim3(num_blocks_update,NUM_DIMENSIONS), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, my_num_events);
UpdateClusterCentersGPU3<<< dim3(NUM_DIMENSIONS,num_blocks_update), NUM_THREADS_UPDATE >>>(d_C, d_E, d_nC, d_memberships, my_num_events);
ComputeClusterSizes<<< NUM_CLUSTERS, 512 >>>( d_memberships, d_denoms, my_num_events );
#endif
cudaThreadSynchronize();
printCudaError();
stopTimer(timer_gpu);
// Copy partial centers and denominators to host
startTimer(timer_memcpy);
cudaMemcpy(tempClusters[tid], d_nC, sizeof(float)*NUM_CLUSTERS*NUM_DIMENSIONS, cudaMemcpyDeviceToHost);
cudaMemcpy(tempDenominators[tid], d_denoms, sizeof(float)*NUM_CLUSTERS, cudaMemcpyDeviceToHost);
printCudaError();
stopTimer(timer_memcpy);
stopTimer(timer);
float thisTime = getTimerValue(timer);
DEBUG("Processing time for GPU %d: %f (ms) \n", tid, thisTime);
deleteTimer(timer);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// Sum up the partial cluster centers (numerators)
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] += tempClusters[i][c*NUM_DIMENSIONS+d];
}
}
}
// Sum up the denominator for each cluster
for(int i=1; i < num_gpus; i++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
tempDenominators[0][c] += tempDenominators[i][c];
}
}
stopTimer(timer_cpu);
DEBUG("Reducing cluster values\n");
startTimer(timer_mpi);
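// MPI_IN_PLACE lets rank 0 accumulate the global sums directly into its own buffers; non-root ranks only contribute their partial sums.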
if(rank == 0) {
MPI_Reduce(MPI_IN_PLACE,tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempClusters[0],tempClusters[1],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempClusters[0],tempClusters[1],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
MPI_Reduce(MPI_IN_PLACE,tempDenominators[0],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//MPI_Reduce(tempDenominators[0],tempDenominators[1],NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
//memcpy(tempDenominators[0],tempDenominators[1],sizeof(float)*NUM_CLUSTERS);
} else {
MPI_Reduce(tempClusters[0],0,NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
MPI_Reduce(tempDenominators[0],0,NUM_CLUSTERS,MPI_FLOAT,MPI_SUM,0,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD); // not sure if necessary...
stopTimer(timer_mpi);
startTimer(timer_cpu);
// Divide to get the final clusters
if(rank == 0) {
for(int c=0; c < NUM_CLUSTERS; c++) {
for(int d=0; d < NUM_DIMENSIONS; d++) {
tempClusters[0][c*NUM_DIMENSIONS+d] /= tempDenominators[0][c];
}
}
}
stopTimer(timer_cpu);
startTimer(timer_mpi);
DEBUG("Broadcasting Cluster Values\n");
MPI_Bcast(tempClusters[0],NUM_DIMENSIONS*NUM_CLUSTERS,MPI_FLOAT,0,MPI_COMM_WORLD);
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
startTimer(timer_cpu);
diff = 0.0;
for(int i=0; i < NUM_CLUSTERS; i++){
DEBUG("GPU %d, Cluster %d: ",tid,i);
for(int k = 0; k < NUM_DIMENSIONS; k++){
DEBUG("%f ",tempClusters[tid][i*NUM_DIMENSIONS + k]);
diff += fabs(myClusters[i*NUM_DIMENSIONS + k] - tempClusters[tid][i*NUM_DIMENSIONS + k]);
}
DEBUG("\n");
}
memcpy(myClusters,tempClusters[tid],sizeof(float)*NUM_DIMENSIONS*NUM_CLUSTERS);
DEBUG("Diff = %f\n", diff);
DEBUG("Done with iteration #%d\n", iterations);
stopTimer(timer_cpu);
}
#pragma omp barrier
iterations++;
DEBUG("\n");
} while(iterations < MIN_ITERS || (fabs(diff) > THRESHOLD && iterations < MAX_ITERS));
#pragma omp master
{
if(rank == 0) {
printf("Iterations: %d\n",iterations);
}
}
#if ENABLE_OUTPUT
// Compute final membership values
startTimer(timer_gpu);
#if LINEAR
// O(M)
ComputeDistanceMatrix<<< dim3(num_blocks_distance,NUM_CLUSTERS), NUM_THREADS_DISTANCE >>>(d_C, d_E, d_distanceMatrix, my_num_events);
ComputeNormalizedMembershipMatrixLinear<<< num_blocks_membership, NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix,my_num_events);
#else
// O(M^2)
ComputeNormalizedMembershipMatrix<<< dim3(num_blocks_membership,NUM_CLUSTERS), NUM_THREADS_MEMBERSHIP >>>(d_distanceMatrix, d_memberships, my_num_events);
#endif
stopTimer(timer_gpu);
// Copy memberships from the GPU
float* temp_memberships = (float*) malloc(sizeof(float)*my_num_events*NUM_CLUSTERS);
startTimer(timer_memcpy);
#if LINEAR
cudaMemcpy(temp_memberships,d_distanceMatrix,sizeof(float)*my_num_events*NUM_CLUSTERS,cudaMemcpyDeviceToHost);
#else
cudaMemcpy(temp_memberships,d_memberships,sizeof(float)*my_num_events*NUM_CLUSTERS,cudaMemcpyDeviceToHost);
#endif
stopTimer(timer_memcpy);
startTimer(timer_cpu);
for(int c=0; c < NUM_CLUSTERS; c++) {
memcpy(&(memberships[c*NUM_EVENTS+gpu_num*events_per_gpu]),&(temp_memberships[c*my_num_events]),sizeof(float)*my_num_events);
}
stopTimer(timer_cpu);
#pragma omp barrier
#pragma omp master
{
startTimer(timer_cpu);
// First transpose the memberships, makes it easier to gather the results between nodes
float* temp = (float*) malloc(sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c < NUM_CLUSTERS; c++) {
temp[e*NUM_CLUSTERS+c] = memberships[c*NUM_EVENTS+e];
}
}
memcpy(memberships,temp,sizeof(float)*NUM_EVENTS*NUM_CLUSTERS);
stopTimer(timer_cpu);
// Gather memberships on root
startTimer(timer_mpi);
int memberships_being_sent, memberships_per_node;
memberships_per_node = events_per_gpu*num_gpus*NUM_CLUSTERS;
if(rank == 0) {
for(int i=1; i < num_nodes; i++) {
memberships_being_sent = memberships_per_node;
if(i == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Status s;
MPI_Recv(&(temp[memberships_per_node*i]),memberships_being_sent,MPI_FLOAT,i,1,MPI_COMM_WORLD,&s);
}
} else {
memberships_being_sent = memberships_per_node;
if(rank == num_nodes-1) { // boundary condition
memberships_being_sent += (NUM_EVENTS % total_num_gpus)*NUM_CLUSTERS;
}
MPI_Send(&(memberships[memberships_per_node*rank]),memberships_being_sent,MPI_FLOAT,0,1,MPI_COMM_WORLD);
}
MPI_Barrier(MPI_COMM_WORLD);
stopTimer(timer_mpi);
// Transpose the memberships again to get original ordering
startTimer(timer_cpu);
if(rank == 0) {
for(int e=0; e < NUM_EVENTS; e++) {
for(int c=0; c<NUM_CLUSTERS; c++) {
memberships[c*NUM_EVENTS+e] = temp[e*NUM_CLUSTERS+c];
}
}
}
free(temp);
stopTimer(timer_cpu);
}
#pragma omp barrier
free(temp_memberships);
#endif // #if ENABLE_OUTPUT
if(tid == 0) {
if(fabs(diff) > THRESHOLD){
PRINT("Warning: c-means did not converge to the %f threshold provided\n", THRESHOLD);
}
PRINT("C-means complete\n");
}
#pragma omp barrier // sync threads
#if !ENABLE_MDL
if(tid == 0) {
// Don't attempt MDL, save all clusters
finalClusterConfig = (int*) malloc(sizeof(int)*NUM_CLUSTERS);
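// memset writes the byte 0x01 into every byte of each int; that is enough here because the flags are only ever tested for non-zero.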
memset(finalClusterConfig,1,sizeof(int)*NUM_CLUSTERS);
}
#else
PRINT("Calculating Q Matrix Section %d\n",tid);
// Copy the latest clusters to the device
// (the current ones on the device are 1 iteration old)
startTimer(timer_memcpy);
CUDA_SAFE_CALL(cudaMemcpy(d_C, myClusters, size, cudaMemcpyHostToDevice));
stopTimer(timer_memcpy);
// Build Q matrix, each gpu handles NUM_DIMENSIONS/num_gpus rows of the matrix
q_matrices[tid] = BuildQGPU(d_E, d_C, d_distanceMatrix, &mdlTime, tid, num_gpus, my_num_events);
#pragma omp barrier // sync threads
if(tid == 0) {
// Combine the partial matrices
int num_matrix_elements = NUM_CLUSTERS*(NUM_CLUSTERS/num_gpus);
for(int i=0; i < num_gpus; i++) {
float* q_matrix_ptr = (float*) q_matrix+i*num_matrix_elements;
float* q_matrices_ptr = (float*) q_matrices[i]+i*num_matrix_elements;
memcpy(q_matrix_ptr,q_matrices_ptr,sizeof(float)*num_matrix_elements);
free(q_matrices[i]);
}
startTimer(timer_cpu);
DEBUG("Searching for optimal configuration...\n");
finalClusterConfig = TabuSearch(q_matrix, argv[1]);
stopTimer(timer_cpu);
DEBUG("Q Matrix:\n");
for(int row=0; row < NUM_CLUSTERS; row++) {
for(int col=0; col < NUM_CLUSTERS; col++) {
DEBUG("%.2e ",q_matrix[row*NUM_CLUSTERS+col]);
}
DEBUG("\n");
}
free(q_matrix);
}
mdlTime /= 1000.0; // CUDA timer returns time in milliseconds, normalize to seconds
#endif
fflush(stdout);
#pragma omp barrier
#pragma omp master
{
printf("\n\n");
printf("Node %d: Thread %d: GPU memcpy Time (ms): %f\n",rank,tid,getTimerValue(timer_memcpy));
printf("Node %d: Thread %d: CPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_cpu));
printf("Node %d: Thread %d: GPU processing Time (ms): %f\n",rank,tid,getTimerValue(timer_gpu));
printf("Node %d: Thread %d: MPI Time (ms): %f\n",rank,tid,getTimerValue(timer_mpi));
}
#if !CPU_ONLY
CUDA_SAFE_CALL(cudaFree(d_E));
CUDA_SAFE_CALL(cudaFree(d_C));
CUDA_SAFE_CALL(cudaFree(d_nC));
#endif
#pragma omp barrier
DEBUG("Thread %d done.\n",tid);
} // end of omp_parallel block
cutStartTimer(timer_io);
if(rank == 0) {
PRINT("Final Clusters are:\n");
int newCount = 0;
for(int i = 0; i < NUM_CLUSTERS; i++){
if(finalClusterConfig[i]){
for(int j = 0; j < NUM_DIMENSIONS; j++){
newClusters[newCount * NUM_DIMENSIONS + j] = myClusters[i*NUM_DIMENSIONS + j];
PRINT("%.3f\t", myClusters[i*NUM_DIMENSIONS + j]);
}
newCount++;
PRINT("\n");
}
}
#if ENABLE_OUTPUT
ReportSummary(newClusters, newCount, argv[1]);
ReportResults(myEvents, memberships, newCount, argv[1]);
#endif
}
cutStopTimer(timer_io);
cutStopTimer(timer_total);
if(rank == 0) {
printf("Total Time (ms): %f\n",cutGetTimerValue(timer_total));
printf("I/O Time (ms): %f\n",cutGetTimerValue(timer_io));
printf("Main Thread CPU Time (ms): %f\n",cutGetTimerValue(timer_main_cpu));
printf("\n\n");
}
free(newClusters);
free(myClusters);
free(myEvents);
free(transposedEvents);
MPI_Finalize();
return 0;
}
void generateInitialClusters(float* clusters, float* events){
int seed;
for(int i = 0; i < NUM_CLUSTERS; i++){
//seed = i * NUM_EVENTS / NUM_CLUSTERS;
seed = rand() % NUM_EVENTS;
for(int j = 0; j < NUM_DIMENSIONS; j++){
clusters[i*NUM_DIMENSIONS + j] = events[seed*NUM_DIMENSIONS + j];
}
}
}
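// Binary file layout: two 4-byte ints (number of events, number of dimensions) followed by nevents*ndims floats in row-major order.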
float* readBIN(char* f) {
FILE* fin = fopen(f,"rb");
int nevents,ndims;
fread(&nevents,4,1,fin);
fread(&ndims,4,1,fin);
int num_elements = (ndims)*(nevents);
printf("Number of rows: %d\n",nevents);
printf("Number of cols: %d\n",ndims);
float* data = (float*) malloc(sizeof(float)*num_elements);
fread(data,sizeof(float),num_elements,fin);
fclose(fin);
return data;
}
float* readCSV(char* filename) {
FILE* myfile = fopen(filename, "r");
if(myfile == NULL){
printf("Error: File DNE\n");
return NULL;
}
char myline[1024];
float* retVal = (float*)malloc(sizeof(float)*NUM_EVENTS*NUM_DIMENSIONS);
#if LINE_LABELS
//fgets(myline, 1024, myfile);
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 1024, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#else
for(int i = 0; i < NUM_EVENTS; i++){
fgets(myline, 1024, myfile);
retVal[i*NUM_DIMENSIONS] = (float)atof(strtok(myline, DELIMITER));
for(int j = 1; j < NUM_DIMENSIONS; j++){
retVal[i*NUM_DIMENSIONS + j] = (float)atof(strtok(NULL, DELIMITER));
}
}
#endif
fclose(myfile);
return retVal;
}
float* ParseSampleInput(char* f){
int length = strlen(f);
printf("File Extension: %s\n",f+length-3);
if(strcmp(f+length-3,"bin") == 0) {
return readBIN(f);
} else {
return readCSV(f);
}
}
void FreeMatrix(float* d_matrix){
CUDA_SAFE_CALL(cudaFree(d_matrix));
}
float* BuildQGPU(float* d_events, float* d_clusters, float* distanceMatrix, float* mdlTime, int gpu_id, int num_gpus, int my_num_events){
float* d_matrix;
int size = sizeof(float) * NUM_CLUSTERS*NUM_CLUSTERS;
cudaTimer_t timer_gpu;
cudaTimer_t timer_memcpy;
createTimer(&timer_gpu);
createTimer(&timer_memcpy);
startTimer(timer_memcpy);
cudaMalloc((void**)&d_matrix, size);
printCudaError();
stopTimer(timer_memcpy);
startTimer(timer_gpu);
dim3 grid(NUM_CLUSTERS / num_gpus, NUM_CLUSTERS);
int start_row = gpu_id*(NUM_CLUSTERS/num_gpus);
printf("GPU %d: Starting row for Q Matrix: %d\n",gpu_id,start_row);
printf("Launching Q Matrix Kernel\n");
CalculateQMatrixGPUUpgrade<<<grid, Q_THREADS>>>(d_events, d_clusters, d_matrix, distanceMatrix, start_row, my_num_events);
cudaThreadSynchronize();
printCudaError();
stopTimer(timer_gpu);
startTimer(timer_memcpy);
float* matrix = (float*)malloc(size);
printf("Copying results to CPU\n");
cudaError_t error = cudaMemcpy(matrix, d_matrix, size, cudaMemcpyDeviceToHost);
cudaThreadSynchronize();
printf("%s\n", cudaGetErrorString(cudaGetLastError()));
stopTimer(timer_memcpy);
stopTimer(timer_gpu);
*mdlTime = getTimerValue(timer_gpu);
printf("Processing time for MDL GPU: %f (ms) \n", *mdlTime);
printf("Memcpy time for MDL GPU: %f (ms) \n", getTimerValue(timer_memcpy));
deleteTimer(timer_gpu);
deleteTimer(timer_memcpy);
printCudaError();
FreeMatrix(d_matrix);
return matrix;
}
|
matrix_asin.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_asin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = asin(A[index]);
}
} | matrix_asin.cu | #include "includes.h"
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**********************************
When updating a kernel or adding a new one,
please compile the ptx file and commit it:
nvcc -ptx -arch=sm_30 SystemML.cu
***********************************/
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: rows of output matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is sparse and the output matrix is dense.
* This function avoids unnecessary sparse to dense conversion of the input matrix.
* Parallelization: subset of number of non-zeroes of input matrix.
*
* @params inVal input val pointer
* @params inRowPtr input row pointer
* @params colInd input col index pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Performs a slice operation where the input matrix is dense and the output matrix is dense.
*
* @params in dense input pointer
* @params ret dense output pointer
* @param rl row lower
* @param ru row upper
* @param cl column lower
* @param cu column upper
* @param inClen number of columns of input matrix
* @param retRlen number of rows of output matrix
* @param retClen number of columns of output matrix
*/
extern "C"
/**
* Does a copy of upper to lower triangle of the given matrix
* @param ret the input and output array allocated on the GPU
* @param dim the number of rows of the square matrix ret
* @param N total number of elements of the matrix
*/
extern "C"
extern "C"
__global__ void matrix_asin(double *A, double *C, unsigned int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size){
C[index] = asin(A[index]);
}
} |
9c8c76b98e9e7365b7b36e3550f1b00815b6f9a8.hip | // !!! This is a file automatically generated by hipify!!!
#include "pch.h"
#include "hip/hip_runtime.h"
#include "hiprand/hiprand.h"
#include "rocblas.h"
#include "hip/hip_fp16.h"
#include "cuda_fp16.hpp"
#include "hip/hip_runtime.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "hip/hip_runtime.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(hipPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(hipPeekAtLastError());
}
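// Convert the layer's FP32 weights and biases to FP16 on the GPU, free the FP32 buffers,
// and repoint weights_gpu/biases_gpu at the half-precision copies.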
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l->weights_gpu, l->nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(hipMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
/*check_error(hipMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), hipMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));*/
}
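// Add the per-filter bias to every spatial element of the output, entirely in half precision; the grid is (spatial blocks, filters, batch).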
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(hipPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hlt(b, half(-1.0f))) b = half(-1.0f);
if (__hgt(b, half(1.0f))) b = half(1.0f);
output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
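// Fused bias-add + activation in half precision. For activations without a half kernel (bUnSupportAct),
// only the bias is added here and the caller applies the activation later in FP32.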
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct)
{
if (bUnSupportAct) add_bias_half_gpu(output, biases, batch, n, size);
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(hipPeekAtLastError());
}
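// FP16 inference path: stage the input (and output, when the neighbouring layers are FP32) through the
// shared half buffers, run cudnnConvolutionForward in half precision, then fuse bias/activation on the
// half output before converting back to FP32 where required.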
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
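// publicMemory[0]/[1] are the shared fp16 staging buffers: when bIn32 is set
// the fp32 input is converted into publicMemory[0]; when bOut32 is set the
// convolution writes fp16 into publicMemory[1], which is converted back to
// fp32 into l.output_gpu after bias/activation.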
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
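// Half-precision convolution: l.weights_gpu is assumed to already hold fp16
// data (converted once in DealWeightBuffer); alpha = 1, beta = 0, so the
// result simply overwrites the output buffer.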
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
hipError_t stats = hipMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), hipMemcpyDeviceToDevice);
}*/
#endif
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate);
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} | 9c8c76b98e9e7365b7b36e3550f1b00815b6f9a8.cu | #include "pch.h"
#include "cuda_runtime.h"
#include "curand.h"
#include "cublas_v2.h"
#include "cuda_fp16.h"
#include "cuda_fp16.hpp"
#include "cuda.h"
extern "C" {
#include "convolutional_layer.h"
#include "batchnorm_layer.h"
#include "gemm.h"
#include "blas.h"
#include "im2col.h"
#include "col2im.h"
#include "utils.h"
#include "cuda.h"
}
half* publicMemory[2] = {0,0};
int pMSize[2] = {0,0};
extern "C" cudnnDataType_t GetDataType();
void MakeHalfMaxSize(int iGiveSize,int iOutSize)
{
size_t size[2] = { sizeof(half) * iGiveSize,iOutSize*sizeof(half)};
for (int cnum = 0; cnum < 2; cnum++)
{
if (pMSize[cnum] < size[cnum])
{
if (publicMemory[cnum]) cuda_free_allType(publicMemory[cnum]);
pMSize[cnum] = size[cnum];
publicMemory[cnum]=(half *)cuda_make_short_array(pMSize[cnum]);
}
}
}
__global__ void cuda_f32_to_f16(float* input_f32, size_t size, half* output_f16)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f16[idx] = __float2half(input_f32[idx]);
//if (idx < size) output_f16[idx] = __float2half_rn(input_f32[idx]); // can't be compiled on Linux without casting
// __float2half_ru, __float2half_rd, __float2half_rz, __float2half_rn
//if (idx < size) *((unsigned short *)output_f16 + idx) = __float2half(input_f32[idx]);
}
void cuda_convert_f32_to_f16(float* input_f32, size_t size, half* output_f16) {
cuda_f32_to_f16 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > (input_f32, size, (half*)output_f16);
check_error(cudaPeekAtLastError());
}
__global__ void cuda_f16_to_f32(half* input_f16, size_t size, float* output_f32)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) output_f32[idx] = __half2float(input_f16[idx]);
//if (idx < size) output_f32[idx] = __half2float(*((unsigned short *)input_f16 + idx));
}
void cuda_convert_f16_to_f32(half* input_f16, size_t size, float* output_f32) {
cuda_f16_to_f32 << < cuda_gridsize(size), BLOCK,0,get_cuda_stream() >> > ((half*)input_f16, size, output_f32);
check_error(cudaPeekAtLastError());
}
void DealWeightBuffer(convolutional_layer* l)
{
//return;
#ifdef GETDATATYPE
if (GetDataType() != CUDNN_DATA_HALF) return;
#endif
#ifdef DEALWEIGHTBUFFER
OutPutGPUMemory(l.weights_gpu, l.nweights, 0);
#endif
half* halfWeights = 0;
halfWeights=(half *)cuda_make_short_array(l->nweights);
cuda_convert_f32_to_f16(l->weights_gpu, l->nweights, halfWeights);
#ifdef DEALWEIGHTBUFFER
float* fResult=0;
check_error(cudaMalloc((void**)&fResult, l.nweights * sizeof(float)));
cuda_convert_f16_to_f32(halfWeights, l.nweights, fResult);
OutPutGPUMemory(fResult, l.nweights, 0);
#endif
cuda_free(l->weights_gpu);
DecGenerateMemory(l->nweights * sizeof(float));
l->weights_gpu = (float *)halfWeights;
half* bias = (half*)cuda_make_short_array(l->n);
cuda_convert_f32_to_f16(l->biases_gpu, l->n, bias);
cuda_free(l->biases_gpu);
DecGenerateMemory(l->n * sizeof(float));
l->biases_gpu = (float*)bias;
/*check_error(cudaMemcpy(l.weights_gpu, halfWeights, l.nweights * sizeof(half), cudaMemcpyDeviceToDevice));
cuda_free_allType(halfWeights);
DecGenerateMemory(l.nweights * sizeof(half));*/
}
__global__ void add_bias_half_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
half a = output[(batch * n + filter) * size + offset];
output[(batch * n + filter) * size + offset] =__hadd(a, biases[filter]);
}
void add_bias_half_gpu(half* output, half* biases, int batch, int n, int size)
{
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
add_bias_half_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
check_error(cudaPeekAtLastError());
}
__global__ void activate_array_hardtan_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hlt(b, half(-1.0f))) b = half(-1.0f);
if (__hgt(b, half(1.0f))) b = half(1.0f);
output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// if (a < -1) a = -1;
// if (a > 1) a = 1;
// x[index] = a;//hardtan_activate_kernel(x[index]);
//}
}
__global__ void activate_array_relu_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] = half(0.0f);
//output[iOutDex] = b;
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = a * (a > 0);// relu_activate_kernel(x[index]);
//}
}
__global__ void activate_array_leaky_halfadd_kernel(half* output, half* biases, int n, int size)
{
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int filter = blockIdx.y;
int batch = blockIdx.z;
if (offset >= size) return;
int iOutDex = (batch * n + filter) * size + offset;
half a = output[iOutDex];
half b = __hadd(a, biases[filter]);
if (__hgt(b, half(0.0f))) output[iOutDex] = b;
else output[iOutDex] =__hmul(half(0.1f),b);
//int index = blockIdx.x * blockDim.x + threadIdx.x;
//if (index < n) {
// float a = x[index];
// x[index] = (a > 0) ? a : .1f * a; //leaky_activate_kernel(x[index]);
//}
}
//__global__ void activate_array_selu_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int offset = blockIdx.x * blockDim.x + threadIdx.x;
// int filter = blockIdx.y;
// int batch = blockIdx.z;
// if (offset >= size) return;
// int iOutDex = (batch * n + filter) * size + offset;
// half a = output[iOutDex];
// half b = __hadd(a, biases[filter]);
// if (__hgt(b, half(0.0f))) output[iOutDex] = b;
// else output[iOutDex] = __hmul(half(0.1f), b);
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (a >= 0) * 1.0507f * a + (a < 0) * 1.0507f * 1.6732f * (expf(a) - 1);
// }
//}
//
//__global__ void activate_array_logistic_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = 1.f / (1.f + expf(-a));
// }
//}
//
//__global__ void activate_array_tanh_halfadd_kernel(half* output, half* biases, int n, int size)
//{
// int index = blockIdx.x * blockDim.x + threadIdx.x;
// if (index < n) {
// float a = x[index];
// x[index] = (2.f / (1 + expf(-2 * a)) - 1);
// }
//}
void add_bias_activation_half_gpu(half* output, half* biases, int batch, int n, int size
,ACTIVATION act,int bUnSupportAct)
{
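// Fused fp16 bias-add + activation. Activations without an fp16 kernel here
// (bUnSupportAct) only get the bias added; the caller applies the activation
// later on the fp32 output.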
if (bUnSupportAct) add_bias_half_gpu(output, biases, batch, n, size);
dim3 dimGrid((size - 1) / BLOCK + 1, n, batch);
dim3 dimBlock(BLOCK, 1, 1);
switch (act)
{
case RELU:
activate_array_relu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LINEAR:
break;
case LEAKY:
activate_array_leaky_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case HARDTAN:
activate_array_hardtan_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
/* case SELU:
activate_array_selu_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case LOGISTIC:
activate_array_logistic_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;
case TANH:
activate_array_tanh_halfadd_kernel << <dimGrid, dimBlock, 0, get_cuda_stream() >> > (output, biases, n, size);
break;*/
}
check_error(cudaPeekAtLastError());
}
void forward_convolutional_layer_gpu_predict_Float16(convolutional_layer l, network net)
{
if (l.binary) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
}
if (l.xnor) {
binarize_weights_gpu(l.weights_gpu, l.n, l.c / l.groups * l.size * l.size, l.binary_weights_gpu);
swap_binary(&l);
binarize_gpu(net.input_gpu, l.c * l.h * l.w * l.batch, l.binary_input_gpu);
net.input_gpu = l.binary_input_gpu;
}
float one = 1.0f,zero=0.0f;
#ifdef MEMORYDEBUG
printf("gpuInput:0x%x,gpuOutput:0x%x bin:%d,xnor:%d\n", (unsigned int)net.input_gpu, (unsigned int)l.output_gpu, l.binary, l.xnor);
printf("workspace:0x%x,size:%d,", (unsigned int)net.workspace, l.workspace_size);
printf("inputsize:%d,outputSize:%d\n", net.inputs, l.outputs);
#endif
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(net.input_gpu, net.inputs,0);
#endif
LAYERDATA* data = (LAYERDATA *)l.layerdata;
CONVPROP* prop = (CONVPROP*)data->layerData;
void* input=0;
void* output = 0;
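// publicMemory[0]/[1] are the shared fp16 staging buffers: when bIn32 is set
// the fp32 input is converted into publicMemory[0]; when bOut32 is set the
// convolution writes fp16 into publicMemory[1], which is converted back to
// fp32 into l.output_gpu after bias/activation.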
if (prop->bIn32)
{
cuda_convert_f32_to_f16(net.input_gpu, net.inputs, publicMemory[0]);
input = publicMemory[0];
}
else
{
input = net.input_gpu;
}
if (prop->bOut32)
{
output = publicMemory[1];
}
else
{
output = l.output_gpu;
}
#ifdef GETDATATYPE
float* fa, *fw;
fa = cuda_make_array(0, net.inputs);
fw = cuda_make_array(0, l.nweights);
cuda_convert_f16_to_f32(publicMemory[0], net.inputs, fa);
cuda_convert_f16_to_f32((half *)l.weights_gpu, l.nweights, fw);
OutPutGPUMemory(fa, net.inputs, 0);
OutPutGPUMemory(fw, l.nweights, 0);
#endif
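// Half-precision convolution: l.weights_gpu is assumed to already hold fp16
// data (converted once in DealWeightBuffer); alpha = 1, beta = 0, so the
// result simply overwrites the output buffer.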
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
input,
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&zero,
l.dstTensorDesc,
output);
checkcudnnerror(stat);
#ifdef GETDATATYPE
/*if (GetDataType() == CUDNN_DATA_FLOAT)
{
OutPutGPUMemory((float *)publicMemory[1], l.outputs, 0);
cudnnStatus_t stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)publicMemory[0], l.outputs, 0);
stat = cudnnConvolutionForward(cudnn_handle(),
&one,
l.srcTensorDesc,
net.input_gpu,
publicMemory[0],
l.weightDesc,
l.weights_gpu,
l.convDesc,
l.fw_algo,
net.workspace,
l.workspace_size,
&one,
l.dstTensorDesc,
l.output_gpu);
publicMemory[0]);
checkcudnnerror(stat);
OutPutGPUMemory((float*)l.output_gpu, l.outputs, 0);
cuda_convert_f32_to_f16((float *)publicMemory[1], l.outputs, (half*)publicMemory[0]);
cudaError_t stats = cudaMemcpy(publicMemory[1], publicMemory[0], l.outputs * sizeof(float), cudaMemcpyDeviceToDevice);
}*/
#endif
add_bias_activation_half_gpu((half*)output, (half*)l.biases_gpu, l.batch, l.n, l.out_w* l.out_h,l.activation
,prop->bUnSupportActivate);
if (prop->bOut32)
{
cuda_convert_f16_to_f32((half*)output, l.outputs, l.output_gpu);
}
#ifdef FORWARD_CONVOLUTIONAL_LAYER_GPUHALF
OutPutGPUMemory(l.output_gpu, l.outputs, 0);
// exit(0);
#endif
#ifdef MEMORYDEBUG
printf("End Forword Cudnn\n");
#endif
add_bias_gpu(l.output_gpu, l.biases_gpu, l.batch, l.n, l.out_w * l.out_h);
if(prop->bUnSupportActivate) activate_array_ongpu(l.output_gpu, l.outputs * l.batch, l.activation);
//if(l.dot > 0) dot_error_gpu(l);
if (l.binary || l.xnor) swap_binary(&l);
} |
1c65538d146c2ee1661ad799fe1ed2a7363e7300.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2015-2023 by XGBoost Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <algorithm>
#include <cmath>
#include <cstdint> // std::int32_t
#include <memory>
#include <vector>
#include "../common/common.h"
#include "../common/linalg_op.h"
#include "../common/numeric.h" // Reduce
#include "../common/optional_weight.h" // OptionalWeights
#include "../common/pseudo_huber.h"
#include "../common/stats.h"
#include "../common/threading_utils.h"
#include "../common/transform.h"
#include "./regression_loss.h"
#include "adaptive.h"
#include "init_estimation.h" // FitIntercept
#include "xgboost/base.h"
#include "xgboost/context.h" // Context
#include "xgboost/data.h" // MetaInfo
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/linalg.h"
#include "xgboost/logging.h"
#include "xgboost/objective.h" // ObjFunction
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/tree_model.h" // RegTree
#if defined(XGBOOST_USE_CUDA)
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
namespace {
void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) {
CheckInitInputs(info);
CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels.";
}
} // anonymous namespace
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public XGBoostParameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public FitIntercept {
protected:
HostDeviceVector<float> additional_input_;
public:
// 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight
RegLossObj(): additional_input_(3) {}
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return Loss::Info(); }
bst_target_t Targets(MetaInfo const& info) const override {
// Multi-target regression.
return ::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
auto scale_pos_weight = param_.scale_pos_weight;
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
const size_t nthreads = ctx_->Threads();
bool on_device = device >= 0;
// On CPU we run the transformation with each thread processing a contiguous block of data
// for better performance.
const size_t n_data_blocks = ::max(static_cast<size_t>(1), (on_device ? ndata : nthreads));
const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks);
auto const n_targets = ::max(info.labels.Shape(1), static_cast<size_t>(1));
common::Transform<>::Init(
[block_size, ndata, n_targets] XGBOOST_DEVICE(
size_t data_block_idx, common::Span<float> _additional_input,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
const bst_float* preds_ptr = _preds.data();
const bst_float* labels_ptr = _labels.data();
const bst_float* weights_ptr = _weights.data();
GradientPair* out_gpair_ptr = _out_gpair.data();
const size_t begin = data_block_idx*block_size;
const size_t end = ::min(ndata, begin + block_size);
const float _scale_pos_weight = _additional_input[1];
const bool _is_null_weight = _additional_input[2];
for (size_t idx = begin; idx < end; ++idx) {
bst_float p = Loss::PredTransform(preds_ptr[idx]);
bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets];
bst_float label = labels_ptr[idx];
if (label == 1.0f) {
w *= _scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_additional_input[0] = 0;
}
out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device)
.Eval(&additional_input_, out_gpair, &preds, info.labels.Data(),
&info.weights_);
auto const flag = additional_input_.HostVector().begin()[0];
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Loss::Name());
out["reg_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["reg_loss_param"], ¶m_);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name())
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name())
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name())
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name())
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name())
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
// End deprecated
class PseudoHuberRegression : public FitIntercept {
PesudoHuberParam param_;
public:
void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); }
ObjInfo Task() const override { return ObjInfo::kRegression; }
bst_target_t Targets(MetaInfo const& info) const override {
return ::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto slope = param_.huber_slope;
CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0.";
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
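// With residual z = predt - y and slope d this yields
//   grad = z / sqrt(1 + (z/d)^2), hess = (1 + (z/d)^2)^(-3/2),
// i.e. the first and second derivatives of the pseudo-Huber loss
// d^2 * (sqrt(1 + (z/d)^2) - 1), each scaled by the sample weight.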
const float z = predt(i) - y;
const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope));
float grad = z / scale_sqrt;
auto scale = common::Sqr(slope) + common::Sqr(z);
float hess = common::Sqr(slope) / (scale * scale_sqrt);
auto w = weight[sample_id];
gpair(i) = {grad * w, hess * w};
});
}
const char* DefaultEvalMetric() const override { return "mphe"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:pseudohubererror");
out["pseudo_huber_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
if (config.find("pseudo_huber_param") == config.cend()) {
// The parameter is added in 1.6.
return;
}
FromJson(in["pseudo_huber_param"], ¶m_);
}
};
XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror")
.describe("Regression Pseudo Huber error.")
.set_body([]() { return new PseudoHuberRegression(); });
// declare parameter
struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public FitIntercept {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
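// Poisson NLL with log link: grad = d/dp (exp(p) - y*p) = exp(p) - y.
// The hessian exp(p) is inflated by exp(max_delta_step) to limit the size
// of each update (the role of max_delta_step documented above).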
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("count:poisson");
out["poisson_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["poisson_regression_param"], ¶m_);
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Poisson regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public FitIntercept {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort(ctx_);
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
exp_p_sum += ::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
const auto& labels = info.labels.HostView();
double r_k = 0;
double s_k = 0;
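// r_k and s_k are running sums, over the events processed so far, of
// 1/exp_p_sum and 1/exp_p_sum^2, where exp_p_sum is the partial-likelihood
// denominator: the sum of exp(pred) over the current risk set.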
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
const double exp_p = ::exp(p);
const double w = info.GetWeight(ind);
const double y = labels(ind);
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*)
preds[j] = ::exp(preds[j]);
});
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:cox");
}
void LoadConfig(Json const&) override {}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public FitIntercept {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = ctx_->gpu_id;
out_gpair->Resize(ndata);
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y <= 0.0f) {
_label_correct[0] = 0;
}
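// Gamma negative log-likelihood with log link (up to constants): with
// mu = exp(p), grad = 1 - y/mu and hess = y/mu, each scaled by the weight.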
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be positive.";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:gamma");
}
void LoadConfig(Json const&) override {}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public FitIntercept {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
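// Tweedie NLL with log link and variance power rho (up to constants):
//   l(p) = -y * exp((1-rho)*p) / (1-rho) + exp((2-rho)*p) / (2-rho);
// grad and hess below are its first and second derivatives in p.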
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device)
.Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return ::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:tweedie");
out["tweedie_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["tweedie_regression_param"], ¶m_);
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
class MeanAbsoluteError : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
bst_target_t Targets(MetaInfo const& info) const override {
return ::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sign = [](auto x) {
return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0));
};
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
auto grad = sign(predt(i) - y) * weight[sample_id];
auto hess = weight[sample_id];
gpair(i) = GradientPair{grad, hess};
});
}
void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override {
CheckInitInputs(info);
base_margin->Reshape(this->Targets(info));
double w{0.0};
if (info.weights_.Empty()) {
w = static_cast<double>(info.num_row_);
} else {
w = common::Reduce(ctx_, info.weights_);
}
if (info.num_row_ == 0) {
auto out = base_margin->HostView();
out(0) = 0;
} else {
linalg::Vector<float> temp;
common::Median(ctx_, info.labels, info.weights_, &temp);
common::Mean(ctx_, temp, base_margin);
}
CHECK_EQ(base_margin->Size(), 1);
auto out = base_margin->HostView();
// weighted avg
std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out),
[w](float v) { return v * w; });
collective::Allreduce<collective::Operation::kSum>(out.Values().data(), out.Values().size());
collective::Allreduce<collective::Operation::kSum>(&w, 1);
if (common::CloseTo(w, 0.0)) {
// Mostly for handling empty dataset test.
LOG(WARNING) << "Sum of weights is close to 0.0, skipping base score estimation.";
out(0) = ObjFunction::DefaultBaseScore();
return;
}
std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out),
[w](float v) { return v / w; });
}
void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
HostDeviceVector<float> const& prediction, std::int32_t group_idx,
RegTree* p_tree) const override {
::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, prediction, 0.5, p_tree);
}
const char* DefaultEvalMetric() const override { return "mae"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:absoluteerror");
}
void LoadConfig(Json const& in) override {
CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"});
}
};
XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror")
.describe("Mean absoluate error.")
.set_body([]() { return new MeanAbsoluteError(); });
} // namespace obj
} // namespace xgboost
| 1c65538d146c2ee1661ad799fe1ed2a7363e7300.cu | /**
* Copyright 2015-2023 by XGBoost Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <algorithm>
#include <cmath>
#include <cstdint> // std::int32_t
#include <memory>
#include <vector>
#include "../common/common.h"
#include "../common/linalg_op.h"
#include "../common/numeric.h" // Reduce
#include "../common/optional_weight.h" // OptionalWeights
#include "../common/pseudo_huber.h"
#include "../common/stats.h"
#include "../common/threading_utils.h"
#include "../common/transform.h"
#include "./regression_loss.h"
#include "adaptive.h"
#include "init_estimation.h" // FitIntercept
#include "xgboost/base.h"
#include "xgboost/context.h" // Context
#include "xgboost/data.h" // MetaInfo
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/linalg.h"
#include "xgboost/logging.h"
#include "xgboost/objective.h" // ObjFunction
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#include "xgboost/tree_model.h" // RegTree
#if defined(XGBOOST_USE_CUDA)
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
namespace {
void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) {
CheckInitInputs(info);
CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels.";
}
} // anonymous namespace
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public XGBoostParameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public FitIntercept {
protected:
HostDeviceVector<float> additional_input_;
public:
// 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight
RegLossObj(): additional_input_(3) {}
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return Loss::Info(); }
bst_target_t Targets(MetaInfo const& info) const override {
// Multi-target regression.
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
auto scale_pos_weight = param_.scale_pos_weight;
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
const size_t nthreads = ctx_->Threads();
bool on_device = device >= 0;
// On CPU we run the transformation with each thread processing a contiguous block of data
// for better performance.
const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? ndata : nthreads));
const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks);
auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1));
common::Transform<>::Init(
[block_size, ndata, n_targets] XGBOOST_DEVICE(
size_t data_block_idx, common::Span<float> _additional_input,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
const bst_float* preds_ptr = _preds.data();
const bst_float* labels_ptr = _labels.data();
const bst_float* weights_ptr = _weights.data();
GradientPair* out_gpair_ptr = _out_gpair.data();
const size_t begin = data_block_idx*block_size;
const size_t end = std::min(ndata, begin + block_size);
const float _scale_pos_weight = _additional_input[1];
const bool _is_null_weight = _additional_input[2];
for (size_t idx = begin; idx < end; ++idx) {
bst_float p = Loss::PredTransform(preds_ptr[idx]);
bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets];
bst_float label = labels_ptr[idx];
if (label == 1.0f) {
w *= _scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_additional_input[0] = 0;
}
out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device)
.Eval(&additional_input_, out_gpair, &preds, info.labels.Data(),
&info.weights_);
auto const flag = additional_input_.HostVector().begin()[0];
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Loss::Name());
out["reg_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["reg_loss_param"], ¶m_);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name())
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name())
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name())
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name())
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name())
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
// End deprecated
class PseudoHuberRegression : public FitIntercept {
PesudoHuberParam param_;
public:
void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); }
ObjInfo Task() const override { return ObjInfo::kRegression; }
bst_target_t Targets(MetaInfo const& info) const override {
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto slope = param_.huber_slope;
CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0.";
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
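// With residual z = predt - y and slope d this yields
//   grad = z / sqrt(1 + (z/d)^2), hess = (1 + (z/d)^2)^(-3/2),
// i.e. the first and second derivatives of the pseudo-Huber loss
// d^2 * (sqrt(1 + (z/d)^2) - 1), each scaled by the sample weight.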
const float z = predt(i) - y;
const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope));
float grad = z / scale_sqrt;
auto scale = common::Sqr(slope) + common::Sqr(z);
float hess = common::Sqr(slope) / (scale * scale_sqrt);
auto w = weight[sample_id];
gpair(i) = {grad * w, hess * w};
});
}
const char* DefaultEvalMetric() const override { return "mphe"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:pseudohubererror");
out["pseudo_huber_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
if (config.find("pseudo_huber_param") == config.cend()) {
// The parameter is added in 1.6.
return;
}
FromJson(in["pseudo_huber_param"], ¶m_);
}
};
XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror")
.describe("Regression Pseudo Huber error.")
.set_body([]() { return new PseudoHuberRegression(); });
// declare parameter
struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public FitIntercept {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
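// Poisson NLL with log link: grad = d/dp (exp(p) - y*p) = exp(p) - y.
// The hessian exp(p) is inflated by exp(max_delta_step) to limit the size
// of each update (the role of max_delta_step documented above).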
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("count:poisson");
out["poisson_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["poisson_regression_param"], ¶m_);
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Poisson regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public FitIntercept {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort(ctx_);
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
exp_p_sum += std::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
const auto& labels = info.labels.HostView();
double r_k = 0;
double s_k = 0;
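// r_k and s_k are running sums, over the events processed so far, of
// 1/exp_p_sum and 1/exp_p_sum^2, where exp_p_sum is the partial-likelihood
// denominator: the sum of exp(pred) over the current risk set.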
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
const double exp_p = std::exp(p);
const double w = info.GetWeight(ind);
const double y = labels(ind);
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*)
preds[j] = std::exp(preds[j]);
});
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:cox");
}
void LoadConfig(Json const&) override {}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public FitIntercept {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = ctx_->gpu_id;
out_gpair->Resize(ndata);
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y <= 0.0f) {
_label_correct[0] = 0;
}
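// Gamma negative log-likelihood with log link (up to constants): with
// mu = exp(p), grad = 1 - y/mu and hess = y/mu, each scaled by the weight.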
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be positive.";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:gamma");
}
void LoadConfig(Json const&) override {}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public FitIntercept {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
const float rho = param_.tweedie_variance_power;
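// The gradient below is d/dp of the Tweedie deviance under a log link,
// -y*exp((1-rho)*p)/(1-rho) + exp((2-rho)*p)/(2-rho), giving
// grad = -y*exp((1-rho)*p) + exp((2-rho)*p) and the matching hessian.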
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device)
.Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:tweedie");
out["tweedie_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["tweedie_regression_param"], ¶m_);
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
class MeanAbsoluteError : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
bst_target_t Targets(MetaInfo const& info) const override {
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sign = [](auto x) {
return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0));
};
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
auto grad = sign(predt(i) - y) * weight[sample_id];
auto hess = weight[sample_id];
gpair(i) = GradientPair{grad, hess};
});
}
void InitEstimation(MetaInfo const& info, linalg::Tensor<float, 1>* base_margin) const override {
CheckInitInputs(info);
base_margin->Reshape(this->Targets(info));
double w{0.0};
if (info.weights_.Empty()) {
w = static_cast<double>(info.num_row_);
} else {
w = common::Reduce(ctx_, info.weights_);
}
if (info.num_row_ == 0) {
auto out = base_margin->HostView();
out(0) = 0;
} else {
linalg::Vector<float> temp;
common::Median(ctx_, info.labels, info.weights_, &temp);
common::Mean(ctx_, temp, base_margin);
}
CHECK_EQ(base_margin->Size(), 1);
auto out = base_margin->HostView();
// weighted avg
std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out),
[w](float v) { return v * w; });
collective::Allreduce<collective::Operation::kSum>(out.Values().data(), out.Values().size());
collective::Allreduce<collective::Operation::kSum>(&w, 1);
if (common::CloseTo(w, 0.0)) {
// Mostly for handling empty dataset test.
LOG(WARNING) << "Sum of weights is close to 0.0, skipping base score estimation.";
out(0) = ObjFunction::DefaultBaseScore();
return;
}
std::transform(linalg::cbegin(out), linalg::cend(out), linalg::begin(out),
[w](float v) { return v / w; });
}
void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
HostDeviceVector<float> const& prediction, std::int32_t group_idx,
RegTree* p_tree) const override {
::xgboost::obj::UpdateTreeLeaf(ctx_, position, group_idx, info, prediction, 0.5, p_tree);
}
const char* DefaultEvalMetric() const override { return "mae"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:absoluteerror");
}
void LoadConfig(Json const& in) override {
CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"});
}
};
XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror")
.describe("Mean absoluate error.")
.set_body([]() { return new MeanAbsoluteError(); });
} // namespace obj
} // namespace xgboost
|
f3092163644484ec8adc244bab4a11e8d496a704.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
// check if the device result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements, size_elements, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
Matrix *Md, *Nd, *Pd;
Matrix Mdevice = AllocateDeviceMatrix(M);
Md = &Mdevice;
Matrix Ndevice = AllocateDeviceMatrix(N);
Nd = &Ndevice;
Matrix Pdevice = AllocateDeviceMatrix(P);
Pd = &Pdevice;
CopyToDeviceMatrix(*Md, M);
CopyToDeviceMatrix(*Nd, N);
dim3 dimGrid(1,1);
dim3 dimBlock(MATRIX_SIZE,MATRIX_SIZE);
hipLaunchKernelGGL(( MatrixMulKernel), dim3(dimGrid), dim3(dimBlock) , 0, 0, *Md, *Nd, *Pd);
hipDeviceSynchronize();
CopyFromDeviceMatrix(P, *Pd);
hipFree(Mdevice.elements);
hipFree(Ndevice.elements);
hipFree(Pdevice.elements);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
hipMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
hipMemcpy(Mdevice.elements, Mhost.elements, size,
hipMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
hipMemcpy(Mhost.elements, Mdevice.elements, size,
hipMemcpyDeviceToHost);
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f);
}
| f3092163644484ec8adc244bab4a11e8d496a704.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Host code.
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cutil.h>
// includes, kernels
#include <matrixmul_kernel.cu>
////////////////////////////////////////////////////////////////////////////////
// declarations, forward
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
Matrix AllocateDeviceMatrix(const Matrix M);
Matrix AllocateMatrix(int height, int width, int init);
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost);
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice);
int ReadFile(Matrix* M, char* file_name);
void WriteFile(Matrix M, char* file_name);
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P);
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv) {
// Matrices for the program
Matrix M;
Matrix N;
Matrix P;
// Number of elements in the solution matrix
// Assuming square matrices, so the sizes of M, N and P are equal
unsigned int size_elements = WP * HP;
int errorM = 0, errorN = 0;
srand(2012);
// Check command line for input matrix files
if(argc != 3 && argc != 4)
{
// No inputs provided
// Allocate and initialize the matrices
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 1);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
}
else
{
// Inputs provided
// Allocate and read source matrices from disk
M = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
N = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
P = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
errorM = ReadFile(&M, argv[1]);
errorN = ReadFile(&N, argv[2]);
// check for read errors
if(errorM != size_elements || errorN != size_elements)
{
printf("Error reading input files %d, %d\n", errorM, errorN);
return 1;
}
}
// M * N on the device
MatrixMulOnDevice(M, N, P);
// compute the matrix multiplication on the CPU for comparison
Matrix reference = AllocateMatrix(MATRIX_SIZE, MATRIX_SIZE, 0);
computeGold(reference.elements, M.elements, N.elements, HM, WM, WN);
// check if the device result is equivalent to the expected solution
CUTBoolean res = cutComparefe(reference.elements, P.elements, size_elements, 0.0001f);
printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED");
// output result if output file is requested
if(argc == 4)
{
WriteFile(P, argv[3]);
}
else if(argc == 2)
{
WriteFile(P, argv[1]);
}
// Free host matrices
free(M.elements);
M.elements = NULL;
free(N.elements);
N.elements = NULL;
free(P.elements);
P.elements = NULL;
return 0;
}
////////////////////////////////////////////////////////////////////////////////
//! Run a simple test for CUDA
////////////////////////////////////////////////////////////////////////////////
void MatrixMulOnDevice(const Matrix M, const Matrix N, Matrix P)
{
//Interface host call to the device kernel code and invoke the kernel
Matrix *Md, *Nd, *Pd;
Matrix Mdevice = AllocateDeviceMatrix(M);
Md = &Mdevice;
Matrix Ndevice = AllocateDeviceMatrix(N);
Nd = &Ndevice;
Matrix Pdevice = AllocateDeviceMatrix(P);
Pd = &Pdevice;
CopyToDeviceMatrix(*Md, M);
CopyToDeviceMatrix(*Nd, N);
dim3 dimGrid(1,1);
dim3 dimBlock(MATRIX_SIZE,MATRIX_SIZE);
MatrixMulKernel<<< dimGrid, dimBlock >>>(*Md, *Nd, *Pd);
cudaDeviceSynchronize();
CopyFromDeviceMatrix(P, *Pd);
cudaFree(Mdevice.elements);
cudaFree(Ndevice.elements);
cudaFree(Pdevice.elements);
}
// Allocate a device matrix of same size as M.
Matrix AllocateDeviceMatrix(const Matrix M)
{
Matrix Mdevice = M;
int size = M.width * M.height * sizeof(float);
cudaMalloc((void**)&Mdevice.elements, size);
return Mdevice;
}
// Allocate a matrix of dimensions height*width
// If init == 0, initialize to all zeroes.
// If init == 1, perform random initialization.
Matrix AllocateMatrix(int height, int width, int init)
{
Matrix M;
M.width = M.pitch = width;
M.height = height;
int size = M.width * M.height;
M.elements = NULL;
M.elements = (float*) malloc(size*sizeof(float));
for(unsigned int i = 0; i < M.height * M.width; i++)
{
M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX);
}
return M;
}
// Copy a host matrix to a device matrix.
void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost)
{
int size = Mhost.width * Mhost.height * sizeof(float);
Mdevice.height = Mhost.height;
Mdevice.width = Mhost.width;
Mdevice.pitch = Mhost.pitch;
cudaMemcpy(Mdevice.elements, Mhost.elements, size,
cudaMemcpyHostToDevice);
}
// Copy a device matrix to a host matrix.
void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice)
{
int size = Mdevice.width * Mdevice.height * sizeof(float);
cudaMemcpy(Mhost.elements, Mdevice.elements, size,
cudaMemcpyDeviceToHost);
}
// Read a 16x16 floating point matrix in from file
int ReadFile(Matrix* M, char* file_name)
{
unsigned int data_read = MATRIX_SIZE*MATRIX_SIZE;
cutReadFilef(file_name, &(M->elements), &data_read, true);
return data_read;
}
// Write a 16x16 floating point matrix to file
void WriteFile(Matrix M, char* file_name)
{
cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f);
}
|
a5d568e7d6b188853c26bedebc59e8bd98de87ed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
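//For example (illustrative numbers only): a pure red pixel (R=255, G=0, B=0)
//gives I = .299f*255, roughly 76, while a neutral grey pixel with R=G=B=128
//gives I = (.299f + .587f + .114f)*128 = 128 because the weights sum to one.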
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int offset = blockIdx.x*numCols + threadIdx.x;
greyImage[offset] = .299f * rgbaImage[offset].x + .587f * rgbaImage[offset].y + .114f * rgbaImage[offset].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numCols, 1, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
| a5d568e7d6b188853c26bedebc59e8bd98de87ed.cu | // Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.
//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
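//For example (illustrative numbers only): a pure red pixel (R=255, G=0, B=0)
//gives I = .299f*255, roughly 76, while a neutral grey pixel with R=G=B=128
//gives I = (.299f + .587f + .114f)*128 = 128 because the weights sum to one.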
#include "utils.h"
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
int offset = blockIdx.x*numCols + threadIdx.x;
greyImage[offset] = .299f * rgbaImage[offset].x + .587f * rgbaImage[offset].y + .114f * rgbaImage[offset].z;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(numCols, 1, 1); //TODO
const dim3 gridSize(numRows, 1, 1); //TODO
rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
e6108410c599d79f1d5170b2a85ee3f08bbfe14f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlat2c.cu, mixed zc -> ds, Wed Jan 2 14:18:51 2019
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
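/*
Illustrative sizing (example values, not taken from any particular caller):
for n = 100 with BLK_X = 64 and BLK_Y = 32, the host wrapper below launches a
magma_ceildiv(100,64) x magma_ceildiv(100,32) = 2 x 4 grid of 64-thread blocks,
and each thread sweeps up to 32 entries of its row.
*/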
__global__
void dlat2s_lower(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
/*
Similar to dlat2s_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void dlat2s_upper(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAT2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAT2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the n-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lat2
*******************************************************************************/
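/*
Usage sketch (illustrative; dA, dSA, n and queue are placeholders, and the
leading dimensions are taken equal to n for simplicity):
magma_int_t info;
magmablas_dlat2s( MagmaLower, n, dA, n, dSA, n, queue, &info );
if ( info != 0 ) {
// at least one entry of A exceeded the single-precision overflow threshold
}
*/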
extern "C" void
magmablas_dlat2s(
magma_uplo_t uplo, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,n) )
*info = -4;
else if ( ldsa < max(1,n) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
hipMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
if (uplo == MagmaLower) {
hipLaunchKernelGGL(( dlat2s_lower), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax);
}
else if (uplo == MagmaUpper) {
hipLaunchKernelGGL(( dlat2s_upper), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, A, lda, SA, ldsa, rmax);
}
hipMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
| e6108410c599d79f1d5170b2a85ee3f08bbfe14f.cu | /*
-- MAGMA (version 2.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date January 2019
@generated from magmablas/zlat2c.cu, mixed zc -> ds, Wed Jan 2 14:18:51 2019
@author Mark Gates
*/
#include "magma_internal.h"
// mixed precision generation has issues with SINGLE PRECISION, so use PRECISION_z
#define PRECISION_d
#define BLK_X 64
#define BLK_Y 32
// TODO get rid of global variable!
static __device__ int flag = 0;
/*
Divides matrix into ceil( n/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
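/*
Illustrative sizing (example values, not taken from any particular caller):
for n = 100 with BLK_X = 64 and BLK_Y = 32, the host wrapper below launches a
magma_ceildiv(100,64) x magma_ceildiv(100,32) = 2 x 4 grid of 64-thread blocks,
and each thread sweeps up to 32 entries of its row.
*/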
__global__
void dlat2s_lower(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < n && ind + BLK_X > iby ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
/*
Similar to dlat2s_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to dlag2s and zlaset.
*/
__global__
void dlat2s_upper(
int n,
const double *A, int lda,
float *SA, int ldsa,
double rmax )
{
double tmp;
double neg_rmax = - rmax;
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < n && ind < iby + BLK_Y ) {
A += ind + iby*lda;
SA += ind + iby*ldsa;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
tmp = A[j*lda];
if ( (MAGMA_D_REAL(tmp) < neg_rmax) || (MAGMA_D_REAL(tmp) > rmax)
#if defined(PRECISION_z) || defined(PRECISION_c)
|| (MAGMA_D_IMAG(tmp) < neg_rmax) || (MAGMA_D_IMAG(tmp) > rmax)
#endif
)
{
flag = 1;
}
SA[j*ldsa] = MAGMA_S_MAKE( MAGMA_D_REAL(tmp),
MAGMA_D_IMAG(tmp) );
}
}
}
}
}
/***************************************************************************//**
Purpose
-------
DLAT2S converts a double-real matrix, A,
to a single-real matrix, SA.
RMAX is the overflow for the single-real arithmetic.
DLAT2S checks that all the entries of A are between -RMAX and
RMAX. If not, the conversion is aborted and a flag is raised.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix A to be converted.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
n INTEGER
The number of columns of the matrix A. n >= 0.
@param[in]
A DOUBLE PRECISION array, dimension (LDA,n)
On entry, the n-by-n coefficient matrix A.
@param[in]
lda INTEGER
The leading dimension of the array A. LDA >= max(1,n).
@param[out]
SA SINGLE PRECISION array, dimension (LDSA,n)
On exit, if INFO=0, the n-by-n coefficient matrix SA;
if INFO > 0, the content of SA is unspecified.
@param[in]
ldsa INTEGER
The leading dimension of the array SA. LDSA >= max(1,n).
@param[out]
info INTEGER
- = 0: successful exit.
- < 0: if INFO = -i, the i-th argument had an illegal value
- = 1: an entry of the matrix A is greater than the SINGLE PRECISION
overflow threshold, in this case, the content
of SA on exit is unspecified.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_lat2
*******************************************************************************/
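/*
Usage sketch (illustrative; dA, dSA, n and queue are placeholders, and the
leading dimensions are taken equal to n for simplicity):
magma_int_t info;
magmablas_dlat2s( MagmaLower, n, dA, n, dSA, n, queue, &info );
if ( info != 0 ) {
// at least one entry of A exceeded the single-precision overflow threshold
}
*/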
extern "C" void
magmablas_dlat2s(
magma_uplo_t uplo, magma_int_t n,
magmaDouble_const_ptr A, magma_int_t lda,
magmaFloat_ptr SA, magma_int_t ldsa,
magma_queue_t queue,
magma_int_t *info )
{
*info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
*info = -1;
else if ( n < 0 )
*info = -2;
else if ( lda < max(1,n) )
*info = -4;
else if ( ldsa < max(1,n) )
*info = -6;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //*info;
}
/* quick return */
if ( n == 0 ) {
return;
}
double rmax = (double)lapackf77_slamch("O");
dim3 threads( BLK_X, 1 );
dim3 grid( magma_ceildiv( n, BLK_X ), magma_ceildiv( n, BLK_Y ) );
cudaMemcpyToSymbol( flag, info, sizeof(flag) ); // flag = 0
if (uplo == MagmaLower) {
dlat2s_lower<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
}
else if (uplo == MagmaUpper) {
dlat2s_upper<<< grid, threads, 0, queue->cuda_stream() >>> (n, A, lda, SA, ldsa, rmax);
}
cudaMemcpyFromSymbol( info, flag, sizeof(flag) ); // info = flag
}
|
c35de82fe25bcfc696cf8d0cd4829f7bb523a767.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdint>
namespace arb {
namespace gpu {
template <typename T, typename I>
__global__
void fill_kernel(T* v, T value, I n) {
auto tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid < n) {
v[tid] = value;
}
}
inline unsigned grid_dim(std::size_t n, unsigned block_dim) {
return (n+block_dim-1)/block_dim;
}
void fill8(uint8_t* v, uint8_t value, std::size_t n) {
unsigned block_dim = 192;
hipLaunchKernelGGL(( fill_kernel), dim3(grid_dim(n, block_dim)), dim3(block_dim), 0, 0, v, value, n);
};
void fill16(uint16_t* v, uint16_t value, std::size_t n) {
unsigned block_dim = 192;
hipLaunchKernelGGL(( fill_kernel), dim3(grid_dim(n, block_dim)), dim3(block_dim), 0, 0, v, value, n);
};
void fill32(uint32_t* v, uint32_t value, std::size_t n) {
unsigned block_dim = 192;
hipLaunchKernelGGL(( fill_kernel), dim3(grid_dim(n, block_dim)), dim3(block_dim), 0, 0, v, value, n);
};
void fill64(uint64_t* v, uint64_t value, std::size_t n) {
unsigned block_dim = 192;
hipLaunchKernelGGL(( fill_kernel), dim3(grid_dim(n, block_dim)), dim3(block_dim), 0, 0, v, value, n);
};
} // namespace gpu
} // namespace arb
| c35de82fe25bcfc696cf8d0cd4829f7bb523a767.cu | #include <cstdint>
namespace arb {
namespace gpu {
template <typename T, typename I>
__global__
void fill_kernel(T* v, T value, I n) {
auto tid = threadIdx.x + blockDim.x*blockIdx.x;
if(tid < n) {
v[tid] = value;
}
}
inline unsigned grid_dim(std::size_t n, unsigned block_dim) {
return (n+block_dim-1)/block_dim;
}
void fill8(uint8_t* v, uint8_t value, std::size_t n) {
unsigned block_dim = 192;
fill_kernel<<<grid_dim(n, block_dim), block_dim>>>(v, value, n);
};
void fill16(uint16_t* v, uint16_t value, std::size_t n) {
unsigned block_dim = 192;
fill_kernel<<<grid_dim(n, block_dim), block_dim>>>(v, value, n);
};
void fill32(uint32_t* v, uint32_t value, std::size_t n) {
unsigned block_dim = 192;
fill_kernel<<<grid_dim(n, block_dim), block_dim>>>(v, value, n);
};
void fill64(uint64_t* v, uint64_t value, std::size_t n) {
unsigned block_dim = 192;
fill_kernel<<<grid_dim(n, block_dim), block_dim>>>(v, value, n);
};
} // namespace gpu
} // namespace arb
|
ea06b00c806254fe73764278ce060d4e88e7f9f6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "maths_convolution.h"
#include <rocblas.h>
#include<hip/hip_runtime_api.h>
#include<hip/hip_runtime.h>
__global__ void convolution(float *I, const float* __restrict__ M, float *P,int channels, int width, int height, int TILE_WIDTH, int maskLength)
{
__shared__ float N_ds[7][7];
int k;
int w_l = TILE_WIDTH + maskLength - 1;
for (k = 0; k < channels; k++) { // First batch loading
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
destY = dest / w_l, destX = dest % w_l,
srcY = blockIdx.y * TILE_WIDTH + destY - maskLength/2,
srcX = blockIdx.x * TILE_WIDTH + destX - maskLength/2,
src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
for (int iter=1; iter <= (w_l * w_l) / (TILE_WIDTH*TILE_WIDTH); iter++)
{ // Second batch loading
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + iter*(TILE_WIDTH * TILE_WIDTH);
destY = dest / w_l, destX = dest % w_l;
srcY = blockIdx.y * TILE_WIDTH + destY - maskLength/2;
srcX = blockIdx.x * TILE_WIDTH + destX - maskLength/2;
src = (srcY * width + srcX) * channels + k;
if (destY < w_l && destX < w_l)
{
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
}
}
__syncthreads();
float accum = 0;
int y, x;
for (y = 0; y < maskLength; y++)
for (x = 0; x < maskLength; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * maskLength + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if ((y >= (maskLength - 1) / 2) && (x >= (maskLength - 1) / 2) && (y < height - (maskLength - 1) / 2) && (x < width - (maskLength - 1) / 2))
P[((y-(maskLength - 1) / 2) * maskLength + x-(maskLength - 1) / 2) * channels + k] = accum;
__syncthreads();
}
}
Array3Dd convolution(Array3Dd X, const Array2Dd &Ker, string shape) // use a raw array instead of vector for the convolution; about 10% faster
{
if (shape != "valid" && shape != "full")
{
cout << "wrong convolution shape control!" << endl << "convolution() failed!" << endl;
Array3Dd temp;
return temp;
}
if (X.size() <= 0)
{
cout << "Array3Dd is wrong!" << endl << "convolution() failed!" << endl;
Array3Dd temp;
return temp;
}
int Ker_row = Ker.at(0).size();
int Ker_col = Ker.size();
if (shape == "full")
{
X.expand_to_full_size(Ker_col, Ker_row);
}
int X_page = X.size();
int X_row = X.at(0).at(0).size();
int X_col = X.at(0).size();
int i, j, k;
if (shape == "valid" && (X_row < Ker_row || X_col < Ker_col))
{
cout << "X size is smaller than Ker size!" << endl << "convolution() failed!" << endl;
Array3Dd temp;
return temp;
}
int conv_row = X.at(0).at(0).size() - Ker.at(0).size() + 1; // create the convolution output conv and initialize it to 0
int conv_col = X.at(0).size() - Ker.size() + 1;
Array3Dd convn(X_page, conv_col, conv_row, 0);
double *arr_X = new double[X_page * X_row * X_col]();
double *arr_Ker = new double[Ker_row * Ker_col]();
for (i = 0; i < X_page; i++) // copy the vector data into a flat array
{
for (j = 0; j < X_row; j++)
{
for (k = 0; k < X_col; k++)
{
arr_X[i * (X_row * X_col) + j * X_col + k] = X.at(i).at(k).at(j); // fill arr_X
if ((i == 0) && (j < Ker_row) && (k < Ker_col)) // fill arr_Ker
{
arr_Ker[j * Ker_col + k] = Ker.at(Ker_col - 1 - k).at(Ker_row - 1 - j);// flip the kernel in both x and y
}
}
}
}
int maskLength = 5;
int imageChannels = X_page;
int imageWidth = X_row;
int imageHeight = X_col;
int TILE_WIDTH = 3;
float * hostOutputImageData;
float * deviceInputImageData;
float * deviceOutputImageData;
float * deviceMaskData;
hostOutputImageData = (float *) malloc(sizeof(float)*(imageWidth - maskLength + 1)*(imageHeight - maskLength + 1)*imageChannels);
hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, (imageWidth - maskLength + 1) * (imageHeight - maskLength + 1) * imageChannels * sizeof(float));
hipMalloc((void **) &deviceMaskData, maskLength * maskLength * sizeof(float));
hipMemcpy(deviceInputImageData, //copy image to device
arr_X,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyHostToDevice);
hipMemcpy(deviceMaskData, //copy mask to device
arr_Ker,
maskLength * maskLength * sizeof(float),
hipMemcpyHostToDevice);
dim3 dimGrid(((imageWidth-1)/TILE_WIDTH)+1, ((imageHeight-1)/TILE_WIDTH)+1,1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
hipLaunchKernelGGL(( convolution), dim3(dimGrid),dim3(dimBlock), 0, 0, deviceInputImageData, deviceMaskData, deviceOutputImageData,
imageChannels, imageWidth, imageHeight, TILE_WIDTH, maskLength);
hipMemcpy(hostOutputImageData, //copy result to host
deviceOutputImageData,
(imageWidth - maskLength + 1) * (imageHeight - maskLength + 1) * imageChannels * imageChannels * sizeof(float),
hipMemcpyDeviceToHost);
int id = 0;
for (i = 0; i < X_page; i++) {
for (j = 0; j < conv_row; j++) {
for (k = 0; k < conv_col; k++) {
convn.at(i).at(k).at(j) = hostOutputImageData[id++];
}
}
}
delete[] arr_X;
delete[] arr_Ker;
free(hostOutputImageData);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceMaskData);
return convn;
}
Array2Dd convolution(const Array3Dd &X, const Array3Dd &Ker, string shape)
{
if (shape != "valid" && shape != "full")
{
cout << "wrong convolution shape control!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
int page_X = X.size();
int page_Ker = Ker.size();
if (page_X != page_Ker)
{
cout << "page size not equal!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
Array2Dd sum;
for (int i = 0; i < page_X; ++i)
{
sum.add(convolution(X.at(i), Ker.at(i), shape));
}
return sum;
}
Array2Dd convolution(Array2Dd X, Array2Dd Ker, string shape) // use a raw array instead of vector for the convolution; about 30x faster
{
if (shape != "valid" && shape != "full")
{
cout << "wrong convolution shape control!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
int Ker_row = Ker.at(0).size();
int Ker_col = Ker.size();
if (shape == "full")
{
X.expand_to_full_size(Ker_col, Ker_row);
}
int X_row = X.at(0).size();
int X_col = X.size();
if (shape == "valid" && (X_row < Ker_row || X_col < Ker_col))
{
cout << "X size is smaller than Ker size!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
int conv_row = X.at(0).size() - Ker.at(0).size() + 1; // create the convolution output conv and initialize it to 0
int conv_col = X.size() - Ker.size() + 1;
Array2Dd conv(conv_col, conv_row, 0);
double *arr_X = new double[X_row * X_col]();
double *arr_Ker = new double[Ker_row * Ker_col]();
int i, j;
for (i = 0; i < X_row; i++)
{
for (j = 0; j < X_col; j++)
{
arr_X[i * X_col + j] = X.at(j).at(i); // fill arr_X
if ((i < Ker_row) && (j < Ker_col)) // fill arr_Ker
{
arr_Ker[i * Ker_col + j] = Ker.at(Ker_col - 1 - j).at(Ker_row - 1 - i); // flip the kernel in both x and y
}
}
}
int row, col;
for (i = 0; i < conv_row; i++)
{
for (j = 0; j < conv_col; j++)
{
double sum_ij = 0; // value of the convolution at point (i,j)
for (row = i; row < i + Ker_row; row++)
{
for (col = j; col < j + Ker_col; col++)
{
sum_ij += arr_X[row * X_col + col] * arr_Ker[(row - i) * Ker_col + (col - j)];
}
}
conv.at(j).at(i) = sum_ij;
}
}
delete[] arr_X;
delete[] arr_Ker;
return conv;
}
| ea06b00c806254fe73764278ce060d4e88e7f9f6.cu | #include "maths_convolution.h"
#include <cublas.h>
#include<cuda_runtime_api.h>
#include<cuda.h>
__global__ void convolution(float *I, const float* __restrict__ M, float *P,int channels, int width, int height, int TILE_WIDTH, int maskLength)
{
__shared__ float N_ds[7][7];
int k;
int w_l = TILE_WIDTH + maskLength - 1;
for (k = 0; k < channels; k++) { // First batch loading
int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
destY = dest / w_l, destX = dest % w_l,
srcY = blockIdx.y * TILE_WIDTH + destY - maskLength/2,
srcX = blockIdx.x * TILE_WIDTH + destX - maskLength/2,
src = (srcY * width + srcX) * channels + k;
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
for (int iter=1; iter <= (w_l * w_l) / (TILE_WIDTH*TILE_WIDTH); iter++)
{ // Second batch loading
dest = threadIdx.y * TILE_WIDTH + threadIdx.x + iter*(TILE_WIDTH * TILE_WIDTH);
destY = dest / w_l, destX = dest % w_l;
srcY = blockIdx.y * TILE_WIDTH + destY - maskLength/2;
srcX = blockIdx.x * TILE_WIDTH + destX - maskLength/2;
src = (srcY * width + srcX) * channels + k;
if (destY < w_l && destX < w_l)
{
if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
N_ds[destY][destX] = I[src];
else
N_ds[destY][destX] = 0.0;
}
}
__syncthreads();
float accum = 0;
int y, x;
for (y = 0; y < maskLength; y++)
for (x = 0; x < maskLength; x++)
accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * maskLength + x];
y = blockIdx.y * TILE_WIDTH + threadIdx.y;
x = blockIdx.x * TILE_WIDTH + threadIdx.x;
if ((y >= (maskLength - 1) / 2) && (x >= (maskLength - 1) / 2) && (y < height - (maskLength - 1) / 2) && (x < width - (maskLength - 1) / 2))
P[((y-(maskLength - 1) / 2) * maskLength + x-(maskLength - 1) / 2) * channels + k] = accum;
__syncthreads();
}
}
Array3Dd convolution(Array3Dd X, const Array2Dd &Ker, string shape) // use a raw array instead of vector for the convolution; about 10% faster
{
if (shape != "valid" && shape != "full")
{
cout << "wrong convolution shape control!" << endl << "convolution() failed!" << endl;
Array3Dd temp;
return temp;
}
if (X.size() <= 0)
{
cout << "Array3Dd is wrong!" << endl << "convolution() failed!" << endl;
Array3Dd temp;
return temp;
}
int Ker_row = Ker.at(0).size();
int Ker_col = Ker.size();
if (shape == "full")
{
X.expand_to_full_size(Ker_col, Ker_row);
}
int X_page = X.size();
int X_row = X.at(0).at(0).size();
int X_col = X.at(0).size();
int i, j, k;
if (shape == "valid" && (X_row < Ker_row || X_col < Ker_col))
{
cout << "X size is smaller than Ker size!" << endl << "convolution() failed!" << endl;
Array3Dd temp;
return temp;
}
int conv_row = X.at(0).at(0).size() - Ker.at(0).size() + 1; // create the convolution output conv and initialize it to 0
int conv_col = X.at(0).size() - Ker.size() + 1;
Array3Dd convn(X_page, conv_col, conv_row, 0);
double *arr_X = new double[X_page * X_row * X_col]();
double *arr_Ker = new double[Ker_row * Ker_col]();
for (i = 0; i < X_page; i++) // copy the vector data into a flat array
{
for (j = 0; j < X_row; j++)
{
for (k = 0; k < X_col; k++)
{
arr_X[i * (X_row * X_col) + j * X_col + k] = X.at(i).at(k).at(j); // fill arr_X
if ((i == 0) && (j < Ker_row) && (k < Ker_col)) // fill arr_Ker
{
arr_Ker[j * Ker_col + k] = Ker.at(Ker_col - 1 - k).at(Ker_row - 1 - j);// flip the kernel in both x and y
}
}
}
}
int maskLength = 5;
int imageChannels = X_page;
int imageWidth = X_row;
int imageHeight = X_col;
int TILE_WIDTH = 3;
float * hostOutputImageData;
float * deviceInputImageData;
float * deviceOutputImageData;
float * deviceMaskData;
hostOutputImageData = (float *) malloc(sizeof(float)*(imageWidth - maskLength + 1)*(imageHeight - maskLength + 1)*imageChannels);
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, (imageWidth - maskLength + 1) * (imageHeight - maskLength + 1) * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceMaskData, maskLength * maskLength * sizeof(float));
cudaMemcpy(deviceInputImageData, //copy image to device
arr_X,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyHostToDevice);
cudaMemcpy(deviceMaskData, //copy mask to device
arr_Ker,
maskLength * maskLength * sizeof(float),
cudaMemcpyHostToDevice);
dim3 dimGrid(((imageWidth-1)/TILE_WIDTH)+1, ((imageHeight-1)/TILE_WIDTH)+1,1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);
convolution<<<dimGrid,dimBlock>>>(deviceInputImageData, deviceMaskData, deviceOutputImageData,
imageChannels, imageWidth, imageHeight, TILE_WIDTH, maskLength);
cudaMemcpy(hostOutputImageData, //copy result to host
deviceOutputImageData,
(imageWidth - maskLength + 1) * (imageHeight - maskLength + 1) * imageChannels * imageChannels * sizeof(float),
cudaMemcpyDeviceToHost);
int id = 0;
for (i = 0; i < X_page; i++) {
for (j = 0; j < conv_row; j++) {
for (k = 0; k < conv_col; k++) {
convn.at(i).at(k).at(j) = hostOutputImageData[id++];
}
}
}
delete[] arr_X;
delete[] arr_Ker;
free(hostOutputImageData);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceMaskData);
return convn;
}
Array2Dd convolution(const Array3Dd &X, const Array3Dd &Ker, string shape)
{
if (shape != "valid" && shape != "full")
{
cout << "wrong convolution shape control!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
int page_X = X.size();
int page_Ker = Ker.size();
if (page_X != page_Ker)
{
cout << "page size not equal!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
Array2Dd sum;
for (int i = 0; i < page_X; ++i)
{
sum.add(convolution(X.at(i), Ker.at(i), shape));
}
return sum;
}
Array2Dd convolution(Array2Dd X, Array2Dd Ker, string shape) // use a raw array instead of vector for the convolution; about 30x faster
{
if (shape != "valid" && shape != "full")
{
cout << "wrong convolution shape control!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
int Ker_row = Ker.at(0).size();
int Ker_col = Ker.size();
if (shape == "full")
{
X.expand_to_full_size(Ker_col, Ker_row);
}
int X_row = X.at(0).size();
int X_col = X.size();
if (shape == "valid" && (X_row < Ker_row || X_col < Ker_col))
{
cout << "X size is smaller than Ker size!" << endl << "convolution() failed!" << endl;
Array2Dd temp;
return temp;
}
int conv_row = X.at(0).size() - Ker.at(0).size() + 1; // create the convolution output conv and initialize it to 0
int conv_col = X.size() - Ker.size() + 1;
Array2Dd conv(conv_col, conv_row, 0);
double *arr_X = new double[X_row * X_col]();
double *arr_Ker = new double[Ker_row * Ker_col]();
int i, j;
for (i = 0; i < X_row; i++)
{
for (j = 0; j < X_col; j++)
{
arr_X[i * X_col + j] = X.at(j).at(i); // fill arr_X
if ((i < Ker_row) && (j < Ker_col)) // fill arr_Ker
{
arr_Ker[i * Ker_col + j] = Ker.at(Ker_col - 1 - j).at(Ker_row - 1 - i); // flip the kernel in both x and y
}
}
}
int row, col;
for (i = 0; i < conv_row; i++)
{
for (j = 0; j < conv_col; j++)
{
double sum_ij = 0; // value of the convolution at point (i,j)
for (row = i; row < i + Ker_row; row++)
{
for (col = j; col < j + Ker_col; col++)
{
sum_ij += arr_X[row * X_col + col] * arr_Ker[(row - i) * Ker_col + (col - j)];
}
}
conv.at(j).at(i) = sum_ij;
}
}
delete[] arr_X;
delete[] arr_Ker;
return conv;
}
|
e400fd4880bc58636071fe820a8ffb5aa2e5aa77.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <chrono>
// Input size
int const BATCH = 1; //Must be 1 in this program
int const DEPTH = 3;
int const WIDTH = 2048;
int const LENGTH = 2048;
// Kernel characteristics
int const ZPADX = 0;
int const ZPADY = 0;
int const STRIDEX = 1;
int const STRIDEY = 1;
int const CONV_RECP_SIZEX = 3;
int const CONV_RECP_SIZEY = 3;
int const NUM_OF_KERNELS = 1;
// Convolution output characteristics
int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1);
int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1);
// transformation matrix characteristics
int const transformSizeY = convLayerSizeY * convLayerSizeX;
int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH;
int const KERNEL_2D_SIZE = CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const MAT_SIZE_ONE_CHANNEL = transformSizeY * CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const NUM_ELEMENTS = transformSizeY * transformSizeX;
int const KERNEL_LIMIT = transformSizeY * DEPTH;
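// With the constants above (2048 x 2048 x 3 input, 3x3 receptive field, stride 1,
// no zero padding): convLayerSizeX = convLayerSizeY = (2048 - 3)/1 + 1 = 2046,
// transformSizeY = 2046*2046 = 4186116 rows and transformSizeX = 3*3*3 = 27 columns,
// so the transformed matrix used for the matrix-multiply formulation holds
// NUM_ELEMENTS = 4186116 * 27 floats.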
#define COUT_input if (0) std::cout
#define COUT_result if (0) std::cout
__global__
void transformToMul(float* inputMatrix, float* reducedMatrix)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < KERNEL_LIMIT)
{
int w_out = idx % convLayerSizeX;
int indx = idx / convLayerSizeX;
int h_out = indx % convLayerSizeY;
int h_in = h_out * STRIDEY - ZPADY;
int w_in = w_out * STRIDEX - ZPADX;
int channel_in = indx / convLayerSizeY;
int channel_out = channel_in * KERNEL_2D_SIZE;
reducedMatrix += (channel_out * convLayerSizeY + h_out) * convLayerSizeX + w_out;
inputMatrix += (channel_in * LENGTH + h_in) * WIDTH + w_in;
#pragma unroll
for (int i = 0; i < CONV_RECP_SIZEY; ++i)
{
for (int j = 0; j < CONV_RECP_SIZEX; ++j)
{
int h = h_in + i;
int w = w_in + j;
*reducedMatrix = (h >= 0 && w >= 0 && h < LENGTH && w < WIDTH) ?
inputMatrix[i * WIDTH + j] : 0;
reducedMatrix += transformSizeY;
}
}
}
}
void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump)
{
double w = jump;
for (int b = 0; b < d; b++)
{
for (int c = 0; c < z; c++)
{
COUT_input << "slice: " << c + 1 << "\n";
for (int j = 0; j < y; j++)
{
for (int i = 0; i < x; i++)
{
if (type == -1)
{
matrix[((b * z + c) * y + j) * x + i] = rand() % 10;
}
else if (type == 0)
{
matrix[((b * z + c) * y + j) * x + i] = jump;
}
else
{
matrix[((b * z + c) * y + j) * x + i] = w;
w += jump;
}
COUT_input << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , ";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
}
int main()
{
// Performance test variables
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipError_t cudaStatus;
// Initialize Host data, kernel and output
float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH];
float* hostTransformedInput = new float[transformSizeY * transformSizeX]();
// GENERATING INPUT
COUT_input << "Inputs:\n";
generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1);
// Initializing and allocating Device data, kernels and output
float* deviceInputMatrix;
float* deviceTransformedInput;
cudaStatus = hipMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void **)&deviceTransformedInput, (transformSizeY * transformSizeX) * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Initializing sizes of grid and block of threads
dim3 threadsPerBlock(1024);
dim3 blocksPerGrid(1);
if (transformSizeY * DEPTH > 1024) {
threadsPerBlock.x = 1024;
blocksPerGrid.x = ceil(double(transformSizeY * DEPTH) / double(threadsPerBlock.x));
}
// Run the kernel function and measure time
hipEventRecord(start, 0);
transformToMul << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceTransformedInput);
cudaStatus = hipEventRecord(stop, NULL);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "EventRecord failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventSynchronize(stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "EventSynchronize failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipEventElapsedTime(&time, start, stop);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "ElapsedTime failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "DeviceSynchronize failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// Get the results from device
cudaStatus = hipMemcpy(hostTransformedInput, deviceTransformedInput, (transformSizeX * transformSizeY) * sizeof(float), hipMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// PRINTING RESULTS
COUT_result << "Transformed matrix:\n";
for (int k = 0; k < transformSizeX; k++)
{
for (int j = 0; j < transformSizeY; j++)
{
COUT_result << std::setprecision(1) << std::fixed << hostTransformedInput[k * transformSizeY + j] << " ";
}
COUT_result << "\n";
}
// CLEAN UP
printf("Transform time: %f msec.\n", time);
Error:
hipFree(deviceInputMatrix);
hipFree(deviceTransformedInput);
return 0;
}
| e400fd4880bc58636071fe820a8ffb5aa2e5aa77.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include <chrono>
// Input size
int const BATCH = 1; //Must be 1 in this program
int const DEPTH = 3;
int const WIDTH = 2048;
int const LENGTH = 2048;
// Kernel characteristics
int const ZPADX = 0;
int const ZPADY = 0;
int const STRIDEX = 1;
int const STRIDEY = 1;
int const CONV_RECP_SIZEX = 3;
int const CONV_RECP_SIZEY = 3;
int const NUM_OF_KERNELS = 1;
// Convolution output characteristics
int const convLayerSizeX = ((WIDTH - CONV_RECP_SIZEX + 2 * ZPADX) / STRIDEX + 1);
int const convLayerSizeY = ((LENGTH - CONV_RECP_SIZEY + 2 * ZPADY) / STRIDEY + 1);
// transformation matrix characteristics
int const transformSizeY = convLayerSizeY * convLayerSizeX;
int const transformSizeX = CONV_RECP_SIZEX * CONV_RECP_SIZEY * DEPTH;
int const KERNEL_2D_SIZE = CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const MAT_SIZE_ONE_CHANNEL = transformSizeY * CONV_RECP_SIZEX * CONV_RECP_SIZEY;
int const NUM_ELEMENTS = transformSizeY * transformSizeX;
int const KERNEL_LIMIT = transformSizeY * DEPTH;
#define COUT_input if (0) std::cout
#define COUT_result if (0) std::cout
__global__
void transformToMul(float* inputMatrix, float* reducedMatrix)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < KERNEL_LIMIT)
{
int w_out = idx % convLayerSizeX;
int indx = idx / convLayerSizeX;
int h_out = indx % convLayerSizeY;
int h_in = h_out * STRIDEY - ZPADY;
int w_in = w_out * STRIDEX - ZPADX;
int channel_in = indx / convLayerSizeY;
int channel_out = channel_in * KERNEL_2D_SIZE;
reducedMatrix += (channel_out * convLayerSizeY + h_out) * convLayerSizeX + w_out;
inputMatrix += (channel_in * LENGTH + h_in) * WIDTH + w_in;
#pragma unroll
for (int i = 0; i < CONV_RECP_SIZEY; ++i)
{
for (int j = 0; j < CONV_RECP_SIZEX; ++j)
{
int h = h_in + i;
int w = w_in + j;
*reducedMatrix = (h >= 0 && w >= 0 && h < LENGTH && w < WIDTH) ?
inputMatrix[i * WIDTH + j] : 0;
reducedMatrix += transformSizeY;
}
}
}
}
void generateFlat4DData(float* matrix, int x, int y, int z, int d, double type, double jump)
{
double w = jump;
for (int b = 0; b < d; b++)
{
for (int c = 0; c < z; c++)
{
COUT_input << "slice: " << c + 1 << "\n";
for (int j = 0; j < y; j++)
{
for (int i = 0; i < x; i++)
{
if (type == -1)
{
matrix[((b * z + c) * y + j) * x + i] = rand() % 10;
}
else if (type == 0)
{
matrix[((b * z + c) * y + j) * x + i] = jump;
}
else
{
matrix[((b * z + c) * y + j) * x + i] = w;
w += jump;
}
COUT_input << std::setprecision(1) << std::fixed << matrix[((b * z + c) * y + j) * x + i] << " , ";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
COUT_input << "\n";
}
}
int main()
{
// Performance test variables
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaError_t cudaStatus;
// Initialize Host data, kernel and output
float* hostInputMatrix = new float[BATCH * DEPTH * LENGTH * WIDTH];
float* hostTransformedInput = new float[transformSizeY * transformSizeX]();
// GENERATING INPUT
COUT_input << "Inputs:\n";
generateFlat4DData(hostInputMatrix, WIDTH, LENGTH, DEPTH, BATCH, 1, 0.1);
// Initializing and allocating Device data, kernels and output
float* deviceInputMatrix;
float* deviceTransformedInput;
cudaStatus = cudaMalloc((void **)&deviceInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void **)&deviceTransformedInput, (transformSizeY * transformSizeX) * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(deviceInputMatrix, hostInputMatrix, (DEPTH * LENGTH * WIDTH) * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Initializing sizes of grid and block of threads
dim3 threadsPerBlock(1024);
dim3 blocksPerGrid(1);
if (transformSizeY * DEPTH > 1024) {
threadsPerBlock.x = 1024;
blocksPerGrid.x = ceil(double(transformSizeY * DEPTH) / double(threadsPerBlock.x));
}
// Run the kernel function and measure time
cudaEventRecord(start, 0);
transformToMul << < blocksPerGrid, threadsPerBlock >> > (deviceInputMatrix, deviceTransformedInput);
cudaStatus = cudaEventRecord(stop, NULL);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "EventRecord failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventSynchronize(stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "EventSynchronize failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaEventElapsedTime(&time, start, stop);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "ElapsedTime failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "DeviceSynchronize failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// Get the results from device
cudaStatus = cudaMemcpy(hostTransformedInput, deviceTransformedInput, (transformSizeX * transformSizeY) * sizeof(float), cudaMemcpyDeviceToHost); // Not relevant to this program
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// PRINTING RESULTS
COUT_result << "Transformed matrix:\n";
for (int k = 0; k < transformSizeX; k++)
{
for (int j = 0; j < transformSizeY; j++)
{
COUT_result << std::setprecision(1) << std::fixed << hostTransformedInput[k * transformSizeY + j] << " ";
}
COUT_result << "\n";
}
// CLEAN UP
printf("Transform time: %f msec.\n", time);
Error:
cudaFree(deviceInputMatrix);
cudaFree(deviceTransformedInput);
return 0;
}
|
45a12f8e4ee3ed4e685f2c407fea11c759f7d34d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
dsymv_upper.cu is nearly identical to dsymv.cu, just change names and drop MAGMA_D_CONJ.
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu, normal z -> d, Tue Aug 30 09:38:29 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
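// Added sizing note (not from the original source): NB_X = 64, and the partial-row logic below
// implies the launcher uses ceil(n / NB_X) thread blocks. For example, with n = 100 the grid has
// 2 blocks and the last block processes partial = 100 % 64 = 36 valid rows; rows 36..63 of that
// block read the last valid row of A and are ignored when results are stored to work.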
__global__ void
dsymv_kernel_U(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_D_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
__global__ void
dsymv_kernel_U_sum(
int n,
double alpha,
int lda,
double beta,
double * __restrict__ y, int incy,
double const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
double Ax = MAGMA_D_ZERO;
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
| 45a12f8e4ee3ed4e685f2c407fea11c759f7d34d.cu | /*
-- MAGMA (version 2.1.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date August 2016
dsymv_upper.cu is nearly identical to dsymv.cu, just change names and drop MAGMA_D_CONJ.
dsymv_kernel_U (upper) in dsymv_upper.cu is very similar to
dsymv_kernel_L (lower) in dsymv.cu; diff the two files to compare.
@generated from magmablas/zhemv_upper.cu, normal z -> d, Tue Aug 30 09:38:29 2016
@author Mark Gates
*/
#include "magma_internal.h"
#include "commonblas_d.h"
#define PRECISION_d
#define NB_X 64
#define NB_Y 4
#define bank_shift 33
#define quarter_NB_X 16
#define half_NB_X 32
/***************************************************************************//**
Upper case, compute block multiply, work = A*x, for any size n:
[ (A11*x1 + A12*x2 + A13*x3) --- --- ] [ A11 A12 A13 ] [ x1 ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ] = [ A12^H A22 A23 ] * [ x2 ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ] [ A13^H A23^H A33 ] [ x3 ]
The order is different from the lower case, because
the upper case processes a block row from the diagonal to the right, whereas
the lower case processes a block row from the diagonal to the left.
Uses a 64x4 thread block.
For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed).
For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles.
In both cases, each thread multiplies 4 elements.
For rows past the bottom of the matrix, the A pointer is adjusted to be the
last valid row of A, which multiple threads will read.
Extra rows are ignored when saving results to work.
Columns past the right edge are explicitly ignored when loading.
x values past the bottom are set to zero, thus, extra columns are zeroed
when multiplying.
*******************************************************************************/
__global__ void
dsymv_kernel_U(
int n,
double const * __restrict__ A, int lda,
double const * __restrict__ x, int incx,
double * __restrict__ work)
{
#if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200)
// treats sA as 16x64 block
#define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ]
// treats sA as 32x32 block
#define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)])
// 64x4 thread block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int blk = blockIdx.x;
const int blk_ind = NB_X * blk;
const int td = NB_X * ty + tx;
// 32x8 thread block
const int tx2 = td % half_NB_X;
const int ty2 = td / half_NB_X;
// If this blk has fewer than NB_X rows, partial is the number of valid rows,
// so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid.
// Else, partial == 0.
int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
double psum, psum_t;
double total = MAGMA_D_ZERO;
// sA is used as a 32x32 block, sA32(i,j),
// and as a 16x64 block, sA16(i,j), in different parts of the code.
// sA must be at least half_NB_X*bank_shift = 32x33 = 1056;
// quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056
__shared__ double sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? */
__shared__ double sx_blk[NB_X]; // for x[ blk ]
__shared__ double sx_jj [NB_X]; // for x[ jj ], which cycles over all blocks right of diag
double rA[4];
double psums_t[4];
// --------------------
// load 64x1 block x(blk_ind + 0:63) into sx_blk
x += (blk_ind + tx)*incx; // x is x(blk_ind + tx)
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_blk[tx] = x[0];
}
else {
sx_blk[tx] = MAGMA_D_ZERO;
}
}
// --------------------
// move to block row
work += blk*lda; // work is work(0, blk)
A += blk_ind; // A is A(blk_ind, 0)
A += ty2*lda + tx2; // A is A(blk_ind + tx2, ty2)
// move to 32x32 diag block
A += blk_ind*lda; // A is A(blk_ind + tx2, blk_ind + ty2)
// load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - tx2 + (partial - 1); // A is A(blk_ind + partial-1, blk_ind + ty2), the bottom-most valid row
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + tx2 - (partial - 1); // A is A(blk_ind + tx2, blk_ind + ty2)
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle,
// as four 32x8 sections in parallel:
// columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
// each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 32x32 diag block, then repeat steps from first diag block
A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2)
// load 32x32 diag block A[block + 0:31, block + 0:31] into sA
if ( partial ) {
if ( tx2 + half_NB_X >= partial ) {
A = A - (tx2 + half_NB_X) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 + half_NB_X >= partial ) {
A = A + (tx2 + half_NB_X) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// symmetrize 32x32 diag block, copying upper to lower triangle
#pragma unroll
for (int j=ty2*4; j < ty2*4 + 4; j++) {
if ( j > tx2 ) {
sA32(j, tx2) = MAGMA_D_CONJ( sA32(tx2, j) );
}
}
__syncthreads();
// multiply 32x32 diag block * x
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial row sums
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to off-diag 32x32 block
A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + NB/2 + ty2)
// load 32x32 block of A into sA,
// as four 32x8 sections one after another:
// columns 0:7, then 8:15, then 16:23, then 24:31
if ( partial ) {
if ( tx2 >= partial ) {
A = A - (tx2) + (partial - 1);
}
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
if ( ty2+j + half_NB_X < partial ) {
sA32(tx2, ty2 + j) = A[j*lda];
}
else {
sA32(tx2, ty2 + j) = MAGMA_D_ZERO;
}
}
if ( tx2 >= partial ) {
A = A + (tx2) - (partial - 1);
}
}
else {
#pragma unroll
for (int j=0; j < half_NB_X; j += 8) {
sA32(tx2, ty2 + j) = A[j*lda];
}
}
__syncthreads();
// multiply 32x32 block (below diag)
psum = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum += MAGMA_D_CONJ( sA32(ty2 + j*8, tx2) ) * sx_blk[j*8 + ty2];
}
//__syncthreads(); // no sync needed here
// multiply transposed 32x32 block (above diag)
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA32(tx2, ty2*4 + j) * sx_blk[half_NB_X + ty2*4 + j];
}
__syncthreads();
// store partial sums for non-transposed 32x32 block
sA32(ty2, tx2) = psum;
__syncthreads();
// sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2)
if ( ty2 == 1 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// store partial sums for transposed 32x32 block
sA32(ty2, tx2) = psum_t;
__syncthreads();
// sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2)
if ( ty2 == 0 ) {
total = total
+ sA32(0, tx2) + sA32(1, tx2)
+ sA32(2, tx2) + sA32(3, tx2)
+ sA32(4, tx2) + sA32(5, tx2)
+ sA32(6, tx2) + sA32(7, tx2);
}
__syncthreads();
// --------------------
// move to next 64x64 block right of diag in block row, and
// switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block
A += half_NB_X*lda; // A is A(blk_ind + tx2, blk_ind + NB_X + ty2 )
A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind + NB_X )
A += 4*ty*lda + tx; // A is A(blk_ind + tx, blk_ind + 4*ty)
// Unlike lower case, don't adjust A here for partial # of rows.
// Since block is right of diagonal, it must have all NB rows,
// but can have < NB columns, dealt with when loading below.
x -= blk_ind*incx; // x is x(tx)
// 16x16 thread block
const int tx4 = td % quarter_NB_X;
const int ty4 = td / quarter_NB_X;
// cycle over blocks jj right of diagonal, in block row blk
for (int jj=blk+1; jj < gridDim.x; ++jj) {
partial = (jj == gridDim.x - 1 ? (n % NB_X) : 0);
// load 64x1 block x(jj_ind + 0:63) into sx_jj
if ( ty == 0 ) {
if ( partial == 0 || tx < partial ) {
sx_jj[tx] = x[jj*NB_X*incx];
}
else {
sx_jj[tx] = MAGMA_D_ZERO;
}
}
__syncthreads();
for (int k=0; k < 4; k++) {
// load 64x16 block of A into rA, 4 elements per thread,
// as four 64x4 sections in parallel:
// columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15
if ( partial ) {
#pragma unroll
for (int j=0; j < 4; j++) {
if ( 4*ty + j + k*quarter_NB_X < partial ) {
rA[j] = A[j*lda];
}
else {
rA[j] = MAGMA_D_ZERO;
}
}
}
else {
#pragma unroll
for (int j=0; j < 4; j++) {
rA[j] = A[j*lda];
}
}
// 1) multiply 64x16 block A_{blk,jj} * x_jj
// each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k)
// 2) multiply 16x64 block A_{blk,jj} * x_blk,
// storing each product Aji*xi to sA(j,i)
#pragma unroll
for (int j=0; j < 4; j++) {
total += rA[j] * sx_jj[quarter_NB_X*k + ty*4 + j]; // y_blk = A_{blk,jj} * x_jj
sA16(ty*4 + j, tx) = MAGMA_D_CONJ( rA[j] ) * sx_blk[tx]; // y_jj = A_{blk,jj}^H * x_blk
}
__syncthreads();
// do partial row sums for transposed 16x64 result
// use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty)
// sum sixteen 16x4 sections in parallel:
// columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63
psum_t = MAGMA_D_ZERO;
#pragma unroll
for (int j=0; j < 4; j++) {
psum_t += sA16(tx4, ty4*4 + j);
}
__syncthreads();
// store partial row sums of transposed result, y_jj (locally)
psums_t[k] = psum_t;
// move right to next 64x16 block
A += lda * quarter_NB_X; // A is A(blk_ind + tx, jj*NB_X + (k+1)*NB_X/4 + 4*ty)
}
// already at next 64x64 block
// A is A(blk_ind + tx, (jj+1)*NB_x + 4*ty)
// store partial row sums of transposed result, y_jj
#pragma unroll
for (int k=0; k < 4; k++) {
sA16(tx4, ty4 + quarter_NB_X*k) = psums_t[k];
}
__syncthreads();
// sum up partial row sums of transposed result, y_jj, and store final total to workspace
// thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16
if ( ty4 < 4 && (partial == 0 || tx4 + ty4*quarter_NB_X < partial) ) {
int ty4_nb4 = ty4*quarter_NB_X;
psum_t = sA16(tx4, 0 + ty4_nb4) + sA16(tx4, 1 + ty4_nb4)
+ sA16(tx4, 2 + ty4_nb4) + sA16(tx4, 3 + ty4_nb4)
+ sA16(tx4, 4 + ty4_nb4) + sA16(tx4, 5 + ty4_nb4)
+ sA16(tx4, 6 + ty4_nb4) + sA16(tx4, 7 + ty4_nb4)
+ sA16(tx4, 8 + ty4_nb4) + sA16(tx4, 9 + ty4_nb4)
+ sA16(tx4, 10 + ty4_nb4) + sA16(tx4, 11 + ty4_nb4)
+ sA16(tx4, 12 + ty4_nb4) + sA16(tx4, 13 + ty4_nb4)
+ sA16(tx4, 14 + ty4_nb4) + sA16(tx4, 15 + ty4_nb4);
work[jj*NB_X + tx4 + ty4_nb4] = psum_t; // store at work( jj*NB_X + tx4 + ty4*16, blk )
}
__syncthreads();
}
// store row sums
sA16(ty, tx) = total;
__syncthreads();
partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0);
// sum up final total, y_blk, for row tx
if ( ty == 0 && (partial == 0 || tx < partial) ) {
total = sA16(0, tx)
+ sA16(1, tx)
+ sA16(2, tx)
+ sA16(3, tx);
work[blk*NB_X + tx] = total; // store at work( blk*NB_X + tx, blk )
}
#endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */
}
// end dsymv_kernel_U
/***************************************************************************//**
Upper case, sum up final results
Each block sums one block row; each thread sums one row.
On input (for 3 blocks):
[ (A11*x1 + A12*x2 + A13*x3) --- --- ]
work = [ (A12^H*x1) (A22*x2 + A23*x3) --- ]
[ (A13^H*x1) (A23^H*x2) (A33*x3) ]
On output:
[ (A11*x1 + A12*x2 + A13*x3) ]
y = alpha*[ (A12^H*x1) + (A22*x2 + A23*x3) ] + beta*y
[ (A13^H*x1) + (A23^H*x2) + (A33*x3) ]
*******************************************************************************/
__global__ void
dsymv_kernel_U_sum(
int n,
double alpha,
int lda,
double beta,
double * __restrict__ y, int incy,
double const * __restrict__ work )
{
int tx = threadIdx.x;
int blk = blockIdx.x;
int blk_ind = blk * NB_X;
int ind = blk_ind + tx;
// Don't write outside [0, ..., n)
if ( ind < n ) {
work += ind;
double Ax = MAGMA_D_ZERO;
for (int j = 0; j <= blk; ++j) {
Ax += work[0];
work += lda;
}
y[ind * incy] = beta*y[ind * incy] + alpha*Ax;
}
}
|
dcdf7bbeaacfb3b6b9fb540d6c8a87a3179c3a7a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel/gpu/cuda_impl/gelu_impl.cuh"
#include "device/gpu/cuda_common.h"
template<typename T>
__global__ void GeluKernel(size_t size, T* input_addr, T* output_addr) {
// formula:
// gelu(x) = 0.5 * x * (1.0 + tanh(y))
// tanh(y) = 2 / (1 + exp(-2y)) - 1
// y = sqrt(2/pi) * (x + 0.044715 * x^3)
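// Added sanity-check values for this tanh approximation: gelu(0) = 0, gelu(1.0f) ~= 0.841,
// gelu(-1.0f) ~= -0.159; the constant 0.7978845608 used below is sqrt(2/pi).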
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
float x = input_addr[pos];
float tanh_res = tanh(0.7978845608 * (x + 0.044715 * x * x * x));
output_addr[pos] = 0.5 * x * (1.0 + tanh_res);
}
}
template<typename T>
void Gelu(size_t size, T* input_addr, T* output_addr, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( GeluKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, input_addr, output_addr);
return;
}
template<typename T>
__global__ void GeluGradKernel(size_t size, T* dy_addr, T* x_addr, T* dx_addr) {
// formula:
// dx = dy * y'
// y' = 0.5 * (1 + tanh(tanh_para)) +
// 0.5 * x * (1 - tanh(tanh_para) * tanh(tanh_para)) * mul_right
// tanh_para = sqrt(2/pi) * (x + 0.044715 * x^3)
// mul_right = sqrt(2/pi) * (1 + 3 * 0.044715 * x^2)
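// Added note: the constant 0.1070322244 used below equals 3 * 0.044715 * sqrt(2/pi), i.e.
// mul_right is d(tanh_para)/dx.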
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
T x = x_addr[pos];
T tanh_res = tanh(0.7978845608 * (x + 0.044715 * x * x * x));
T mul_right = 0.7978845608 + 0.1070322244 * x * x;
T y_res = 0.5 * (1 + tanh_res) + 0.5 * x * (1 - tanh_res * tanh_res) * mul_right;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
template<typename T>
void GeluGradKernel(size_t size, T* dy_addr, T* x_addr, T* dx_addr, hipStream_t cuda_stream) {
hipLaunchKernelGGL(( GeluGradKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, dy_addr, x_addr, dx_addr);
}
template void Gelu(size_t size, float* input_addr, float* output_addr, hipStream_t cuda_stream);
template void GeluGradKernel(size_t size, float* dy_addr, float* x_addr, float* dx_addr, hipStream_t cuda_stream);
| dcdf7bbeaacfb3b6b9fb540d6c8a87a3179c3a7a.cu | /**
* Copyright 2020 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kernel/gpu/cuda_impl/gelu_impl.cuh"
#include "device/gpu/cuda_common.h"
template<typename T>
__global__ void GeluKernel(size_t size, T* input_addr, T* output_addr) {
// formula:
// gelu(x) = 0.5 * x * (1.0 + tanh(y))
// tanh(y) = 2 / (1 + exp(-2y)) - 1
// y = sqrt(2/pi) * (x + 0.044715 * x^3)
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
float x = input_addr[pos];
float tanh_res = tanh(0.7978845608 * (x + 0.044715 * x * x * x));
output_addr[pos] = 0.5 * x * (1.0 + tanh_res);
}
}
template<typename T>
void Gelu(size_t size, T* input_addr, T* output_addr, cudaStream_t cuda_stream) {
GeluKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, input_addr, output_addr);
return;
}
template<typename T>
__global__ void GeluGradKernel(size_t size, T* dy_addr, T* x_addr, T* dx_addr) {
// formula:
// dx = dy * y'
// y' = 0.5 * (1 + tanh(tanh_para)) +
// 0.5 * x * (1 - tanh(tanh_para) * tanh(tanh_para)) * mul_right
// tanh_para = sqrt(2/pi) * (x + 0.044715 * x^3)
// mul_right = sqrt(2/pi) * (1 + 3 * 0.044715 * x^2)
for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < (size); pos += blockDim.x * gridDim.x) {
T x = x_addr[pos];
T tanh_res = tanh(0.7978845608 * (x + 0.044715 * x * x * x));
T mul_right = 0.7978845608 + 0.1070322244 * x * x;
T y_res = 0.5 * (1 + tanh_res) + 0.5 * x * (1 - tanh_res * tanh_res) * mul_right;
dx_addr[pos] = dy_addr[pos] * y_res;
}
}
template<typename T>
void GeluGradKernel(size_t size, T* dy_addr, T* x_addr, T* dx_addr, cudaStream_t cuda_stream) {
GeluGradKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, dy_addr, x_addr, dx_addr);
}
template void Gelu(size_t size, float* input_addr, float* output_addr, cudaStream_t cuda_stream);
template void GeluGradKernel(size_t size, float* dy_addr, float* x_addr, float* dx_addr, cudaStream_t cuda_stream);
|
13712965c5c9e282562c5c4551a4b5958aee61e0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "cuda_utils.h"
#include "timer.c"
typedef float dtype;
__global__ void matTrans(dtype* AT, dtype* A, int N) {
int tile = 32;
int x = threadIdx.x + (blockIdx.x * tile);
int y = threadIdx.y + (blockIdx.y * tile);
int w = gridDim.x * tile;
for (int i = 0; i < tile; i+= blockDim.y) {
AT[x * w + (y + i)] = A[(y + i) * w + x];
}
}
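// Added note, derived from the launch configuration in gpuTranspose below: with tile = 32 and a
// 32x8 thread block, each thread copies tile / blockDim.y = 4 elements, and the (N/32) x (N/32)
// grid assumes N is a multiple of 32. Reads from A are coalesced across a warp, while writes to
// AT are strided by w, since this version does not stage the tile through shared memory.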
void parseArg (int argc, char** argv, int* N) {
if(argc == 2) {
*N = atoi (argv[1]);
assert (*N > 0);
} else {
fprintf (stderr, "usage: %s <N>\n", argv[0]);
exit (EXIT_FAILURE);
}
}
void initArr (dtype* in, int N) {
int i;
for(i = 0; i < N; i++) {
in[i] = (dtype) rand () / RAND_MAX;
}
}
void cpuTranspose (dtype* A, dtype* AT, int N) {
int i, j;
for(i = 0; i < N; i++) {
for(j = 0; j < N; j++) {
AT[j * N + i] = A[i * N + j];
}
}
}
int cmpArr (dtype* a, dtype* b, int N) {
int cnt, i;
cnt = 0;
for(i = 0; i < N; i++) {
if(abs(a[i] - b[i]) > 1e-6) cnt++;
}
return cnt;
}
void gpuTranspose (dtype* A, dtype* AT, int N) {
struct stopwatch_t* timer = NULL;
long double t_gpu;
dtype *d_idata, *d_odata;
/* Setup timers */
stopwatch_init ();
timer = stopwatch_create ();
stopwatch_start (timer);
dim3 gb(N / 32, N / 32, 1);
dim3 tb(32, 8, 1);
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, N * N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMemcpy (d_idata, A, N* N * sizeof (dtype), hipMemcpyHostToDevice));
hipLaunchKernelGGL(( matTrans) , dim3(gb), dim3(tb), 0, 0, d_odata, d_idata, N);
CUDA_CHECK_ERROR (hipMemcpy (AT, d_odata, N * N * sizeof (dtype), hipMemcpyDeviceToHost));
hipDeviceSynchronize ();
t_gpu = stopwatch_stop (timer);
fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n", t_gpu, (N * N) / t_gpu * 1e-9 );
}
int main(int argc, char** argv) {
/* variables */
dtype *A, *ATgpu, *ATcpu;
int err;
int N;
struct stopwatch_t* timer = NULL;
long double t_cpu;
N = -1;
parseArg (argc, argv, &N);
/* input and output matrices on host */
/* output */
ATcpu = (dtype*) malloc (N * N * sizeof (dtype));
ATgpu = (dtype*) malloc (N * N * sizeof (dtype));
/* input */
A = (dtype*) malloc (N * N * sizeof (dtype));
initArr (A, N * N);
/* GPU transpose kernel */
gpuTranspose (A, ATgpu, N);
/* Setup timers */
stopwatch_init ();
timer = stopwatch_create ();
stopwatch_start (timer);
/* compute reference array */
cpuTranspose (A, ATcpu, N);
t_cpu = stopwatch_stop (timer);
fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n",
t_cpu);
/* check correctness */
err = cmpArr (ATgpu, ATcpu, N * N);
if(err) {
fprintf (stderr, "Transpose failed: %d\n", err);
} else {
fprintf (stderr, "Transpose successful\n");
}
free (A);
free (ATgpu);
free (ATcpu);
return 0;
}
| 13712965c5c9e282562c5c4551a4b5958aee61e0.cu | #include <stdlib.h>
#include <stdio.h>
#include "cuda_utils.h"
#include "timer.c"
typedef float dtype;
__global__ void matTrans(dtype* AT, dtype* A, int N) {
int tile = 32;
int x = threadIdx.x + (blockIdx.x * tile);
int y = threadIdx.y + (blockIdx.y * tile);
int w = gridDim.x * tile;
for (int i = 0; i < tile; i+= blockDim.y) {
AT[x * w + (y + i)] = A[(y + i) * w + x];
}
}
void parseArg (int argc, char** argv, int* N) {
if(argc == 2) {
*N = atoi (argv[1]);
assert (*N > 0);
} else {
fprintf (stderr, "usage: %s <N>\n", argv[0]);
exit (EXIT_FAILURE);
}
}
void initArr (dtype* in, int N) {
int i;
for(i = 0; i < N; i++) {
in[i] = (dtype) rand () / RAND_MAX;
}
}
void cpuTranspose (dtype* A, dtype* AT, int N) {
int i, j;
for(i = 0; i < N; i++) {
for(j = 0; j < N; j++) {
AT[j * N + i] = A[i * N + j];
}
}
}
int cmpArr (dtype* a, dtype* b, int N) {
int cnt, i;
cnt = 0;
for(i = 0; i < N; i++) {
if(abs(a[i] - b[i]) > 1e-6) cnt++;
}
return cnt;
}
void gpuTranspose (dtype* A, dtype* AT, int N) {
struct stopwatch_t* timer = NULL;
long double t_gpu;
dtype *d_idata, *d_odata;
/* Setup timers */
stopwatch_init ();
timer = stopwatch_create ();
stopwatch_start (timer);
dim3 gb(N / 32, N / 32, 1);
dim3 tb(32, 8, 1);
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, N * N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, A, N* N * sizeof (dtype), cudaMemcpyHostToDevice));
matTrans <<<gb, tb>>> (d_odata, d_idata, N);
CUDA_CHECK_ERROR (cudaMemcpy (AT, d_odata, N * N * sizeof (dtype), cudaMemcpyDeviceToHost));
cudaThreadSynchronize ();
t_gpu = stopwatch_stop (timer);
fprintf (stderr, "GPU transpose: %Lg secs ==> %Lg billion elements/second\n", t_gpu, (N * N) / t_gpu * 1e-9 );
}
int main(int argc, char** argv) {
/* variables */
dtype *A, *ATgpu, *ATcpu;
int err;
int N;
struct stopwatch_t* timer = NULL;
long double t_cpu;
N = -1;
parseArg (argc, argv, &N);
/* input and output matrices on host */
/* output */
ATcpu = (dtype*) malloc (N * N * sizeof (dtype));
ATgpu = (dtype*) malloc (N * N * sizeof (dtype));
/* input */
A = (dtype*) malloc (N * N * sizeof (dtype));
initArr (A, N * N);
/* GPU transpose kernel */
gpuTranspose (A, ATgpu, N);
/* Setup timers */
stopwatch_init ();
timer = stopwatch_create ();
stopwatch_start (timer);
/* compute reference array */
cpuTranspose (A, ATcpu, N);
t_cpu = stopwatch_stop (timer);
fprintf (stderr, "Time to execute CPU transpose kernel: %Lg secs\n",
t_cpu);
/* check correctness */
err = cmpArr (ATgpu, ATcpu, N * N);
if(err) {
fprintf (stderr, "Transpose failed: %d\n", err);
} else {
fprintf (stderr, "Transpose successful\n");
}
free (A);
free (ATgpu);
free (ATcpu);
return 0;
}
|
22856d1c403c289c25556f09746b8e1d3ca97181.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include <cusparse_v2.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <nsparse.h>
#include <nsparse_asm.h>
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // hash multiplier: a prime kept disjoint (coprime) to the power-of-two shared hash-table size COMP_SH_SIZE
#define ONSTREAM
void init_bin(sfBIN *bin, int M)
{
int i;
bin->stream = (hipStream_t *)malloc(sizeof(hipStream_t) * BIN_NUM);
for (i = 0; i < BIN_NUM; i++) {
hipStreamCreate(&(bin->stream[i]));
}
bin->bin_size = (int *)malloc(sizeof(int) * BIN_NUM);
bin->bin_offset = (int *)malloc(sizeof(int) * BIN_NUM);
checkCudaErrors(hipMalloc((void **)&(bin->d_row_perm), sizeof(int) * M));
checkCudaErrors(hipMalloc((void **)&(bin->d_row_nz), sizeof(int) * (M + 1)));
checkCudaErrors(hipMalloc((void **)&(bin->d_max), sizeof(int)));
checkCudaErrors(hipMalloc((void **)&(bin->d_bin_size), sizeof(int) * BIN_NUM));
checkCudaErrors(hipMalloc((void **)&(bin->d_bin_offset), sizeof(int) * BIN_NUM));
i = 0;
bin->max_intprod = 0;
bin->max_nz = 0;
}
void release_bin(sfBIN bin)
{
int i;
hipFree(bin.d_row_nz);
hipFree(bin.d_row_perm);
hipFree(bin.d_max);
hipFree(bin.d_bin_size);
hipFree(bin.d_bin_offset);
free(bin.bin_size);
free(bin.bin_offset);
for (i = 0; i < BIN_NUM; i++) {
hipStreamDestroy(bin.stream[i]);
}
free(bin.stream);
}
__global__ void set_intprod_num(int *d_arpt, int *d_acol,
const int* __restrict__ d_brpt,
int *d_row_intprod, int *d_max_intprod,
int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = 0;
int j;
for (j = d_arpt[i]; j < d_arpt[i + 1]; j++) {
nz_per_row += d_brpt[d_acol[j] + 1] - d_brpt[d_acol[j]];
}
d_row_intprod[i] = nz_per_row;
atomicMax(d_max_intprod, nz_per_row);
}
__global__ void set_bin(int *d_row_nz, int *d_bin_size, int *d_max,
int M, int min, int mmin)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_row_nz[i];
atomicMax(d_max, nz_per_row);
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= (mmin)) {
atomicAdd(d_bin_size + j, 1);
}
else {
atomicAdd(d_bin_size + j + 1, 1);
}
return;
}
}
atomicAdd(d_bin_size + BIN_NUM - 1, 1);
}
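// Added summary of set_bin: for the smallest j with nz_per_row <= (min << j), the row is counted
// in bin j when nz_per_row <= mmin and in bin j+1 otherwise; rows exceeding every threshold fall
// into the last bin (BIN_NUM - 1). set_max_bin / set_min_bin below use this grouping (plus the
// row permutation) so that later phases can launch a kernel variant sized for each bin's bound.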
__global__ void init_row_perm(int *d_permutation, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
d_permutation[i] = i;
}
__global__ void set_row_perm(int *d_bin_size, int *d_bin_offset,
int *d_max_row_nz, int *d_row_perm,
int M, int min, int mmin)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_max_row_nz[i];
int dest;
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= mmin) {
dest = atomicAdd(d_bin_size + j, 1);
d_row_perm[d_bin_offset[j] + dest] = i;
}
else {
dest = atomicAdd(d_bin_size + j + 1, 1);
d_row_perm[d_bin_offset[j + 1] + dest] = i;
}
return;
}
}
dest = atomicAdd(d_bin_size + BIN_NUM - 1, 1);
d_row_perm[d_bin_offset[BIN_NUM - 1] + dest] = i;
}
void set_max_bin(int *d_arpt, int *d_acol, int *d_brpt, sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
hipMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
hipMemcpy(bin->d_max, &(bin->max_intprod), sizeof(int), hipMemcpyHostToDevice);
BS = 1024;
GS = div_round_up(M, BS);
hipLaunchKernelGGL(( set_intprod_num), dim3(GS), dim3(BS), 0, 0, d_arpt, d_acol, d_brpt, bin->d_row_nz, bin->d_max, M);
hipMemcpy(&(bin->max_intprod), bin->d_max, sizeof(int), hipMemcpyDeviceToHost);
if (bin->max_intprod > IMB_PWMIN) {
hipLaunchKernelGGL(( set_bin), dim3(GS), dim3(BS), 0, 0, bin->d_row_nz, bin->d_bin_size, bin->d_max, M, IMB_MIN, IMB_PWMIN);
hipMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, hipMemcpyDeviceToHost);
hipMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
hipMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( set_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, IMB_MIN, IMB_PWMIN);
}
else {
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
hipLaunchKernelGGL(( init_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_row_perm, M);
}
}
void set_min_bin(sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
hipMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
hipMemcpy(bin->d_max, &(bin->max_nz), sizeof(int), hipMemcpyHostToDevice);
BS = 1024;
GS = div_round_up(M, BS);
hipLaunchKernelGGL(( set_bin), dim3(GS), dim3(BS), 0, 0, bin->d_row_nz, bin->d_bin_size,
bin->d_max,
M, B_MIN, B_PWMIN);
hipMemcpy(&(bin->max_nz), bin->d_max, sizeof(int), hipMemcpyDeviceToHost);
if (bin->max_nz > B_PWMIN) {
hipMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, hipMemcpyDeviceToHost);
hipMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
hipMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( set_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, B_MIN, B_PWMIN);
}
else {
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
BS = 1024;
GS = div_round_up(M, BS);
hipLaunchKernelGGL(( init_row_perm), dim3(GS), dim3(BS), 0, 0, bin->d_row_perm, M);
}
}
__global__ void init_value(real *d_val, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_val[i] = 0;
}
__global__ void init_check(int *d_check, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_check[i] = -1;
}
__global__ void set_row_nz_bin_pwarp(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz,
int bin_offset, int M) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j, k;
int soffset;
int acol, bcol, key, hash, adr, nz, old;
__shared__ int check[IMB_PW_SH_SIZE];
soffset = local_rid * IMB_PWMIN;
for (j = tid; j < IMB_PWMIN; j += PWARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
rid = d_row_perm[rid + bin_offset];
nz = 0;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (IMB_PWMIN - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (IMB_PWMIN - 1);
adr = soffset + hash;
}
}
}
}
for (j = PWARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int bin_offset, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j, k, l;
int bcol, key, hash, old;
int nz, adr;
int acol, ccol;
int soffset;
soffset = wid * SH_ROW;
__shared__ int check[IMB_SH_SIZE];
for (j = tid; j < SH_ROW; j += WARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
acol = 0;
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) acol = ld_gbl_int32(d_acol + j + tid);
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
ccol = __shfl(acol, l);
for (k = d_brpt[ccol] + tid; k < d_brpt[ccol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
__shared__ int check[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (rid >= M) {
return;
}
__syncthreads();
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
__syncthreads();
if (threadIdx.x == 0) {
check[0] = 0;
}
__syncthreads();
if (tid == 0) {
atomicAdd(check, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = check[0];
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb_large(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int *d_fail_count, int *d_fail_perm,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int adr;
int acol;
__shared__ int check[SH_ROW];
__shared__ int snz[1];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (threadIdx.x == 0) {
snz[0] = 0;
}
if (rid >= M) {
return;
}
__syncthreads();
rid = d_row_perm[rid + bin_offset];
int count = 0;
int border = SH_ROW >> 1;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (count < border && snz[0] < border) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
atomicAdd(snz, 1);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
count++;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
__syncthreads();
if (count >= border || snz[0] >= border) {
if (threadIdx.x == 0) {
int d = atomicAdd(d_fail_count, 1);
d_fail_perm[d] = rid;
}
}
else {
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
}
__global__ void set_row_nz_bin_each_gl(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int *d_check,
int max_row_nz, int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
int offset = rid * max_row_nz;
__shared__ int snz[1];
if (threadIdx.x == 0) {
snz[0] = 0;
}
__syncthreads();
if (rid >= M) {
return;
}
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = offset + hash;
while (1) {
if (d_check[adr] == key) {
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = offset + hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
atomicAdd(snz, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
void set_row_nnz(int *d_arpt, int *d_acol,
int *d_brpt, int *d_bcol,
int *d_crpt,
sfBIN *bin,
int M, int *nnz);
__global__ void calculate_value_col_bin_pwarp(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j;
__shared__ int shared_check[B_PW_SH_SIZE];
__shared__ real shared_value[B_PW_SH_SIZE];
int soffset = local_rid * (B_PWMIN);
for (j = tid; j < (B_PWMIN); j += PWARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int k;
int acol, bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & ((B_PWMIN) - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & ((B_PWMIN) - 1);
adr = soffset + hash;
}
}
}
}
for (j = tid; j < (B_PWMIN); j += PWARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
/* Sort the compacted entries by column index: each thread takes one stored
key, counts how many stored keys are smaller than it (its rank), and writes
the (column, value) pair to that rank within the output row. */
int count, target;
for (j = tid; j < nz; j += PWARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
template <int SH_ROW>
__global__ void calculate_value_col_bin_each(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j;
__shared__ int shared_check[B_SH_SIZE];
__shared__ real shared_value[B_SH_SIZE];
int soffset = wid * SH_ROW;
for (j = tid; j < SH_ROW; j += WARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int lacol, acol;
int k, l;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real laval, aval, bval;
lacol = 0;
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) {
lacol = ld_gbl_int32(d_acol + j + tid);
laval = ld_gbl_real(d_aval + j + tid);
}
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
acol = __shfl(lacol, l);
aval = __shfl(laval, l);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = tid; j < nz; j += WARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
template <int SH_ROW>
__global__ void calculate_value_col_bin_each_tb(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
__shared__ int shared_check[SH_ROW];
__shared__ real shared_value[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
shared_check[j] = -1;
shared_value[j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
while (1) {
if (shared_check[hash] == key) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
else if (shared_check[hash] == -1) {
old = atomicCAS(shared_check + hash, -1, key);
if (old == -1) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[index] = shared_check[j];
shared_value[index] = shared_value[j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = shared_check[j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[j];
d_cval[offset + count] = shared_value[j];
}
}
__global__ void calculate_value_col_bin_each_gl(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int *d_check,
real *d_value,
int max_row_nz,
int bin_offset,
int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
if (rid >= M) {
return;
}
int doffset = rid * max_row_nz;
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = doffset + hash;
while (1) {
if (d_check[adr] == key) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = doffset + hash;
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < max_row_nz; j += WARP) {
if (d_check[doffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
d_check[doffset + index] = d_check[doffset + j];
d_value[doffset + index] = d_value[doffset + j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = d_check[doffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(d_check[doffset + k] - target) >> 31;
}
d_ccol[offset + count] = d_check[doffset + j];
d_cval[offset + count] = d_value[doffset + j];
}
}
void calculate_value_col_bin(int *d_arpt, int *d_acol, real *d_aval,
int *d_brpt, int *d_bcol, real *d_bval,
int *d_crpt, int *d_ccol, real *d_cval,
sfBIN *bin,
int M);
void spgemm_kernel_hash(sfCSR *a, sfCSR *b, sfCSR *c)
{
int M;
sfBIN bin;
M = a->M;
c->M = M;
c->N = b->N;
/* Initialize bin */
init_bin(&bin, M);
/* Set max bin */
set_max_bin(a->d_rpt, a->d_col, b->d_rpt, &bin, M);
checkCudaErrors(hipMalloc((void **)&(c->d_rpt), sizeof(int) * (M + 1)));
/* Count nz of C */
set_row_nnz(a->d_rpt, a->d_col,
b->d_rpt, b->d_col,
c->d_rpt,
&bin,
M,
&(c->nnz));
/* Set bin */
set_min_bin(&bin, M);
checkCudaErrors(hipMalloc((void **)&(c->d_col), sizeof(int) * c->nnz));
checkCudaErrors(hipMalloc((void **)&(c->d_val), sizeof(real) * c->nnz));
/* Calculating value of C */
calculate_value_col_bin(a->d_rpt, a->d_col, a->d_val,
b->d_rpt, b->d_col, b->d_val,
c->d_rpt, c->d_col, c->d_val,
&bin,
M);
release_bin(bin);
}
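/*
 * Added summary of spgemm_kernel_hash (explanatory note, not part of the
 * original source): it follows the usual two-phase SpGEMM structure.
 *   1. set_max_bin             - bin rows of A*B by intermediate-product count.
 *   2. set_row_nnz             - symbolic phase: count the exact nnz of each
 *                                row of C with the hash kernels and build d_rpt.
 *   3. set_min_bin             - regroup rows by exact nnz for the numeric phase.
 *   4. calculate_value_col_bin - numeric phase: accumulate values in per-row
 *                                hash tables, then compact, sort and write
 *                                d_col / d_val.
 */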
| 22856d1c403c289c25556f09746b8e1d3ca97181.cu | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
#include <helper_cuda.h>
#include <cusparse_v2.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <nsparse.h>
#include <nsparse_asm.h>
/* SpGEMM Specific Parameters */
#define HASH_SCAL 107 // Hash multiplier: an odd prime kept co-prime with the power-of-two shared-memory table sizes (e.g. COMP_SH_SIZE)
#define ONSTREAM
void init_bin(sfBIN *bin, int M)
{
int i;
bin->stream = (cudaStream_t *)malloc(sizeof(cudaStream_t) * BIN_NUM);
for (i = 0; i < BIN_NUM; i++) {
cudaStreamCreate(&(bin->stream[i]));
}
bin->bin_size = (int *)malloc(sizeof(int) * BIN_NUM);
bin->bin_offset = (int *)malloc(sizeof(int) * BIN_NUM);
checkCudaErrors(cudaMalloc((void **)&(bin->d_row_perm), sizeof(int) * M));
checkCudaErrors(cudaMalloc((void **)&(bin->d_row_nz), sizeof(int) * (M + 1)));
checkCudaErrors(cudaMalloc((void **)&(bin->d_max), sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&(bin->d_bin_size), sizeof(int) * BIN_NUM));
checkCudaErrors(cudaMalloc((void **)&(bin->d_bin_offset), sizeof(int) * BIN_NUM));
i = 0;
bin->max_intprod = 0;
bin->max_nz = 0;
}
void release_bin(sfBIN bin)
{
int i;
cudaFree(bin.d_row_nz);
cudaFree(bin.d_row_perm);
cudaFree(bin.d_max);
cudaFree(bin.d_bin_size);
cudaFree(bin.d_bin_offset);
free(bin.bin_size);
free(bin.bin_offset);
for (i = 0; i < BIN_NUM; i++) {
cudaStreamDestroy(bin.stream[i]);
}
free(bin.stream);
}
__global__ void set_intprod_num(int *d_arpt, int *d_acol,
const int* __restrict__ d_brpt,
int *d_row_intprod, int *d_max_intprod,
int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = 0;
int j;
for (j = d_arpt[i]; j < d_arpt[i + 1]; j++) {
nz_per_row += d_brpt[d_acol[j] + 1] - d_brpt[d_acol[j]];
}
d_row_intprod[i] = nz_per_row;
atomicMax(d_max_intprod, nz_per_row);
}
__global__ void set_bin(int *d_row_nz, int *d_bin_size, int *d_max,
int M, int min, int mmin)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_row_nz[i];
atomicMax(d_max, nz_per_row);
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= (mmin)) {
atomicAdd(d_bin_size + j, 1);
}
else {
atomicAdd(d_bin_size + j + 1, 1);
}
return;
}
}
atomicAdd(d_bin_size + BIN_NUM - 1, 1);
}
__global__ void init_row_perm(int *d_permutation, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
d_permutation[i] = i;
}
__global__ void set_row_perm(int *d_bin_size, int *d_bin_offset,
int *d_max_row_nz, int *d_row_perm,
int M, int min, int mmin)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_max_row_nz[i];
int dest;
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= mmin) {
dest = atomicAdd(d_bin_size + j, 1);
d_row_perm[d_bin_offset[j] + dest] = i;
}
else {
dest = atomicAdd(d_bin_size + j + 1, 1);
d_row_perm[d_bin_offset[j + 1] + dest] = i;
}
return;
}
}
dest = atomicAdd(d_bin_size + BIN_NUM - 1, 1);
d_row_perm[d_bin_offset[BIN_NUM - 1] + dest] = i;
}
void set_max_bin(int *d_arpt, int *d_acol, int *d_brpt, sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
cudaMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(bin->d_max, &(bin->max_intprod), sizeof(int), cudaMemcpyHostToDevice);
BS = 1024;
GS = div_round_up(M, BS);
set_intprod_num<<<GS, BS>>>(d_arpt, d_acol, d_brpt, bin->d_row_nz, bin->d_max, M);
cudaMemcpy(&(bin->max_intprod), bin->d_max, sizeof(int), cudaMemcpyDeviceToHost);
if (bin->max_intprod > IMB_PWMIN) {
set_bin<<<GS, BS>>>(bin->d_row_nz, bin->d_bin_size, bin->d_max, M, IMB_MIN, IMB_PWMIN);
cudaMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, cudaMemcpyDeviceToHost);
cudaMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
cudaMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
set_row_perm<<<GS, BS>>>(bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, IMB_MIN, IMB_PWMIN);
}
else {
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
init_row_perm<<<GS, BS>>>(bin->d_row_perm, M);
}
}
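/*
 * Added note: set_max_bin bins the rows by their number of intermediate
 * products (for row i, the sum of nnz(B row j) over the nonzeros a_ij), which
 * is an upper bound on the nnz of row i of C. The bin decides how large a
 * shared-memory hash table the symbolic kernels need. If even the largest row
 * fits the smallest table (max_intprod <= IMB_PWMIN), the row permutation is
 * simply the identity (init_row_perm).
 */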
void set_min_bin(sfBIN *bin, int M)
{
int i;
int GS, BS;
for (i = 0; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
bin->bin_offset[i] = 0;
}
cudaMemcpy(bin->d_bin_size, bin->bin_size, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
cudaMemcpy(bin->d_max, &(bin->max_nz), sizeof(int), cudaMemcpyHostToDevice);
BS = 1024;
GS = div_round_up(M, BS);
set_bin<<<GS, BS>>>(bin->d_row_nz, bin->d_bin_size,
bin->d_max,
M, B_MIN, B_PWMIN);
cudaMemcpy(&(bin->max_nz), bin->d_max, sizeof(int), cudaMemcpyDeviceToHost);
if (bin->max_nz > B_PWMIN) {
cudaMemcpy(bin->bin_size, bin->d_bin_size, sizeof(int) * BIN_NUM, cudaMemcpyDeviceToHost);
cudaMemcpy(bin->d_bin_size, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
for (i = 0; i < BIN_NUM - 1; i++) {
bin->bin_offset[i + 1] = bin->bin_offset[i] + bin->bin_size[i];
}
cudaMemcpy(bin->d_bin_offset, bin->bin_offset, sizeof(int) * BIN_NUM, cudaMemcpyHostToDevice);
set_row_perm<<<GS, BS>>>(bin->d_bin_size, bin->d_bin_offset, bin->d_row_nz, bin->d_row_perm, M, B_MIN, B_PWMIN);
}
else {
bin->bin_size[0] = M;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_size[i] = 0;
}
bin->bin_offset[0] = 0;
for (i = 1; i < BIN_NUM; i++) {
bin->bin_offset[i] = M;
}
BS = 1024;
GS = div_round_up(M, BS);
init_row_perm<<<GS, BS>>>(bin->d_row_perm, M);
}
}
__global__ void init_value(real *d_val, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_val[i] = 0;
}
__global__ void init_check(int *d_check, int nz)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= nz) {
return;
}
d_check[i] = -1;
}
__global__ void set_row_nz_bin_pwarp(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz,
int bin_offset, int M) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j, k;
int soffset;
int acol, bcol, key, hash, adr, nz, old;
__shared__ int check[IMB_PW_SH_SIZE];
soffset = local_rid * IMB_PWMIN;
for (j = tid; j < IMB_PWMIN; j += PWARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
rid = d_row_perm[rid + bin_offset];
nz = 0;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (IMB_PWMIN - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (IMB_PWMIN - 1);
adr = soffset + hash;
}
}
}
}
for (j = PWARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
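/*
 * Added note: the symbolic kernels count distinct output columns per row by
 * inserting every candidate column into a small open-addressing hash table
 * with linear probing; the slot is (col * HASH_SCAL) & (table_size - 1) and
 * the table size is a power of two. A successful atomicCAS on an empty slot
 * means a new distinct column, so each thread counts its own successful
 * insertions and the per-thread counts are combined with the __shfl_xor
 * butterfly reduction at the end.
 */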
template <int SH_ROW>
__global__ void set_row_nz_bin_each(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int bin_offset, int M)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j, k, l;
int bcol, key, hash, old;
int nz, adr;
int acol, ccol;
int soffset;
soffset = wid * SH_ROW;
__shared__ int check[IMB_SH_SIZE];
for (j = tid; j < SH_ROW; j += WARP) {
check[soffset + j] = -1;
}
if (rid >= M) {
return;
}
acol = 0;
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) acol = ld_gbl_int32(d_acol + j + tid);
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
ccol = __shfl(acol, l);
for (k = d_brpt[ccol] + tid; k < d_brpt[ccol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
d_row_nz[rid] = nz;
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
__shared__ int check[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (rid >= M) {
return;
}
__syncthreads();
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (1) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
__syncthreads();
if (threadIdx.x == 0) {
check[0] = 0;
}
__syncthreads();
if (tid == 0) {
atomicAdd(check, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = check[0];
}
}
template <int SH_ROW>
__global__ void set_row_nz_bin_each_tb_large(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
int *d_row_perm, int *d_row_nz,
int *d_fail_count, int *d_fail_perm,
int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int adr;
int acol;
__shared__ int check[SH_ROW];
__shared__ int snz[1];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
check[j] = -1;
}
if (threadIdx.x == 0) {
snz[0] = 0;
}
if (rid >= M) {
return;
}
__syncthreads();
rid = d_row_perm[rid + bin_offset];
int count = 0;
int border = SH_ROW >> 1;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = hash;
while (count < border && snz[0] < border) {
if (check[adr] == key) {
break;
}
else if (check[adr] == -1) {
old = atomicCAS(check + adr, -1, key);
if (old == -1) {
atomicAdd(snz, 1);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = hash;
count++;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
if (count >= border || snz[0] >= border) {
break;
}
}
__syncthreads();
if (count >= border || snz[0] >= border) {
if (threadIdx.x == 0) {
int d = atomicAdd(d_fail_count, 1);
d_fail_perm[d] = rid;
}
}
else {
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
}
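/*
 * Added note: this "tb_large" variant gives up on a row as soon as either a
 * thread's probe count or the number of distinct columns found reaches half of
 * the shared table (border = SH_ROW / 2). Failed rows are appended to
 * d_fail_perm through d_fail_count and are expected to be re-processed by
 * set_row_nz_bin_each_gl below, whose hash table lives in global memory
 * (d_check) and is sized by max_row_nz instead of shared-memory capacity.
 */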
__global__ void set_row_nz_bin_each_gl(const int *d_arpt, const int *d_acol,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const int *d_row_perm,
int *d_row_nz, int *d_check,
int max_row_nz, int bin_offset, int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j, k;
int bcol, key, hash, old;
int nz, adr;
int acol;
int offset = rid * max_row_nz;
__shared__ int snz[1];
if (threadIdx.x == 0) {
snz[0] = 0;
}
__syncthreads();
if (rid >= M) {
return;
}
nz = 0;
rid = d_row_perm[rid + bin_offset];
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = offset + hash;
while (1) {
if (d_check[adr] == key) {
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
nz++;
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = offset + hash;
}
}
}
}
for (j = WARP / 2; j >= 1; j /= 2) {
nz += __shfl_xor(nz, j);
}
if (tid == 0) {
atomicAdd(snz, nz);
}
__syncthreads();
if (threadIdx.x == 0) {
d_row_nz[rid] = snz[0];
}
}
void set_row_nnz(int *d_arpt, int *d_acol,
int *d_brpt, int *d_bcol,
int *d_crpt,
sfBIN *bin,
int M, int *nnz);
__global__ void calculate_value_col_bin_pwarp(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / PWARP;
int tid = i % PWARP;
int local_rid = rid % (blockDim.x / PWARP);
int j;
__shared__ int shared_check[B_PW_SH_SIZE];
__shared__ real shared_value[B_PW_SH_SIZE];
int soffset = local_rid * (B_PWMIN);
for (j = tid; j < (B_PWMIN); j += PWARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int k;
int acol, bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + tid; j < d_arpt[rid + 1]; j += PWARP) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol]; k < d_brpt[acol + 1]; k++) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & ((B_PWMIN) - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & ((B_PWMIN) - 1);
adr = soffset + hash;
}
}
}
}
for (j = tid; j < (B_PWMIN); j += PWARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
// Sorting for shared data
int count, target;
for (j = tid; j < nz; j += PWARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
template <int SH_ROW>
__global__ void calculate_value_col_bin_each(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int rid = i / WARP;
int tid = i % WARP;
int wid = rid % (blockDim.x / WARP);
int j;
__shared__ int shared_check[B_SH_SIZE];
__shared__ real shared_value[B_SH_SIZE];
int soffset = wid * SH_ROW;
for (j = tid; j < SH_ROW; j += WARP) {
shared_check[soffset + j] = -1;
shared_value[soffset + j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (tid == 0) {
d_nz[rid] = 0;
}
int lacol, acol;
int k, l;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real laval, aval, bval;
lacol = 0;
for (j = d_arpt[rid]; j < d_arpt[rid + 1]; j += WARP) {
if (j + tid < d_arpt[rid + 1]) {
lacol = ld_gbl_int32(d_acol + j + tid);
laval = ld_gbl_real(d_aval + j + tid);
}
for (l = 0; l < WARP && j + l < d_arpt[rid + 1]; l++) {
acol = __shfl(lacol, l);
aval = __shfl(laval, l);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
adr = soffset + hash;
while (1) {
if (shared_check[adr] == key) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
else if (shared_check[adr] == -1) {
old = atomicCAS(shared_check + adr, -1, key);
if (old == -1) {
atomic_fadd(shared_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
adr = soffset + hash;
}
}
}
}
}
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[soffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[soffset + index] = shared_check[soffset + j];
shared_value[soffset + index] = shared_value[soffset + j];
}
}
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = tid; j < nz; j += WARP) {
target = shared_check[soffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[soffset + k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[soffset + j];
d_cval[offset + count] = shared_value[soffset + j];
}
}
template <int SH_ROW>
__global__ void calculate_value_col_bin_each_tb(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int bin_offset,
int bin_size)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
__shared__ int shared_check[SH_ROW];
__shared__ real shared_value[SH_ROW];
for (j = threadIdx.x; j < SH_ROW; j += blockDim.x) {
shared_check[j] = -1;
shared_value[j] = 0;
}
if (rid >= bin_size) {
return;
}
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) & (SH_ROW - 1);
while (1) {
if (shared_check[hash] == key) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
else if (shared_check[hash] == -1) {
old = atomicCAS(shared_check + hash, -1, key);
if (old == -1) {
atomic_fadd(shared_value + hash, aval * bval);
break;
}
}
else {
hash = (hash + 1) & (SH_ROW - 1);
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < SH_ROW; j += WARP) {
if (shared_check[j] != -1) {
index = atomicAdd(d_nz + rid, 1);
shared_check[index] = shared_check[j];
shared_value[index] = shared_value[j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = shared_check[j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(shared_check[k] - target) >> 31;
}
d_ccol[offset + count] = shared_check[j];
d_cval[offset + count] = shared_value[j];
}
}
__global__ void calculate_value_col_bin_each_gl(const int *d_arpt,
const int *d_acol,
const real *d_aval,
const int* __restrict__ d_brpt,
const int* __restrict__ d_bcol,
const real* __restrict__ d_bval,
int *d_crpt,
int *d_ccol,
real *d_cval,
const int *d_row_perm,
int *d_nz,
int *d_check,
real *d_value,
int max_row_nz,
int bin_offset,
int M)
{
int rid = blockIdx.x;
int tid = threadIdx.x & (WARP - 1);
int wid = threadIdx.x / WARP;
int wnum = blockDim.x / WARP;
int j;
if (rid >= M) {
return;
}
int doffset = rid * max_row_nz;
rid = d_row_perm[rid + bin_offset];
if (threadIdx.x == 0) {
d_nz[rid] = 0;
}
__syncthreads();
int acol;
int k;
int bcol, hash, key, adr;
int offset = d_crpt[rid];
int old, index;
real aval, bval;
for (j = d_arpt[rid] + wid; j < d_arpt[rid + 1]; j += wnum) {
acol = ld_gbl_int32(d_acol + j);
aval = ld_gbl_real(d_aval + j);
for (k = d_brpt[acol] + tid; k < d_brpt[acol + 1]; k += WARP) {
bcol = d_bcol[k];
bval = d_bval[k];
key = bcol;
hash = (bcol * HASH_SCAL) % max_row_nz;
adr = doffset + hash;
while (1) {
if (d_check[adr] == key) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
else if (d_check[adr] == -1) {
old = atomicCAS(d_check + adr, -1, key);
if (old == -1) {
atomic_fadd(d_value + adr, aval * bval);
break;
}
}
else {
hash = (hash + 1) % max_row_nz;
adr = doffset + hash;
}
}
}
}
__syncthreads();
if (threadIdx.x < WARP) {
for (j = tid; j < max_row_nz; j += WARP) {
if (d_check[doffset + j] != -1) {
index = atomicAdd(d_nz + rid, 1);
d_check[doffset + index] = d_check[doffset + j];
d_value[doffset + index] = d_value[doffset + j];
}
}
}
__syncthreads();
int nz = d_nz[rid];
/* Sorting for shared data */
int count, target;
for (j = threadIdx.x; j < nz; j += blockDim.x) {
target = d_check[doffset + j];
count = 0;
for (k = 0; k < nz; k++) {
count += (unsigned int)(d_check[doffset + k] - target) >> 31;
}
d_ccol[offset + count] = d_check[doffset + j];
d_cval[offset + count] = d_value[doffset + j];
}
}
void calculate_value_col_bin(int *d_arpt, int *d_acol, real *d_aval,
int *d_brpt, int *d_bcol, real *d_bval,
int *d_crpt, int *d_ccol, real *d_cval,
sfBIN *bin,
int M);
void spgemm_kernel_hash(sfCSR *a, sfCSR *b, sfCSR *c)
{
int M;
sfBIN bin;
M = a->M;
c->M = M;
c->N = b->N;
/* Initialize bin */
init_bin(&bin, M);
/* Set max bin */
set_max_bin(a->d_rpt, a->d_col, b->d_rpt, &bin, M);
checkCudaErrors(cudaMalloc((void **)&(c->d_rpt), sizeof(int) * (M + 1)));
/* Count nz of C */
set_row_nnz(a->d_rpt, a->d_col,
b->d_rpt, b->d_col,
c->d_rpt,
&bin,
M,
&(c->nnz));
/* Set bin */
set_min_bin(&bin, M);
checkCudaErrors(cudaMalloc((void **)&(c->d_col), sizeof(int) * c->nnz));
checkCudaErrors(cudaMalloc((void **)&(c->d_val), sizeof(real) * c->nnz));
/* Calculating value of C */
calculate_value_col_bin(a->d_rpt, a->d_col, a->d_val,
b->d_rpt, b->d_col, b->d_val,
c->d_rpt, c->d_col, c->d_val,
&bin,
M);
release_bin(bin);
}
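/*
 * Usage sketch (illustrative only; host-side setup of the sfCSR inputs is not
 * shown and the field names are taken from how spgemm_kernel_hash uses them):
 *
 *   sfCSR a, b, c;
 *   // ... upload A and B so that a.d_rpt / a.d_col / a.d_val, b.d_rpt / ... are set
 *   spgemm_kernel_hash(&a, &b, &c);   // C = A * B, CSR result in c.d_*
 *   // c.nnz, c.d_rpt (M + 1 entries), c.d_col and c.d_val now describe C
 *   cudaMemcpy(h_rpt, c.d_rpt, sizeof(int) * (c.M + 1), cudaMemcpyDeviceToHost);
 */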
|
4e3ba3f33b9a70b134429f5bf4ee06ec384ab6bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cassert>
#include <hip/hip_fp16.h>
#include <hipcub/hipcub.hpp>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/fpgeneric.h"
#include "contrib_ops/cuda/bert/packed_attention_impl.h"
#include "contrib_ops/cuda/bert/packed_multihead_attention_impl.h"
#include "contrib_ops/cuda/bert/attention_softmax.h"
#include "contrib_ops/cuda/bert/transformer_common.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/cross_attention/fmha_cross_attention.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include "contrib_ops/cuda/transformers/dump_cuda_tensor.h"
#include "contrib_ops/cuda/bert/cutlass_fmha/memory_efficient_attention.h"
#include "contrib_ops/cuda/bert/rotary_embedding_util.h"
using namespace onnxruntime::cuda;
using namespace onnxruntime::contrib::attention_softmax_cuda;
#define CHECK_CUDA(expr) CUDA_RETURN_IF_ERROR(expr)
namespace onnxruntime {
namespace contrib {
namespace cuda {
static constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
#define ADD_BIAS(value, bias_value) (biases == nullptr) ? value : (value + bias_value)
#define GET_BIAS(bias_value) (biases == nullptr) ? T{} : bias_value
// Grid: (S, B)
// Block: 256
// For unfused PackedMultiHeadAttention
// Inputs (query, key, value): TxNxH
// Output: 3xBxNxSxH
// Where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TNH_3BNSH(
const T* query,
const T* key,
const T* value,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
const int32_t* token_offset,
int32_t token_count) {
int s = blockIdx.x;
int b = blockIdx.y;
int S = gridDim.x;
const int packing_token_idx = b * S + s;
const int padding_token_idx = token_offset[packing_token_idx];
b = padding_token_idx / S;
s = padding_token_idx % S;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
query += packing_token_idx * D_QK;
key += packing_token_idx * D_QK;
value += packing_token_idx * D_V;
int k_offset = D_QK;
int v_offset = D_QK + D_QK;
q += (b * N * S + s) * H_QK;
k += (b * N * S + s) * H_QK;
v += (b * N * S + s) * H_V;
if (packing_token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = ADD_BIAS(query[i], biases[i]);
k[n * S * H_QK + h] = ADD_BIAS(key[i], biases[i + k_offset]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = ADD_BIAS(value[i], biases[i + v_offset]);
}
} else {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = GET_BIAS(biases[i]);
k[n * S * H_QK + h] = GET_BIAS(biases[i + k_offset]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = GET_BIAS(biases[i + v_offset]);
}
}
}
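// Added note: token_offset maps each packed token index (enumerated over B * S
// by the grid) back to its padded (batch, position) slot, so real tokens are
// scattered into the padded BxNxSxH buffers with bias added, while padding-only
// slots receive just the bias (or zero when bias is null); the softmax path that
// consumes the cumulative sequence lengths is expected to ignore those padded
// positions.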
// Grid: (T)
// Block: 256
// For memory efficient fMHA from CUTLASS.
// query, key, value: TxNxH
// q, k, v: TxNxH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TNH_3TNH(
const T* query,
const T* key,
const T* value,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
query += token_idx * D_QK;
key += token_idx * D_QK;
value += token_idx * D_V;
q += token_idx * D_QK;
k += token_idx * D_QK;
v += token_idx * D_V;
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
q[i] = ADD_BIAS(query[i], biases[i]);
k[i] = ADD_BIAS(key[i], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
v[i] = ADD_BIAS(value[i], biases[D_QK + D_QK + i]);
}
}
}
// Grid: (T)
// Block: 256
// For Trt fused attention.
// Inputs (query, key, value): TxNxH
// Output: TxNx3xH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TNH_TN3H(
const T* query,
const T* key,
const T* value,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* output,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
query += token_idx * D_QK;
key += token_idx * D_QK;
value += token_idx * D_V;
output += token_idx * (D_QK + D_QK + D_V);
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int n = i / H_QK;
int h = i % H_QK;
int index = n * (H_QK + H_QK + H_V) + h;
output[index] = ADD_BIAS(query[i], biases[i]);
index = n * (H_QK + H_QK + H_V) + H_QK + h;
output[index] = ADD_BIAS(key[i], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int n = i / H_V;
int h = i % H_V;
int index = n * (H_QK + H_QK + H_V) + H_QK + H_QK + h;
output[index] = ADD_BIAS(value[i], biases[D_QK + D_QK + i]);
}
}
}
// Grid: (S, B)
// Block: 256
// For unfused PackedMultiHeadAttention
// Input: TxNx3xH
// Output: 3xBxNxSxH
// Where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TN3H_3BNSH(
const T* input, // packed qkv
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
const int32_t* token_offset,
int32_t token_count) {
int s = blockIdx.x;
int b = blockIdx.y;
int S = gridDim.x;
const int packing_token_idx = b * S + s;
const int padding_token_idx = token_offset[packing_token_idx];
b = padding_token_idx / S;
s = padding_token_idx % S;
int Hx3 = (H_QK + H_QK + H_V);
input += packing_token_idx * N * Hx3;
int k_offset = H_QK;
int v_offset = H_QK + H_QK;
q += (b * N * S + s) * H_QK;
k += (b * N * S + s) * H_QK;
v += (b * N * S + s) * H_V;
if (packing_token_idx < token_count) {
for (int i = threadIdx.x; i < N * Hx3; i += blockDim.x) {
int n = i / Hx3;
int h = i % Hx3;
if (h < k_offset) {
q[n * S * H_QK + h] = ADD_BIAS(input[i], biases[n * H_QK + h]);
} else if (h < v_offset) {
k[n * S * H_QK + (h - k_offset)] = ADD_BIAS(input[i], biases[(N + n) * H_QK + (h - H_QK)]);
} else {
v[n * S * H_V + (h - v_offset)] = ADD_BIAS(input[i], biases[(N + N) * H_QK + n * H_V + (h - H_QK - H_QK)]);
}
}
} else {
for (int i = threadIdx.x; i < N * Hx3; i += blockDim.x) {
int n = i / Hx3;
int h = i % Hx3;
if (h < k_offset) {
q[n * S * H_QK + h] = GET_BIAS(biases[n * H_QK + h]);
} else if (h < v_offset) {
k[n * S * H_QK + (h - k_offset)] = GET_BIAS(biases[(N + n) * H_QK + (h - H_QK)]);
} else {
v[n * S * H_V + (h - v_offset)] = GET_BIAS(biases[(N + N) * H_QK + n * H_V + (h - H_QK - H_QK)]);
}
}
}
}
// TODO: merge TransposeQKV_TN3H_3TNH with AddBiasTransposeQKVPackedCutlass
// Grid: (T)
// Block: 256
// For memory efficient fMHA from CUTLASS.
// Input: TxNx3xH
// Output: 3xTxNxH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TN3H_3TNH(
const T* input,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
input += token_idx * (D_QK + D_QK + D_V);
q += token_idx * D_QK;
k += token_idx * D_QK;
v += token_idx * D_V;
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int n = i / H_QK;
int h = i % H_QK;
q[i] = ADD_BIAS(input[n * (H_QK + H_QK + H_V) + h], biases[i]);
k[i] = ADD_BIAS(input[n * (H_QK + H_QK + H_V) + H_QK + h], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < N * H_V; i += blockDim.x) {
int n = i / H_V;
int h = i % H_V;
v[i] = ADD_BIAS(input[n * (H_QK + H_QK + H_V) + H_QK + H_QK + h], biases[D_QK + D_QK + i]);
}
}
}
// Grid: (T)
// Block: 256
// For TRT fused attention.
// Input: TxNx3xH
// Output: TxNx3xH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBias_TN3H_TN3H(
const T* input,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* output,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
input += token_idx * (D_QK + D_QK + D_V);
output += token_idx * (D_QK + D_QK + D_V);
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int n = i / H_QK;
int h = i % H_QK;
int index = n * (H_QK + H_QK + H_V) + h;
output[index] = ADD_BIAS(input[index], biases[i]);
index = n * (H_QK + H_QK + H_V) + H_QK + h;
output[index] = ADD_BIAS(input[index], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int n = i / H_V;
int h = i % H_V;
int index = n * (H_QK + H_QK + H_V) + H_QK + H_QK + h;
output[index] = ADD_BIAS(input[index], biases[D_QK + D_QK + i]);
}
}
}
template <typename T>
void InvokeTranspose(
const T* query, const T* key, const T* value, const T* bias, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat source_format, AttentionQkvFormat target_format,
const int32_t* token_offset, int32_t token_count,
hipStream_t stream) {
if (key != nullptr && value != nullptr) {
assert(source_format == AttentionQkvFormat::Q_K_V_TNH);
if (target_format == AttentionQkvFormat::Q_K_V_BNSH) {
const dim3 grid(sequence_length, batch_size);
hipLaunchKernelGGL(( TransposeQKV_TNH_3BNSH<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
query,
key,
value,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + batch_size * sequence_length * num_heads * qk_head_size,
output + 2 * batch_size * sequence_length * num_heads * qk_head_size,
token_offset,
token_count);
} else if (target_format == AttentionQkvFormat::Q_K_V_TNH) {
const dim3 grid(token_count);
hipLaunchKernelGGL(( TransposeQKV_TNH_3TNH<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
query,
key,
value,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + token_count * num_heads * qk_head_size,
output + 2 * token_count * num_heads * qk_head_size,
token_count);
} else {
assert(target_format == AttentionQkvFormat::QKV_TN3H);
const dim3 grid(token_count);
hipLaunchKernelGGL(( TransposeQKV_TNH_TN3H<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
query,
key,
value,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
token_count);
}
} else {
assert(key == nullptr && value == nullptr);
assert(source_format == AttentionQkvFormat::QKV_TN3H);
if (target_format == AttentionQkvFormat::Q_K_V_BNSH) {
const dim3 grid(sequence_length, batch_size);
hipLaunchKernelGGL(( TransposeQKV_TN3H_3BNSH<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
query,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + batch_size * sequence_length * num_heads * qk_head_size,
output + 2 * batch_size * sequence_length * num_heads * qk_head_size,
token_offset,
token_count);
} else if (target_format == AttentionQkvFormat::Q_K_V_TNH) {
const dim3 grid(token_count);
hipLaunchKernelGGL(( TransposeQKV_TN3H_3TNH<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
query,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + token_count * num_heads * qk_head_size,
output + 2 * token_count * num_heads * qk_head_size,
token_count);
} else {
assert(target_format == AttentionQkvFormat::QKV_TN3H);
assert(bias != nullptr);
const dim3 grid(token_count);
hipLaunchKernelGGL(( AddBias_TN3H_TN3H<T>), dim3(grid), dim3(kMAX_THREADS_PER_BLOCK), 0, stream,
query,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
token_count);
}
}
}
template <typename T>
struct T4;
template <>
struct T4<float> {
using Type = float4;
};
template <>
struct T4<half> {
using Type = Half4;
};
template <typename T>
struct T2;
template <>
struct T2<float> {
using Type = float2;
};
template <>
struct T2<half> {
using Type = half2;
};
template <typename T>
void LaunchTranspose(
const T* query, const T* key, const T* value, const T* bias, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat source_format, AttentionQkvFormat target_format,
const int32_t* token_offset, int32_t token_count,
hipStream_t stream) {
if (0 == (qk_head_size & 3) && 0 == (v_head_size & 3)) {
using T4Type = typename T4<T>::Type;
const int H = qk_head_size / 4;
const int H_v = v_head_size / 4;
const T4Type* query2 = reinterpret_cast<const T4Type*>(query);
const T4Type* key2 = reinterpret_cast<const T4Type*>(key);
const T4Type* value2 = reinterpret_cast<const T4Type*>(value);
const T4Type* bias2 = reinterpret_cast<const T4Type*>(bias);
T4Type* output2 = reinterpret_cast<T4Type*>(output);
InvokeTranspose<T4Type>(
query2, key2, value2, bias2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
source_format, target_format,
token_offset, token_count, stream);
} else if (0 == (qk_head_size & 1) && 0 == (v_head_size & 1)) {
using T2Type = typename T2<T>::Type;
const int H = qk_head_size / 2;
const int H_v = v_head_size / 2;
const T2Type* query2 = reinterpret_cast<const T2Type*>(query);
const T2Type* key2 = reinterpret_cast<const T2Type*>(key);
const T2Type* value2 = reinterpret_cast<const T2Type*>(value);
const T2Type* bias2 = reinterpret_cast<const T2Type*>(bias);
T2Type* output2 = reinterpret_cast<T2Type*>(output);
InvokeTranspose<T2Type>(
query2, key2, value2, bias2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
source_format, target_format,
token_offset, token_count, stream);
} else {
InvokeTranspose<T>(
query, key, value, bias, output,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
source_format, target_format,
token_offset, token_count, stream);
}
}
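// Added note: LaunchTranspose only reinterprets the element type so each thread
// moves wider units: float4 / Half4 when both head sizes are divisible by 4,
// float2 / half2 when divisible by 2, and plain scalars otherwise. The head
// sizes passed on to InvokeTranspose are divided by the same factor, so the
// kernels themselves are unchanged.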
template <typename T>
Status FusedAttentionTrt(
const hipDeviceProp_t& device_prop,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
void* fused_runner = data.fused_runner;
ORT_RETURN_IF_NOT(nullptr != fused_runner, "fused_runner cannot be NULL");
// If the input is already packed QKV (TxNx3xH) and no bias needs to be added, it can be passed to the fused runner directly. Otherwise, transpose (and add bias) into the packed QKV_TN3H layout in the workspace first.
const T* qkv = data.query;
if (!data.no_qkv_workspace) {
LaunchTranspose(data.query, data.key, data.value, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
data.source_qkv_format, AttentionQkvFormat::QKV_TN3H,
data.token_offset, parameters.token_count, stream);
qkv = data.workspace;
}
FusedMHARunnerFP16v2* fused_fp16_runner = reinterpret_cast<FusedMHARunnerFP16v2*>(fused_runner);
const int S = fused_fp16_runner->getSFromMaxSeqLen(sequence_length);
fused_fp16_runner->setup(S, batch_size);
fused_fp16_runner->run(qkv, data.cumulative_sequence_length, data.output, stream);
return Status::OK();
}
#if USE_FLASH_ATTENTION
template <typename T>
Status FusedAttentionCutlass(
const hipDeviceProp_t& device_prop,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
// Q, K and V pointers
const int model_dimension_qk = num_heads * qk_head_size;
const int model_dimension_v = num_heads * v_head_size;
const size_t elements_qk = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_qk);
const size_t elements_v = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_v);
// If Q, K and V are already provided as separate TxNxH buffers and no bias needs to be added, they can be fed to the CUTLASS FMHA directly. Otherwise, transpose (and add bias) into three TxNxH buffers (Q_K_V_TNH) in the workspace.
if (!data.no_qkv_workspace) {
LaunchTranspose(data.query, data.key, data.value, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
data.source_qkv_format, AttentionQkvFormat::Q_K_V_TNH,
data.token_offset, parameters.token_count, stream);
}
MemoryEfficientAttentionParams p;
p.sm = device_prop.major * 10 + device_prop.minor;
p.is_half = sizeof(T) == 2;
p.batch_size = parameters.batch_size;
p.num_heads = parameters.num_heads;
p.sequence_length = parameters.sequence_length;
p.kv_sequence_length = parameters.sequence_length;
p.qk_head_size = parameters.head_size;
p.v_head_size = parameters.v_head_size;
p.causal = false;
p.scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
p.seqlen_k_ptr = nullptr;
p.seqstart_q_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.seqstart_k_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.query = data.no_qkv_workspace ? data.query : data.workspace;
p.key = data.no_qkv_workspace ? data.key : (data.workspace + elements_qk);
p.value = data.no_qkv_workspace ? data.value : (data.workspace + elements_qk + elements_qk);
p.attn_bias = data.relative_position_bias;
p.is_attn_bias_batched = !parameters.broadcast_res_pos_bias;
p.output = data.output;
p.workspace = MemoryEfficientAttentionParams::need_workspace(v_head_size, sizeof(T) == sizeof(float))
? (data.workspace + (data.no_qkv_workspace ? 0 : (elements_qk + elements_qk + elements_v)))
: nullptr;
p.stream = stream;
run_memory_efficient_attention(p);
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedMHA cutlass q(BSNH)", reinterpret_cast<const T*>(p.query), parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedMHA cutlass k(BSNH)", reinterpret_cast<const T*>(p.key), parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedMHA cutlass v(BSNH)", reinterpret_cast<const T*>(p.value), parameters.token_count, num_heads * v_head_size);
DUMP_TENSOR_D("PackedMHA cutlass cumulative_sequence_length", data.cumulative_sequence_length, 1, batch_size + 1);
DUMP_TENSOR("PackedMHA cutlass output", data.output, parameters.token_count, num_heads, v_head_size);
return Status::OK();
}
#endif
template <typename T>
Status UnfusedAttention(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
constexpr size_t element_size = sizeof(T);
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
const int batches = batch_size * num_heads;
const int size_per_batch_q = sequence_length * qk_head_size;
const int size_per_batch_k = sequence_length * qk_head_size;
const int size_per_batch_v = sequence_length * v_head_size;
const size_t elements_q = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_q);
const size_t elements_k = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_k);
const size_t elements_v = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_v);
// Q, K and V pointers when fused attention is not used
LaunchTranspose(data.query, data.key, data.value, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
data.source_qkv_format, AttentionQkvFormat::Q_K_V_BNSH,
data.token_offset, parameters.token_count, stream);
T* qkv = data.workspace;
T* q = qkv;
T* k = q + elements_q;
T* v = k + elements_k;
T* scaled_qk = qkv + elements_q + elements_k + elements_v;
// Compute Q*K' (as K'*Q), scaled by 1/sqrt(H), and store in scaled_qk: BxNxSxS
// Q: BxNxSxH, K: BxNxSxH, Q*K': BxNxSxS
float one = 1.0f;
float zero = 0.f;
float scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
hipblasSetStream(cublas, stream);
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, HIPBLAS_OP_T, HIPBLAS_OP_N,
sequence_length, sequence_length, qk_head_size,
&scale,
k, qk_head_size, sequence_length * qk_head_size,
q, qk_head_size, sequence_length * qk_head_size,
&zero,
scaled_qk, sequence_length, sequence_length * sequence_length,
batches, device_prop));
// Q, K and V are ready now
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedMHA unfused q (BNSH)", q, batch_size, num_heads, sequence_length, qk_head_size);
DUMP_TENSOR_D("PackedMHA unfused k (BNSH)", k, batch_size, num_heads, sequence_length, qk_head_size);
DUMP_TENSOR_D("PackedMHA unfused v (BNSH)", v, batch_size, num_heads, sequence_length, v_head_size);
DUMP_TENSOR_D("PackedMHA unfused QK", scaled_qk, batch_size * num_heads, sequence_length, sequence_length);
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads,
sequence_length);
T* attention_score = scaled_qk + (bytes / element_size);
// Apply softmax and store result R to attention_score: BxNxSxS
ORT_RETURN_IF_ERROR(ComputeSoftmaxWithCumSeqLength<T>(
scaled_qk,
data.relative_position_bias,
parameters.broadcast_res_pos_bias,
data.cumulative_sequence_length,
batch_size,
sequence_length,
num_heads,
attention_score, stream));
DUMP_TENSOR_D("PackedMHA unfused Softmax", attention_score, batch_size * num_heads, sequence_length, sequence_length);
// compute R*V (as V*R), and store in temp_output (space used by Q): BxNxSxH_v
T* temp_output = qkv;
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, HIPBLAS_OP_N, HIPBLAS_OP_N,
v_head_size, sequence_length, sequence_length,
&one, v, v_head_size, sequence_length * v_head_size,
attention_score, sequence_length, sequence_length * sequence_length,
&zero, temp_output, v_head_size, sequence_length * v_head_size, batches, device_prop));
// Temp_output is BxNxSxH_v, transpose and remove padding to output TxNxH_v
Status result = LaunchTransposeRemovePadding(
data.output, temp_output,
data.token_offset, parameters.token_count,
batch_size, sequence_length, num_heads, v_head_size,
stream);
DUMP_TENSOR("PackedMHA unfused output", data.output, parameters.token_count, num_heads, v_head_size);
return result;
}
template <typename T>
Status QkvToContext(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
void* fused_runner = data.fused_runner;
if (nullptr != fused_runner) {
return FusedAttentionTrt<T>(device_prop, stream, parameters, data);
}
#if USE_FLASH_ATTENTION
if (data.use_memory_efficient_attention) {
return FusedAttentionCutlass(device_prop, stream, parameters, data);
}
#endif
return UnfusedAttention<T>(device_prop, cublas, stream, parameters, data);
}
template Status QkvToContext<float>(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<float>& data);
template Status QkvToContext<half>(
const hipDeviceProp_t& device_prop,
hipblasHandle_t& cublas,
hipStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<half>& data);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
| 4e3ba3f33b9a70b134429f5bf4ee06ec384ab6bb.cu | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cassert>
#include <cuda_fp16.h>
#include <cub/cub.cuh>
#include "core/providers/cuda/cu_inc/common.cuh"
#include "core/providers/cuda/cuda_common.h"
#include "core/providers/cuda/shared_inc/fpgeneric.h"
#include "contrib_ops/cuda/bert/packed_attention_impl.h"
#include "contrib_ops/cuda/bert/packed_multihead_attention_impl.h"
#include "contrib_ops/cuda/bert/attention_softmax.h"
#include "contrib_ops/cuda/bert/transformer_common.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/mha_runner.h"
#include "contrib_ops/cuda/bert/tensorrt_fused_multihead_attention/cross_attention/fmha_cross_attention.h"
#include "contrib_ops/cuda/bert/bert_padding.h"
#include "contrib_ops/cuda/transformers/dump_cuda_tensor.h"
#include "contrib_ops/cuda/bert/cutlass_fmha/memory_efficient_attention.h"
#include "contrib_ops/cuda/bert/rotary_embedding_util.h"
using namespace onnxruntime::cuda;
using namespace onnxruntime::contrib::attention_softmax_cuda;
#define CHECK_CUDA(expr) CUDA_RETURN_IF_ERROR(expr)
namespace onnxruntime {
namespace contrib {
namespace cuda {
static constexpr int32_t kMAX_THREADS_PER_BLOCK = 256;
#define ADD_BIAS(value, bias_value) (biases == nullptr) ? value : (value + bias_value)
#define GET_BIAS(bias_value) (biases == nullptr) ? T{} : bias_value
// Grid: (S, B)
// Block: 256
// For unfused PackedMultiHeadAttention
// Inputs (query, key, value): TxNxH
// Output: 3xBxNxSxH
// Where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TNH_3BNSH(
const T* query,
const T* key,
const T* value,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
const int32_t* token_offset,
int32_t token_count) {
int s = blockIdx.x;
int b = blockIdx.y;
int S = gridDim.x;
const int packing_token_idx = b * S + s;
const int padding_token_idx = token_offset[packing_token_idx];
b = padding_token_idx / S;
s = padding_token_idx % S;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
query += packing_token_idx * D_QK;
key += packing_token_idx * D_QK;
value += packing_token_idx * D_V;
int k_offset = D_QK;
int v_offset = D_QK + D_QK;
q += (b * N * S + s) * H_QK;
k += (b * N * S + s) * H_QK;
v += (b * N * S + s) * H_V;
if (packing_token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = ADD_BIAS(query[i], biases[i]);
k[n * S * H_QK + h] = ADD_BIAS(key[i], biases[i + k_offset]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = ADD_BIAS(value[i], biases[i + v_offset]);
}
} else {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int h = i % H_QK;
int n = i / H_QK;
q[n * S * H_QK + h] = GET_BIAS(biases[i]);
k[n * S * H_QK + h] = GET_BIAS(biases[i + k_offset]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int h = i % H_V;
int n = i / H_V;
v[n * S * H_V + h] = GET_BIAS(biases[i + v_offset]);
}
}
}
// Grid: (T)
// Block: 256
// For memory efficient fMHA from CUTLASS.
// query, key, value: TxNxH
// q, k, v: TxNxH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TNH_3TNH(
const T* query,
const T* key,
const T* value,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
query += token_idx * D_QK;
key += token_idx * D_QK;
value += token_idx * D_V;
q += token_idx * D_QK;
k += token_idx * D_QK;
v += token_idx * D_V;
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
q[i] = ADD_BIAS(query[i], biases[i]);
k[i] = ADD_BIAS(key[i], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
v[i] = ADD_BIAS(value[i], biases[D_QK + D_QK + i]);
}
}
}
// Grid: (T)
// Block: 256
// For Trt fused attention.
// Inputs (query, key, value): TxNxH
// Output: TxNx3xH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TNH_TN3H(
const T* query,
const T* key,
const T* value,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* output,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
query += token_idx * D_QK;
key += token_idx * D_QK;
value += token_idx * D_V;
output += token_idx * (D_QK + D_QK + D_V);
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int n = i / H_QK;
int h = i % H_QK;
int index = n * (H_QK + H_QK + H_V) + h;
output[index] = ADD_BIAS(query[i], biases[i]);
index = n * (H_QK + H_QK + H_V) + H_QK + h;
output[index] = ADD_BIAS(key[i], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int n = i / H_V;
int h = i % H_V;
int index = n * (H_QK + H_QK + H_V) + H_QK + H_QK + h;
output[index] = ADD_BIAS(value[i], biases[D_QK + D_QK + i]);
}
}
}
// Grid: (S, B)
// Block: 256
// For unfused PackedMultiHeadAttention
// Input: TxNx3xH
// Output: 3xBxNxSxH
// Where:
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TN3H_3BNSH(
const T* input, // packed qkv
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
const int32_t* token_offset,
int32_t token_count) {
int s = blockIdx.x;
int b = blockIdx.y;
int S = gridDim.x;
const int packing_token_idx = b * S + s;
const int padding_token_idx = token_offset[packing_token_idx];
b = padding_token_idx / S;
s = padding_token_idx % S;
int Hx3 = (H_QK + H_QK + H_V);
input += packing_token_idx * N * Hx3;
int k_offset = H_QK;
int v_offset = H_QK + H_QK;
q += (b * N * S + s) * H_QK;
k += (b * N * S + s) * H_QK;
v += (b * N * S + s) * H_V;
if (packing_token_idx < token_count) {
for (int i = threadIdx.x; i < N * Hx3; i += blockDim.x) {
int n = i / Hx3;
int h = i % Hx3;
if (h < k_offset) {
q[n * S * H_QK + h] = ADD_BIAS(input[i], biases[n * H_QK + h]);
} else if (h < v_offset) {
k[n * S * H_QK + (h - k_offset)] = ADD_BIAS(input[i], biases[(N + n) * H_QK + (h - H_QK)]);
} else {
v[n * S * H_V + (h - v_offset)] = ADD_BIAS(input[i], biases[(N + N) * H_QK + n * H_V + (h - H_QK - H_QK)]);
}
}
} else {
for (int i = threadIdx.x; i < N * Hx3; i += blockDim.x) {
int n = i / Hx3;
int h = i % Hx3;
if (h < k_offset) {
q[n * S * H_QK + h] = GET_BIAS(biases[n * H_QK + h]);
} else if (h < v_offset) {
k[n * S * H_QK + (h - k_offset)] = GET_BIAS(biases[(N + n) * H_QK + (h - H_QK)]);
} else {
v[n * S * H_V + (h - v_offset)] = GET_BIAS(biases[(N + N) * H_QK + n * H_V + (h - H_QK - H_QK)]);
}
}
}
}
// TODO: merge TransposeQKV_TN3H_3TNH with AddBiasTransposeQKVPackedCutlass
// Grid: (T)
// Block: 256
// For memory efficient fMHA from CUTLASS.
// Input: TxNx3xH
// Output: 3xTxNxH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void TransposeQKV_TN3H_3TNH(
const T* input,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* q,
T* k,
T* v,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
input += token_idx * (D_QK + D_QK + D_V);
q += token_idx * D_QK;
k += token_idx * D_QK;
v += token_idx * D_V;
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int n = i / H_QK;
int h = i % H_QK;
q[i] = ADD_BIAS(input[n * (H_QK + H_QK + H_V) + h], biases[i]);
k[i] = ADD_BIAS(input[n * (H_QK + H_QK + H_V) + H_QK + h], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < N * H_V; i += blockDim.x) {
int n = i / H_V;
int h = i % H_V;
v[i] = ADD_BIAS(input[n * (H_QK + H_QK + H_V) + H_QK + H_QK + h], biases[D_QK + D_QK + i]);
}
}
}
// Grid: (T)
// Block: 256
// For TRT fused attention.
// Input: TxNx3xH
// Output: TxNx3xH
// T is token_count
// B is batch_size
// S is sequence_length
// N is num_heads
// H is head_size
template <typename T>
__global__ void AddBias_TN3H_TN3H(
const T* input,
const T* biases,
int32_t N,
int32_t H_QK,
int32_t H_V,
T* output,
int32_t token_count) {
int token_idx = blockIdx.x;
const int D_QK = N * H_QK;
const int D_V = N * H_V;
input += token_idx * (D_QK + D_QK + D_V);
output += token_idx * (D_QK + D_QK + D_V);
if (token_idx < token_count) {
for (int i = threadIdx.x; i < D_QK; i += blockDim.x) {
int n = i / H_QK;
int h = i % H_QK;
int index = n * (H_QK + H_QK + H_V) + h;
output[index] = ADD_BIAS(input[index], biases[i]);
index = n * (H_QK + H_QK + H_V) + H_QK + h;
output[index] = ADD_BIAS(input[index], biases[D_QK + i]);
}
for (int i = threadIdx.x; i < D_V; i += blockDim.x) {
int n = i / H_V;
int h = i % H_V;
int index = n * (H_QK + H_QK + H_V) + H_QK + H_QK + h;
output[index] = ADD_BIAS(input[index], biases[D_QK + D_QK + i]);
}
}
}
template <typename T>
void InvokeTranspose(
const T* query, const T* key, const T* value, const T* bias, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat source_format, AttentionQkvFormat target_format,
const int32_t* token_offset, int32_t token_count,
cudaStream_t stream) {
if (key != nullptr && value != nullptr) {
assert(source_format == AttentionQkvFormat::Q_K_V_TNH);
if (target_format == AttentionQkvFormat::Q_K_V_BNSH) {
const dim3 grid(sequence_length, batch_size);
TransposeQKV_TNH_3BNSH<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
query,
key,
value,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + batch_size * sequence_length * num_heads * qk_head_size,
output + 2 * batch_size * sequence_length * num_heads * qk_head_size,
token_offset,
token_count);
} else if (target_format == AttentionQkvFormat::Q_K_V_TNH) {
const dim3 grid(token_count);
TransposeQKV_TNH_3TNH<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
query,
key,
value,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + token_count * num_heads * qk_head_size,
output + 2 * token_count * num_heads * qk_head_size,
token_count);
} else {
assert(target_format == AttentionQkvFormat::QKV_TN3H);
const dim3 grid(token_count);
TransposeQKV_TNH_TN3H<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
query,
key,
value,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
token_count);
}
} else {
assert(key == nullptr && value == nullptr);
assert(source_format == AttentionQkvFormat::QKV_TN3H);
if (target_format == AttentionQkvFormat::Q_K_V_BNSH) {
const dim3 grid(sequence_length, batch_size);
TransposeQKV_TN3H_3BNSH<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
query,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + batch_size * sequence_length * num_heads * qk_head_size,
output + 2 * batch_size * sequence_length * num_heads * qk_head_size,
token_offset,
token_count);
} else if (target_format == AttentionQkvFormat::Q_K_V_TNH) {
const dim3 grid(token_count);
TransposeQKV_TN3H_3TNH<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
query,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
output + token_count * num_heads * qk_head_size,
output + 2 * token_count * num_heads * qk_head_size,
token_count);
} else {
assert(target_format == AttentionQkvFormat::QKV_TN3H);
assert(bias != nullptr);
const dim3 grid(token_count);
AddBias_TN3H_TN3H<T><<<grid, kMAX_THREADS_PER_BLOCK, 0, stream>>>(
query,
bias,
num_heads,
qk_head_size,
v_head_size,
output,
token_count);
}
}
}
template <typename T>
struct T4;
template <>
struct T4<float> {
using Type = float4;
};
template <>
struct T4<half> {
using Type = Half4;
};
template <typename T>
struct T2;
template <>
struct T2<float> {
using Type = float2;
};
template <>
struct T2<half> {
using Type = half2;
};
template <typename T>
void LaunchTranspose(
const T* query, const T* key, const T* value, const T* bias, T* output,
const int batch_size, const int sequence_length,
const int num_heads, const int qk_head_size, const int v_head_size,
AttentionQkvFormat source_format, AttentionQkvFormat target_format,
const int32_t* token_offset, int32_t token_count,
cudaStream_t stream) {
if (0 == (qk_head_size & 3) && 0 == (v_head_size & 3)) {
using T4Type = typename T4<T>::Type;
const int H = qk_head_size / 4;
const int H_v = v_head_size / 4;
const T4Type* query2 = reinterpret_cast<const T4Type*>(query);
const T4Type* key2 = reinterpret_cast<const T4Type*>(key);
const T4Type* value2 = reinterpret_cast<const T4Type*>(value);
const T4Type* bias2 = reinterpret_cast<const T4Type*>(bias);
T4Type* output2 = reinterpret_cast<T4Type*>(output);
InvokeTranspose<T4Type>(
query2, key2, value2, bias2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
source_format, target_format,
token_offset, token_count, stream);
} else if (0 == (qk_head_size & 1) && 0 == (v_head_size & 1)) {
using T2Type = typename T2<T>::Type;
const int H = qk_head_size / 2;
const int H_v = v_head_size / 2;
const T2Type* query2 = reinterpret_cast<const T2Type*>(query);
const T2Type* key2 = reinterpret_cast<const T2Type*>(key);
const T2Type* value2 = reinterpret_cast<const T2Type*>(value);
const T2Type* bias2 = reinterpret_cast<const T2Type*>(bias);
T2Type* output2 = reinterpret_cast<T2Type*>(output);
InvokeTranspose<T2Type>(
query2, key2, value2, bias2, output2,
batch_size, sequence_length,
num_heads, H, H_v,
source_format, target_format,
token_offset, token_count, stream);
} else {
InvokeTranspose<T>(
query, key, value, bias, output,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
source_format, target_format,
token_offset, token_count, stream);
}
}
template <typename T>
Status FusedAttentionTrt(
const cudaDeviceProp& device_prop,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
void* fused_runner = data.fused_runner;
ORT_RETURN_IF_NOT(nullptr != fused_runner, "fused_runner cannot be NULL");
// When packed QKV is used, we can pass it directly to the fused runner. Otherwise, we need to transpose the inputs into packed TN3H format.
const T* qkv = data.query;
if (!data.no_qkv_workspace) {
LaunchTranspose(data.query, data.key, data.value, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
data.source_qkv_format, AttentionQkvFormat::QKV_TN3H,
data.token_offset, parameters.token_count, stream);
qkv = data.workspace;
}
FusedMHARunnerFP16v2* fused_fp16_runner = reinterpret_cast<FusedMHARunnerFP16v2*>(fused_runner);
const int S = fused_fp16_runner->getSFromMaxSeqLen(sequence_length);
fused_fp16_runner->setup(S, batch_size);
fused_fp16_runner->run(qkv, data.cumulative_sequence_length, data.output, stream);
return Status::OK();
}
#if USE_FLASH_ATTENTION
template <typename T>
Status FusedAttentionCutlass(
const cudaDeviceProp& device_prop,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
// Q, K and V pointers
const int model_dimension_qk = num_heads * qk_head_size;
const int model_dimension_v = num_heads * v_head_size;
const size_t elements_qk = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_qk);
const size_t elements_v = static_cast<size_t>(parameters.token_count) * static_cast<size_t>(model_dimension_v);
// When separate Q, K and V inputs are used, we can feed them directly to the Cutlass FMHA. Otherwise, transpose packed TN3H into three TNH buffers.
if (!data.no_qkv_workspace) {
LaunchTranspose(data.query, data.key, data.value, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
data.source_qkv_format, AttentionQkvFormat::Q_K_V_TNH,
data.token_offset, parameters.token_count, stream);
}
MemoryEfficientAttentionParams p;
p.sm = device_prop.major * 10 + device_prop.minor;
p.is_half = sizeof(T) == 2;
p.batch_size = parameters.batch_size;
p.num_heads = parameters.num_heads;
p.sequence_length = parameters.sequence_length;
p.kv_sequence_length = parameters.sequence_length;
p.qk_head_size = parameters.head_size;
p.v_head_size = parameters.v_head_size;
p.causal = false;
p.scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
p.seqlen_k_ptr = nullptr;
p.seqstart_q_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.seqstart_k_ptr = const_cast<int32_t*>(data.cumulative_sequence_length);
p.query = data.no_qkv_workspace ? data.query : data.workspace;
p.key = data.no_qkv_workspace ? data.key : (data.workspace + elements_qk);
p.value = data.no_qkv_workspace ? data.value : (data.workspace + elements_qk + elements_qk);
p.attn_bias = data.relative_position_bias;
p.is_attn_bias_batched = !parameters.broadcast_res_pos_bias;
p.output = data.output;
p.workspace = MemoryEfficientAttentionParams::need_workspace(v_head_size, sizeof(T) == sizeof(float))
? (data.workspace + (data.no_qkv_workspace ? 0 : (elements_qk + elements_qk + elements_v)))
: nullptr;
p.stream = stream;
run_memory_efficient_attention(p);
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedMHA cutlass q(BSNH)", reinterpret_cast<const T*>(p.query), parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedMHA cutlass k(BSNH)", reinterpret_cast<const T*>(p.key), parameters.token_count, num_heads * qk_head_size);
DUMP_TENSOR_D("PackedMHA cutlass v(BSNH)", reinterpret_cast<const T*>(p.value), parameters.token_count, num_heads * v_head_size);
DUMP_TENSOR_D("PackedMHA cutlass cumulative_sequence_length", data.cumulative_sequence_length, 1, batch_size + 1);
DUMP_TENSOR("PackedMHA cutlass output", data.output, parameters.token_count, num_heads, v_head_size);
return Status::OK();
}
#endif
template <typename T>
Status UnfusedAttention(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
constexpr size_t element_size = sizeof(T);
const int batch_size = parameters.batch_size;
const int sequence_length = parameters.sequence_length;
const int num_heads = parameters.num_heads;
const int qk_head_size = parameters.head_size;
const int v_head_size = parameters.v_head_size;
const int batches = batch_size * num_heads;
const int size_per_batch_q = sequence_length * qk_head_size;
const int size_per_batch_k = sequence_length * qk_head_size;
const int size_per_batch_v = sequence_length * v_head_size;
const size_t elements_q = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_q);
const size_t elements_k = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_k);
const size_t elements_v = static_cast<size_t>(batches) * static_cast<size_t>(size_per_batch_v);
// Q, K and V pointers when fused attention is not used
LaunchTranspose(data.query, data.key, data.value, data.bias, data.workspace,
batch_size, sequence_length,
num_heads, qk_head_size, v_head_size,
data.source_qkv_format, AttentionQkvFormat::Q_K_V_BNSH,
data.token_offset, parameters.token_count, stream);
T* qkv = data.workspace;
T* q = qkv;
T* k = q + elements_q;
T* v = k + elements_k;
T* scaled_qk = qkv + elements_q + elements_k + elements_v;
// Compute Q*K' (as K'*Q), scaled by 1/sqrt(H) and store in scaled_qk: BxNxSxT
// Q: BxNxSxH, K: BxNxSxH, Q*K': BxNxSxS
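  // cuBLAS is column-major, so each row-major SxH block of Q and K is seen as an HxS
  // matrix. With opA=T on K and opB=N on Q, the batched GEMM below produces an SxS
  // column-major result whose row-major view is exactly Q*K', which is why the comment
  // above describes the product as "K'*Q".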
float one = 1.0f;
float zero = 0.f;
float scale = parameters.scale == 0.0f ? 1.f / sqrt(static_cast<float>(qk_head_size))
: parameters.scale;
cublasSetStream(cublas, stream);
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, CUBLAS_OP_T, CUBLAS_OP_N,
sequence_length, sequence_length, qk_head_size,
&scale,
k, qk_head_size, sequence_length * qk_head_size,
q, qk_head_size, sequence_length * qk_head_size,
&zero,
scaled_qk, sequence_length, sequence_length * sequence_length,
batches, device_prop));
// Q, K and V are ready now
DUMP_TENSOR_INIT();
DUMP_TENSOR_D("PackedMHA unfused q (BNSH)", q, batch_size, num_heads, sequence_length, qk_head_size);
DUMP_TENSOR_D("PackedMHA unfused k (BNSH)", k, batch_size, num_heads, sequence_length, qk_head_size);
DUMP_TENSOR_D("PackedMHA unfused v (BNSH)", v, batch_size, num_heads, sequence_length, v_head_size);
DUMP_TENSOR_D("PackedMHA unfused QK", scaled_qk, batch_size * num_heads, sequence_length, sequence_length);
const size_t bytes = GetAttentionScratchSize(element_size, batch_size, num_heads,
sequence_length);
T* attention_score = scaled_qk + (bytes / element_size);
// Apply softmax and store result R to attention_score: BxNxSxS
ORT_RETURN_IF_ERROR(ComputeSoftmaxWithCumSeqLength<T>(
scaled_qk,
data.relative_position_bias,
parameters.broadcast_res_pos_bias,
data.cumulative_sequence_length,
batch_size,
sequence_length,
num_heads,
attention_score, stream));
DUMP_TENSOR_D("PackedMHA unfused Softmax", attention_score, batch_size * num_heads, sequence_length, sequence_length);
// compute R*V (as V*R), and store in temp_output (space used by Q): BxNxSxH_v
T* temp_output = qkv;
CUBLAS_RETURN_IF_ERROR(cublasGemmStridedBatchedHelper(
cublas, CUBLAS_OP_N, CUBLAS_OP_N,
v_head_size, sequence_length, sequence_length,
&one, v, v_head_size, sequence_length * v_head_size,
attention_score, sequence_length, sequence_length * sequence_length,
&zero, temp_output, v_head_size, sequence_length * v_head_size, batches, device_prop));
// Temp_output is BxNxSxH_v, transpose and remove padding to output TxNxH_v
Status result = LaunchTransposeRemovePadding(
data.output, temp_output,
data.token_offset, parameters.token_count,
batch_size, sequence_length, num_heads, v_head_size,
stream);
DUMP_TENSOR("PackedMHA unfused output", data.output, parameters.token_count, num_heads, v_head_size);
return result;
}
template <typename T>
Status QkvToContext(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<T>& data) {
void* fused_runner = data.fused_runner;
if (nullptr != fused_runner) {
return FusedAttentionTrt<T>(device_prop, stream, parameters, data);
}
#if USE_FLASH_ATTENTION
if (data.use_memory_efficient_attention) {
return FusedAttentionCutlass(device_prop, stream, parameters, data);
}
#endif
return UnfusedAttention<T>(device_prop, cublas, stream, parameters, data);
}
template Status QkvToContext<float>(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<float>& data);
template Status QkvToContext<half>(
const cudaDeviceProp& device_prop,
cublasHandle_t& cublas,
cudaStream_t stream,
PackedAttentionParameters& parameters,
PackedMultiHeadAttentionData<half>& data);
} // namespace cuda
} // namespace contrib
} // namespace onnxruntime
|
e0207f091c95b9c07dad9f27c8721135d9caedbc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
//#include "image_reconstract_GPU.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
using namespace std;
typedef unsigned char BYTE;
//BW
#define FILTER_LENGTH 3
#define no_of_threads 256
#define no_of_blocks 1
#define FILESIZE 2992128
#define N_SMPL 11688
#define FIR_FILTER_LENGTH 9
#define FIR_FILTER_LENGTH_D2 (FIR_FILTER_LENGTH/2)
#define COL_PIXEL 512
/*#define R 61
#define pitch 0.2
#define depth 180
#define lens 0.7
#define nLines 256
#define nSamples 512*/
#define counter 173621
#define no_of_blocks2 170
#define CHANNELCOUNT 3
#define TOTAL_SIZE 11968512
#define TOTAL_PIXEL 512*512
#define IMSIZE 131072
struct PixelDefine
{
long offset;
unsigned short line_index;
unsigned short sample_index;
float c1, c2, c3, c4;
};
//low
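// The bw_/ellaptic_/chebychev_ kernels below are second-order IIR (biquad) sections in
// Direct Form II: d[0] = x[n] - a1*d[1] - a2*d[2]; y[n] = g*(b0*d[0] + b1*d[1] + b2*d[2]).
// Each thread (threadIdx.y) filters one scan line of N_SMPL samples sequentially, since
// the recursion cannot be parallelized across samples.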
__global__ void bw_lowpass_filterKernel(float *in, float *output)
{
float b1[FILTER_LENGTH];
float a1[FILTER_LENGTH];
b1[0] = 1.0000;
b1[1] = 2.0000;
b1[2] = 1.0000;
a1[0] = 1.0000;
a1[1] = -1.2246515810130951;
a1[2] = 0.45044543005604082;
float g1 = 0.056448462260736451;
float d1[3] = { 0,0,0 };
int j = threadIdx.y;
for (int i = 0; i <N_SMPL; i++)
{
d1[2] = d1[1];
d1[1] = d1[0];
d1[0] = in[i + j*N_SMPL] - a1[1] * d1[1] - a1[2] * d1[2];
output[i + j*N_SMPL] = g1*b1[0] * d1[0] + g1*b1[1] * d1[1] + g1*b1[2] * d1[2];
}
}
__global__ void bw_highpass_filterKernel(float *in, float *output)
{
float b[FILTER_LENGTH];
float a[FILTER_LENGTH];
b[0] = 1.0000;
b[1] = -2.0000;
b[2] = 1.0000;
a[0] = 1.0000;
a[1] = -1.5610180758007182;
a[2] = 0.64135153805756306;
float g = 0.75502196198037386;
float d[3] = { 0,0,0 };
int j = threadIdx.y;
//for ( j = 0; j < no_of_threads; j++)
//{
for (int i = 0; i <N_SMPL; i++)
//d1[3] = d1[2];
{
d[2] = d[1];
d[1] = d[0];
d[0] = in[i + j*N_SMPL] - a[1] * d[1] - a[2] * d[2];
output[i + j*N_SMPL] = g*b[0] * d[0] + g*b[1] * d[1] + g*b[2] * d[2];
}
//}
}
__global__ void ellaptic_lowpass_filterKernel(float *in, float *output)
{
float b1[FILTER_LENGTH];
float a1[FILTER_LENGTH];
b1[0] = 1.0000;
b1[1] = 1.9951831916798526;
b1[2] = 1.0000;
a1[0] = 1.0000;
a1[1] = -1.2846437324109372;
a1[2] = 0.54828472668060735;
float g1 = 0.058813394079824187;
float d1[3] = { 0,0,0 };
int j = threadIdx.y;
for (int i = 0; i <N_SMPL; i++)
{
d1[2] = d1[1];
d1[1] = d1[0];
d1[0] = in[i + j*N_SMPL] - a1[1] * d1[1] - a1[2] * d1[2];
output[i + j*N_SMPL] = g1*b1[0] * d1[0] + g1*b1[1] * d1[1] + g1*b1[2] * d1[2];
}
}
__global__ void ellaptic_highpass_filterKernel(float *in, float *output)
{
float b[FILTER_LENGTH];
float a[FILTER_LENGTH];
b[0] = 1.0000;
b[1] = -1.9999897887443692;
b[2] = 1.0000;
a[0] = 1.0000;
a[1] = -1.2846437324109372;
a[2] = 0.73284143556340198;
float g = 0.80059240346457028;
float d[3] = { 0,0,0 };
int j = threadIdx.y;
//for ( j = 0; j < no_of_threads; j++)
//{
for (int i = 0; i <N_SMPL; i++)
//d1[3] = d1[2];
{
d[2] = d[1];
d[1] = d[0];
d[0] = in[i + j*N_SMPL] - a[1] * d[1] - a[2] * d[2];
output[i + j*N_SMPL] = g*b[0] * d[0] + g*b[1] * d[1] + g*b[2] * d[2];
}
//}
}
__global__ void chebychev_lowpass_filterKernel(float *in, float *output)
{
float b1[FILTER_LENGTH];
float a1[FILTER_LENGTH];
b1[0] = 1.0000;
b1[1] = 2;
b1[2] = 1.0000;
a1[0] = 1.0000;
a1[1] = -1.2846384667454742;
a1[2] = 0.5482635908391793;
float g1 = 0.065906281023426286;
float d1[3] = { 0,0,0 };
int j = threadIdx.y;
for (int i = 0; i <N_SMPL; i++)
{
d1[2] = d1[1];
d1[1] = d1[0];
d1[0] = in[i + j*N_SMPL] - a1[1] * d1[1] - a1[2] * d1[2];
output[i + j*N_SMPL] = g1*b1[0] * d1[0] + g1*b1[1] * d1[1] + g1*b1[2] * d1[2];
}
}
__global__ void chebychev_highpass_filterKernel(float *in, float *output)
{
float b[FILTER_LENGTH];
float a[FILTER_LENGTH];
b[0] = 1.0000;
b[1] = -2;
b[2] = 1.0000;
a[0] = 1.0000;
a[1] = -1.6557169643711138;
a[2] = 0.73281693212820698;
float g = 0.8471334741248302;
float d[3] = { 0,0,0 };
int j = threadIdx.y;
//for ( j = 0; j < no_of_threads; j++)
//{
for (int i = 0; i <N_SMPL; i++)
//d1[3] = d1[2];
{
d[2] = d[1];
d[1] = d[0];
d[0] = in[i + j*N_SMPL] - a[1] * d[1] - a[2] * d[2];
output[i + j*N_SMPL] = g*b[0] * d[0] + g*b[1] * d[1] + g*b[2] * d[2];
}
//}
}
__global__ void detection_Kernel(float *final_output,float *x_out, float* z, float *final_output_PhasShift)
{
static float b1[FIR_FILTER_LENGTH] = { 0 , -0.3706 , 0 , -0.6386 , 0 , 0.6386 , 0 , 0.3706 , 0 };
	// advance by the FIR group delay (pointer offset, not a value write) so the delayed
	// in-phase sample lines up with the FIR-filtered quadrature sample computed below
	final_output_PhasShift = final_output + FIR_FILTER_LENGTH_D2;
int j = threadIdx.y;
for (int i = 0; i < N_SMPL; i++)
if (i < N_SMPL - 9)
{
{
z = final_output + i + j* N_SMPL;
float acc = 0;
{
acc = acc + b1[7] * z[0];
}
{
acc = acc + b1[5] * z[2];
}
{
acc = acc + b1[3] * z[4];
}
{
acc = acc + b1[1] * z[6];
}
float q = final_output_PhasShift[i + j* N_SMPL];
x_out[i + j* N_SMPL] = sqrt((acc*acc) + q*q);
}
}
/*float g2 = 0.0036216815149286421;
float b0 = 1, b3 = 2, b2 = 1;
float a0 = 1, a1 = -1.8226949251963083, a2 = 0.83718165125602262;
float a = 0, b = 0, c = 0, d;
for (int i = 0; i <N_SMPL; i++)
if (i < N_SMPL - 9)
{
{
d = c;
c = b;
b = a;
a = x_out[i + j*N_SMPL] - a1*b - a2*c;
det_out[i + j*N_SMPL] = g2*b0*a + g2*b3*b + g2*b2*c;
}
}*/
}
__global__ void lpf_Kernel( float *x_out,float *det_out)
{
int j = threadIdx.y;
float g2 = 0.0036216815149286421;
register float b0 = 1, b3 = 2, b2 = 1;
register float a0 = 1, a1 = -1.8226949251963083, a2 = 0.83718165125602262;
register float a = 0, b = 0, c = 0, d;
for (int i = 0; i <N_SMPL; i++)
//if (i < N_SMPL - 9)
//{
{
d = c;
c = b;
b = a;
a = x_out[i + j*N_SMPL] - a1*b - a2*c;
det_out[i + j*N_SMPL] = g2*b0*a + g2*b3*b + g2*b2*c;
//}
}
}
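// Undersampling kernels: each thread reduces one detected line from N_SMPL samples down to
// COL_PIXEL output samples using a window of N_SMPL/COL_PIXEL samples. av_sampling_Kernel
// stores the window average, peak_sampling_Kernel the window maximum, and sampling_Kernel
// keeps a single sample per window (plain decimation).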
__global__ void av_sampling_Kernel( float *det_out, float *samp_out)
{
//int x = threadIdx.y;
int i = threadIdx.y;
int w = (i)*COL_PIXEL;
float L = 0;
float sum = 0;
int j = 1;
float average = 0;
float window_size = (float)N_SMPL / (float)COL_PIXEL;
for (int k = 0; k < N_SMPL; k++)
{
float NewL = floor((k / window_size) + 1);
float dto = det_out[i*N_SMPL + k];
sum = sum + dto;
if (L == NewL)
{
j = j + 1;
continue;
}
else
{
average = sum / j;
samp_out[w] = average;
// samp_out[w + i*N_SMPL]= 255;
w += 1;
j = 0;
sum = 0;
}
L = NewL;
}
}
__global__ void peak_sampling_Kernel( float *det_out, float *psamp_out)
{
/*int nt = omp_get_max_threads();
//omp_set_num_threads(nt - 0);
int undertype;
/*if (undertype == 0) //PEAK DETECT
{*/
float window_size = (float)N_SMPL / (float)COL_PIXEL;
int i = threadIdx.y;
float max = 0;
float L = 0;
int w = (i)*COL_PIXEL;
for (int k = 0; k < N_SMPL; k++)
{
float NewL = floor((k / window_size) + 1);
float dto = det_out[i*N_SMPL + k];
if (max < dto) max = dto;
if (L != NewL)
{
psamp_out[w] = max;
w += 1;
max = 0;
}
L = NewL;
}
}
__global__ void sampling_Kernel( float *det_out, float *ssamp_out) //Sample
{
float window_size = (float)N_SMPL / (float)COL_PIXEL;
int i = threadIdx.y;
float L = 0;
int w = i*COL_PIXEL;
for (int k = 0; k < N_SMPL; k++)
{
float NewL = floor((k / window_size) + 1);
float dto = det_out[i*N_SMPL + k];
if (L != NewL)
{
ssamp_out[w] = dto;
w += 1;
}
L = NewL;
}
}
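// dynamic_Kernel performs dynamic-range (log) compression: each line is rectified, a global
// maximum over the 256 lines is gathered through maxthreads[], samples are normalized and
// clipped against the threshold derived from the requested dB range, converted with
// 20*log10, and finally rescaled to the 0..255 grey-level range in newscale.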
__global__ void dynamic_Kernel(float *newscale, float *under_sampling_out, int *dB,float *dynamic_out,float *maxthreads)
{
int dBrange[1] = {0};
*dBrange = *dB;
int dBmax = 150;
float thresh = pow((float)10, (float)(dBmax - dBrange[0]) / 20);
float ymax = 0;
float maximum_y = 0;
float max = 0;
int j = threadIdx.y;
/* if (dBrange >= 150 || dBrange <= 0)
{
//this command only works in a console app: cout<<"ERROR dBrange must be between 0 and 150"<<endl;
AfxMessageBox(_T("ERROR dBrange must be between 0 and 150"));
dBrange = 60;
}*/
for (int i = 0; i < 512; i++)
{
dynamic_out[i+j*512] = abs(under_sampling_out[i+j*512]);
/*if (maximum_y < dynamic_out[i])
{
maximum_y = dynamic_out[i];
}*/
if (maxthreads[j] < dynamic_out[i + j * 512])
{
maxthreads[j] = dynamic_out[i + j * 512];
}
	}
	// every thread of the block must have stored its per-line maximum before the reduction below
	__syncthreads();
	for(int i=0;i<256;i++)
{
if (maximum_y < maxthreads[i])
{
maximum_y = maxthreads[i];
}
}
float x = pow((float)10, (float) 7.5);
ymax = maximum_y / x;
int min1 = dBmax - dBrange[0];
int newmax = 255;
int newmin = 0;
int oldrange = dBmax - min1;
int newrange = newmax - newmin;
for (int i = 0; i < 512; i++)
{
dynamic_out[i + j * 512] = dynamic_out[i+j*512] / ymax;
if (dynamic_out[i+j*512] > thresh)
dynamic_out[i+j*512] = dynamic_out[i+j * 512];
else
dynamic_out[i+ j * 512] = thresh;
// newscale[i + j * 512] = dynamic_out[i + j * 512];
dynamic_out[i+ j * 512] = 20 * log10(dynamic_out[i + j * 512]);
newscale[i+ j * 512] = (((dynamic_out[i + j * 512] - min1) * newrange) / oldrange) + newmin;
}
}
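// interpolation_kernal does the scan conversion: for every output pixel described in
// ptrpixel it reads the four neighbouring samples Q11, Q12, Q21 and Q22 from the 256x512
// line/sample grid in newscale and blends them with the precomputed bilinear weights
// c1..c4 to fill the 512x512 final_image.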
__global__ void interpolation_kernal(PixelDefine *ptrpixel,float *newscale, PixelDefine *s,float *ptr, BYTE *final_image)
{
//int i = threadIdx.y + blockIdx.y *512+ blockIdx.x*512;
int i = threadIdx.y;
for (int j = 0; j <679; j++)
//for (int j = 0; j <1024; j++)
{
if (i+j*256< counter)
{
s = ptrpixel + ptrpixel[i + j * 256].offset;
unsigned short Sindx = s->sample_index;
unsigned short Lindx = s->line_index;
//memset(newscale, 255, 131072 * sizeof(float));
ptr = newscale;
register float Q11 = *(ptr + (Sindx + (Lindx * 512)));
register float Q12 = *(ptr + (Sindx + 1 + (Lindx * 512)));
register float Q21 = *(ptr + (Sindx + ((Lindx + 1) * 512)));
register float Q22 = *(ptr + (Sindx + 1 + ((Lindx + 1) * 512)));
final_image[ptrpixel[i + j *256].offset] = s->c1 * Q11 + s->c2 *Q21 + s->c3 *Q12 + s->c4 *Q22;
//final_image[ 0 ] =255;
}
}
}
extern "C" void
bw_filteringTest(float *data, float *filth_out ,float *filtl_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
bw_highpass_filterKernel << < dimGrid, dimBlock >> >(data, filth_out);
bw_lowpass_filterKernel << < dimGrid, dimBlock >> >(filth_out, filtl_out);
}
extern "C" void
ellaptic_filteringTest(float *data, float *filth_out, float *filtl_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
ellaptic_highpass_filterKernel << < dimGrid, dimBlock >> >(data, filth_out);
ellaptic_lowpass_filterKernel << < dimGrid, dimBlock >> >(filth_out, filtl_out);
}
extern "C" void
chebychev_filteringTest(float *data, float *filth_out, float *filtl_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
chebychev_highpass_filterKernel << < dimGrid, dimBlock >> >(data, filth_out);
chebychev_lowpass_filterKernel << < dimGrid, dimBlock >> >(filth_out, filtl_out);
}
extern "C" void
detectionTest(float *filtl_out, float *det_out, float * ph, float *z, float *lpf)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
detection_Kernel << < dimGrid, dimBlock >> >(filtl_out, det_out, ph, z);
lpf_Kernel << < dimGrid, dimBlock >> >(det_out, lpf);
}
extern "C" void
avsamplingTest( float *lpf, float *sampling_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
av_sampling_Kernel << < dimGrid, dimBlock >> > (lpf, sampling_out);
}
extern "C" void
peaksamplingTest(float *lpf, float *sampling_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
peak_sampling_Kernel << < dimGrid, dimBlock >> > (lpf, sampling_out);
}
extern "C" void
samplingTest(float *lpf, float *sampling_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
sampling_Kernel << < dimGrid, dimBlock >> > (lpf, sampling_out);
}
extern "C" void
dynamicTest(float *new_scale,float *sampling_out,int *dB_v, float *dynamic,float *max)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
dynamic_Kernel << < dimGrid, dimBlock >> > (new_scale, sampling_out, dB_v, dynamic, max);
}
extern "C" void
interpolationTest(PixelDefine *pixel,float *new_scale, PixelDefine *s,float *ptr, BYTE *FI)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
interpolation_kernal << < dimGrid, dimBlock >> > (pixel, new_scale, s, ptr, FI);
}
/*extern "C" void
interpolationTest(PixelDefine *ptrpixel, BYTE *FINAL)
{
dim3 dimBlock(1, 1);
dim3 dimGrid(1, no_of_blocks);
float*new_scale = 0;
PixelDefine *s = 0;
float *ptr = 0;
//float * NS = 0;
BYTE *FI = 0;
PixelDefine *pixel = 0;
hipMalloc(&pixel, TOTAL_PIXEL * sizeof(PixelDefine));
hipMalloc(&FI, TOTAL_PIXEL * CHANNELCOUNT * sizeof(BYTE));
hipMalloc(&ptr, IMSIZE * sizeof(float));
hipMalloc(&s, TOTAL_PIXEL * sizeof(PixelDefine));
hipMemcpy(pixel, ptrpixel, 512 * 512 * sizeof(PixelDefine), hipMemcpyHostToDevice);
hipMalloc(&new_scale, IMSIZE * sizeof(float));
interpolation_kernal <<< dimGrid, dimBlock >>> (pixel, new_scale, s, ptr, FI);
//float *newscale = 0;
//hipMalloc(&newscale, 131072 * sizeof(float));
//memset(new_scale, 255, 131072 * sizeof(float));
hipMemcpy(FINAL , FI, 512 * 512 * 3 * sizeof(BYTE), hipMemcpyDeviceToHost);
int k = 0;
}*/
| e0207f091c95b9c07dad9f27c8721135d9caedbc.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<iostream>
//#include "image_reconstract_GPU.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <assert.h>
using namespace std;
typedef unsigned char BYTE;
//BW
#define FILTER_LENGTH 3
#define no_of_threads 256
#define no_of_blocks 1
#define FILESIZE 2992128
#define N_SMPL 11688
#define FIR_FILTER_LENGTH 9
#define FIR_FILTER_LENGTH_D2 (FIR_FILTER_LENGTH/2)
#define COL_PIXEL 512
/*#define R 61
#define pitch 0.2
#define depth 180
#define lens 0.7
#define nLines 256
#define nSamples 512*/
#define counter 173621
#define no_of_blocks2 170
#define CHANNELCOUNT 3
#define TOTAL_SIZE 11968512
#define TOTAL_PIXEL 512*512
#define IMSIZE 131072
struct PixelDefine
{
long offset;
unsigned short line_index;
unsigned short sample_index;
float c1, c2, c3, c4;
};
//low
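// The bw_/ellaptic_/chebychev_ kernels below are second-order IIR (biquad) sections in
// Direct Form II: d[0] = x[n] - a1*d[1] - a2*d[2]; y[n] = g*(b0*d[0] + b1*d[1] + b2*d[2]).
// Each thread (threadIdx.y) filters one scan line of N_SMPL samples sequentially, since
// the recursion cannot be parallelized across samples.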
__global__ void bw_lowpass_filterKernel(float *in, float *output)
{
float b1[FILTER_LENGTH];
float a1[FILTER_LENGTH];
b1[0] = 1.0000;
b1[1] = 2.0000;
b1[2] = 1.0000;
a1[0] = 1.0000;
a1[1] = -1.2246515810130951;
a1[2] = 0.45044543005604082;
float g1 = 0.056448462260736451;
float d1[3] = { 0,0,0 };
int j = threadIdx.y;
for (int i = 0; i <N_SMPL; i++)
{
d1[2] = d1[1];
d1[1] = d1[0];
d1[0] = in[i + j*N_SMPL] - a1[1] * d1[1] - a1[2] * d1[2];
output[i + j*N_SMPL] = g1*b1[0] * d1[0] + g1*b1[1] * d1[1] + g1*b1[2] * d1[2];
}
}
__global__ void bw_highpass_filterKernel(float *in, float *output)
{
float b[FILTER_LENGTH];
float a[FILTER_LENGTH];
b[0] = 1.0000;
b[1] = -2.0000;
b[2] = 1.0000;
a[0] = 1.0000;
a[1] = -1.5610180758007182;
a[2] = 0.64135153805756306;
float g = 0.75502196198037386;
float d[3] = { 0,0,0 };
int j = threadIdx.y;
//for ( j = 0; j < no_of_threads; j++)
//{
for (int i = 0; i <N_SMPL; i++)
//d1[3] = d1[2];
{
d[2] = d[1];
d[1] = d[0];
d[0] = in[i + j*N_SMPL] - a[1] * d[1] - a[2] * d[2];
output[i + j*N_SMPL] = g*b[0] * d[0] + g*b[1] * d[1] + g*b[2] * d[2];
}
//}
}
__global__ void ellaptic_lowpass_filterKernel(float *in, float *output)
{
float b1[FILTER_LENGTH];
float a1[FILTER_LENGTH];
b1[0] = 1.0000;
b1[1] = 1.9951831916798526;
b1[2] = 1.0000;
a1[0] = 1.0000;
a1[1] = -1.2846437324109372;
a1[2] = 0.54828472668060735;
float g1 = 0.058813394079824187;
float d1[3] = { 0,0,0 };
int j = threadIdx.y;
for (int i = 0; i <N_SMPL; i++)
{
d1[2] = d1[1];
d1[1] = d1[0];
d1[0] = in[i + j*N_SMPL] - a1[1] * d1[1] - a1[2] * d1[2];
output[i + j*N_SMPL] = g1*b1[0] * d1[0] + g1*b1[1] * d1[1] + g1*b1[2] * d1[2];
}
}
__global__ void ellaptic_highpass_filterKernel(float *in, float *output)
{
float b[FILTER_LENGTH];
float a[FILTER_LENGTH];
b[0] = 1.0000;
b[1] = -1.9999897887443692;
b[2] = 1.0000;
a[0] = 1.0000;
a[1] = -1.2846437324109372;
a[2] = 0.73284143556340198;
float g = 0.80059240346457028;
float d[3] = { 0,0,0 };
int j = threadIdx.y;
//for ( j = 0; j < no_of_threads; j++)
//{
for (int i = 0; i <N_SMPL; i++)
//d1[3] = d1[2];
{
d[2] = d[1];
d[1] = d[0];
d[0] = in[i + j*N_SMPL] - a[1] * d[1] - a[2] * d[2];
output[i + j*N_SMPL] = g*b[0] * d[0] + g*b[1] * d[1] + g*b[2] * d[2];
}
//}
}
__global__ void chebychev_lowpass_filterKernel(float *in, float *output)
{
float b1[FILTER_LENGTH];
float a1[FILTER_LENGTH];
b1[0] = 1.0000;
b1[1] = 2;
b1[2] = 1.0000;
a1[0] = 1.0000;
a1[1] = -1.2846384667454742;
a1[2] = 0.5482635908391793;
float g1 = 0.065906281023426286;
float d1[3] = { 0,0,0 };
int j = threadIdx.y;
for (int i = 0; i <N_SMPL; i++)
{
d1[2] = d1[1];
d1[1] = d1[0];
d1[0] = in[i + j*N_SMPL] - a1[1] * d1[1] - a1[2] * d1[2];
output[i + j*N_SMPL] = g1*b1[0] * d1[0] + g1*b1[1] * d1[1] + g1*b1[2] * d1[2];
}
}
__global__ void chebychev_highpass_filterKernel(float *in, float *output)
{
float b[FILTER_LENGTH];
float a[FILTER_LENGTH];
b[0] = 1.0000;
b[1] = -2;
b[2] = 1.0000;
a[0] = 1.0000;
a[1] = -1.6557169643711138;
a[2] = 0.73281693212820698;
float g = 0.8471334741248302;
float d[3] = { 0,0,0 };
int j = threadIdx.y;
//for ( j = 0; j < no_of_threads; j++)
//{
for (int i = 0; i <N_SMPL; i++)
//d1[3] = d1[2];
{
d[2] = d[1];
d[1] = d[0];
d[0] = in[i + j*N_SMPL] - a[1] * d[1] - a[2] * d[2];
output[i + j*N_SMPL] = g*b[0] * d[0] + g*b[1] * d[1] + g*b[2] * d[2];
}
//}
}
__global__ void detection_Kernel(float *final_output,float *x_out, float* z, float *final_output_PhasShift)
{
static float b1[FIR_FILTER_LENGTH] = { 0 , -0.3706 , 0 , -0.6386 , 0 , 0.6386 , 0 , 0.3706 , 0 };
	// advance by the FIR group delay (pointer offset, not a value write) so the delayed
	// in-phase sample lines up with the FIR-filtered quadrature sample computed below
	final_output_PhasShift = final_output + FIR_FILTER_LENGTH_D2;
int j = threadIdx.y;
for (int i = 0; i < N_SMPL; i++)
if (i < N_SMPL - 9)
{
{
z = final_output + i + j* N_SMPL;
float acc = 0;
{
acc = acc + b1[7] * z[0];
}
{
acc = acc + b1[5] * z[2];
}
{
acc = acc + b1[3] * z[4];
}
{
acc = acc + b1[1] * z[6];
}
float q = final_output_PhasShift[i + j* N_SMPL];
x_out[i + j* N_SMPL] = sqrt((acc*acc) + q*q);
}
}
/*float g2 = 0.0036216815149286421;
float b0 = 1, b3 = 2, b2 = 1;
float a0 = 1, a1 = -1.8226949251963083, a2 = 0.83718165125602262;
float a = 0, b = 0, c = 0, d;
for (int i = 0; i <N_SMPL; i++)
if (i < N_SMPL - 9)
{
{
d = c;
c = b;
b = a;
a = x_out[i + j*N_SMPL] - a1*b - a2*c;
det_out[i + j*N_SMPL] = g2*b0*a + g2*b3*b + g2*b2*c;
}
}*/
}
__global__ void lpf_Kernel( float *x_out,float *det_out)
{
int j = threadIdx.y;
float g2 = 0.0036216815149286421;
register float b0 = 1, b3 = 2, b2 = 1;
register float a0 = 1, a1 = -1.8226949251963083, a2 = 0.83718165125602262;
register float a = 0, b = 0, c = 0, d;
for (int i = 0; i <N_SMPL; i++)
//if (i < N_SMPL - 9)
//{
{
d = c;
c = b;
b = a;
a = x_out[i + j*N_SMPL] - a1*b - a2*c;
det_out[i + j*N_SMPL] = g2*b0*a + g2*b3*b + g2*b2*c;
//}
}
}
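// Undersampling kernels: each thread reduces one detected line from N_SMPL samples down to
// COL_PIXEL output samples using a window of N_SMPL/COL_PIXEL samples. av_sampling_Kernel
// stores the window average, peak_sampling_Kernel the window maximum, and sampling_Kernel
// keeps a single sample per window (plain decimation).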
__global__ void av_sampling_Kernel( float *det_out, float *samp_out)
{
//int x = threadIdx.y;
int i = threadIdx.y;
int w = (i)*COL_PIXEL;
float L = 0;
float sum = 0;
int j = 1;
float average = 0;
float window_size = (float)N_SMPL / (float)COL_PIXEL;
for (int k = 0; k < N_SMPL; k++)
{
float NewL = floor((k / window_size) + 1);
float dto = det_out[i*N_SMPL + k];
sum = sum + dto;
if (L == NewL)
{
j = j + 1;
continue;
}
else
{
average = sum / j;
samp_out[w] = average;
// samp_out[w + i*N_SMPL]= 255;
w += 1;
j = 0;
sum = 0;
}
L = NewL;
}
}
__global__ void peak_sampling_Kernel( float *det_out, float *psamp_out)
{
/*int nt = omp_get_max_threads();
//omp_set_num_threads(nt - 0);
int undertype;
/*if (undertype == 0) //PEAK DETECT
{*/
float window_size = (float)N_SMPL / (float)COL_PIXEL;
int i = threadIdx.y;
float max = 0;
float L = 0;
int w = (i)*COL_PIXEL;
for (int k = 0; k < N_SMPL; k++)
{
float NewL = floor((k / window_size) + 1);
float dto = det_out[i*N_SMPL + k];
if (max < dto) max = dto;
if (L != NewL)
{
psamp_out[w] = max;
w += 1;
max = 0;
}
L = NewL;
}
}
__global__ void sampling_Kernel( float *det_out, float *ssamp_out) //Sample
{
float window_size = (float)N_SMPL / (float)COL_PIXEL;
int i = threadIdx.y;
float L = 0;
int w = i*COL_PIXEL;
for (int k = 0; k < N_SMPL; k++)
{
float NewL = floor((k / window_size) + 1);
float dto = det_out[i*N_SMPL + k];
if (L != NewL)
{
ssamp_out[w] = dto;
w += 1;
}
L = NewL;
}
}
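// dynamic_Kernel performs dynamic-range (log) compression: each line is rectified, a global
// maximum over the 256 lines is gathered through maxthreads[], samples are normalized and
// clipped against the threshold derived from the requested dB range, converted with
// 20*log10, and finally rescaled to the 0..255 grey-level range in newscale.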
__global__ void dynamic_Kernel(float *newscale, float *under_sampling_out, int *dB,float *dynamic_out,float *maxthreads)
{
int dBrange[1] = {0};
*dBrange = *dB;
int dBmax = 150;
float thresh = pow((float)10, (float)(dBmax - dBrange[0]) / 20);
float ymax = 0;
float maximum_y = 0;
float max = 0;
int j = threadIdx.y;
/* if (dBrange >= 150 || dBrange <= 0)
{
//this command could works in consol app only cout<<"ERROR dBrange must be between 0 and 150"<<endl;
AfxMessageBox(_T("ERROR dBrange must be between 0 and 150"));
dBrange = 60;
}*/
for (int i = 0; i < 512; i++)
{
dynamic_out[i+j*512] = abs(under_sampling_out[i+j*512]);
/*if (maximum_y < dynamic_out[i])
{
maximum_y = dynamic_out[i];
}*/
if (maxthreads[j] < dynamic_out[i + j * 512])
{
maxthreads[j] = dynamic_out[i + j * 512];
}
	}
	// every thread of the block must have stored its per-line maximum before the reduction below
	__syncthreads();
	for(int i=0;i<256;i++)
{
if (maximum_y < maxthreads[i])
{
maximum_y = maxthreads[i];
}
}
float x = pow((float)10, (float) 7.5);
ymax = maximum_y / x;
int min1 = dBmax - dBrange[0];
int newmax = 255;
int newmin = 0;
int oldrange = dBmax - min1;
int newrange = newmax - newmin;
for (int i = 0; i < 512; i++)
{
dynamic_out[i + j * 512] = dynamic_out[i+j*512] / ymax;
if (dynamic_out[i+j*512] > thresh)
dynamic_out[i+j*512] = dynamic_out[i+j * 512];
else
dynamic_out[i+ j * 512] = thresh;
// newscale[i + j * 512] = dynamic_out[i + j * 512];
dynamic_out[i+ j * 512] = 20 * log10(dynamic_out[i + j * 512]);
newscale[i+ j * 512] = (((dynamic_out[i + j * 512] - min1) * newrange) / oldrange) + newmin;
}
}
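// interpolation_kernal does the scan conversion: for every output pixel described in
// ptrpixel it reads the four neighbouring samples Q11, Q12, Q21 and Q22 from the 256x512
// line/sample grid in newscale and blends them with the precomputed bilinear weights
// c1..c4 to fill the 512x512 final_image.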
__global__ void interpolation_kernal(PixelDefine *ptrpixel,float *newscale, PixelDefine *s,float *ptr, BYTE *final_image)
{
//int i = threadIdx.y + blockIdx.y *512+ blockIdx.x*512;
int i = threadIdx.y;
for (int j = 0; j <679; j++)
//for (int j = 0; j <1024; j++)
{
if (i+j*256< counter)
{
s = ptrpixel + ptrpixel[i + j * 256].offset;
unsigned short Sindx = s->sample_index;
unsigned short Lindx = s->line_index;
//memset(newscale, 255, 131072 * sizeof(float));
ptr = newscale;
register float Q11 = *(ptr + (Sindx + (Lindx * 512)));
register float Q12 = *(ptr + (Sindx + 1 + (Lindx * 512)));
register float Q21 = *(ptr + (Sindx + ((Lindx + 1) * 512)));
register float Q22 = *(ptr + (Sindx + 1 + ((Lindx + 1) * 512)));
final_image[ptrpixel[i + j *256].offset] = s->c1 * Q11 + s->c2 *Q21 + s->c3 *Q12 + s->c4 *Q22;
//final_image[ 0 ] =255;
}
}
}
extern "C" void
bw_filteringTest(float *data, float *filth_out ,float *filtl_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
bw_highpass_filterKernel << < dimGrid, dimBlock >> >(data, filth_out);
bw_lowpass_filterKernel << < dimGrid, dimBlock >> >(filth_out, filtl_out);
}
extern "C" void
ellaptic_filteringTest(float *data, float *filth_out, float *filtl_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
ellaptic_highpass_filterKernel << < dimGrid, dimBlock >> >(data, filth_out);
ellaptic_lowpass_filterKernel << < dimGrid, dimBlock >> >(filth_out, filtl_out);
}
extern "C" void
chebychev_filteringTest(float *data, float *filth_out, float *filtl_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
chebychev_highpass_filterKernel << < dimGrid, dimBlock >> >(data, filth_out);
chebychev_lowpass_filterKernel << < dimGrid, dimBlock >> >(filth_out, filtl_out);
}
extern "C" void
detectionTest(float *filtl_out, float *det_out, float * ph, float *z, float *lpf)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
detection_Kernel << < dimGrid, dimBlock >> >(filtl_out, det_out, ph, z);
lpf_Kernel << < dimGrid, dimBlock >> >(det_out, lpf);
}
extern "C" void
avsamplingTest( float *lpf, float *sampling_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
av_sampling_Kernel << < dimGrid, dimBlock >> > (lpf, sampling_out);
}
extern "C" void
peaksamplingTest(float *lpf, float *sampling_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
peak_sampling_Kernel << < dimGrid, dimBlock >> > (lpf, sampling_out);
}
extern "C" void
samplingTest(float *lpf, float *sampling_out)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
sampling_Kernel << < dimGrid, dimBlock >> > (lpf, sampling_out);
}
extern "C" void
dynamicTest(float *new_scale,float *sampling_out,int *dB_v, float *dynamic,float *max)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
dynamic_Kernel << < dimGrid, dimBlock >> > (new_scale, sampling_out, dB_v, dynamic, max);
}
extern "C" void
interpolationTest(PixelDefine *pixel,float *new_scale, PixelDefine *s,float *ptr, BYTE *FI)
{
dim3 dimBlock(1, no_of_threads);
dim3 dimGrid(1, no_of_blocks);
interpolation_kernal << < dimGrid, dimBlock >> > (pixel, new_scale, s, ptr, FI);
}
/*extern "C" void
interpolationTest(PixelDefine *ptrpixel, BYTE *FINAL)
{
dim3 dimBlock(1, 1);
dim3 dimGrid(1, no_of_blocks);
float*new_scale = 0;
PixelDefine *s = 0;
float *ptr = 0;
//float * NS = 0;
BYTE *FI = 0;
PixelDefine *pixel = 0;
cudaMalloc(&pixel, TOTAL_PIXEL * sizeof(PixelDefine));
cudaMalloc(&FI, TOTAL_PIXEL * CHANNELCOUNT * sizeof(BYTE));
cudaMalloc(&ptr, IMSIZE * sizeof(float));
cudaMalloc(&s, TOTAL_PIXEL * sizeof(PixelDefine));
cudaMemcpy(pixel, ptrpixel, 512 * 512 * sizeof(PixelDefine), cudaMemcpyHostToDevice);
cudaMalloc(&new_scale, IMSIZE * sizeof(float));
interpolation_kernal <<< dimGrid, dimBlock >>> (pixel, new_scale, s, ptr, FI);
//float *newscale = 0;
//cudaMalloc(&newscale, 131072 * sizeof(float));
//memset(new_scale, 255, 131072 * sizeof(float));
cudaMemcpy(FINAL , FI, 512 * 512 * 3 * sizeof(BYTE), cudaMemcpyDeviceToHost);
int k = 0;
}*/
|
29f51fec9fcc33d43d66e1dba9b351560b35b8b5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, [email protected]
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _kernel_clearsuna_EFG(int num_slices, int num_grid, float*dev_EFG)
{
uint m = blockIdx.x*blockDim.x + threadIdx.x;
uint n = blockIdx.y*blockDim.y + threadIdx.y;
uint k = blockIdx.z;
uint i = m + n*num_grid + k*num_grid*num_grid;
if((m>=num_grid)||(n>=num_grid)||(k>=num_slices))
return;
dev_EFG[i] = 0.0;
} | 29f51fec9fcc33d43d66e1dba9b351560b35b8b5.cu | #include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, [email protected]
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _kernel_clearsuna_EFG(int num_slices, int num_grid, float*dev_EFG)
{
uint m = blockIdx.x*blockDim.x + threadIdx.x;
uint n = blockIdx.y*blockDim.y + threadIdx.y;
uint k = blockIdx.z;
uint i = m + n*num_grid + k*num_grid*num_grid;
if((m>=num_grid)||(n>=num_grid)||(k>=num_slices))
return;
dev_EFG[i] = 0.0;
} |
5984dce1e734b1e30e143ffb598fd30a913e02ef.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgeellmv.cu normal z -> c, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// ELLPACK SpMV kernel
//Michael Garland
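// In this ELLPACK layout every row stores exactly num_cols_per_row entries (shorter rows
// are zero padded), so dval and dcolind are dense num_rows x num_cols_per_row arrays and
// row r starts at index num_cols_per_row * r. One thread processes one row.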
__global__ void
cgeellmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELLPACK SpMV kernel
//Michael Garland
__global__ void
cgeellmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaFloatComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
if ( row < blocksize )
dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
columnindices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cgeellmv_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
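/*
    Illustrative sketch only (not part of the original MAGMA source): a hypothetical call
    computing y = 2*A*x + y for an m x n matrix already stored on the device in ELLPACK
    form with max_nnz_row entries per row. The names d_val, d_colind, d_x, d_y and
    max_nnz_row are assumptions for this example and are not defined in this file.

        magmaFloatComplex alpha = MAGMA_C_MAKE(2.0, 0.0);
        magmaFloatComplex beta  = MAGMA_C_ONE;
        magma_cgeellmv( MagmaNoTrans, m, n, max_nnz_row,
                        alpha, d_val, d_colind, d_x, beta, d_y, queue );
*/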
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
lambda magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[in]
offset magma_int_t
in case a diagonal other than the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrix powers kernel is used
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeellmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( cgeellmv_kernel_shift), dim3(grid), dim3(threads), 0, queue ,
m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
| 5984dce1e734b1e30e143ffb598fd30a913e02ef.cu | /*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@generated from zgeellmv.cu normal z -> c, Fri Sep 11 18:29:42 2015
*/
#include "common_magma.h"
#define BLOCK_SIZE 512
// ELLPACK SpMV kernel
//Michael Garland
__global__ void
cgeellmv_kernel(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
magmaFloatComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
dy[ row ] = dot * alpha + beta * dy [ row ];
}
}
// shifted ELLPACK SpMV kernel
//Michael Garland
__global__ void
cgeellmv_kernel_shift(
int num_rows,
int num_cols,
int num_cols_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex * dval,
magma_index_t * dcolind,
magmaFloatComplex * dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magma_index_t * addrows,
magmaFloatComplex * dy)
{
int row = blockDim.x * blockIdx.x + threadIdx.x;
if (row < num_rows) {
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < num_cols_per_row; n++ ) {
int col = dcolind [ num_cols_per_row * row + n ];
magmaFloatComplex val = dval [ num_cols_per_row * row + n ];
if ( val != 0)
dot += val * dx[col ];
}
if ( row < blocksize )
dy[ row ] = dot * alpha - lambda * dx[ offset+row ] + beta * dy [ row ];
else
dy[ row ] = dot * alpha - lambda * dx[ addrows[row-blocksize] ] + beta * dy [ row ];
}
}
/**
Purpose
-------
This routine computes y = alpha * A * x + beta * y on the GPU.
Input format is ELLPACK.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeellmv(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
cgeellmv_kernel<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );
return MAGMA_SUCCESS;
}
/**
Purpose
-------
This routine computes y = alpha *( A - lambda I ) * x + beta * y on the GPU.
Input format is ELLPACK.
It is the shifted version of the ELLPACK SpMV.
Arguments
---------
@param[in]
transA magma_trans_t
transposition parameter for A
@param[in]
m magma_int_t
number of rows in A
@param[in]
n magma_int_t
number of columns in A
@param[in]
nnz_per_row magma_int_t
number of elements in the longest row
@param[in]
alpha magmaFloatComplex
scalar multiplier
@param[in]
lambda magmaFloatComplex
scalar multiplier
@param[in]
dval magmaFloatComplex_ptr
array containing values of A in ELLPACK
@param[in]
dcolind magmaIndex_ptr
column indices of A in ELLPACK
@param[in]
dx magmaFloatComplex_ptr
input vector x
@param[in]
beta magmaFloatComplex
scalar multiplier
@param[in]
offset magma_int_t
in case a diagonal other than the main diagonal is scaled
@param[in]
blocksize magma_int_t
in case of processing multiple vectors
@param[in]
addrows magmaIndex_ptr
in case the matrix powers kernel is used
@param[out]
dy magmaFloatComplex_ptr
input/output vector y
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_cblas
********************************************************************/
extern "C" magma_int_t
magma_cgeellmv_shift(
magma_trans_t transA,
magma_int_t m, magma_int_t n,
magma_int_t nnz_per_row,
magmaFloatComplex alpha,
magmaFloatComplex lambda,
magmaFloatComplex_ptr dval,
magmaIndex_ptr dcolind,
magmaFloatComplex_ptr dx,
magmaFloatComplex beta,
int offset,
int blocksize,
magmaIndex_ptr addrows,
magmaFloatComplex_ptr dy,
magma_queue_t queue )
{
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
cgeellmv_kernel_shift<<< grid, threads, 0, queue >>>
( m, n, nnz_per_row, alpha, lambda, dval, dcolind, dx,
beta, offset, blocksize, addrows, dy );
return MAGMA_SUCCESS;
}
|
c69627018d0f696cbf61b4802ec342de8410bc88.hip | // !!! This is a file automatically generated by hipify!!!
/*
Program to attempt mtcnn box generation in cuda.
*/
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>
#include <hip/hip_runtime.h>
#include "cnpy.h"
#include "common.h"
using namespace std;
/*
Here every thread loops over the whole prob array; I don't know of a
fundamental way around this. One option would be to split the prob grid
into many pieces and then loop within each block.
The issue with doing it that way is that we can't cover much of the
prob array. If we run this kernel with a block size of {256, 1, 1}
then we only touch the first 256 prob entries, and the array may be much
bigger than that. To get around this, I have seen other nms code create
2 nested loops, with the outer one striding over a small number of elements.
See nmsLayer.cu in the TensorRT kernels.
*/
__global__ void generateBoxesKernelSimple(Prob *prob, int probWidth,
int probHeight, int *outIndices,
int maxOutIndices) {
// worry about the blockIdx offset later
// NB: Here we need blockDim.x to be no larger than probWidth,
// otherwise we get the same thisIdx from 2 different index combinations
int thisIdx = threadIdx.y * probWidth + threadIdx.x;
int probSize = probWidth * probHeight;
__shared__ int outIdx;
if (threadIdx.z == 0 && threadIdx.y == 0 && threadIdx.x == 0) {
outIdx = 0;
}
__syncthreads();
for (int i = 0; i < probSize; i++) {
if (thisIdx == i) {
Prob p = prob[thisIdx];
if (p.y > 0.5) {
outIndices[outIdx] = thisIdx;
printf("Gpu. thisIdx: %d. outIdx: %d\n", thisIdx, outIdx);
outIdx++;
}
}
__syncthreads();
if (outIdx == maxOutIndices) {
return;
}
}
}
// DIM is going to be the blockSize, ie blockDim.x
// I have seen it templated for loop unrolling?
template <int TSIZE, int DIM>
__global__ void generateBoxesKernel(Prob *prob, int probWidth, int probHeight,
int *outIndices, int maxOutIndices) {
// This is for a single element, ie a batch size of 1
// I have seen nms code that uses one block per batch item
// See nmsLayer.cu from TensorRT kernels
Prob thisThreadProbs[TSIZE];
__shared__ int outIdx;
if (threadIdx.z == 0 && threadIdx.y == 0 && threadIdx.x == 0) {
outIdx = 0;
}
int probSize = probWidth * probHeight;
for (int i = 0; i < TSIZE; i++) {
if (i * DIM + threadIdx.x < probSize) {
thisThreadProbs[i] = prob[i * DIM + threadIdx.x];
}
}
for (int i = 0; i < TSIZE; i++) {
for (int j = 0; j < DIM; j++) {
int offset = i * DIM;
int index = offset + j;
if (index >= probSize) {
break;
}
__syncthreads();
if (threadIdx.x == j) {
Prob p = thisThreadProbs[i];
if (p.y > 0.95) {
outIndices[outIdx] = index;
printf("Gpu. index: %d. outIdx: %d\n", index, outIdx);
outIdx++;
}
}
__syncthreads();
if (outIdx == maxOutIndices) {
return;
}
}
}
}
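/*
   Note: the two nested loops above only visit indices in [0, TSIZE*DIM), so a
   launch must satisfy TSIZE*DIM >= probWidth*probHeight or trailing prob
   entries are silently skipped. With tsize = 60 and block = 1024, as used in
   getIndicesAboveThreshold below, that covers 61440 entries.
*/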
vector<int> getIndicesAboveThreshold(const vector<Prob> &prob, int width,
int height, int maxOutIndices) {
vector<int> outIndices(maxOutIndices);
Prob *dProb;
int *dOutIndices;
CUDACHECK(hipMalloc((void **)&dProb, sizeof(Prob) * prob.size()));
CUDACHECK(hipMalloc((void **)&dOutIndices, sizeof(int) * outIndices.size()));
CUDACHECK(hipMemcpy((void *)dProb, (void *)prob.data(),
sizeof(Prob) * prob.size(), hipMemcpyHostToDevice));
int grid = 1;
const int block = 1024;
const int tsize = 60;
hipLaunchKernelGGL(( generateBoxesKernel<tsize, block>)
, dim3(grid), dim3(block), 0, 0, dProb, width, height, dOutIndices, outIndices.size());
CUDACHECK(hipMemcpy((void *)outIndices.data(), (void *)dOutIndices,
sizeof(int) * outIndices.size(),
hipMemcpyDeviceToHost));
CUDACHECK(hipFree((void *)dProb));
CUDACHECK(hipFree((void *)dOutIndices));
return outIndices;
}
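/*
   Minimal host-side reference (illustrative, not part of the original code):
   the same thresholding done sequentially on the CPU. It only assumes that
   Prob exposes the .y field already used by the kernels above, and it can be
   used to sanity-check the indices reported by getIndicesAboveThreshold.
*/
vector<int> getIndicesAboveThresholdCpu(const vector<Prob> &prob,
                                        int maxOutIndices) {
  vector<int> outIndices;
  for (int i = 0; i < (int)prob.size(); i++) {
    if (prob[i].y > 0.95) { // same threshold as generateBoxesKernel
      outIndices.push_back(i);
      if ((int)outIndices.size() == maxOutIndices) {
        break;
      }
    }
  }
  return outIndices;
}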
int main(int argc, char **argv) {
// int width = 2;
// int height = 2;
// vector<Prob> prob{{0.1, 0.9}, {0.8, 0.2}, {0.4, 0.6}, {0.3, 0.7}};
// int maxOutIndices = 3;
vector<Prob> prob;
int maxOutIndices = 500;
char arrayFilename[] =
"./mtcnn-output-arrays/stage-one/prob-0.npy";
cnpy::NpyArray arr = cnpy::npy_load(arrayFilename);
vector<float> items = arr.as_vec<float>();
int height = arr.shape.at(1);
int width = arr.shape.at(2);
// auto it = items.begin();
assert(items.size() % 2 == 0);
for (auto it = items.begin(); it != items.end();) {
prob.emplace_back(*it, *(it + 1));
advance(it, 2);
}
auto outIndices =
getIndicesAboveThreshold(prob, width, height, maxOutIndices);
for (auto &i : outIndices) {
cout << i << endl;
}
}
| c69627018d0f696cbf61b4802ec342de8410bc88.cu | /*
Program to attempt mtcnn box generation in cuda.
*/
#include <iostream>
#include <iterator>
#include <sstream>
#include <string>
#include <vector>
#include <cuda_runtime.h>
#include "cnpy.h"
#include "common.h"
using namespace std;
/*
Here every thread loops over the whole prob array; I don't know of a
fundamental way around this. One option would be to split the prob grid
into many pieces and then loop within each block.
The issue with doing it that way is that we can't cover much of the
prob array. If we run this kernel with a block size of {256, 1, 1}
then we only touch the first 256 prob entries, and the array may be much
bigger than that. To get around this, I have seen other nms code create
2 nested loops, with the outer one striding over a small number of elements.
See nmsLayer.cu in the TensorRT kernels.
*/
__global__ void generateBoxesKernelSimple(Prob *prob, int probWidth,
int probHeight, int *outIndices,
int maxOutIndices) {
// worry about the blockIdx offset later
// NB: Here we need blockDim.x to be no larger than probWidth,
// otherwise we get the same thisIdx from 2 different index combinations
int thisIdx = threadIdx.y * probWidth + threadIdx.x;
int probSize = probWidth * probHeight;
__shared__ int outIdx;
if (threadIdx.z == 0 && threadIdx.y == 0 && threadIdx.x == 0) {
outIdx = 0;
}
__syncthreads();
for (int i = 0; i < probSize; i++) {
if (thisIdx == i) {
Prob p = prob[thisIdx];
if (p.y > 0.5) {
outIndices[outIdx] = thisIdx;
printf("Gpu. thisIdx: %d. outIdx: %d\n", thisIdx, outIdx);
outIdx++;
}
}
__syncthreads();
if (outIdx == maxOutIndices) {
return;
}
}
}
// DIM is going to be the blockSize, ie blockDim.x
// I have seen it templated for loop unrolling?
template <int TSIZE, int DIM>
__global__ void generateBoxesKernel(Prob *prob, int probWidth, int probHeight,
int *outIndices, int maxOutIndices) {
// This is for a single element, ie a batch size of 1
// I have seen nms code that uses one block per batch item
// See nmsLayer.cu from TensorRT kernels
Prob thisThreadProbs[TSIZE];
__shared__ int outIdx;
if (threadIdx.z == 0 && threadIdx.y == 0 && threadIdx.x == 0) {
outIdx = 0;
}
int probSize = probWidth * probHeight;
for (int i = 0; i < TSIZE; i++) {
if (i * DIM + threadIdx.x < probSize) {
thisThreadProbs[i] = prob[i * DIM + threadIdx.x];
}
}
for (int i = 0; i < TSIZE; i++) {
for (int j = 0; j < DIM; j++) {
int offset = i * DIM;
int index = offset + j;
if (index >= probSize) {
break;
}
__syncthreads();
if (threadIdx.x == j) {
Prob p = thisThreadProbs[i];
if (p.y > 0.95) {
outIndices[outIdx] = index;
printf("Gpu. index: %d. outIdx: %d\n", index, outIdx);
outIdx++;
}
}
__syncthreads();
if (outIdx == maxOutIndices) {
return;
}
}
}
}
vector<int> getIndicesAboveThreshold(const vector<Prob> &prob, int width,
int height, int maxOutIndices) {
vector<int> outIndices(maxOutIndices);
Prob *dProb;
int *dOutIndices;
CUDACHECK(cudaMalloc((void **)&dProb, sizeof(Prob) * prob.size()));
CUDACHECK(cudaMalloc((void **)&dOutIndices, sizeof(int) * outIndices.size()));
CUDACHECK(cudaMemcpy((void *)dProb, (void *)prob.data(),
sizeof(Prob) * prob.size(), cudaMemcpyHostToDevice));
int grid = 1;
const int block = 1024;
const int tsize = 60;
generateBoxesKernel<tsize, block>
<<<grid, block>>>(dProb, width, height, dOutIndices, outIndices.size());
CUDACHECK(cudaMemcpy((void *)outIndices.data(), (void *)dOutIndices,
sizeof(int) * outIndices.size(),
cudaMemcpyDeviceToHost));
CUDACHECK(cudaFree((void *)dProb));
CUDACHECK(cudaFree((void *)dOutIndices));
return outIndices;
}
int main(int argc, char **argv) {
// int width = 2;
// int height = 2;
// vector<Prob> prob{{0.1, 0.9}, {0.8, 0.2}, {0.4, 0.6}, {0.3, 0.7}};
// int maxOutIndices = 3;
vector<Prob> prob;
int maxOutIndices = 500;
char arrayFilename[] =
"./mtcnn-output-arrays/stage-one/prob-0.npy";
cnpy::NpyArray arr = cnpy::npy_load(arrayFilename);
vector<float> items = arr.as_vec<float>();
int height = arr.shape.at(1);
int width = arr.shape.at(2);
// auto it = items.begin();
assert(items.size() % 2 == 0);
for (auto it = items.begin(); it != items.end();) {
prob.emplace_back(*it, *(it + 1));
advance(it, 2);
}
auto outIndices =
getIndicesAboveThreshold(prob, width, height, maxOutIndices);
for (auto &i : outIndices) {
cout << i << endl;
}
}
|
c927c1bc00c1e7cb847e6094ba3096dfac529ad8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct hipComplex {
float r;
float i;
__device__ hipComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ hipComplex operator*(const hipComplex& a) {
return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ hipComplex operator-(const hipComplex& a) {
return hipComplex(r-a.r, i-a.i);
}
__device__ hipComplex operator+(const hipComplex& a) {
return hipComplex(r+a.r, i+a.i);
}
__device__ hipComplex operator/(const hipComplex& a) {
return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ hipComplex conj(hipComplex m)
{
hipComplex out(m.r,-m.i);
return out;
}
__device__ hipComplex nor(hipComplex m)
{
hipComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(hipComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ hipComplex qpoch(hipComplex a, hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex qp(hipComplex a, hipComplex q, int n) {
hipComplex out(1.0,0.0);
hipComplex unity(1.0,0.0);
int i = 0;
hipComplex Q = q;
if(q.magnitude2()>1.0)
{
return hipComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ hipComplex ramphi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ hipComplex rampsi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ hipComplex ramchi(hipComplex q) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ hipComplex ramf(hipComplex a, hipComplex b) {
hipComplex out(1.0,0.0);
hipComplex mone(-1.0,0.0);
hipComplex ma = mone*a;
hipComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ hipComplex expc(hipComplex m)
{
hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ hipComplex powc(hipComplex ag, hipComplex bg)
{
hipComplex out(0.0,0.0);
hipComplex mesp(0.0,0.0);
hipComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
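/*
   For reference, the two factors above implement ag^bg = exp(bg*log(ag)) with
   ag = R*exp(i*theta) and bg = c + d*i:
       ag^bg = exp((c + d*i)*(log(R) + i*theta))
             = R^c * exp(-d*theta) * ( cos(c*theta + d*log(R)) + i*sin(c*theta + d*log(R)) )
   mesp supplies the R^(c + d*i) part and frim the exp(i*theta*(c + d*i)) part,
   so their product is the full power.
*/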
// cosine (nothing algorithmically clean)
__device__ hipComplex cosc(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.5,0.0);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ hipComplex sins(hipComplex m)
{
hipComplex ai(0.0,1.0);
hipComplex ot(0.0,0.5);
hipComplex mone(-1.0,0.0);
hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ hipComplex tans(hipComplex m)
{
return sins(m)/cosc(m);
}
__device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z)
{
hipComplex out(0.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ hipComplex bnewt(hipComplex z) {
hipComplex three(3.0,0.0);
hipComplex unity(1.0,0.0);
hipComplex out(0.0,0.0);
hipComplex Z =z;
hipComplex L(0.0,0.0);
hipComplex R(0.62348980185873359,0.7818314824680298);
hipComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ hipComplex they3(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex wahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ hipComplex dwahi(hipComplex z)
{
int u;
hipComplex un(1.0,0.0);
hipComplex ne(1.0,0.0);
hipComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ hipComplex they3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ hipComplex h3ey3p(hipComplex z, hipComplex q)
{
int u;
hipComplex out(0.0,0.0);
hipComplex aut(0.0,0.0);
hipComplex enn(-20.0,0.0);
hipComplex onn(1.0,0.0);
hipComplex dui(0.0,1.0);
hipComplex vel(0.0,0.0);
hipComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ hipComplex thess(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thess4(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
hipComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ hipComplex thass(hipComplex z, hipComplex q)
{
int v;
hipComplex unity(1.0,0.0);
hipComplex out(1.0,0.0);
hipComplex tw(2.0,0.0);
hipComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ hipComplex rogers( hipComplex q)
{
hipComplex onf(0.2,0.0);
hipComplex Q5 = q*q*q*q*q;
hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ hipComplex flat(hipComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
hipComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ hipComplex eff(hipComplex z, hipComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ hipComplex thete(float R, hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
hipComplex ann(1.0,0.0);
hipComplex bnn(1.0,0.0);
hipComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ hipComplex thetta(hipComplex tau, hipComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
hipComplex A(0.0,0.0);
/* miscellaneous setup */
hipComplex pai(3.14159265353898,0.0);
hipComplex ai(0.0,1.0);
hipComplex oo(1.0,0.0);
hipComplex oot(2.0,0.0);
hipComplex nini(9.0,0.0);
hipComplex eigh(-18.0,0.0);
/* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
hipComplex frann(1.0,0.0);
frann = pai * ai * tau ;
hipComplex shenn(1.0,0.0);
shenn = oot * ai * z;
hipComplex plenn(1.0,0.0);
hipComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the hipComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
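/*
   For reference: the sum above is the classical Jacobi theta function in its
   tau form, with nome q = exp(i*pi*tau),
       theta_3(z | tau) = sum_n exp(i*pi*tau*n^2 + 2*i*n*z),
   truncated here to n in [-10, 10).
*/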
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
hipComplex bi(3.1415926535898,0.0);
const float scale = 4.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = - scale *(float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale *(float)(DIM/2 - pos.y)/(DIM/2);
hipComplex mouse(LA,LB);
hipComplex moux(LA,0.0);
hipComplex mouy(0.0,LB);
hipComplex q(fx,fy);
/* hipComplex tik(sin(ticks/40.0f),0.0);*/
/* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
hipComplex fixon(.029348,.828934);
hipComplex faxon(.029348,-.828934);
hipComplex unity(1.0,0.0);
hipComplex ai(0.0,1.0);
hipComplex aon = expc(ai*moux);
hipComplex uon= expc(mouy);
hipComplex flurn(0.0,0.0);
hipComplex accume(0.0,0.0);
hipComplex eccume(0.0,0.0);
hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
hipComplex cue = q;
hipComplex lam(0.73736887807831963, -0.67549029426152396);
hipComplex due(3.0,0.0);
hipComplex tir(2.0,0.0);
hipComplex selga(3.5,0.0);
hipComplex arig(0.0,0.0);
hipComplex vro(-1.0,0.0);
hipComplex tle(1.0,0.0);
hipComplex sle(4.0,0.0);
hipComplex cherra(0.62348980185873359, 0.7818314824680298);
hipComplex lerra = cherra*cherra;
hipComplex ferra = lerra * cherra;
hipComplex terra = ferra * cherra;
hipComplex zerra = terra * cherra;
hipComplex nerra = zerra * cherra;
hipComplex vlarv(1/3.0,0.0);
hipComplex sugna(0.70710678118654757, 0.70710678118654746);
hipComplex regna(0.99966573338968745, 0.025853848581176047);
hipComplex spa(sqrtf(2.0),0.0);
hipComplex spb(sqrtf(3.0),0.0);
hipComplex spc(sqrtf(4.0),0.0);
hipComplex spd(sqrtf(5.0),0.0);
hipComplex mrun(1/2.0,0.0);
hipComplex gloon (2.0,0.0);
hipComplex plenod(-.01,0.0);
hipComplex nue = cue;
hipComplex bor(-10.0,0.0);
hipComplex ft(14.0,0.0);
hipComplex sev(7.0,0.0);
hipComplex nat(0.0,-10.0);
hipComplex rhus(1.0,0.0);
hipComplex D(0.739085133215160641655312087674,0.0);
hipComplex qoo=cue;
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
// This is the baby attempt to make a quasiconformal transformation
// I have some reasoning a few days ago I need to append to my math diary
/*for(v=0;v<30;v++)
{
the way of playing the game has a way of changing the rules: James Gleick's Chaos
cue = cue - (moux*(cosc(cue) - ai*moux*sins(cue))/(cosc(cue)+ai*mouy*sins(cue)));
accume = accume *cue;
}
cue = accume;*/
/*cue = (thess(cue/ft,fixon) + moux*thess(cue*vro/ft,fixon) )*(thess(cue/sev,fixon) + mouy*thess(vro*cue/sev,fixon) + thess(arig,fixon))-( thess(cue/ft,fixon) + moux*thess(vro*cue/ft,fixon)+thess(cue/sev,fixon) + mouy*thess(vro*sev/ft,fixon) + thess(arig,fixon));
*/
//cue = expc(gloon*bi*ai*cue);
for(v=1;v<60;v++)
{
accume = accume + unity/ powc((unity-qoo)/(unity-cue),mouse);
qoo = cue * qoo;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos);
} | c927c1bc00c1e7cb847e6094ba3096dfac529ad8.cu | #include "kernel.h"
#define TX 32
#define TY 32
#define DIM 2100
struct cuComplex {
float r;
float i;
__device__ cuComplex( float a, float b ) : r(a), i(b) {}
__device__ float magnitude2( void ) {
return r * r + i * i;
}
__device__ cuComplex operator*(const cuComplex& a) {
return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
}
__device__ cuComplex operator-(const cuComplex& a) {
return cuComplex(r-a.r, i-a.i);
}
__device__ cuComplex operator+(const cuComplex& a) {
return cuComplex(r+a.r, i+a.i);
}
__device__ cuComplex operator/(const cuComplex& a) {
return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i));
}
};
__device__ cuComplex conj(cuComplex m)
{
cuComplex out(m.r,-m.i);
return out;
}
__device__ cuComplex nor(cuComplex m)
{
cuComplex out(m.r*m.r+m.i*m.i,0.0);
return out;
}
__device__ float norg(cuComplex m)
{
return sqrtf(m.r*m.r+m.i*m.i);
}
__device__ cuComplex qpoch(cuComplex a, cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<80;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex qp(cuComplex a, cuComplex q, int n) {
cuComplex out(1.0,0.0);
cuComplex unity(1.0,0.0);
int i = 0;
cuComplex Q = q;
if(q.magnitude2()>1.0)
{
return cuComplex(0.0,0.0);
}
// We want to formally match the definition of a q-pochhammer symbol.
for(i=1;i<n;i++)
{
out = out * (unity - a*Q);
Q = q * Q;
}
return out;
}
__device__ cuComplex ramphi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,mq)/qpoch(q,mq);
}
__device__ cuComplex rampsi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q)*qpoch(q*q,q*q);
}
__device__ cuComplex ramchi(cuComplex q) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex mq = mone*q;
return qpoch(mq,q*q);
}
__device__ cuComplex ramf(cuComplex a, cuComplex b) {
cuComplex out(1.0,0.0);
cuComplex mone(-1.0,0.0);
cuComplex ma = mone*a;
cuComplex mb = mone*b;
return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b);
}
// complex exponential
__device__ cuComplex expc(cuComplex m)
{
cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i));
return out;
}
__device__ cuComplex powc(cuComplex ag, cuComplex bg)
{
cuComplex out(0.0,0.0);
cuComplex mesp(0.0,0.0);
cuComplex frim(0.0,0.0);
double radiu, thet;
/* get the proper polar form of the complex number */
radiu = sqrtf(ag.r*ag.r + ag.i*ag.i);
thet = atan2f(ag.i,ag.r);
/* mesp gives R^(c+di) */
mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu));
mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu));
/* frim gives e^(i theta (c+di)) */
/* now since we already have the machinery
for performing complex exponentiation (just exp), we
can just call that here */
frim.r = -1.0 * bg.i * thet;
frim.i = bg.r * thet;
frim = expc(frim);
out = mesp*frim;
return out;
}
// cosine (nothing algorithmically clean)
__device__ cuComplex cosc(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.5,0.0);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai));
return out;
}
__device__ cuComplex sins(cuComplex m)
{
cuComplex ai(0.0,1.0);
cuComplex ot(0.0,0.5);
cuComplex mone(-1.0,0.0);
cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai));
return out;
}
__device__ cuComplex tans(cuComplex m)
{
return sins(m)/cosc(m);
}
__device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z)
{
cuComplex out(0.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex unity(1.0,0.0);
out = expc(ai*t) * (z-a)/(unity-conj(a)*z);
return out;
}
__device__ cuComplex bnewt(cuComplex z) {
cuComplex three(3.0,0.0);
cuComplex unity(1.0,0.0);
cuComplex out(0.0,0.0);
cuComplex Z =z;
cuComplex L(0.0,0.0);
cuComplex R(0.62348980185873359,0.7818314824680298);
cuComplex v(0.62348980185873359,0.7818314824680298);
int i;
for(i=0;i<100;i++)
{
L = sins(expc(Z)-cosc(Z))-Z;
out = out + v*L;
v = R * v;
Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity);
}
return out;
}
__device__ cuComplex they3(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + powc(q,enn*enn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex wahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne);
ne = ne + un;
}
out = out + un;
return out;
}
__device__ cuComplex dwahi(cuComplex z)
{
int u;
cuComplex un(1.0,0.0);
cuComplex ne(1.0,0.0);
cuComplex out(0.0,0.0);
for(u=1;u<40;u++)
{
out = out + powc(z/ne,ne-un);
ne = ne + un;
}
return out;
}
__device__ cuComplex they3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
for(u=-20;u<20;u++)
{
out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z);
enn = enn + onn;
}
return out;
}
__device__ cuComplex h3ey3p(cuComplex z, cuComplex q)
{
int u;
cuComplex out(0.0,0.0);
cuComplex aut(0.0,0.0);
cuComplex enn(-20.0,0.0);
cuComplex onn(1.0,0.0);
cuComplex dui(0.0,1.0);
cuComplex vel(0.0,0.0);
cuComplex rav(0.0,0.0);
for(u=-40;u<40;u++)
{
vel = expc(dui*enn*z);
rav = powc(q,enn*enn);
aut = aut + (enn*enn)*rav/q*vel;
out = out + rav*vel;
enn = enn + onn;
}
return out/aut;
}
__device__ cuComplex thess(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<10;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thess4(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
cuComplex roo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
roo = roo * r * r ;
out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r));
}
return out;
}
__device__ cuComplex thass(cuComplex z, cuComplex q)
{
int v;
cuComplex unity(1.0,0.0);
cuComplex out(1.0,0.0);
cuComplex tw(2.0,0.0);
cuComplex qoo(1.0,0.0);
for(v=0;v<20;v++)
{
qoo = qoo * q * q;
out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q));
}
return out;
}
__device__ cuComplex rogers( cuComplex q)
{
cuComplex onf(0.2,0.0);
cuComplex Q5 = q*q*q*q*q;
cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5));
return out;
}
__device__ cuComplex flat(cuComplex m)
{
float ua = sqrtf(m.r*m.r + m.i*m.i);
cuComplex out(m.r/ua,m.i/ua);
return out;
}
__device__ cuComplex eff(cuComplex z, cuComplex lambda)
{
return z*z*z*z+ lambda/(z*z*z*z);
}
__device__ cuComplex thete(float R, cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
cuComplex ann(1.0,0.0);
cuComplex bnn(1.0,0.0);
cuComplex scrunn(1.0,0.0);
float ca, cb,cc;
int a, b;
for(a=-10;a<10;a++)
{
ann.r = a;
for(b=-10;b<10;b++)
{
bnn.r = b;
if(((a+b)%2)==0)
{
scrunn.r = a*a + b*b;
A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn));
}
else
{
ca = 5.0 + a*a + b*b;
cb = 2*(a * cos(R)- b * sin(R));
cc = 4*(b * cos(R)+a*sin(R));
scrunn.r = ca + cb + cc;
A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn));
}
}
}
return A;
}
__device__ cuComplex thetta(cuComplex tau, cuComplex z)
{
/* note that as I'm not immediately doing this on the unit circle, as the real
action is considered to happen on the z-plane, we don't yet need to fret about
whether I'm looking at things in terms of tau or in terms of q, next revision */
/* set accumulant to zero */
cuComplex A(0.0,0.0);
/* miscellaneous setup */
cuComplex pai(3.14159265353898,0.0);
cuComplex ai(0.0,1.0);
cuComplex oo(1.0,0.0);
cuComplex oot(2.0,0.0);
cuComplex nini(9.0,0.0);
cuComplex eigh(-18.0,0.0);
/* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */
cuComplex frann(1.0,0.0);
frann = pai * ai * tau ;
cuComplex shenn(1.0,0.0);
shenn = oot * ai * z;
cuComplex plenn(1.0,0.0);
cuComplex enn(1.0,0.0);
int n;
for(n=-10;n<10;n++)
{
enn.r = n;
plenn = enn * enn;
/* this get the cuComplex out of the event loop */
A = A + expc(frann* plenn) * expc(shenn* enn);
}
return A;
}
__device__
unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 0 : n); }
__global__
void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) {
const int c = blockIdx.x*blockDim.x + threadIdx.x;
const int r= blockIdx.y*blockDim.y + threadIdx.y;
const int i = c + r*w; // 1D indexing
float pi = 3.1415926535898;
cuComplex bi(3.1415926535898,0.0);
const float scale = 4.0;
float fx = -scale * (float)(DIM/2 - c)/(DIM/2);
float fy = scale * (float)(DIM/2 - r)/(DIM/2);
float LA = - scale *(float)(DIM/2 - pos.x)/(DIM/2);
float LB = scale *(float)(DIM/2 - pos.y)/(DIM/2);
cuComplex mouse(LA,LB);
cuComplex moux(LA,0.0);
cuComplex mouy(0.0,LB);
cuComplex q(fx,fy);
/* cuComplex tik(sin(ticks/40.0f),0.0);*/
/* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0));
cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024));
cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/
cuComplex fixon(.029348,.828934);
cuComplex faxon(.029348,-.828934);
cuComplex unity(1.0,0.0);
cuComplex ai(0.0,1.0);
cuComplex aon = expc(ai*moux);
cuComplex uon= expc(mouy);
cuComplex flurn(0.0,0.0);
cuComplex accume(0.0,0.0);
cuComplex eccume(0.0,0.0);
cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0);
cuComplex cue = q;
cuComplex lam(0.73736887807831963, -0.67549029426152396);
cuComplex due(3.0,0.0);
cuComplex tir(2.0,0.0);
cuComplex selga(3.5,0.0);
cuComplex arig(0.0,0.0);
cuComplex vro(-1.0,0.0);
cuComplex tle(1.0,0.0);
cuComplex sle(4.0,0.0);
cuComplex cherra(0.62348980185873359, 0.7818314824680298);
cuComplex lerra = cherra*cherra;
cuComplex ferra = lerra * cherra;
cuComplex terra = ferra * cherra;
cuComplex zerra = terra * cherra;
cuComplex nerra = zerra * cherra;
cuComplex vlarv(1/3.0,0.0);
cuComplex sugna(0.70710678118654757, 0.70710678118654746);
cuComplex regna(0.99966573338968745, 0.025853848581176047);
cuComplex spa(sqrtf(2.0),0.0);
cuComplex spb(sqrtf(3.0),0.0);
cuComplex spc(sqrtf(4.0),0.0);
cuComplex spd(sqrtf(5.0),0.0);
cuComplex mrun(1/2.0,0.0);
cuComplex gloon (2.0,0.0);
cuComplex plenod(-.01,0.0);
cuComplex nue = cue;
cuComplex bor(-10.0,0.0);
cuComplex ft(14.0,0.0);
cuComplex sev(7.0,0.0);
cuComplex nat(0.0,-10.0);
cuComplex rhus(1.0,0.0);
cuComplex D(0.739085133215160641655312087674,0.0);
cuComplex qoo=cue;
/* if ((c >= w) || (r >= h)) return; // Check if within image bounds
const int i = c + r*w; // 1D indexing
const int dist = sqrtf((c - pos.x)*(c - pos.x) +
(r - pos.y)*(r - pos.y));
const unsigned char intensity = clip(255 - dist);*/
// theta function varying on constant
// cue =thess(cue,fixon*mouse);
int v=1;
int axa=-10;
/*while((v<100)&&norg(cue)<2.0)
{
cue = cue*(cue-mouy)*(cue-moux) -cue * q;
v++;
}*/
/*for(v=1;v<5;v++)
{
cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/
/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/
/*for(v=0;v<60;v++){
cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/
/*
One for
(x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0
The other for
cos(x)-x
Eripgrunna
*/
// This is the baby attempt to make a quasiconformal transformation
// I have some reasoning a few days ago I need to append to my math diary
/*for(v=0;v<30;v++)
{
the way of playing the game has a way of changing the rules: James Gleick's Chaos
cue = cue - (moux*(cosc(cue) - ai*moux*sins(cue))/(cosc(cue)+ai*mouy*sins(cue)));
accume = accume *cue;
}
cue = accume;*/
/*cue = (thess(cue/ft,fixon) + moux*thess(cue*vro/ft,fixon) )*(thess(cue/sev,fixon) + mouy*thess(vro*cue/sev,fixon) + thess(arig,fixon))-( thess(cue/ft,fixon) + moux*thess(vro*cue/ft,fixon)+thess(cue/sev,fixon) + mouy*thess(vro*sev/ft,fixon) + thess(arig,fixon));
*/
//cue = expc(gloon*bi*ai*cue);
for(v=1;v<60;v++)
{
accume = accume + unity/ powc((unity-qoo)/(unity-cue),mouse);
qoo = cue * qoo;
}
cue = accume;
double tha;
tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi));
d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2));
d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2));
d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2));
d_out[i].w = 255;
}
void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) {
const dim3 blockSize(TX, TY);
const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY);
distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos);
} |
3a8d386caa4de74c7861cadcca47f99c6f4ded37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// old op include, fluid should be removed
#ifdef PADDLE_WITH_HIP
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#else
#include <hipcub/hipcub.hpp>
#endif
#include <vector>
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
// trace op include
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
template <typename T, typename Context>
void GetClassInterval(const gpuStream_t& stream,
const phi::Place& place,
const Context& dev_ctx,
const int rid,
const int rank,
const int nranks,
const int D,
DenseTensor* class_interval) {
std::vector<int> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = D;
if (nranks <= 1) {
phi::TensorFromVector(shard_dim_vec, dev_ctx, class_interval);
return;
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
DenseTensor num_classes_per_device;
phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device);
int* num_classes_per_device_ptr = num_classes_per_device.data<int>();
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
paddle::platform::NCCLCommContext::Instance().Get(rid, place);
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType(
num_classes_per_device.dtype())),
ncclSum,
comm->comm(),
calcu_stream));
}
class_interval->Resize({nranks + 1});
auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval);
size_t cub_temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum<int*, int*>(
nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream);
auto cub_temp_storage =
phi::memory_utils::Alloc(place, cub_temp_storage_bytes);
hipcub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(),
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
stream);
return;
#endif
}
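// Worked example for GetClassInterval (illustrative): with nranks = 2 and
// shard widths D = 3 on rank 0 and D = 5 on rank 1, the ranks contribute
// {0, 3, 0} and {0, 0, 5}; the allreduce sums them to {0, 3, 5} and the
// inclusive scan yields class_interval = {0, 3, 8}, so rank r owns the
// classes in [class_interval[r], class_interval[r+1]).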
template <typename T, typename IndexT>
__global__ void AddMarginToPositiveLogitsKernel(T* logit,
const IndexT* label,
const float margin1,
const float margin2,
const float margin3,
const int rank,
const int nranks,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
int start_index = class_interval_ptr[rank];
int end_index = class_interval_ptr[rank + 1];
int num_classes = class_interval_ptr[nranks];
CUDA_KERNEL_LOOP(i, N) {
auto real_label = label[i];
PADDLE_ENFORCE((real_label < num_classes) && (real_label >= 0),
"The index is out of bounds, "
"please check whether the value of label and "
"input meet the number of class. It should "
"be less than [%d], but received [%d]",
num_classes,
real_label);
if (real_label >= start_index && real_label < end_index) {
int64_t offset = i * D + real_label - start_index;
if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) {
MPType x = static_cast<MPType>(logit[offset]);
MPType theta = acos(x);
if (fabs(margin1 - 1.0) > 1e-8) {
theta *= static_cast<MPType>(margin1);
}
if (fabs(margin2) > 1e-8) {
theta += static_cast<MPType>(margin2);
}
logit[offset] = static_cast<T>(cos(theta));
}
if (fabs(margin3) > 1e-8) {
MPType y = static_cast<MPType>(logit[offset]);
y -= static_cast<MPType>(margin3);
logit[offset] = static_cast<T>(y);
}
}
}
}
template <typename T>
__global__ void ScaleLogitKernel(T* logits,
const float scale,
const int64_t N,
const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) { logits[i] *= static_cast<T>(scale); }
}
template <typename T>
__global__ void LogitsMinusMaxKernel(T* logits,
const T* logits_max_per_row,
const int64_t N,
const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
logits[i] -= logits_max_per_row[row];
}
}
template <typename T>
__global__ void LogitsMinusLogSumKernel(T* logits,
const T* logits_sum_per_row,
const int64_t N,
const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
logits[i] -= phi::kps::details::Log(logits_sum_per_row[row]);
}
}
template <typename T, typename IndexT>
__global__ void HardLabelSoftmaxWithCrossEntropyKernel(
T* loss,
T* log_softmax,
const IndexT* labels,
const int rank,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
int start_index = class_interval_ptr[rank];
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == labels[row]) {
auto softmax = log_softmax[i];
loss[row] = -softmax;
log_softmax[i] = phi::kps::details::Exp(softmax);
} else {
log_softmax[i] = phi::kps::details::Exp(log_softmax[i]);
}
}
}
template <typename T, typename Context>
void MarginCrossEntropyKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& labels,
bool return_softmax,
int ring_id,
int rank,
int nranks,
float margin1,
float margin2,
float margin3,
float scale,
DenseTensor* softmax,
DenseTensor* loss) {
const auto& place = dev_ctx.GetPlace(); // old code
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
paddle::platform::NCCLComm* comm;
paddle::distributed::ProcessGroup* pg = nullptr;
gpuStream_t stream;
if (nranks > 1) {
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(ring_id)) {
// Use ProcessGroup
pg = map->get(ring_id);
} else {
comm = paddle::platform::NCCLCommContext::Instance().Get(ring_id, place);
// use global calculate stream
stream = static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
}
}
#endif
// allocate memory on device.
T* softmax_ptr = dev_ctx.template Alloc<T>(softmax);
T* loss_ptr = dev_ctx.template Alloc<T>(loss);
const auto& logits_dims = logits.dims();
const auto& labels_dims = labels.dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type =
paddle::framework::TransToProtoVarType(labels.dtype());
// copy logits to softmax variable since we can't modify logits,
// and it will also be used when calculating the gradient
phi::Copy<Context>(dev_ctx, logits, dev_ctx.GetPlace(), true, softmax);
DenseTensor softmax_2d;
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
T* logits_ptr = softmax_2d.data<T>();
DenseTensor class_interval;
GetClassInterval<T, Context>(dev_ctx.stream(),
dev_ctx.GetPlace(),
dev_ctx,
ring_id,
rank,
nranks,
D,
&class_interval);
// step 1, preprocess logits
// add margin for positive elements
// theta = acos(x_i)
// (cos(m1 * theta + m2) - m3)
// save match_logits, used for gradient computation.
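// Putting the three margins together: for the target class y_i the kernel
// below replaces cos(theta_{y_i}) with cos(margin1*theta_{y_i} + margin2) - margin3.
// After the scale by s and the softmax steps further down, this corresponds to
// the combined margin softmax loss (m1/m2/m3 are the margins popularized by
// SphereFace, ArcFace and CosFace, respectively):
// loss_i = -log( exp(s*(cos(m1*theta_{y_i}+m2)-m3)) /
// (exp(s*(cos(m1*theta_{y_i}+m2)-m3)) + sum_{j!=y_i} exp(s*cos(theta_j))) )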
if (label_type == paddle::framework::proto::VarType::INT32) {
typedef int32_t LabelT;
hipLaunchKernelGGL(( AddMarginToPositiveLogitsKernel<T>)
, dim3(NumBlocks(N)), dim3(threads), 0, dev_ctx.stream(),
logits_ptr,
labels.data<LabelT>(),
margin1,
margin2,
margin3,
rank,
nranks,
N,
D,
class_interval.data<int>());
} else if (label_type == paddle::framework::proto::VarType::INT64) {
typedef int64_t LabelT;
hipLaunchKernelGGL(( AddMarginToPositiveLogitsKernel<T>)
, dim3(NumBlocks(N)), dim3(threads), 0, dev_ctx.stream(),
logits_ptr,
labels.data<LabelT>(),
margin1,
margin2,
margin3,
rank,
nranks,
N,
D,
class_interval.data<int>());
} else {
PADDLE_THROW(errors::Unimplemented(
"margin_cross_entropy label type noly support int32 and int64, "
"but got %s",
label_type));
}
// scale by s
hipLaunchKernelGGL(( ScaleLogitKernel<T>), dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(),
logits_ptr, scale, N, D);
// step 2, obtain logit_max
DenseTensor logits_max;
logits_max.Resize({N, 1});
dev_ctx.template Alloc<T>(&logits_max);
T* logits_max_buff = dev_ctx.template Alloc<T>(&logits_max);
phi::funcs::
ReduceKernel<T, T, phi::kps::MaxFunctor, phi::kps::IdentityFunctor<T>>(
static_cast<const phi::GPUContext&>(dev_ctx),
softmax_2d,
&logits_max,
phi::kps::IdentityFunctor<T>(),
{1});
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
if (pg) {
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(logits_max);
out_tensor.push_back(logits_max);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::MAX;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
logits_max_buff,
logits_max_buff,
logits_max.numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(logits_max.dtype())),
ncclMax,
comm->comm(),
stream));
}
}
#endif
// step 3, logit - logit_max
hipLaunchKernelGGL(( LogitsMinusMaxKernel<T>), dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(),
logits_ptr, logits_max_buff, N, D);
// step 4, sum(exp(logit - logit_max))
DenseTensor sum_exp_logits;
sum_exp_logits.Resize({N, 1});
dev_ctx.template Alloc<T>(&sum_exp_logits);
T* sum_exp_logits_buff = dev_ctx.template Alloc<T>(&sum_exp_logits);
phi::funcs::ReduceKernel<T, T, phi::kps::AddFunctor, phi::kps::ExpFunctor<T>>(
static_cast<const phi::GPUContext&>(dev_ctx),
softmax_2d,
&sum_exp_logits,
phi::kps::ExpFunctor<T>(),
{1});
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
if (pg) {
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(sum_exp_logits);
out_tensor.push_back(sum_exp_logits);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
sum_exp_logits_buff,
sum_exp_logits_buff,
sum_exp_logits.numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(sum_exp_logits.dtype())),
ncclSum,
comm->comm(),
stream));
}
}
#endif
// step 5, (logit - logit_max) - log(sum(exp(logit - logit_max)))
hipLaunchKernelGGL(( LogitsMinusLogSumKernel<T>)
, dim3(NumBlocks(N * D)), dim3(threads), 0, dev_ctx.stream(),
logits_ptr, sum_exp_logits_buff, N, D);
// step 6, prob = exp((logit - logit_max) - log(sum(exp(logit -
// logit_max))))
// loss = -((logit_i - logit_max) - log(sum(exp(logit - logit_max))))
phi::funcs::SetConstant<Context, T> functor;
functor(dev_ctx, loss, static_cast<T>(0.0));
if (label_type == paddle::framework::proto::VarType::INT32) {
typedef int32_t LabelT;
hipLaunchKernelGGL(( HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_ptr,
logits_ptr,
labels.data<LabelT>(),
rank,
N,
D,
class_interval.data<int>());
} else if (label_type == paddle::framework::proto::VarType::INT64) {
typedef int64_t LabelT;
hipLaunchKernelGGL(( HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(), loss_ptr,
logits_ptr,
labels.data<LabelT>(),
rank,
N,
D,
class_interval.data<int>());
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
if (pg) {
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(*loss);
out_tensor.push_back(*loss);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
loss_ptr,
loss_ptr,
loss->numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(loss->dtype())),
ncclSum,
comm->comm(),
stream));
}
}
#endif
}
} // namespace phi
PD_REGISTER_KERNEL(margin_cross_entropy,
GPU,
ALL_LAYOUT,
phi::MarginCrossEntropyKernel,
float,
double,
phi::dtype::float16) {}
| 3a8d386caa4de74c7861cadcca47f99c6f4ded37.cu |
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// old op include, fluid should be removed
#ifdef PADDLE_WITH_HIP
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#else
#include <cub/cub.cuh>
#endif
#include <vector>
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/kernels/funcs/axis_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/reduce_function.h"
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
#include "paddle/fluid/distributed/collective/process_group.h"
#include "paddle/fluid/platform/collective_helper.h"
#include "paddle/fluid/platform/device/gpu/nccl_helper.h"
#endif
// trace op include
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/memory_utils.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
static constexpr int kNumCUDAThreads = 512;
static constexpr int kNumMaxinumNumBlocks = 4096;
static inline int NumBlocks(const int N) {
return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads,
kNumMaxinumNumBlocks);
}
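// Builds the per-rank class offsets: each rank writes its shard size D at
// position rank + 1, the vector is summed across ranks, and an inclusive
// prefix sum (cub::DeviceScan) turns it into offsets so that
// class_interval[rank] .. class_interval[rank + 1] bound the classes owned
// by this rank.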
template <typename T, typename Context>
void GetClassInterval(const gpuStream_t& stream,
const phi::Place& place,
const Context& dev_ctx,
const int rid,
const int rank,
const int nranks,
const int D,
DenseTensor* class_interval) {
std::vector<int> shard_dim_vec(nranks + 1, 0);
shard_dim_vec[rank + 1] = D;
if (nranks <= 1) {
phi::TensorFromVector(shard_dim_vec, dev_ctx, class_interval);
return;
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
DenseTensor num_classes_per_device;
phi::TensorFromVector(shard_dim_vec, dev_ctx, &num_classes_per_device);
int* num_classes_per_device_ptr = num_classes_per_device.data<int>();
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(rid)) {
// Use ProcessGroup
paddle::distributed::ProcessGroup* pg = map->get(rid);
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(num_classes_per_device);
out_tensor.push_back(num_classes_per_device);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
const auto& comm =
paddle::platform::NCCLCommContext::Instance().Get(rid, place);
// use global calculate stream
const auto calcu_stream =
static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
num_classes_per_device_ptr,
num_classes_per_device_ptr,
num_classes_per_device.numel(),
paddle::platform::ToNCCLDataType(paddle::framework::TransToProtoVarType(
num_classes_per_device.dtype())),
ncclSum,
comm->comm(),
calcu_stream));
}
class_interval->Resize({nranks + 1});
auto class_interval_ptr = dev_ctx.template Alloc<int>(class_interval);
size_t cub_temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum<int*, int*>(
nullptr, cub_temp_storage_bytes, nullptr, nullptr, nranks + 1, stream);
auto cub_temp_storage =
phi::memory_utils::Alloc(place, cub_temp_storage_bytes);
cub::DeviceScan::InclusiveSum<int*, int*>(cub_temp_storage->ptr(),
cub_temp_storage_bytes,
num_classes_per_device_ptr,
class_interval_ptr,
nranks + 1,
stream);
return;
#endif
}
template <typename T, typename IndexT>
__global__ void AddMarginToPositiveLogitsKernel(T* logit,
const IndexT* label,
const float margin1,
const float margin2,
const float margin3,
const int rank,
const int nranks,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
using MPType = typename phi::dtype::MPTypeTrait<T>::Type;
int start_index = class_interval_ptr[rank];
int end_index = class_interval_ptr[rank + 1];
int num_classes = class_interval_ptr[nranks];
CUDA_KERNEL_LOOP(i, N) {
auto real_label = label[i];
PADDLE_ENFORCE((real_label < num_classes) && (real_label >= 0),
"The index is out of bounds, "
"please check whether the value of label and "
"input meet the number of class. It should "
"be less than [%d], but received [%d]",
num_classes,
real_label);
if (real_label >= start_index && real_label < end_index) {
int64_t offset = i * D + real_label - start_index;
if (fabs(margin1 - 1.0) > 1e-8 || fabs(margin2) > 1e-8) {
MPType x = static_cast<MPType>(logit[offset]);
MPType theta = acos(x);
if (fabs(margin1 - 1.0) > 1e-8) {
theta *= static_cast<MPType>(margin1);
}
if (fabs(margin2) > 1e-8) {
theta += static_cast<MPType>(margin2);
}
logit[offset] = static_cast<T>(cos(theta));
}
if (fabs(margin3) > 1e-8) {
MPType y = static_cast<MPType>(logit[offset]);
y -= static_cast<MPType>(margin3);
logit[offset] = static_cast<T>(y);
}
}
}
}
template <typename T>
__global__ void ScaleLogitKernel(T* logits,
const float scale,
const int64_t N,
const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) { logits[i] *= static_cast<T>(scale); }
}
template <typename T>
__global__ void LogitsMinusMaxKernel(T* logits,
const T* logits_max_per_row,
const int64_t N,
const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
logits[i] -= logits_max_per_row[row];
}
}
template <typename T>
__global__ void LogitsMinusLogSumKernel(T* logits,
const T* logits_sum_per_row,
const int64_t N,
const int64_t D) {
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
logits[i] -= phi::kps::details::Log(logits_sum_per_row[row]);
}
}
template <typename T, typename IndexT>
__global__ void HardLabelSoftmaxWithCrossEntropyKernel(
T* loss,
T* log_softmax,
const IndexT* labels,
const int rank,
const int64_t N,
const int64_t D,
const int* class_interval_ptr) {
int start_index = class_interval_ptr[rank];
CUDA_KERNEL_LOOP(i, N * D) {
auto row = i / D;
auto col = i % D;
if ((col + start_index) == labels[row]) {
auto softmax = log_softmax[i];
loss[row] = -softmax;
log_softmax[i] = phi::kps::details::Exp(softmax);
} else {
log_softmax[i] = phi::kps::details::Exp(log_softmax[i]);
}
}
}
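// Forward kernel: the margins (m1, m2, m3) are folded into each sample's
// positive logit, logits are scaled by s, and a numerically stable
// log-softmax is computed with the row-wise max and sum(exp) all-reduced
// across model-parallel ranks before the hard-label loss is formed and
// summed across ranks.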
template <typename T, typename Context>
void MarginCrossEntropyKernel(const Context& dev_ctx,
const DenseTensor& logits,
const DenseTensor& labels,
bool return_softmax,
int ring_id,
int rank,
int nranks,
float margin1,
float margin2,
float margin3,
float scale,
DenseTensor* softmax,
DenseTensor* loss) {
const auto& place = dev_ctx.GetPlace(); // old code
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
paddle::platform::NCCLComm* comm;
paddle::distributed::ProcessGroup* pg = nullptr;
gpuStream_t stream;
if (nranks > 1) {
auto map = paddle::distributed::ProcessGroupMapFromGid::getInstance();
if (map->has(ring_id)) {
// Use ProcessGroup
pg = map->get(ring_id);
} else {
comm = paddle::platform::NCCLCommContext::Instance().Get(ring_id, place);
// use global calculate stream
stream = static_cast<GPUContext*>(
paddle::platform::DeviceContextPool::Instance().Get(place))
->stream();
}
}
#endif
// allocate memory on device.
T* softmax_ptr = dev_ctx.template Alloc<T>(softmax);
T* loss_ptr = dev_ctx.template Alloc<T>(loss);
const auto& logits_dims = logits.dims();
const auto& labels_dims = labels.dims();
const int axis = logits_dims.size() - 1;
const int N = phi::funcs::SizeToAxis(axis, logits_dims);
const int D = phi::funcs::SizeFromAxis(axis, logits_dims);
int blocks = NumBlocks(N);
int threads = kNumCUDAThreads;
const auto& label_type =
paddle::framework::TransToProtoVarType(labels.dtype());
// copy logits to the softmax variable since we can't modify logits,
// and it is also used when calculating the gradient
phi::Copy<Context>(dev_ctx, logits, dev_ctx.GetPlace(), true, softmax);
DenseTensor softmax_2d;
softmax_2d.ShareDataWith(*softmax).Resize({N, D});
T* logits_ptr = softmax_2d.data<T>();
DenseTensor class_interval;
GetClassInterval<T, Context>(dev_ctx.stream(),
dev_ctx.GetPlace(),
dev_ctx,
ring_id,
rank,
nranks,
D,
&class_interval);
// step 1, preprocess logits
// add margin for positive elements
// theta = acos(x_i)
// (cos(m1 * theta + m2) - m3)
// save match_logits, used for gradient computation.
if (label_type == paddle::framework::proto::VarType::INT32) {
typedef int32_t LabelT;
AddMarginToPositiveLogitsKernel<T>
<<<NumBlocks(N), threads, 0, dev_ctx.stream()>>>(
logits_ptr,
labels.data<LabelT>(),
margin1,
margin2,
margin3,
rank,
nranks,
N,
D,
class_interval.data<int>());
} else if (label_type == paddle::framework::proto::VarType::INT64) {
typedef int64_t LabelT;
AddMarginToPositiveLogitsKernel<T>
<<<NumBlocks(N), threads, 0, dev_ctx.stream()>>>(
logits_ptr,
labels.data<LabelT>(),
margin1,
margin2,
margin3,
rank,
nranks,
N,
D,
class_interval.data<int>());
} else {
PADDLE_THROW(errors::Unimplemented(
"margin_cross_entropy label type noly support int32 and int64, "
"but got %s",
label_type));
}
// scale by s
ScaleLogitKernel<T><<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>(
logits_ptr, scale, N, D);
// step 2, obtain logit_max
DenseTensor logits_max;
logits_max.Resize({N, 1});
dev_ctx.template Alloc<T>(&logits_max);
T* logits_max_buff = dev_ctx.template Alloc<T>(&logits_max);
phi::funcs::
ReduceKernel<T, T, phi::kps::MaxFunctor, phi::kps::IdentityFunctor<T>>(
static_cast<const phi::GPUContext&>(dev_ctx),
softmax_2d,
&logits_max,
phi::kps::IdentityFunctor<T>(),
{1});
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
if (pg) {
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(logits_max);
out_tensor.push_back(logits_max);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::MAX;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
logits_max_buff,
logits_max_buff,
logits_max.numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(logits_max.dtype())),
ncclMax,
comm->comm(),
stream));
}
}
#endif
// step 3, logit - logit_max
LogitsMinusMaxKernel<T><<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>(
logits_ptr, logits_max_buff, N, D);
// step 4, sum(exp(logit - logit_max))
DenseTensor sum_exp_logits;
sum_exp_logits.Resize({N, 1});
dev_ctx.template Alloc<T>(&sum_exp_logits);
T* sum_exp_logits_buff = dev_ctx.template Alloc<T>(&sum_exp_logits);
phi::funcs::ReduceKernel<T, T, phi::kps::AddFunctor, phi::kps::ExpFunctor<T>>(
static_cast<const phi::GPUContext&>(dev_ctx),
softmax_2d,
&sum_exp_logits,
phi::kps::ExpFunctor<T>(),
{1});
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
if (pg) {
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(sum_exp_logits);
out_tensor.push_back(sum_exp_logits);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
sum_exp_logits_buff,
sum_exp_logits_buff,
sum_exp_logits.numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(sum_exp_logits.dtype())),
ncclSum,
comm->comm(),
stream));
}
}
#endif
// step 5, (logit - logit_max) - log(sum(exp(logit - logit_max)))
LogitsMinusLogSumKernel<T>
<<<NumBlocks(N * D), threads, 0, dev_ctx.stream()>>>(
logits_ptr, sum_exp_logits_buff, N, D);
// step 6, prob = exp((logit - logit_max) - log(sum(exp(logit -
// logit_max))))
// loss = -((logit_i - logit_max) - log(sum(exp(logit - logit_max))))
phi::funcs::SetConstant<Context, T> functor;
functor(dev_ctx, loss, static_cast<T>(0.0));
if (label_type == paddle::framework::proto::VarType::INT32) {
typedef int32_t LabelT;
HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_ptr,
logits_ptr,
labels.data<LabelT>(),
rank,
N,
D,
class_interval.data<int>());
} else if (label_type == paddle::framework::proto::VarType::INT64) {
typedef int64_t LabelT;
HardLabelSoftmaxWithCrossEntropyKernel<T, LabelT>
<<<blocks, threads, 0, dev_ctx.stream()>>>(loss_ptr,
logits_ptr,
labels.data<LabelT>(),
rank,
N,
D,
class_interval.data<int>());
}
#if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL)
if (nranks > 1) {
if (pg) {
std::vector<phi::DenseTensor> in_tensor;
std::vector<phi::DenseTensor> out_tensor;
in_tensor.push_back(*loss);
out_tensor.push_back(*loss);
paddle::distributed::AllreduceOptions opts;
opts.reduce_op = paddle::distributed::ReduceOp::SUM;
auto task = pg->AllReduce(in_tensor, out_tensor, opts);
task->Wait();
} else {
PADDLE_ENFORCE_GPU_SUCCESS(paddle::platform::dynload::ncclAllReduce(
loss_ptr,
loss_ptr,
loss->numel(),
paddle::platform::ToNCCLDataType(
paddle::framework::TransToProtoVarType(loss->dtype())),
ncclSum,
comm->comm(),
stream));
}
}
#endif
}
} // namespace phi
PD_REGISTER_KERNEL(margin_cross_entropy,
GPU,
ALL_LAYOUT,
phi::MarginCrossEntropyKernel,
float,
double,
phi::dtype::float16) {}
|
0a093dafbb6d2bdcab61cd7c1ac768a5f0a5c299.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_yy;
int xdim0_initialise_chunk_kernel_yy_h = -1;
int ydim0_initialise_chunk_kernel_yy_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x, y) (x + xdim0_initialise_chunk_kernel_yy * (y))
// user function
__device__
void
initialise_chunk_kernel_yy_gpu(int *yy, int *idx) {
yy[OPS_ACC0(0, 0)] = idx[1] - 2;
}
#undef OPS_ACC0
__global__ void ops_initialise_chunk_kernel_yy(int *__restrict arg0,
int arg_idx0, int arg_idx1,
int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[2];
arg_idx[0] = arg_idx0 + idx_x;
arg_idx[1] = arg_idx1 + idx_y;
arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_yy;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_yy_gpu(arg0, arg_idx);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_yy(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 9))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(9, "initialise_chunk_kernel_yy");
OPS_kernels[9].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0] + start[0];
arg_idx[1] = sb->decomp_disp[1] + start[1];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
#endif
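  // Copy the leading dimension to __constant__ memory only when it differs
  // from the cached value, avoiding a redundant hipMemcpyToSymbol per launch.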
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_initialise_chunk_kernel_yy_h) {
hipMemcpyToSymbol(xdim0_initialise_chunk_kernel_yy, &xdim0, sizeof(int));
xdim0_initialise_chunk_kernel_yy_h = xdim0;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[9].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_initialise_chunk_kernel_yy), dim3(grid), dim3(tblock), 0, 0, (int *)p_a[0], arg_idx[0],
arg_idx[1], x_size, y_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[9].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[9].mpi_time += t2 - t1;
OPS_kernels[9].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
| 0a093dafbb6d2bdcab61cd7c1ac768a5f0a5c299.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_initialise_chunk_kernel_yy;
int xdim0_initialise_chunk_kernel_yy_h = -1;
int ydim0_initialise_chunk_kernel_yy_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x, y) (x + xdim0_initialise_chunk_kernel_yy * (y))
// user function
__device__
void
initialise_chunk_kernel_yy_gpu(int *yy, int *idx) {
yy[OPS_ACC0(0, 0)] = idx[1] - 2;
}
#undef OPS_ACC0
__global__ void ops_initialise_chunk_kernel_yy(int *__restrict arg0,
int arg_idx0, int arg_idx1,
int size0, int size1) {
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[2];
arg_idx[0] = arg_idx0 + idx_x;
arg_idx[1] = arg_idx1 + idx_y;
arg0 += idx_x * 0 * 1 + idx_y * 1 * 1 * xdim0_initialise_chunk_kernel_yy;
if (idx_x < size0 && idx_y < size1) {
initialise_chunk_kernel_yy_gpu(arg0, arg_idx);
}
}
// host stub function
void ops_par_loop_initialise_chunk_kernel_yy(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1) {
// Timing
double t1, t2, c1, c2;
ops_arg args[2] = {arg0, arg1};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 2, range, 9))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(9, "initialise_chunk_kernel_yy");
OPS_kernels[9].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[2];
int end[2];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 2; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 2; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int arg_idx[2];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0] + start[0];
arg_idx[1] = sb->decomp_disp[1] + start[1];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
#endif
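  // Copy the leading dimension to __constant__ memory only when it differs
  // from the cached value, avoiding a redundant cudaMemcpyToSymbol per launch.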
int xdim0 = args[0].dat->size[0];
if (xdim0 != xdim0_initialise_chunk_kernel_yy_h) {
cudaMemcpyToSymbol(xdim0_initialise_chunk_kernel_yy, &xdim0, sizeof(int));
xdim0_initialise_chunk_kernel_yy_h = xdim0;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, 1);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
char *p_a[2];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
p_a[0] = (char *)args[0].data_d + base0;
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args, 2, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[9].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_initialise_chunk_kernel_yy<<<grid, tblock>>>((int *)p_a[0], arg_idx[0],
arg_idx[1], x_size, y_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[9].time += t1 - t2;
}
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[9].mpi_time += t2 - t1;
OPS_kernels[9].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
|
9d8148795e7e76549a5609275b255d6209074c42.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "parse_oo.h"
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid == 0) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
}
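// Allocates one ChiVertex per vertex: (row, col) hold the out-edge CSR and
// (inrow, incol) the in-edge CSC, so a vertex's degree is the gap between
// consecutive offsets (the last vertex uses the total edge count). Vertex and
// in-edge values start at INT_MAX.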
__global__ void initObject(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col,
int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
vertex[tid]->setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
//} else {
// in_end = context->getNumEdges();
//}
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid] = new ChiVertex<float, float>(tid, indegree, outdegree);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid]->setInEdge(i - in_start, incol[i], 0.0f);
//}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i],
INT_MAX);
}
}
}
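// Vertex-centric BFS relaxation: iteration 0 seeds vertex 0 with distance 0
// and writes 1 on its out-edges; later iterations take the minimum over
// in-edge values and, when a vertex's value improves, push value + 1 to its
// out-edges.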
__global__ void BFS(ChiVertex<int, int>** vertex, GraphChiContext* context,
int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
if (iteration == 0) {
if (tid == 0) {
vertex[tid]->setValue(0);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int>* outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(1);
}
}
} else {
int curmin;
curmin = vertex[tid]->getValue();
int numInEdge;
numInEdge = vertex[tid]->numInEdges();
for (int i = 0; i < numInEdge; i++) {
ChiEdge<int>* inEdge;
inEdge = vertex[tid]->getInEdge(i);
curmin = min(curmin, inEdge->getValue());
}
int vertValue;
vertValue = vertex[tid]->getValue();
if (curmin < vertValue) {
vertex[tid]->setValue(curmin);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int>* outEdge;
outEdge = vertex[tid]->getOutEdge(i);
int edgeValue;
edgeValue = outEdge->getValue();
if (edgeValue > curmin + 1) {
outEdge->setValue(curmin + 1);
}
}
}
}
}
}
__global__ void copyBack(ChiVertex<int, int>** vertex, GraphChiContext* context,
int* index) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
index[tid] = vertex[tid]->getValue();
}
}
| 9d8148795e7e76549a5609275b255d6209074c42.cu |
#include "parse_oo.h"
__global__ void initContext(GraphChiContext* context, int vertices, int edges) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid == 0) {
context->setNumIterations(0);
context->setNumVertices(vertices);
context->setNumEdges(edges);
}
}
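// Allocates one ChiVertex per vertex: (row, col) hold the out-edge CSR and
// (inrow, incol) the in-edge CSC, so a vertex's degree is the gap between
// consecutive offsets (the last vertex uses the total edge count). Vertex and
// in-edge values start at INT_MAX.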
__global__ void initObject(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col,
int* inrow, int* incol) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
int in_start = inrow[tid];
int in_end;
if (tid + 1 < context->getNumVertices()) {
in_end = inrow[tid + 1];
} else {
in_end = context->getNumEdges();
}
int indegree = in_end - in_start;
int outdegree = out_end - out_start;
vertex[tid] = new ChiVertex<int, int>(tid, indegree, outdegree);
vertex[tid]->setValue(INT_MAX);
for (int i = in_start; i < in_end; i++) {
vertex[tid]->setInEdge(i - in_start, incol[i], INT_MAX);
}
// for (int i = out_start; i < out_end; i++) {
// vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i], 0.0f);
//}
}
}
__global__ void initOutEdge(ChiVertex<int, int>** vertex,
GraphChiContext* context, int* row, int* col) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
int out_start = row[tid];
int out_end;
if (tid + 1 < context->getNumVertices()) {
out_end = row[tid + 1];
} else {
out_end = context->getNumEdges();
}
// int in_start = inrow[tid];
// int in_end;
// if (tid + 1 < context->getNumVertices()) {
// in_end = inrow[tid + 1];
//} else {
// in_end = context->getNumEdges();
//}
// int indegree = in_end - in_start;
// int outdegree = out_end - out_start;
// vertex[tid] = new ChiVertex<float, float>(tid, indegree, outdegree);
// for (int i = in_start; i < in_end; i++) {
// vertex[tid]->setInEdge(i - in_start, incol[i], 0.0f);
//}
for (int i = out_start; i < out_end; i++) {
vertex[tid]->setOutEdge(vertex, tid, i - out_start, col[i],
INT_MAX);
}
}
}
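// Vertex-centric BFS relaxation: iteration 0 seeds vertex 0 with distance 0
// and writes 1 on its out-edges; later iterations take the minimum over
// in-edge values and, when a vertex's value improves, push value + 1 to its
// out-edges.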
__global__ void BFS(ChiVertex<int, int>** vertex, GraphChiContext* context,
int iteration) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
if (iteration == 0) {
if (tid == 0) {
vertex[tid]->setValue(0);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int>* outEdge;
outEdge = vertex[tid]->getOutEdge(i);
outEdge->setValue(1);
}
}
} else {
int curmin;
curmin = vertex[tid]->getValue();
int numInEdge;
numInEdge = vertex[tid]->numInEdges();
for (int i = 0; i < numInEdge; i++) {
ChiEdge<int>* inEdge;
inEdge = vertex[tid]->getInEdge(i);
curmin = min(curmin, inEdge->getValue());
}
int vertValue;
vertValue = vertex[tid]->getValue();
if (curmin < vertValue) {
vertex[tid]->setValue(curmin);
int numOutEdge;
numOutEdge = vertex[tid]->numOutEdges();
for (int i = 0; i < numOutEdge; i++) {
ChiEdge<int>* outEdge;
outEdge = vertex[tid]->getOutEdge(i);
int edgeValue;
edgeValue = outEdge->getValue();
if (edgeValue > curmin + 1) {
outEdge->setValue(curmin + 1);
}
}
}
}
}
}
__global__ void copyBack(ChiVertex<int, int>** vertex, GraphChiContext* context,
int* index) {
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < context->getNumVertices()) {
index[tid] = vertex[tid]->getValue();
}
}
|